diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..43849cc5 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,57 @@ +dist: trusty + +sudo: false + +git: + depth: false + +language: c + +cache: ccache + +compiler: + - gcc + +addons: + apt: + packages: + - binutils-dev + - liblzma-dev + - syslinux + - genisoimage + coverity_scan: + project: + name: "ipxe/ipxe" + version: $TRAVIS_COMMIT + build_command_prepend: "make -C src bin/deps" + build_command: "make -C src bin/blib.a" + branch_pattern: coverity_scan + +env: + global: + - MAKEFLAGS="-j 4" + +script: + - make -C src bin/blib.a + - make -C src bin/ipxe.pxe + - make -C src bin/ipxe.usb + - make -C src bin/ipxe.iso + - make -C src bin/8086100e.mrom + - make -C src bin-x86_64-pcbios/blib.a + - make -C src bin-x86_64-pcbios/ipxe.pxe + - make -C src bin-x86_64-pcbios/ipxe.usb + - make -C src bin-x86_64-pcbios/ipxe.iso + - make -C src bin-x86_64-pcbios/8086100e.mrom + - make -C src bin-x86_64-efi/blib.a + - make -C src bin-x86_64-efi/ipxe.efi + - make -C src bin-x86_64-efi/intel.efidrv + - make -C src bin-x86_64-efi/intel.efirom + - make -C src bin-i386-efi/blib.a + - make -C src bin-i386-efi/ipxe.efi + - make -C src bin-i386-efi/intel.efidrv + - make -C src bin-i386-efi/intel.efirom + - make -C src bin-x86_64-linux/blib.a + - make -C src bin-x86_64-linux/tap.linux + - make -C src bin-x86_64-linux/af_packet.linux + - make -C src bin-x86_64-linux/tests.linux + - ./src/bin-x86_64-linux/tests.linux diff --git a/COPYING.GPLv2 b/COPYING.GPLv2 new file mode 100644 index 00000000..d159169d --- /dev/null +++ b/COPYING.GPLv2 @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. 
Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/COPYING.UBDL b/COPYING.UBDL new file mode 100644 index 00000000..780ddcd7 --- /dev/null +++ b/COPYING.UBDL @@ -0,0 +1,59 @@ +UNMODIFIED BINARY DISTRIBUTION LICENCE + + +PREAMBLE + +The GNU General Public License provides a legal guarantee that +software covered by it remains free (in the sense of freedom, not +price). It achieves this guarantee by imposing obligations on anyone +who chooses to distribute the software. + +Some of these obligations may be seen as unnecessarily burdensome. In +particular, when the source code for the software is already publicly +and freely available, there is minimal value in imposing upon each +distributor the obligation to provide the complete source code (or an +equivalent written offer to provide the complete source code). + +This Licence allows for the distribution of unmodified binaries built +from publicly available source code, without imposing the obligations +of the GNU General Public License upon anyone who chooses to +distribute only the unmodified binaries built from that source code. 
+ +The extra permissions granted by this Licence apply only to unmodified +binaries built from source code which has already been made available +to the public in accordance with the terms of the GNU General Public +Licence. Nothing in this Licence allows for the creation of +closed-source modified versions of the Program. Any modified versions +of the Program are subject to the usual terms and conditions of the +GNU General Public License. + + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +This Licence applies to any Program or other work which contains a +notice placed by the copyright holder saying it may be distributed +under the terms of this Unmodified Binary Distribution Licence. All +terms used in the text of this Licence are to be interpreted as they +are used in version 2 of the GNU General Public License as published +by the Free Software Foundation. + +If you have made this Program available to the public in both source +code and executable form in accordance with the terms of the GNU +General Public License as published by the Free Software Foundation; +either version 2 of the License, or (at your option) any later +version, then you are hereby granted an additional permission to use, +copy, and distribute the unmodified executable form of this Program +(the "Unmodified Binary") without restriction, including the right to +permit persons to whom the Unmodified Binary is furnished to do +likewise, subject to the following conditions: + +- when started running, the Program must display an announcement which + includes the details of your existing publication of the Program + made in accordance with the terms of the GNU General Public License. + For example, the Program could display the URL of the publicly + available source code from which the Unmodified Binary was built. 
+ +- when exercising your right to grant permissions under this Licence, + you do not need to refer directly to the text of this Licence, but + you may not grant permissions beyond those granted to you by this + Licence. diff --git a/contrib/coverity/model.c b/contrib/coverity/model.c new file mode 100644 index 00000000..43bac58d --- /dev/null +++ b/contrib/coverity/model.c @@ -0,0 +1,29 @@ +/* + * Coverity modelling file + * + */ + +typedef long off_t; +typedef void * userptr_t; +typedef long long time_t; +struct tm; +typedef unsigned short wchar_t; +typedef void mbstate_t; +struct digest_algorithm; + +/* Inhibit use of built-in models for functions where Coverity's + * assumptions about the modelled function are incorrect for iPXE. + */ +char * strerror ( int errno ) { +} +void copy_from_user ( void *dest, userptr_t src, off_t src_off, size_t len ) { +} +time_t mktime ( struct tm *tm ) { +} +int getchar ( void ) { +} +size_t wcrtomb ( char *buf, wchar_t wc, mbstate_t *ps ) { +} +void hmac_init ( struct digest_algorithm *digest, void *digest_ctx, + void *key, size_t *key_len ) { +} diff --git a/src/Makefile.efi b/src/Makefile.efi new file mode 100644 index 00000000..10f3fe74 --- /dev/null +++ b/src/Makefile.efi @@ -0,0 +1,54 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Enable stack protection if available +# +SPG_TEST = $(CC) -fstack-protector-strong -mstack-protector-guard=global \ + -x c -c /dev/null -o /dev/null >/dev/null 2>&1 +SPG_FLAGS := $(shell $(SPG_TEST) && $(ECHO) '-fstack-protector-strong ' \ + '-mstack-protector-guard=global') +CFLAGS += $(SPG_FLAGS) + +# The EFI linker script +# +LDSCRIPT = scripts/efi.lds + +# Retain relocation information for elf2efi +# +LDFLAGS += -q -S + +# Media types. 
+# +NON_AUTO_MEDIA += efi +NON_AUTO_MEDIA += efidrv +NON_AUTO_MEDIA += drv.efi +NON_AUTO_MEDIA += efirom + +# Include SNP driver in the all-drivers build +# +DRIVERS_net += snp + +# Rules for building EFI files +# +$(BIN)/%.efi : $(BIN)/%.efi.tmp $(ELF2EFI) + $(QM)$(ECHO) " [FINISH] $@" + $(Q)$(ELF2EFI) --subsystem=10 $< $@ + +$(BIN)/%.efidrv : $(BIN)/%.efidrv.tmp $(ELF2EFI) + $(QM)$(ECHO) " [FINISH] $@" + $(Q)$(ELF2EFI) --subsystem=11 $< $@ + +$(BIN)/%.drv.efi : $(BIN)/%.efidrv + $(QM)$(ECHO) " [FINISH] $@" + $(Q)$(CP) $< $@ + +$(BIN)/%.efirom : $(BIN)/%.efidrv $(EFIROM) + $(QM)$(ECHO) " [FINISH] $@" + $(Q)$(EFIROM) -v $(TGT_PCI_VENDOR) -d $(TGT_PCI_DEVICE) $< $@ + +$(BIN)/efidrv.cab : $(BIN)/alldrv.efis # $(ALL_drv.efi) is not yet defined + $(QM)$(ECHO) " [CAB] $@" + $(Q)$(LCAB) -n -q $(ALL_drv.efi) $@ + +$(BIN)/%.usb : $(BIN)/%.efi + $(QM)$(ECHO) " [GENEFIDSK] $@" + $(Q)bash util/genefidsk -o $@ -b $(EFI_BOOT_FILE) $< diff --git a/src/arch/arm/Makefile b/src/arch/arm/Makefile new file mode 100644 index 00000000..3cee5f3a --- /dev/null +++ b/src/arch/arm/Makefile @@ -0,0 +1,12 @@ +# Assembler section type character +# +ASM_TCHAR := % +ASM_TCHAR_OPS := %% + +# Include common ARM headers +# +INCDIRS += arch/arm/include + +# ARM-specific directories containing source files +# +SRCDIRS += arch/arm/interface/efi diff --git a/src/arch/arm/Makefile.efi b/src/arch/arm/Makefile.efi new file mode 100644 index 00000000..f04be425 --- /dev/null +++ b/src/arch/arm/Makefile.efi @@ -0,0 +1,6 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Include generic EFI Makefile +# +MAKEDEPS += Makefile.efi +include Makefile.efi diff --git a/src/arch/arm/core/arm_io.c b/src/arch/arm/core/arm_io.c new file mode 100644 index 00000000..1ef571fc --- /dev/null +++ b/src/arch/arm/core/arm_io.c @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2016 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** @file + * + * iPXE I/O API for ARM + * + */ + +/** An ARM I/O qword */ +union arm32_io_qword { + uint64_t qword; + uint32_t dword[2]; +}; + +/** + * Read 64-bit qword from memory-mapped device + * + * @v io_addr I/O address + * @ret data Value read + * + * This is not atomic for ARM32. + */ +static uint64_t arm32_readq ( volatile uint64_t *io_addr ) { + volatile union arm32_io_qword *ptr = + container_of ( io_addr, union arm32_io_qword, qword ); + union arm32_io_qword tmp; + + tmp.dword[0] = readl ( &ptr->dword[0] ); + tmp.dword[1] = readl ( &ptr->dword[1] ); + return tmp.qword; +} + +/** + * Write 64-bit qword to memory-mapped device + * + * @v data Value to write + * @v io_addr I/O address + * + * This is not atomic for ARM32. 
+ */ +static void arm32_writeq ( uint64_t data, volatile uint64_t *io_addr ) { + volatile union arm32_io_qword *ptr = + container_of ( io_addr, union arm32_io_qword, qword ); + union arm32_io_qword tmp; + + tmp.qword = data; + writel ( tmp.dword[0], &ptr->dword[0] ); + writel ( tmp.dword[1], &ptr->dword[1] ); +} + +PROVIDE_IOAPI_INLINE ( arm, phys_to_bus ); +PROVIDE_IOAPI_INLINE ( arm, bus_to_phys ); +PROVIDE_IOAPI_INLINE ( arm, readb ); +PROVIDE_IOAPI_INLINE ( arm, readw ); +PROVIDE_IOAPI_INLINE ( arm, readl ); +PROVIDE_IOAPI_INLINE ( arm, writeb ); +PROVIDE_IOAPI_INLINE ( arm, writew ); +PROVIDE_IOAPI_INLINE ( arm, writel ); +PROVIDE_IOAPI_INLINE ( arm, iodelay ); +PROVIDE_IOAPI_INLINE ( arm, mb ); +#ifdef __aarch64__ +PROVIDE_IOAPI_INLINE ( arm, readq ); +PROVIDE_IOAPI_INLINE ( arm, writeq ); +#else +PROVIDE_IOAPI ( arm, readq, arm32_readq ); +PROVIDE_IOAPI ( arm, writeq, arm32_writeq ); +#endif diff --git a/src/arch/arm/include/bits/acpi.h b/src/arch/arm/include/bits/acpi.h new file mode 100644 index 00000000..f9f2f00e --- /dev/null +++ b/src/arch/arm/include/bits/acpi.h @@ -0,0 +1,12 @@ +#ifndef _BITS_ACPI_H +#define _BITS_ACPI_H + +/** @file + * + * ARM-specific ACPI API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_ACPI_H */ diff --git a/src/arch/arm/include/bits/endian.h b/src/arch/arm/include/bits/endian.h new file mode 100644 index 00000000..4506711a --- /dev/null +++ b/src/arch/arm/include/bits/endian.h @@ -0,0 +1,13 @@ +#ifndef _BITS_ENDIAN_H +#define _BITS_ENDIAN_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* ARM may be either little-endian or big-endian */ +#ifdef __ARM_BIG_ENDIAN +#define __BYTE_ORDER __BIG_ENDIAN +#else +#define __BYTE_ORDER __LITTLE_ENDIAN +#endif + +#endif /* _BITS_ENDIAN_H */ diff --git a/src/arch/arm/include/bits/entropy.h b/src/arch/arm/include/bits/entropy.h new file mode 100644 index 00000000..75fdc90e --- /dev/null +++ b/src/arch/arm/include/bits/entropy.h @@ -0,0 +1,12 @@ 
+#ifndef _BITS_ENTROPY_H +#define _BITS_ENTROPY_H + +/** @file + * + * ARM-specific entropy API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_ENTROPY_H */ diff --git a/src/arch/arm/include/bits/errfile.h b/src/arch/arm/include/bits/errfile.h new file mode 100644 index 00000000..65f7f719 --- /dev/null +++ b/src/arch/arm/include/bits/errfile.h @@ -0,0 +1,19 @@ +#ifndef _BITS_ERRFILE_H +#define _BITS_ERRFILE_H + +/** @file + * + * ARM-specific error file identifiers + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @addtogroup errfile Error file identifiers + * @{ + */ + +/** @} */ + +#endif /* _BITS_ERRFILE_H */ diff --git a/src/arch/arm/include/bits/hyperv.h b/src/arch/arm/include/bits/hyperv.h new file mode 100644 index 00000000..f0e0c879 --- /dev/null +++ b/src/arch/arm/include/bits/hyperv.h @@ -0,0 +1,12 @@ +#ifndef _BITS_HYPERV_H +#define _BITS_HYPERV_H + +/** @file + * + * Hyper-V interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_HYPERV_H */ diff --git a/src/arch/arm/include/bits/io.h b/src/arch/arm/include/bits/io.h new file mode 100644 index 00000000..90f6455e --- /dev/null +++ b/src/arch/arm/include/bits/io.h @@ -0,0 +1,14 @@ +#ifndef _BITS_IO_H +#define _BITS_IO_H + +/** @file + * + * ARM-specific I/O API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_IO_H */ diff --git a/src/arch/arm/include/bits/iomap.h b/src/arch/arm/include/bits/iomap.h new file mode 100644 index 00000000..ae953c45 --- /dev/null +++ b/src/arch/arm/include/bits/iomap.h @@ -0,0 +1,12 @@ +#ifndef _BITS_IOMAP_H +#define _BITS_IOMAP_H + +/** @file + * + * ARM-specific I/O mapping API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_IOMAP_H */ diff --git a/src/arch/arm/include/bits/nap.h b/src/arch/arm/include/bits/nap.h new file mode 100644 index 00000000..e30a7146 --- /dev/null +++ b/src/arch/arm/include/bits/nap.h @@ 
-0,0 +1,14 @@ +#ifndef _BITS_NAP_H +#define _BITS_NAP_H + +/** @file + * + * ARM-specific CPU sleeping API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_NAP_H */ diff --git a/src/arch/arm/include/bits/pci_io.h b/src/arch/arm/include/bits/pci_io.h new file mode 100644 index 00000000..fba0eb97 --- /dev/null +++ b/src/arch/arm/include/bits/pci_io.h @@ -0,0 +1,14 @@ +#ifndef _BITS_PCI_IO_H +#define _BITS_PCI_IO_H + +/** @file + * + * ARM PCI I/O API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_PCI_IO_H */ diff --git a/src/arch/arm/include/bits/reboot.h b/src/arch/arm/include/bits/reboot.h new file mode 100644 index 00000000..88c50250 --- /dev/null +++ b/src/arch/arm/include/bits/reboot.h @@ -0,0 +1,12 @@ +#ifndef _BITS_REBOOT_H +#define _BITS_REBOOT_H + +/** @file + * + * ARM-specific reboot API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_REBOOT_H */ diff --git a/src/arch/arm/include/bits/sanboot.h b/src/arch/arm/include/bits/sanboot.h new file mode 100644 index 00000000..abd4c79a --- /dev/null +++ b/src/arch/arm/include/bits/sanboot.h @@ -0,0 +1,12 @@ +#ifndef _BITS_SANBOOT_H +#define _BITS_SANBOOT_H + +/** @file + * + * ARM-specific sanboot API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_SANBOOT_H */ diff --git a/src/arch/arm/include/bits/smbios.h b/src/arch/arm/include/bits/smbios.h new file mode 100644 index 00000000..d9421811 --- /dev/null +++ b/src/arch/arm/include/bits/smbios.h @@ -0,0 +1,12 @@ +#ifndef _BITS_SMBIOS_H +#define _BITS_SMBIOS_H + +/** @file + * + * ARM-specific SMBIOS API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_SMBIOS_H */ diff --git a/src/arch/arm/include/bits/time.h b/src/arch/arm/include/bits/time.h new file mode 100644 index 00000000..724d8b93 --- /dev/null +++ b/src/arch/arm/include/bits/time.h @@ -0,0 +1,12 
@@ +#ifndef _BITS_TIME_H +#define _BITS_TIME_H + +/** @file + * + * ARM-specific time API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_TIME_H */ diff --git a/src/arch/arm/include/bits/uaccess.h b/src/arch/arm/include/bits/uaccess.h new file mode 100644 index 00000000..87f11509 --- /dev/null +++ b/src/arch/arm/include/bits/uaccess.h @@ -0,0 +1,12 @@ +#ifndef _BITS_UACCESS_H +#define _BITS_UACCESS_H + +/** @file + * + * ARM-specific user access API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_UACCESS_H */ diff --git a/src/arch/arm/include/bits/uart.h b/src/arch/arm/include/bits/uart.h new file mode 100644 index 00000000..6f85975f --- /dev/null +++ b/src/arch/arm/include/bits/uart.h @@ -0,0 +1,12 @@ +#ifndef _BITS_UART_H +#define _BITS_UART_H + +/** @file + * + * 16550-compatible UART + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_UART_H */ diff --git a/src/arch/arm/include/bits/umalloc.h b/src/arch/arm/include/bits/umalloc.h new file mode 100644 index 00000000..27970d7b --- /dev/null +++ b/src/arch/arm/include/bits/umalloc.h @@ -0,0 +1,12 @@ +#ifndef _BITS_UMALLOC_H +#define _BITS_UMALLOC_H + +/** @file + * + * ARM-specific user memory allocation API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#endif /* _BITS_UMALLOC_H */ diff --git a/src/arch/arm/include/bits/xen.h b/src/arch/arm/include/bits/xen.h new file mode 100644 index 00000000..34f64790 --- /dev/null +++ b/src/arch/arm/include/bits/xen.h @@ -0,0 +1,158 @@ +#ifndef _BITS_XEN_H +#define _BITS_XEN_H + +/** @file + * + * Xen interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* Hypercall registers */ +#ifdef __aarch64__ +#define XEN_HC "x16" +#define XEN_REG1 "x0" +#define XEN_REG2 "x1" +#define XEN_REG3 "x2" +#define XEN_REG4 "x3" +#define XEN_REG5 "x4" +#else +#define XEN_HC "r12" +#define XEN_REG1 "r0" +#define XEN_REG2 "r1" +#define XEN_REG3 "r2" +#define 
XEN_REG4 "r3" +#define XEN_REG5 "r4" +#endif + +/** + * Issue hypercall with one argument + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_1 ( struct xen_hypervisor *xen __unused, unsigned int hypercall, + unsigned long arg1 ) { + register unsigned long hc asm ( XEN_HC ) = hypercall; + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + + __asm__ __volatile__ ( "hvc %1" + : "+r" ( reg1 ) + : "i" ( XEN_HYPERCALL_TAG ), "r" ( hc ) + : "memory", "cc" ); + return reg1; +} + +/** + * Issue hypercall with two arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_2 ( struct xen_hypervisor *xen __unused, unsigned int hypercall, + unsigned long arg1, unsigned long arg2 ) { + register unsigned long hc asm ( XEN_HC ) = hypercall; + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + + __asm__ __volatile__ ( "hvc %2" + : "+r" ( reg1 ), "+r" ( reg2 ) + : "i" ( XEN_HYPERCALL_TAG ), "r" ( hc ) + : "memory", "cc" ); + return reg1; +} + +/** + * Issue hypercall with three arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @v arg3 Third argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_3 ( struct xen_hypervisor *xen __unused, unsigned int hypercall, + unsigned long arg1, unsigned long arg2, unsigned long arg3 ) { + register unsigned long hc asm ( XEN_HC ) = hypercall; + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + register unsigned long reg3 asm ( XEN_REG3 ) = arg3; + + 
__asm__ __volatile__ ( "hvc %3" + : "+r" ( reg1 ), "+r" ( reg2 ), "+r" ( reg3 ) + : "i" ( XEN_HYPERCALL_TAG ), "r" ( hc ) + : "memory", "cc" ); + return reg1; +} + +/** + * Issue hypercall with four arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @v arg3 Third argument + * @v arg4 Fourth argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_4 ( struct xen_hypervisor *xen __unused, unsigned int hypercall, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4 ) { + register unsigned long hc asm ( XEN_HC ) = hypercall; + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + register unsigned long reg3 asm ( XEN_REG3 ) = arg3; + register unsigned long reg4 asm ( XEN_REG4 ) = arg4; + + __asm__ __volatile__ ( "hvc %4" + : "+r" ( reg1 ), "+r" ( reg2 ), "+r" ( reg3 ), + "+r" ( reg4 ) + : "i" ( XEN_HYPERCALL_TAG ), "r" ( hc ) + : "memory", "cc" ); + return reg1; +} + +/** + * Issue hypercall with five arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @v arg3 Third argument + * @v arg4 Fourth argument + * @v arg5 Fifth argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_5 ( struct xen_hypervisor *xen __unused, unsigned int hypercall, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5 ) { + register unsigned long hc asm ( XEN_HC ) = hypercall; + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + register unsigned long reg3 asm ( XEN_REG3 ) = arg3; + register unsigned long reg4 asm ( XEN_REG4 ) = arg4; + register unsigned long reg5 asm ( XEN_REG5 ) = arg5; + + __asm__ __volatile__ ( 
"hvc %5" + : "+r" ( reg1 ), "+r" ( reg2 ), "+r" ( reg3 ), + "+r" ( reg4 ), "+r" ( reg5 ) + : "i" ( XEN_HYPERCALL_TAG ), "r" ( hc ) + : "memory", "cc" ); + return reg1; +} + +#endif /* _BITS_XEN_H */ diff --git a/src/arch/arm/include/ipxe/arm_io.h b/src/arch/arm/include/ipxe/arm_io.h new file mode 100644 index 00000000..105f22bf --- /dev/null +++ b/src/arch/arm/include/ipxe/arm_io.h @@ -0,0 +1,146 @@ +#ifndef _IPXE_ARM_IO_H +#define _IPXE_ARM_IO_H + +/** @file + * + * iPXE I/O API for ARM + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef IOAPI_ARM +#define IOAPI_PREFIX_arm +#else +#define IOAPI_PREFIX_arm __arm_ +#endif + +/* + * Memory space mappings + * + */ + +/** Page shift */ +#define PAGE_SHIFT 12 + +/* + * Physical<->Bus address mappings + * + */ + +static inline __always_inline unsigned long +IOAPI_INLINE ( arm, phys_to_bus ) ( unsigned long phys_addr ) { + return phys_addr; +} + +static inline __always_inline unsigned long +IOAPI_INLINE ( arm, bus_to_phys ) ( unsigned long bus_addr ) { + return bus_addr; +} + +/* + * MMIO reads and writes up to native word size + * + */ + +#define ARM_READX( _suffix, _type, _insn_suffix, _reg_prefix ) \ +static inline __always_inline _type \ +IOAPI_INLINE ( arm, read ## _suffix ) ( volatile _type *io_addr ) { \ + _type data; \ + __asm__ __volatile__ ( "ldr" _insn_suffix " %" _reg_prefix "0, %1" \ + : "=r" ( data ) : "Qo" ( *io_addr ) ); \ + return data; \ +} +#ifdef __aarch64__ +ARM_READX ( b, uint8_t, "b", "w" ); +ARM_READX ( w, uint16_t, "h", "w" ); +ARM_READX ( l, uint32_t, "", "w" ); +ARM_READX ( q, uint64_t, "", "" ); +#else +ARM_READX ( b, uint8_t, "b", "" ); +ARM_READX ( w, uint16_t, "h", "" ); +ARM_READX ( l, uint32_t, "", "" ); +#endif + +#define ARM_WRITEX( _suffix, _type, _insn_suffix, _reg_prefix ) \ +static inline __always_inline void \ +IOAPI_INLINE ( arm, write ## _suffix ) ( _type data, \ + volatile _type *io_addr ) { \ + __asm__ __volatile__ ( "str" _insn_suffix " %" _reg_prefix "0, %1" \ + : : 
"r" ( data ), "Qo" ( *io_addr ) ); \ +} +#ifdef __aarch64__ +ARM_WRITEX ( b, uint8_t, "b", "w" ); +ARM_WRITEX ( w, uint16_t, "h", "w" ); +ARM_WRITEX ( l, uint32_t, "", "w" ); +ARM_WRITEX ( q, uint64_t, "", "" ); +#else +ARM_WRITEX ( b, uint8_t, "b", "" ); +ARM_WRITEX ( w, uint16_t, "h", "" ); +ARM_WRITEX ( l, uint32_t, "", "" ); +#endif + +/* + * Dummy PIO reads and writes up to 32 bits + * + * There is no common standard for I/O-space access for ARM, and + * non-MMIO peripherals are vanishingly rare. Provide dummy + * implementations that will allow code to link and should cause + * drivers to simply fail to detect hardware at runtime. + * + */ + +#define ARM_INX( _suffix, _type ) \ +static inline __always_inline _type \ +IOAPI_INLINE ( arm, in ## _suffix ) ( volatile _type *io_addr __unused) { \ + return ~( (_type) 0 ); \ +} \ +static inline __always_inline void \ +IOAPI_INLINE ( arm, ins ## _suffix ) ( volatile _type *io_addr __unused, \ + _type *data, unsigned int count ) { \ + memset ( data, 0xff, count * sizeof ( *data ) ); \ +} +ARM_INX ( b, uint8_t ); +ARM_INX ( w, uint16_t ); +ARM_INX ( l, uint32_t ); + +#define ARM_OUTX( _suffix, _type ) \ +static inline __always_inline void \ +IOAPI_INLINE ( arm, out ## _suffix ) ( _type data __unused, \ + volatile _type *io_addr __unused ) { \ + /* Do nothing */ \ +} \ +static inline __always_inline void \ +IOAPI_INLINE ( arm, outs ## _suffix ) ( volatile _type *io_addr __unused, \ + const _type *data __unused, \ + unsigned int count __unused ) { \ + /* Do nothing */ \ +} +ARM_OUTX ( b, uint8_t ); +ARM_OUTX ( w, uint16_t ); +ARM_OUTX ( l, uint32_t ); + +/* + * Slow down I/O + * + */ +static inline __always_inline void +IOAPI_INLINE ( arm, iodelay ) ( void ) { + /* Nothing to do */ +} + +/* + * Memory barrier + * + */ +static inline __always_inline void +IOAPI_INLINE ( arm, mb ) ( void ) { + +#ifdef __aarch64__ + __asm__ __volatile__ ( "dmb sy" ); +#else + __asm__ __volatile__ ( "dmb" ); +#endif +} + +#endif /* 
_IPXE_ARM_IO_H */ diff --git a/src/arch/arm/include/ipxe/efi/efiarm_nap.h b/src/arch/arm/include/ipxe/efi/efiarm_nap.h new file mode 100644 index 00000000..dcbdd3e2 --- /dev/null +++ b/src/arch/arm/include/ipxe/efi/efiarm_nap.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_EFIARM_NAP_H +#define _IPXE_EFIARM_NAP_H + +/** @file + * + * EFI CPU sleeping + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef NAP_EFIARM +#define NAP_PREFIX_efiarm +#else +#define NAP_PREFIX_efiarm __efiarm_ +#endif + +#endif /* _IPXE_EFIARM_NAP_H */ diff --git a/src/arch/arm/interface/efi/efiarm_nap.c b/src/arch/arm/interface/efi/efiarm_nap.c new file mode 100644 index 00000000..9ed638e9 --- /dev/null +++ b/src/arch/arm/interface/efi/efiarm_nap.c @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** @file + * + * iPXE CPU sleeping API for EFI + * + */ + +/** + * Sleep until next interrupt + * + */ +static void efiarm_cpu_nap ( void ) { + /* + * I can't find any EFI API that allows us to put the CPU to + * sleep. The CpuSleep() function is defined in CpuLib.h, but + * isn't part of any exposed protocol so we have no way to + * call it. + * + * The EFI shell doesn't seem to bother sleeping the CPU; it + * just sits there idly burning power. + * + */ + __asm__ __volatile__ ( "wfi" ); +} + +PROVIDE_NAP ( efiarm, cpu_nap, efiarm_cpu_nap ); diff --git a/src/arch/arm32/Makefile b/src/arch/arm32/Makefile new file mode 100644 index 00000000..3a7c0923 --- /dev/null +++ b/src/arch/arm32/Makefile @@ -0,0 +1,23 @@ +# ARM32-specific directories containing source files +# +SRCDIRS += arch/arm32/core +SRCDIRS += arch/arm32/libgcc + +# ARM32-specific flags +# +CFLAGS += -mthumb -mcpu=cortex-a15 -mabi=aapcs -mfloat-abi=soft +CFLAGS += -mword-relocations +ASFLAGS += -mthumb -mcpu=cortex-a15 + +# EFI requires -fshort-wchar, and nothing else currently uses wchar_t +# +CFLAGS += -fshort-wchar + +# Include common ARM Makefile +MAKEDEPS += arch/arm/Makefile +include arch/arm/Makefile + +# Include platform-specific Makefile +# +MAKEDEPS += arch/arm32/Makefile.$(PLATFORM) +include arch/arm32/Makefile.$(PLATFORM) diff --git a/src/arch/arm32/Makefile.efi b/src/arch/arm32/Makefile.efi new file mode 100644 index 00000000..e139a055 --- /dev/null +++ b/src/arch/arm32/Makefile.efi @@ -0,0 +1,18 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# UEFI requires that enums are always 32 bits +# +CFLAGS += -fno-short-enums + +# Specify EFI image builder +# +ELF2EFI = $(ELF2EFI32) + +# Specify EFI boot file +# +EFI_BOOT_FILE = bootarm.efi + +# Include generic EFI Makefile +# +MAKEDEPS += arch/arm/Makefile.efi +include arch/arm/Makefile.efi diff --git a/src/arch/arm32/core/arm32_bigint.c 
b/src/arch/arm32/core/arm32_bigint.c new file mode 100644 index 00000000..839bead1 --- /dev/null +++ b/src/arch/arm32/core/arm32_bigint.c @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Big integer support + */ + +/** + * Multiply big integers + * + * @v multiplicand0 Element 0 of big integer to be multiplied + * @v multiplier0 Element 0 of big integer to be multiplied + * @v result0 Element 0 of big integer to hold result + * @v size Number of elements + */ +void bigint_multiply_raw ( const uint32_t *multiplicand0, + const uint32_t *multiplier0, + uint32_t *result0, unsigned int size ) { + const bigint_t ( size ) __attribute__ (( may_alias )) *multiplicand = + ( ( const void * ) multiplicand0 ); + const bigint_t ( size ) __attribute__ (( may_alias )) *multiplier = + ( ( const void * ) multiplier0 ); + bigint_t ( size * 2 ) __attribute__ (( may_alias )) *result = + ( ( void * ) result0 ); + unsigned int i; + unsigned int j; + uint32_t multiplicand_element; + uint32_t multiplier_element; + uint32_t *result_elements; + uint32_t discard_low; + uint32_t discard_high; + uint32_t discard_temp; + + /* Zero result */ + memset ( result, 0, sizeof ( *result ) ); + + /* Multiply integers one element at a time */ + for ( i = 0 ; i < size ; i++ ) { + multiplicand_element = multiplicand->element[i]; + for ( j = 0 ; j < size ; j++ ) { + multiplier_element = multiplier->element[j]; + result_elements = &result->element[ i + j ]; + /* Perform a single multiply, and add the + * resulting double-element into the result, + * carrying as necessary. 
The carry can + * never overflow beyond the end of the + * result, since: + * + * a < 2^{n}, b < 2^{n} => ab < 2^{2n} + */ + __asm__ __volatile__ ( "umull %1, %2, %5, %6\n\t" + "ldr %3, [%0]\n\t" + "adds %3, %1\n\t" + "stmia %0!, {%3}\n\t" + "ldr %3, [%0]\n\t" + "adcs %3, %2\n\t" + "stmia %0!, {%3}\n\t" + "bcc 2f\n\t" + "\n1:\n\t" + "ldr %3, [%0]\n\t" + "adcs %3, #0\n\t" + "stmia %0!, {%3}\n\t" + "bcs 1b\n\t" + "\n2:\n\t" + : "+l" ( result_elements ), + "=l" ( discard_low ), + "=l" ( discard_high ), + "=l" ( discard_temp ), + "+m" ( *result ) + : "l" ( multiplicand_element ), + "l" ( multiplier_element ) + : "cc" ); + } + } +} diff --git a/src/arch/arm32/core/setjmp.S b/src/arch/arm32/core/setjmp.S new file mode 100644 index 00000000..7e7b0fe5 --- /dev/null +++ b/src/arch/arm32/core/setjmp.S @@ -0,0 +1,32 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .arm + +/* + * Save stack context for non-local goto + */ + .globl setjmp + .type setjmp, %function +setjmp: + /* Store registers */ + stmia r0, { r4, r5, r6, r7, r8, r9, r10, fp, sp, lr } + /* Return 0 when returning as setjmp() */ + mov r0, #0 + bx lr + .size setjmp, . - setjmp + +/* + * Non-local jump to a saved stack context + */ + .globl longjmp + .type longjmp, %function +longjmp: + /* Restore registers */ + ldmia r0, { r4, r5, r6, r7, r8, r9, r10, fp, sp, lr } + /* Force result to non-zero */ + movs r0, r1 + moveq r0, #1 + /* Return to setjmp() caller */ + bx lr + .size longjmp, . 
- longjmp diff --git a/src/arch/arm32/include/bits/bigint.h b/src/arch/arm32/include/bits/bigint.h new file mode 100644 index 00000000..103c6c48 --- /dev/null +++ b/src/arch/arm32/include/bits/bigint.h @@ -0,0 +1,316 @@ +#ifndef _BITS_BIGINT_H +#define _BITS_BIGINT_H + +/** @file + * + * Big integer support + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Element of a big integer */ +typedef uint32_t bigint_element_t; + +/** + * Initialise big integer + * + * @v value0 Element 0 of big integer to initialise + * @v size Number of elements + * @v data Raw data + * @v len Length of raw data + */ +static inline __attribute__ (( always_inline )) void +bigint_init_raw ( uint32_t *value0, unsigned int size, + const void *data, size_t len ) { + size_t pad_len = ( sizeof ( bigint_t ( size ) ) - len ); + uint8_t *value_byte = ( ( void * ) value0 ); + const uint8_t *data_byte = ( data + len ); + + /* Copy raw data in reverse order, padding with zeros */ + while ( len-- ) + *(value_byte++) = *(--data_byte); + while ( pad_len-- ) + *(value_byte++) = 0; +} + +/** + * Add big integers + * + * @v addend0 Element 0 of big integer to add + * @v value0 Element 0 of big integer to be added to + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_add_raw ( const uint32_t *addend0, uint32_t *value0, + unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint32_t *discard_addend; + uint32_t *discard_value; + uint32_t *discard_end; + uint32_t discard_addend_i; + uint32_t discard_value_i; + + __asm__ __volatile__ ( "adds %2, %0, %8, lsl #2\n\t" /* clear CF */ + "\n1:\n\t" + "ldmia %0!, {%3}\n\t" + "ldr %4, [%1]\n\t" + "adcs %4, %3\n\t" + "stmia %1!, {%4}\n\t" + "teq %0, %2\n\t" + "bne 1b\n\t" + : "=l" ( discard_addend ), + "=l" ( discard_value ), + "=l" ( discard_end ), + "=l" ( discard_addend_i ), + "=l" ( discard_value_i ), + "+m" ( *value ) + : "0" ( 
addend0 ), "1" ( value0 ), "l" ( size ) + : "cc" ); +} + +/** + * Subtract big integers + * + * @v subtrahend0 Element 0 of big integer to subtract + * @v value0 Element 0 of big integer to be subtracted from + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_subtract_raw ( const uint32_t *subtrahend0, uint32_t *value0, + unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint32_t *discard_subtrahend; + uint32_t *discard_value; + uint32_t *discard_end; + uint32_t discard_subtrahend_i; + uint32_t discard_value_i; + + __asm__ __volatile__ ( "add %2, %0, %8, lsl #2\n\t" + "cmp %2, %0\n\t" /* set CF */ + "\n1:\n\t" + "ldmia %0!, {%3}\n\t" + "ldr %4, [%1]\n\t" + "sbcs %4, %3\n\t" + "stmia %1!, {%4}\n\t" + "teq %0, %2\n\t" + "bne 1b\n\t" + : "=l" ( discard_subtrahend ), + "=l" ( discard_value ), + "=l" ( discard_end ), + "=l" ( discard_subtrahend_i ), + "=l" ( discard_value_i ), + "+m" ( *value ) + : "0" ( subtrahend0 ), "1" ( value0 ), + "l" ( size ) + : "cc" ); +} + +/** + * Rotate big integer left + * + * @v value0 Element 0 of big integer + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_rol_raw ( uint32_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint32_t *discard_value; + uint32_t *discard_end; + uint32_t discard_value_i; + + __asm__ __volatile__ ( "adds %1, %0, %5, lsl #2\n\t" /* clear CF */ + "\n1:\n\t" + "ldr %2, [%0]\n\t" + "adcs %2, %2\n\t" + "stmia %0!, {%2}\n\t" + "teq %0, %1\n\t" + "bne 1b\n\t" + : "=l" ( discard_value ), + "=l" ( discard_end ), + "=l" ( discard_value_i ), + "+m" ( *value ) + : "0" ( value0 ), "1" ( size ) + : "cc" ); +} + +/** + * Rotate big integer right + * + * @v value0 Element 0 of big integer + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_ror_raw ( uint32_t 
*value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint32_t *discard_value; + uint32_t *discard_end; + uint32_t discard_value_i; + + __asm__ __volatile__ ( "adds %1, %0, %5, lsl #2\n\t" /* clear CF */ + "\n1:\n\t" + "ldmdb %1!, {%2}\n\t" + "rrxs %2, %2\n\t" + "str %2, [%1]\n\t" + "teq %0, %1\n\t" + "bne 1b\n\t" + : "=l" ( discard_value ), + "=l" ( discard_end ), + "=l" ( discard_value_i ), + "+m" ( *value ) + : "0" ( value0 ), "1" ( size ) + : "cc" ); +} + +/** + * Test if big integer is equal to zero + * + * @v value0 Element 0 of big integer + * @v size Number of elements + * @ret is_zero Big integer is equal to zero + */ +static inline __attribute__ (( always_inline, pure )) int +bigint_is_zero_raw ( const uint32_t *value0, unsigned int size ) { + const uint32_t *value = value0; + uint32_t value_i; + + do { + value_i = *(value++); + if ( value_i ) + break; + } while ( --size ); + + return ( value_i == 0 ); +} + +/** + * Compare big integers + * + * @v value0 Element 0 of big integer + * @v reference0 Element 0 of reference big integer + * @v size Number of elements + * @ret geq Big integer is greater than or equal to the reference + */ +static inline __attribute__ (( always_inline, pure )) int +bigint_is_geq_raw ( const uint32_t *value0, const uint32_t *reference0, + unsigned int size ) { + const uint32_t *value = ( value0 + size ); + const uint32_t *reference = ( reference0 + size ); + uint32_t value_i; + uint32_t reference_i; + + do { + value_i = *(--value); + reference_i = *(--reference); + if ( value_i != reference_i ) + break; + } while ( --size ); + + return ( value_i >= reference_i ); +} + +/** + * Test if bit is set in big integer + * + * @v value0 Element 0 of big integer + * @v size Number of elements + * @v bit Bit to test + * @ret is_set Bit is set + */ +static inline __attribute__ (( always_inline )) int +bigint_bit_is_set_raw ( const uint32_t *value0, unsigned int size, + unsigned 
int bit ) { + const bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( const void * ) value0 ); + unsigned int index = ( bit / ( 8 * sizeof ( value->element[0] ) ) ); + unsigned int subindex = ( bit % ( 8 * sizeof ( value->element[0] ) ) ); + + return ( value->element[index] & ( 1 << subindex ) ); +} + +/** + * Find highest bit set in big integer + * + * @v value0 Element 0 of big integer + * @v size Number of elements + * @ret max_bit Highest bit set + 1 (or 0 if no bits set) + */ +static inline __attribute__ (( always_inline )) int +bigint_max_set_bit_raw ( const uint32_t *value0, unsigned int size ) { + const uint32_t *value = ( value0 + size ); + int max_bit = ( 8 * sizeof ( bigint_t ( size ) ) ); + uint32_t value_i; + + do { + value_i = *(--value); + max_bit -= ( 32 - fls ( value_i ) ); + if ( value_i ) + break; + } while ( --size ); + + return max_bit; +} + +/** + * Grow big integer + * + * @v source0 Element 0 of source big integer + * @v source_size Number of elements in source big integer + * @v dest0 Element 0 of destination big integer + * @v dest_size Number of elements in destination big integer + */ +static inline __attribute__ (( always_inline )) void +bigint_grow_raw ( const uint32_t *source0, unsigned int source_size, + uint32_t *dest0, unsigned int dest_size ) { + unsigned int pad_size = ( dest_size - source_size ); + + memcpy ( dest0, source0, sizeof ( bigint_t ( source_size ) ) ); + memset ( ( dest0 + source_size ), 0, sizeof ( bigint_t ( pad_size ) ) ); +} + +/** + * Shrink big integer + * + * @v source0 Element 0 of source big integer + * @v source_size Number of elements in source big integer + * @v dest0 Element 0 of destination big integer + * @v dest_size Number of elements in destination big integer + */ +static inline __attribute__ (( always_inline )) void +bigint_shrink_raw ( const uint32_t *source0, unsigned int source_size __unused, + uint32_t *dest0, unsigned int dest_size ) { + + memcpy ( dest0, source0, sizeof ( 
bigint_t ( dest_size ) ) ); +} + +/** + * Finalise big integer + * + * @v value0 Element 0 of big integer to finalise + * @v size Number of elements + * @v out Output buffer + * @v len Length of output buffer + */ +static inline __attribute__ (( always_inline )) void +bigint_done_raw ( const uint32_t *value0, unsigned int size __unused, + void *out, size_t len ) { + const uint8_t *value_byte = ( ( const void * ) value0 ); + uint8_t *out_byte = ( out + len ); + + /* Copy raw data in reverse order */ + while ( len-- ) + *(--out_byte) = *(value_byte++); +} + +extern void bigint_multiply_raw ( const uint32_t *multiplicand0, + const uint32_t *multiplier0, + uint32_t *value0, unsigned int size ); + +#endif /* _BITS_BIGINT_H */ diff --git a/src/arch/arm32/include/bits/bitops.h b/src/arch/arm32/include/bits/bitops.h new file mode 100644 index 00000000..9a5fe14c --- /dev/null +++ b/src/arch/arm32/include/bits/bitops.h @@ -0,0 +1,100 @@ +#ifndef _BITS_BITOPS_H +#define _BITS_BITOPS_H + +/** @file + * + * ARM bit operations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Test and set bit atomically + * + * @v bit Bit to set + * @v bits Bit field + * @ret old Old value of bit (zero or non-zero) + */ +static inline __attribute__ (( always_inline )) int +test_and_set_bit ( unsigned int bit, volatile void *bits ) { + unsigned int index = ( bit / 32 ); + unsigned int offset = ( bit % 32 ); + volatile uint32_t *dword = ( ( ( volatile uint32_t * ) bits ) + index ); + uint32_t mask = ( 1UL << offset ); + uint32_t old; + uint32_t new; + uint32_t flag; + + __asm__ __volatile__ ( "\n1:\n\t" + "ldrex %0, %3\n\t" + "orr %1, %0, %4\n\t" + "strex %2, %1, %3\n\t" + "tst %2, %2\n\t" + "bne 1b\n\t" + : "=&r" ( old ), "=&r" ( new ), "=&l" ( flag ), + "+Q" ( *dword ) + : "r" ( mask ) + : "cc" ); + + return ( old & mask ); +} + +/** + * Test and clear bit atomically + * + * @v bit Bit to set + * @v bits Bit field + * @ret old Old value of bit (zero or non-zero) + */ 
+static inline __attribute__ (( always_inline )) int +test_and_clear_bit ( unsigned int bit, volatile void *bits ) { + unsigned int index = ( bit / 32 ); + unsigned int offset = ( bit % 32 ); + volatile uint32_t *dword = ( ( ( volatile uint32_t * ) bits ) + index ); + uint32_t mask = ( 1UL << offset ); + uint32_t old; + uint32_t new; + uint32_t flag; + + __asm__ __volatile__ ( "\n1:\n\t" + "ldrex %0, %3\n\t" + "bic %1, %0, %4\n\t" + "strex %2, %1, %3\n\t" + "tst %2, %2\n\t" + "bne 1b\n\t" + : "=&r" ( old ), "=&r" ( new ), "=&l" ( flag ), + "+Q" ( *dword ) + : "r" ( mask ) + : "cc" ); + + return ( old & mask ); +} + +/** + * Set bit atomically + * + * @v bit Bit to set + * @v bits Bit field + */ +static inline __attribute__ (( always_inline )) void +set_bit ( unsigned int bit, volatile void *bits ) { + + test_and_set_bit ( bit, bits ); +} + +/** + * Clear bit atomically + * + * @v bit Bit to set + * @v bits Bit field + */ +static inline __attribute__ (( always_inline )) void +clear_bit ( unsigned int bit, volatile void *bits ) { + + test_and_clear_bit ( bit, bits ); +} + +#endif /* _BITS_BITOPS_H */ diff --git a/src/arch/arm32/include/bits/byteswap.h b/src/arch/arm32/include/bits/byteswap.h new file mode 100644 index 00000000..1fc884bd --- /dev/null +++ b/src/arch/arm32/include/bits/byteswap.h @@ -0,0 +1,52 @@ +#ifndef _BITS_BYTESWAP_H +#define _BITS_BYTESWAP_H + +/** @file + * + * Byte-order swapping functions + * + */ + +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +static inline __attribute__ (( always_inline, const )) uint16_t +__bswap_variable_16 ( uint16_t x ) { + __asm__ ( "rev16 %0, %1" : "=l" ( x ) : "l" ( x ) ); + return x; +} + +static inline __attribute__ (( always_inline )) void +__bswap_16s ( uint16_t *x ) { + *x = __bswap_variable_16 ( *x ); +} + +static inline __attribute__ (( always_inline, const )) uint32_t +__bswap_variable_32 ( uint32_t x ) { + __asm__ ( "rev %0, %1" : "=l" ( x ) : "l" ( x ) ); + return x; +} + +static inline 
__attribute__ (( always_inline )) void +__bswap_32s ( uint32_t *x ) { + *x = __bswap_variable_32 ( *x ); +} + +static inline __attribute__ (( always_inline, const )) uint64_t +__bswap_variable_64 ( uint64_t x ) { + uint32_t in_high = ( x >> 32 ); + uint32_t in_low = ( x & 0xffffffffUL ); + uint32_t out_high = __bswap_variable_32 ( in_low ); + uint32_t out_low = __bswap_variable_32 ( in_high ); + + return ( ( ( ( uint64_t ) out_high ) << 32 ) | + ( ( uint64_t ) out_low ) ); +} + +static inline __attribute__ (( always_inline )) void +__bswap_64s ( uint64_t *x ) { + *x = __bswap_variable_64 ( *x ); +} + +#endif diff --git a/src/arch/arm32/include/bits/compiler.h b/src/arch/arm32/include/bits/compiler.h new file mode 100644 index 00000000..e420cf92 --- /dev/null +++ b/src/arch/arm32/include/bits/compiler.h @@ -0,0 +1,16 @@ +#ifndef _BITS_COMPILER_H +#define _BITS_COMPILER_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** Dummy relocation type */ +#define RELOC_TYPE_NONE R_ARM_NONE + +#ifndef ASSEMBLY + +#define __asmcall +#define __libgcc + +#endif /* ASSEMBLY */ + +#endif /*_BITS_COMPILER_H */ diff --git a/src/arch/arm32/include/bits/profile.h b/src/arch/arm32/include/bits/profile.h new file mode 100644 index 00000000..2b15d160 --- /dev/null +++ b/src/arch/arm32/include/bits/profile.h @@ -0,0 +1,30 @@ +#ifndef _BITS_PROFILE_H +#define _BITS_PROFILE_H + +/** @file + * + * Profiling + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Get profiling timestamp + * + * @ret timestamp Timestamp + */ +static inline __attribute__ (( always_inline )) uint64_t +profile_timestamp ( void ) { + uint32_t cycles; + + /* Read cycle counter */ + __asm__ __volatile__ ( "mcr p15, 0, %1, c9, c12, 0\n\t" + "mrc p15, 0, %0, c9, c13, 0\n\t" + : "=r" ( cycles ) : "r" ( 1 ) ); + return cycles; +} + +#endif /* _BITS_PROFILE_H */ diff --git a/src/arch/arm32/include/bits/stdint.h b/src/arch/arm32/include/bits/stdint.h new file mode 100644 index 00000000..fe1f9946 --- 
/dev/null +++ b/src/arch/arm32/include/bits/stdint.h @@ -0,0 +1,23 @@ +#ifndef _BITS_STDINT_H +#define _BITS_STDINT_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +typedef __SIZE_TYPE__ size_t; +typedef signed long ssize_t; +typedef signed long off_t; + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; + +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef signed long long int64_t; + +typedef unsigned long physaddr_t; +typedef unsigned long intptr_t; + +#endif /* _BITS_STDINT_H */ diff --git a/src/arch/arm32/include/bits/string.h b/src/arch/arm32/include/bits/string.h new file mode 100644 index 00000000..5b1c1505 --- /dev/null +++ b/src/arch/arm32/include/bits/string.h @@ -0,0 +1,60 @@ +#ifndef BITS_STRING_H +#define BITS_STRING_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * String functions + * + */ + +/** + * Fill memory region + * + * @v dest Destination region + * @v character Fill character + * @v len Length + * @ret dest Destination region + */ +static inline __attribute__ (( always_inline )) void * +memset ( void *dest, int character, size_t len ) { + + /* Not yet optimised */ + generic_memset ( dest, character, len ); + return dest; +} + +/** + * Copy memory region + * + * @v dest Destination region + * @v src Source region + * @v len Length + * @ret dest Destination region + */ +static inline __attribute__ (( always_inline )) void * +memcpy ( void *dest, const void *src, size_t len ) { + + /* Not yet optimised */ + generic_memcpy ( dest, src, len ); + return dest; +} + +/** + * Copy (possibly overlapping) memory region + * + * @v dest Destination region + * @v src Source region + * @v len Length + * @ret dest Destination region + */ +static inline __attribute__ (( always_inline )) void * +memmove ( void *dest, const void *src, size_t len ) { + + /* Not yet optimised */ + generic_memmove ( dest, src, len ); + 
return dest; +} + +#endif /* BITS_STRING_H */ diff --git a/src/arch/arm32/include/bits/strings.h b/src/arch/arm32/include/bits/strings.h new file mode 100644 index 00000000..adbd5f4b --- /dev/null +++ b/src/arch/arm32/include/bits/strings.h @@ -0,0 +1,85 @@ +#ifndef _BITS_STRINGS_H +#define _BITS_STRINGS_H + +/** @file + * + * String functions + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Find first (i.e. least significant) set bit + * + * @v value Value + * @ret lsb Least significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __ffsl ( long value ) { + unsigned long bits = value; + unsigned long lsb; + unsigned int lz; + + /* Extract least significant set bit */ + lsb = ( bits & -bits ); + + /* Count number of leading zeroes before LSB */ + __asm__ ( "clz %0, %1" : "=r" ( lz ) : "r" ( lsb ) ); + + return ( 32 - lz ); +} + +/** + * Find first (i.e. least significant) set bit + * + * @v value Value + * @ret lsb Least significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __ffsll ( long long value ){ + unsigned long high = ( value >> 32 ); + unsigned long low = ( value >> 0 ); + + if ( low ) { + return ( __ffsl ( low ) ); + } else if ( high ) { + return ( 32 + __ffsl ( high ) ); + } else { + return 0; + } +} + +/** + * Find last (i.e. most significant) set bit + * + * @v value Value + * @ret msb Most significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __flsl ( long value ) { + unsigned int lz; + + /* Count number of leading zeroes */ + __asm__ ( "clz %0, %1" : "=r" ( lz ) : "r" ( value ) ); + + return ( 32 - lz ); +} + +/** + * Find last (i.e. 
most significant) set bit + * + * @v value Value + * @ret msb Most significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __flsll ( long long value ){ + unsigned long high = ( value >> 32 ); + unsigned long low = ( value >> 0 ); + + if ( high ) { + return ( 32 + __flsl ( high ) ); + } else if ( low ) { + return ( __flsl ( low ) ); + } else { + return 0; + } +} + +#endif /* _BITS_STRINGS_H */ diff --git a/src/arch/arm32/include/bits/tcpip.h b/src/arch/arm32/include/bits/tcpip.h new file mode 100644 index 00000000..fc3c5b3f --- /dev/null +++ b/src/arch/arm32/include/bits/tcpip.h @@ -0,0 +1,19 @@ +#ifndef _BITS_TCPIP_H +#define _BITS_TCPIP_H + +/** @file + * + * Transport-network layer interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +static inline __attribute__ (( always_inline )) uint16_t +tcpip_continue_chksum ( uint16_t partial, const void *data, size_t len ) { + + /* Not yet optimised */ + return generic_tcpip_continue_chksum ( partial, data, len ); +} + +#endif /* _BITS_TCPIP_H */ diff --git a/src/arch/arm32/include/efi/ipxe/dhcp_arch.h b/src/arch/arm32/include/efi/ipxe/dhcp_arch.h new file mode 100644 index 00000000..29a23594 --- /dev/null +++ b/src/arch/arm32/include/efi/ipxe/dhcp_arch.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +#ifndef _DHCP_ARCH_H +#define _DHCP_ARCH_H + +/** @file + * + * Architecture-specific DHCP options + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#define DHCP_ARCH_CLIENT_ARCHITECTURE DHCP_CLIENT_ARCHITECTURE_ARM32 + +#define DHCP_ARCH_CLIENT_NDI 1 /* UNDI */ , 3, 10 /* v3.10 */ + +#endif diff --git a/src/arch/arm32/include/gdbmach.h b/src/arch/arm32/include/gdbmach.h new file mode 100644 index 00000000..cd152eed --- /dev/null +++ b/src/arch/arm32/include/gdbmach.h @@ -0,0 +1,45 @@ +#ifndef GDBMACH_H +#define GDBMACH_H + +/** @file + * + * GDB architecture specifics + * + * This file declares functions for manipulating the machine state and + * debugging context. 
+ * + */ + +#include + +typedef unsigned long gdbreg_t; + +/* Register snapshot */ +enum { + /* Not yet implemented */ + GDBMACH_NREGS, +}; + +#define GDBMACH_SIZEOF_REGS ( GDBMACH_NREGS * sizeof ( gdbreg_t ) ) + +static inline void gdbmach_set_pc ( gdbreg_t *regs, gdbreg_t pc ) { + /* Not yet implemented */ + ( void ) regs; + ( void ) pc; +} + +static inline void gdbmach_set_single_step ( gdbreg_t *regs, int step ) { + /* Not yet implemented */ + ( void ) regs; + ( void ) step; +} + +static inline void gdbmach_breakpoint ( void ) { + /* Not yet implemented */ +} + +extern int gdbmach_set_breakpoint ( int type, unsigned long addr, size_t len, + int enable ); +extern void gdbmach_init ( void ); + +#endif /* GDBMACH_H */ diff --git a/src/arch/arm32/include/limits.h b/src/arch/arm32/include/limits.h new file mode 100644 index 00000000..bb48b75a --- /dev/null +++ b/src/arch/arm32/include/limits.h @@ -0,0 +1,61 @@ +#ifndef LIMITS_H +#define LIMITS_H 1 + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* Number of bits in a `char' */ +#define CHAR_BIT 8 + +/* Minimum and maximum values a `signed char' can hold */ +#define SCHAR_MIN (-128) +#define SCHAR_MAX 127 + +/* Maximum value an `unsigned char' can hold. (Minimum is 0.) */ +#define UCHAR_MAX 255 + +/* Minimum and maximum values a `char' can hold */ +#define CHAR_MIN SCHAR_MIN +#define CHAR_MAX SCHAR_MAX + +/* Minimum and maximum values a `signed short int' can hold */ +#define SHRT_MIN (-32768) +#define SHRT_MAX 32767 + +/* Maximum value an `unsigned short' can hold. (Minimum is 0.) */ +#define USHRT_MAX 65535 + + +/* Minimum and maximum values a `signed int' can hold */ +#define INT_MIN (-INT_MAX - 1) +#define INT_MAX 2147483647 + +/* Maximum value an `unsigned int' can hold. (Minimum is 0.) */ +#define UINT_MAX 4294967295U + + +/* Minimum and maximum values a `signed int' can hold */ +#define INT_MAX 2147483647 +#define INT_MIN (-INT_MAX - 1) + + +/* Maximum value an `unsigned int' can hold. (Minimum is 0.) 
 */ +#define UINT_MAX 4294967295U + + +/* Minimum and maximum values a `signed long' can hold */ +#define LONG_MAX 2147483647 +#define LONG_MIN (-LONG_MAX - 1L) + +/* Maximum value an `unsigned long' can hold. (Minimum is 0.) */ +#define ULONG_MAX 4294967295UL + +/* Minimum and maximum values a `signed long long' can hold */ +#define LLONG_MAX 9223372036854775807LL +#define LLONG_MIN (-LLONG_MAX - 1LL) + + +/* Maximum value an `unsigned long long' can hold. (Minimum is 0.) */ +#define ULLONG_MAX 18446744073709551615ULL + + +#endif /* LIMITS_H */ diff --git a/src/arch/arm32/include/setjmp.h b/src/arch/arm32/include/setjmp.h new file mode 100644 index 00000000..4828b47a --- /dev/null +++ b/src/arch/arm32/include/setjmp.h @@ -0,0 +1,38 @@ +#ifndef _SETJMP_H +#define _SETJMP_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** A jump buffer */ +typedef struct { + /** Saved r4 */ + uint32_t r4; + /** Saved r5 */ + uint32_t r5; + /** Saved r6 */ + uint32_t r6; + /** Saved r7 */ + uint32_t r7; + /** Saved r8 */ + uint32_t r8; + /** Saved r9 */ + uint32_t r9; + /** Saved r10 */ + uint32_t r10; + /** Saved frame pointer (r11) */ + uint32_t fp; + /** Saved stack pointer (r13) */ + uint32_t sp; + /** Saved link register (r14) */ + uint32_t lr; +} jmp_buf[1]; + +extern int __asmcall __attribute__ (( returns_twice )) +setjmp ( jmp_buf env ); + +extern void __asmcall __attribute__ (( noreturn )) +longjmp ( jmp_buf env, int val ); + +#endif /* _SETJMP_H */ diff --git a/src/arch/arm32/libgcc/lldivmod.S b/src/arch/arm32/libgcc/lldivmod.S new file mode 100644 index 00000000..910be4b7 --- /dev/null +++ b/src/arch/arm32/libgcc/lldivmod.S @@ -0,0 +1,50 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .thumb + +/** + * Unsigned long long division + * + * @v r1:r0 Dividend + * @v r3:r2 Divisor + * @ret r1:r0 Quotient + * @ret r3:r2 Remainder + */ + .section ".text.__aeabi_uldivmod", "ax", %progbits + .globl __aeabi_uldivmod + .type __aeabi_uldivmod, %function 
+__aeabi_uldivmod: + /* Allocate stack space for remainder and pointer to remainder */ + push {r0, r1, r2, r3, r4, lr} + /* Call __udivmoddi4() */ + add r4, sp, #8 + str r4, [sp] + bl __udivmoddi4 + /* Retrieve remainder and return */ + add sp, sp, #8 + pop {r2, r3, r4, pc} + .size __aeabi_uldivmod, . - __aeabi_uldivmod + +/** + * Signed long long division + * + * @v r1:r0 Dividend + * @v r3:r2 Divisor + * @ret r1:r0 Quotient + * @ret r3:r2 Remainder + */ + .section ".text.__aeabi_ldivmod", "ax", %progbits + .globl __aeabi_ldivmod + .type __aeabi_ldivmod, %function +__aeabi_ldivmod: + /* Allocate stack space for remainder and pointer to remainder */ + push {r0, r1, r2, r3, r4, lr} + /* Call __divmoddi4() */ + add r4, sp, #8 + str r4, [sp] + bl __divmoddi4 + /* Retrieve remainder and return */ + add sp, sp, #8 + pop {r2, r3, r4, pc} + .size __aeabi_ldivmod, . - __aeabi_ldivmod diff --git a/src/arch/arm32/libgcc/llshift.S b/src/arch/arm32/libgcc/llshift.S new file mode 100644 index 00000000..cc16e261 --- /dev/null +++ b/src/arch/arm32/libgcc/llshift.S @@ -0,0 +1,88 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .arm + +/** + * Logical shift left + * + * @v r1:r0 Value to shift + * @v r2 Shift amount + * @ret r1:r0 Shifted value + */ + .section ".text.__aeabi_llsl", "ax", %progbits + .globl __aeabi_llsl + .type __aeabi_llsl, %function +__aeabi_llsl: + /* r3 = ( shift - 32 ) */ + subs r3, r2, #32 + /* If shift >= 32, then + * high = ( low << ( shift - 32 ) ) + */ + movpl r1, r0, lsl r3 + /* If shift < 32, then + * high = ( ( high << shift ) | ( low >> ( 32 - shift ) ) ) + */ + movmi r1, r1, lsl r2 + rsbmi r3, r2, #32 + orrmi r1, r1, r0, lsr r3 + /* low = ( low << shift ) */ + mov r0, r0, lsl r2 + bx lr + .size __aeabi_llsl, . 
- __aeabi_llsl + +/** + * Logical shift right + * + * @v r1:r0 Value to shift + * @v r2 Shift amount + * @ret r1:r0 Shifted value + */ + .section ".text.__aeabi_llsr", "ax", %progbits + .globl __aeabi_llsr + .type __aeabi_llsr, %function +__aeabi_llsr: + /* r3 = ( shift - 32 ) */ + subs r3, r2, #32 + /* If shift >= 32, then + * low = ( high >> ( shift - 32 ) ) + */ + movpl r0, r1, lsr r3 + /* If shift < 32, then + * low = ( ( low >> shift ) | ( high << ( 32 - shift ) ) ) + */ + movmi r0, r0, lsr r2 + rsbmi r3, r2, #32 + orrmi r0, r0, r1, lsl r3 + /* high = ( high >> shift ) */ + mov r1, r1, lsr r2 + bx lr + .size __aeabi_llsr, . - __aeabi_llsr + +/** + * Arithmetic shift right + * + * @v r1:r0 Value to shift + * @v r2 Shift amount + * @ret r1:r0 Shifted value + */ + .section ".text.__aeabi_lasr", "ax", %progbits + .globl __aeabi_lasr + .type __aeabi_lasr, %function +__aeabi_lasr: + /* r3 = ( shift - 32 ) */ + subs r3, r2, #32 + /* If shift >= 32, then + * low = ( high >> ( shift - 32 ) ) + */ + movpl r0, r1, asr r3 + /* If shift < 32, then + * low = ( ( low >> shift ) | ( high << ( 32 - shift ) ) ) + */ + movmi r0, r0, lsr r2 + rsbmi r3, r2, #32 + orrmi r0, r0, r1, lsl r3 + /* high = ( high >> shift ) */ + mov r1, r1, asr r2 + bx lr + .size __aeabi_lasr, . - __aeabi_lasr diff --git a/src/arch/arm64/Makefile b/src/arch/arm64/Makefile new file mode 100644 index 00000000..9b9dd5ec --- /dev/null +++ b/src/arch/arm64/Makefile @@ -0,0 +1,33 @@ +# ARM64-specific directories containing source files +# +SRCDIRS += arch/arm64/core + +# ARM64-specific flags +# +CFLAGS += -mlittle-endian -mcmodel=small +CFLAGS += -fomit-frame-pointer +ASFLAGS += -mabi=lp64 -EL + +# We want to specify the LP64 model. There is an explicit -mabi=lp64 +# on GCC 4.9 and later, and no guarantee as to which is the default +# model. In earlier versions of GCC, there is no -mabi option and the +# default appears to be LP64 anyway. 
+# +ifeq ($(CCTYPE),gcc) +LP64_TEST = $(CC) -mabi=lp64 -x c -c /dev/null -o /dev/null >/dev/null 2>&1 +LP64_FLAGS := $(shell $(LP64_TEST) && $(ECHO) '-mabi=lp64') +WORKAROUND_CFLAGS += $(LP64_FLAGS) +endif + +# EFI requires -fshort-wchar, and nothing else currently uses wchar_t +# +CFLAGS += -fshort-wchar + +# Include common ARM Makefile +MAKEDEPS += arch/arm/Makefile +include arch/arm/Makefile + +# Include platform-specific Makefile +# +MAKEDEPS += arch/arm64/Makefile.$(PLATFORM) +include arch/arm64/Makefile.$(PLATFORM) diff --git a/src/arch/arm64/Makefile.efi b/src/arch/arm64/Makefile.efi new file mode 100644 index 00000000..eb04c0e2 --- /dev/null +++ b/src/arch/arm64/Makefile.efi @@ -0,0 +1,18 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Avoid untranslatable relocations +# +CFLAGS += -fno-pic + +# Specify EFI image builder +# +ELF2EFI = $(ELF2EFI64) + +# Specify EFI boot file +# +EFI_BOOT_FILE = bootaa64.efi + +# Include generic EFI Makefile +# +MAKEDEPS += arch/arm/Makefile.efi +include arch/arm/Makefile.efi diff --git a/src/arch/arm64/core/arm64_bigint.c b/src/arch/arm64/core/arm64_bigint.c new file mode 100644 index 00000000..bc4ee9a0 --- /dev/null +++ b/src/arch/arm64/core/arm64_bigint.c @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Big integer support + */ + +/** + * Multiply big integers + * + * @v multiplicand0 Element 0 of big integer to be multiplied + * @v multiplier0 Element 0 of big integer to be multiplied + * @v result0 Element 0 of big integer to hold result + * @v size Number of elements + */ +void bigint_multiply_raw ( const uint64_t *multiplicand0, + const uint64_t *multiplier0, + uint64_t *result0, unsigned int size ) { + const bigint_t ( size ) __attribute__ (( may_alias )) *multiplicand = + ( ( const void * ) multiplicand0 ); + const bigint_t ( size ) __attribute__ (( may_alias )) *multiplier = + ( ( const void * ) multiplier0 ); + bigint_t ( size * 2 ) __attribute__ (( may_alias )) *result = + ( ( void * ) result0 ); + unsigned int i; + unsigned int j; + uint64_t multiplicand_element; + uint64_t multiplier_element; + uint64_t *result_elements; + uint64_t discard_low; + uint64_t discard_high; + uint64_t discard_temp_low; + uint64_t discard_temp_high; + + /* Zero result */ + memset ( result, 0, sizeof ( *result ) ); + + /* Multiply integers one element at a time */ + for ( i = 0 ; i < size ; i++ ) { + multiplicand_element = multiplicand->element[i]; + for ( j = 0 ; j < size ; j++ ) { + multiplier_element = multiplier->element[j]; + result_elements = &result->element[ i + j ]; + /* Perform a single multiply, and add the + * resulting double-element into the result, + * carrying as necessary. 
The carry can + * never overflow beyond the end of the + * result, since: + * + * a < 2^{n}, b < 2^{n} => ab < 2^{2n} + */ + __asm__ __volatile__ ( "mul %1, %6, %7\n\t" + "umulh %2, %6, %7\n\t" + "ldp %3, %4, [%0]\n\t" + "adds %3, %3, %1\n\t" + "adcs %4, %4, %2\n\t" + "stp %3, %4, [%0], #16\n\t" + "bcc 2f\n\t" + "\n1:\n\t" + "ldr %3, [%0]\n\t" + "adcs %3, %3, xzr\n\t" + "str %3, [%0], #8\n\t" + "bcs 1b\n\t" + "\n2:\n\t" + : "+r" ( result_elements ), + "=&r" ( discard_low ), + "=&r" ( discard_high ), + "=r" ( discard_temp_low ), + "=r" ( discard_temp_high ), + "+m" ( *result ) + : "r" ( multiplicand_element ), + "r" ( multiplier_element ) + : "cc" ); + } + } +} diff --git a/src/arch/arm64/core/arm64_string.c b/src/arch/arm64/core/arm64_string.c new file mode 100644 index 00000000..28a2b73b --- /dev/null +++ b/src/arch/arm64/core/arm64_string.c @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +/** @file + * + * Optimised string operations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Copy memory area + * + * @v dest Destination address + * @v src Source address + * @v len Length + * @ret dest Destination address + */ +void arm64_memcpy ( void *dest, const void *src, size_t len ) { + void *discard_dest; + void *discard_end; + const void *discard_src; + size_t discard_offset; + unsigned long discard_data; + unsigned long discard_low; + unsigned long discard_high; + + /* If length is too short for an "ldp"/"stp" instruction pair, + * then just copy individual bytes. + */ + if ( len < 16 ) { + __asm__ __volatile__ ( "cbz %0, 2f\n\t" + "\n1:\n\t" + "sub %0, %0, #1\n\t" + "ldrb %w1, [%3, %0]\n\t" + "strb %w1, [%2, %0]\n\t" + "cbnz %0, 1b\n\t" + "\n2:\n\t" + : "=&r" ( discard_offset ), + "=&r" ( discard_data ) + : "r" ( dest ), "r" ( src ), "0" ( len ) + : "memory" ); + return; + } + + /* Use "ldp"/"stp" to copy 16 bytes at a time: one initial + * potentially unaligned access, multiple destination-aligned + * accesses, one final potentially unaligned access. 
+ */ + __asm__ __volatile__ ( "ldp %3, %4, [%1], #16\n\t" + "stp %3, %4, [%0], #16\n\t" + "and %3, %0, #15\n\t" + "sub %0, %0, %3\n\t" + "sub %1, %1, %3\n\t" + "bic %2, %5, #15\n\t" + "b 2f\n\t" + "\n1:\n\t" + "ldp %3, %4, [%1], #16\n\t" + "stp %3, %4, [%0], #16\n\t" + "\n2:\n\t" + "cmp %0, %2\n\t" + "bne 1b\n\t" + "ldp %3, %4, [%6, #-16]\n\t" + "stp %3, %4, [%5, #-16]\n\t" + : "=&r" ( discard_dest ), + "=&r" ( discard_src ), + "=&r" ( discard_end ), + "=&r" ( discard_low ), + "=&r" ( discard_high ) + : "r" ( dest + len ), "r" ( src + len ), + "0" ( dest ), "1" ( src ) + : "memory", "cc" ); +} + +/** + * Zero memory region + * + * @v dest Destination region + * @v len Length + */ +void arm64_bzero ( void *dest, size_t len ) { + size_t discard_offset; + void *discard_dest; + void *discard_end; + + /* If length is too short for an "stp" instruction, then just + * zero individual bytes. + */ + if ( len < 16 ) { + __asm__ __volatile__ ( "cbz %0, 2f\n\t" + "\n1:\n\t" + "sub %0, %0, #1\n\t" + "strb wzr, [%1, %0]\n\t" + "cbnz %0, 1b\n\t" + "\n2:\n\t" + : "=&r" ( discard_offset ) + : "r" ( dest ), "0" ( len ) + : "memory" ); + return; + } + + /* Use "stp" to zero 16 bytes at a time: one initial + * potentially unaligned access, multiple aligned accesses, + * one final potentially unaligned access. + */ + __asm__ __volatile__ ( "stp xzr, xzr, [%0], #16\n\t" + "bic %0, %0, #15\n\t" + "bic %1, %2, #15\n\t" + "b 2f\n\t" + "\n1:\n\t" + "stp xzr, xzr, [%0], #16\n\t" + "\n2:\n\t" + "cmp %0, %1\n\t" + "bne 1b\n\t" + "stp xzr, xzr, [%2, #-16]\n\t" + : "=&r" ( discard_dest ), + "=&r" ( discard_end ) + : "r" ( dest + len ), "0" ( dest ) + : "memory", "cc" ); +} + +/** + * Fill memory region + * + * @v dest Destination region + * @v len Length + * @v character Fill character + * + * The unusual parameter order is to allow for more efficient + * tail-calling to arm64_memset() when zeroing a region. 
+ */ +void arm64_memset ( void *dest, size_t len, int character ) { + size_t discard_offset; + + /* Use optimised zeroing code if applicable */ + if ( character == 0 ) { + arm64_bzero ( dest, len ); + return; + } + + /* Fill one byte at a time. Calling memset() with a non-zero + * value is relatively rare and unlikely to be + * performance-critical. + */ + __asm__ __volatile__ ( "cbz %0, 2f\n\t" + "\n1:\n\t" + "sub %0, %0, #1\n\t" + "strb %w2, [%1, %0]\n\t" + "cbnz %0, 1b\n\t" + "\n2:\n\t" + : "=&r" ( discard_offset ) + : "r" ( dest ), "r" ( character ), "0" ( len ) + : "memory" ); +} + +/** + * Copy (possibly overlapping) memory region forwards + * + * @v dest Destination region + * @v src Source region + * @v len Length + */ +void arm64_memmove_forwards ( void *dest, const void *src, size_t len ) { + void *discard_dest; + const void *discard_src; + unsigned long discard_data; + + /* Assume memmove() is not performance-critical, and perform a + * bytewise copy for simplicity. + */ + __asm__ __volatile__ ( "b 2f\n\t" + "\n1:\n\t" + "ldrb %w2, [%1], #1\n\t" + "strb %w2, [%0], #1\n\t" + "\n2:\n\t" + "cmp %0, %3\n\t" + "bne 1b\n\t" + : "=&r" ( discard_dest ), + "=&r" ( discard_src ), + "=&r" ( discard_data ) + : "r" ( dest + len ), "0" ( dest ), "1" ( src ) + : "memory" ); +} + +/** + * Copy (possibly overlapping) memory region backwards + * + * @v dest Destination region + * @v src Source region + * @v len Length + */ +void arm64_memmove_backwards ( void *dest, const void *src, size_t len ) { + size_t discard_offset; + unsigned long discard_data; + + /* Assume memmove() is not performance-critical, and perform a + * bytewise copy for simplicity. 
+ */ + __asm__ __volatile__ ( "cbz %0, 2f\n\t" + "\n1:\n\t" + "sub %0, %0, #1\n\t" + "ldrb %w1, [%3, %0]\n\t" + "strb %w1, [%2, %0]\n\t" + "cbnz %0, 1b\n\t" + "\n2:\n\t" + : "=&r" ( discard_offset ), + "=&r" ( discard_data ) + : "r" ( dest ), "r" ( src ), "0" ( len ) + : "memory" ); +} + +/** + * Copy (possibly overlapping) memory region + * + * @v dest Destination region + * @v src Source region + * @v len Length + */ +void arm64_memmove ( void *dest, const void *src, size_t len ) { + + if ( dest <= src ) { + arm64_memmove_forwards ( dest, src, len ); + } else { + arm64_memmove_backwards ( dest, src, len ); + } +} diff --git a/src/arch/arm64/core/arm64_tcpip.c b/src/arch/arm64/core/arm64_tcpip.c new file mode 100644 index 00000000..0ef04ea4 --- /dev/null +++ b/src/arch/arm64/core/arm64_tcpip.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * TCP/IP checksum + * + */ + +#include +#include + +/** Alignment used by main checksumming loop */ +#define TCPIP_CHKSUM_ALIGN 16 + +/** Number of steps in each iteration of the unrolled main checksumming loop */ +#define TCPIP_CHKSUM_UNROLL 4 + +/** + * Calculate continued TCP/IP checkum + * + * @v sum Checksum of already-summed data, in network byte order + * @v data Data buffer + * @v len Length of data buffer + * @ret sum Updated checksum, in network byte order + */ +uint16_t tcpip_continue_chksum ( uint16_t sum, const void *data, + size_t len ) { + intptr_t start; + intptr_t end; + intptr_t mid; + unsigned int pre; + unsigned int post; + unsigned int first; + uint64_t discard_low; + uint64_t discard_high; + + /* Avoid potentially undefined shift operation */ + if ( len == 0 ) + return sum; + + /* Find maximally-aligned midpoint. For short blocks of data, + * this may be aligned to fewer than 16 bytes. + */ + start = ( ( intptr_t ) data ); + end = ( start + len ); + mid = ( end & + ~( ( ~( 1UL << 63 ) ) >> ( 64 - flsl ( start ^ end ) ) ) ); + + /* Calculate pre- and post-alignment lengths */ + pre = ( ( mid - start ) & ( TCPIP_CHKSUM_ALIGN - 1 ) ); + post = ( ( end - mid ) & ( TCPIP_CHKSUM_ALIGN - 1 ) ); + + /* Calculate number of steps in first iteration of unrolled loop */ + first = ( ( ( len - pre - post ) / TCPIP_CHKSUM_ALIGN ) & + ( TCPIP_CHKSUM_UNROLL - 1 ) ); + + /* Calculate checksum */ + __asm__ ( /* Invert sum */ + "eor %w0, %w0, #0xffff\n\t" + /* Clear carry flag */ + "cmn xzr, xzr\n\t" + /* Byteswap and sum pre-alignment byte, if applicable */ + "tbz %w4, #0, 1f\n\t" + "ldrb %w2, [%1], #1\n\t" + "rev16 %w0, %w0\n\t" + "rev16 %w2, %w2\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Sum pre-alignment halfword, if applicable */ + "tbz %w4, #1, 1f\n\t" + "ldrh %w2, [%1], #2\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Sum pre-alignment word, if applicable */ + "tbz %w4, #2, 1f\n\t" + 
"ldr %w2, [%1], #4\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Sum pre-alignment doubleword, if applicable */ + "tbz %w4, #3, 1f\n\t" + "ldr %2, [%1], #8\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Jump into unrolled (x4) main loop */ + "adr %2, 2f\n\t" + "sub %2, %2, %5, lsl #3\n\t" + "sub %2, %2, %5, lsl #2\n\t" + "br %2\n\t" + "\n1:\n\t" + "ldp %2, %3, [%1], #16\n\t" + "adcs %0, %0, %2\n\t" + "adcs %0, %0, %3\n\t" + "ldp %2, %3, [%1], #16\n\t" + "adcs %0, %0, %2\n\t" + "adcs %0, %0, %3\n\t" + "ldp %2, %3, [%1], #16\n\t" + "adcs %0, %0, %2\n\t" + "adcs %0, %0, %3\n\t" + "ldp %2, %3, [%1], #16\n\t" + "adcs %0, %0, %2\n\t" + "adcs %0, %0, %3\n\t" + "\n2:\n\t" + "sub %2, %1, %6\n\t" + "cbnz %2, 1b\n\t" + /* Sum post-alignment doubleword, if applicable */ + "tbz %w7, #3, 1f\n\t" + "ldr %2, [%1], #8\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Sum post-alignment word, if applicable */ + "tbz %w7, #2, 1f\n\t" + "ldr %w2, [%1], #4\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Sum post-alignment halfword, if applicable */ + "tbz %w7, #1, 1f\n\t" + "ldrh %w2, [%1], #2\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Sum post-alignment byte, if applicable */ + "tbz %w7, #0, 1f\n\t" + "ldrb %w2, [%1], #1\n\t" + "adcs %0, %0, %2\n\t" + "\n1:\n\t" + /* Fold down to a uint32_t plus carry flag */ + "lsr %2, %0, #32\n\t" + "adcs %w0, %w0, %w2\n\t" + /* Fold down to a uint16_t plus carry in bit 16 */ + "ubfm %2, %0, #0, #15\n\t" + "ubfm %3, %0, #16, #31\n\t" + "adc %w0, %w2, %w3\n\t" + /* Fold down to a uint16_t */ + "tbz %w0, #16, 1f\n\t" + "mov %w2, #0xffff\n\t" + "sub %w0, %w0, %w2\n\t" + "tbz %w0, #16, 1f\n\t" + "sub %w0, %w0, %w2\n\t" + "\n1:\n\t" + /* Byteswap back, if applicable */ + "tbz %w4, #0, 1f\n\t" + "rev16 %w0, %w0\n\t" + "\n1:\n\t" + /* Invert sum */ + "eor %w0, %w0, #0xffff\n\t" + : "+r" ( sum ), "+r" ( data ), "=&r" ( discard_low ), + "=&r" ( discard_high ) + : "r" ( pre ), "r" ( first ), "r" ( end - post ), + "r" ( post ) + : "cc" ); + + return sum; +} 
diff --git a/src/arch/arm64/core/setjmp.S b/src/arch/arm64/core/setjmp.S new file mode 100644 index 00000000..fa47aa0a --- /dev/null +++ b/src/arch/arm64/core/setjmp.S @@ -0,0 +1,56 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + + /* Must match jmp_buf structure layout */ + .struct 0 +env_x19_x20: .quad 0, 0 +env_x21_x22: .quad 0, 0 +env_x23_x24: .quad 0, 0 +env_x25_x26: .quad 0, 0 +env_x27_x28: .quad 0, 0 +env_x29_x30: .quad 0, 0 +env_sp: .quad 0 + .previous + +/* + * Save stack context for non-local goto + */ + .globl setjmp + .type setjmp, %function +setjmp: + /* Store registers */ + stp x19, x20, [x0, #env_x19_x20] + stp x21, x22, [x0, #env_x21_x22] + stp x23, x24, [x0, #env_x23_x24] + stp x25, x26, [x0, #env_x25_x26] + stp x27, x28, [x0, #env_x27_x28] + stp x29, x30, [x0, #env_x29_x30] + mov x16, sp + str x16, [x0, #env_sp] + /* Return 0 when returning as setjmp() */ + mov x0, #0 + ret + .size setjmp, . - setjmp + +/* + * Non-local jump to a saved stack context + */ + .globl longjmp + .type longjmp, %function +longjmp: + /* Restore registers */ + ldp x19, x20, [x0, #env_x19_x20] + ldp x21, x22, [x0, #env_x21_x22] + ldp x23, x24, [x0, #env_x23_x24] + ldp x25, x26, [x0, #env_x25_x26] + ldp x27, x28, [x0, #env_x27_x28] + ldp x29, x30, [x0, #env_x29_x30] + ldr x16, [x0, #env_sp] + mov sp, x16 + /* Force result to non-zero */ + cmp w1, #0 + csinc w0, w1, w1, ne + /* Return to setjmp() caller */ + br x30 + .size longjmp, . 
- longjmp diff --git a/src/arch/arm64/include/bits/bigint.h b/src/arch/arm64/include/bits/bigint.h new file mode 100644 index 00000000..79983b41 --- /dev/null +++ b/src/arch/arm64/include/bits/bigint.h @@ -0,0 +1,317 @@ +#ifndef _BITS_BIGINT_H +#define _BITS_BIGINT_H + +/** @file + * + * Big integer support + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Element of a big integer */ +typedef uint64_t bigint_element_t; + +/** + * Initialise big integer + * + * @v value0 Element 0 of big integer to initialise + * @v size Number of elements + * @v data Raw data + * @v len Length of raw data + */ +static inline __attribute__ (( always_inline )) void +bigint_init_raw ( uint64_t *value0, unsigned int size, + const void *data, size_t len ) { + size_t pad_len = ( sizeof ( bigint_t ( size ) ) - len ); + uint8_t *value_byte = ( ( void * ) value0 ); + const uint8_t *data_byte = ( data + len ); + + /* Copy raw data in reverse order, padding with zeros */ + while ( len-- ) + *(value_byte++) = *(--data_byte); + while ( pad_len-- ) + *(value_byte++) = 0; +} + +/** + * Add big integers + * + * @v addend0 Element 0 of big integer to add + * @v value0 Element 0 of big integer to be added to + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_add_raw ( const uint64_t *addend0, uint64_t *value0, + unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint64_t *discard_addend; + uint64_t *discard_value; + uint64_t discard_addend_i; + uint64_t discard_value_i; + unsigned int discard_size; + + __asm__ __volatile__ ( "cmn xzr, xzr\n\t" /* clear CF */ + "\n1:\n\t" + "ldr %3, [%0], #8\n\t" + "ldr %4, [%1]\n\t" + "adcs %4, %4, %3\n\t" + "str %4, [%1], #8\n\t" + "sub %w2, %w2, #1\n\t" + "cbnz %w2, 1b\n\t" + : "=r" ( discard_addend ), + "=r" ( discard_value ), + "=r" ( discard_size ), + "=r" ( discard_addend_i ), + "=r" ( discard_value_i ), + "+m" ( *value ) 
+ : "0" ( addend0 ), "1" ( value0 ), "2" ( size ) + : "cc" ); +} + +/** + * Subtract big integers + * + * @v subtrahend0 Element 0 of big integer to subtract + * @v value0 Element 0 of big integer to be subtracted from + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_subtract_raw ( const uint64_t *subtrahend0, uint64_t *value0, + unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint64_t *discard_subtrahend; + uint64_t *discard_value; + uint64_t discard_subtrahend_i; + uint64_t discard_value_i; + unsigned int discard_size; + + __asm__ __volatile__ ( "cmp xzr, xzr\n\t" /* set CF */ + "\n1:\n\t" + "ldr %3, [%0], #8\n\t" + "ldr %4, [%1]\n\t" + "sbcs %4, %4, %3\n\t" + "str %4, [%1], #8\n\t" + "sub %w2, %w2, #1\n\t" + "cbnz %w2, 1b\n\t" + : "=r" ( discard_subtrahend ), + "=r" ( discard_value ), + "=r" ( discard_size ), + "=r" ( discard_subtrahend_i ), + "=r" ( discard_value_i ), + "+m" ( *value ) + : "0" ( subtrahend0 ), "1" ( value0 ), + "2" ( size ) + : "cc" ); +} + +/** + * Rotate big integer left + * + * @v value0 Element 0 of big integer + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_rol_raw ( uint64_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint64_t *discard_value; + uint64_t discard_value_i; + unsigned int discard_size; + + __asm__ __volatile__ ( "cmn xzr, xzr\n\t" /* clear CF */ + "\n1:\n\t" + "ldr %2, [%0]\n\t" + "adcs %2, %2, %2\n\t" + "str %2, [%0], #8\n\t" + "sub %w1, %w1, #1\n\t" + "cbnz %w1, 1b\n\t" + : "=r" ( discard_value ), + "=r" ( discard_size ), + "=r" ( discard_value_i ), + "+m" ( *value ) + : "0" ( value0 ), "1" ( size ) + : "cc" ); +} + +/** + * Rotate big integer right + * + * @v value0 Element 0 of big integer + * @v size Number of elements + */ +static inline __attribute__ (( always_inline )) void +bigint_ror_raw 
( uint64_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + uint64_t *discard_value; + uint64_t discard_value_i; + uint64_t discard_value_j; + unsigned int discard_size; + + __asm__ __volatile__ ( "mov %3, #0\n\t" + "\n1:\n\t" + "sub %w1, %w1, #1\n\t" + "ldr %2, [%0, %1, lsl #3]\n\t" + "extr %3, %3, %2, #1\n\t" + "str %3, [%0, %1, lsl #3]\n\t" + "mov %3, %2\n\t" + "cbnz %w1, 1b\n\t" + : "=r" ( discard_value ), + "=r" ( discard_size ), + "=r" ( discard_value_i ), + "=r" ( discard_value_j ), + "+m" ( *value ) + : "0" ( value0 ), "1" ( size ) ); +} + +/** + * Test if big integer is equal to zero + * + * @v value0 Element 0 of big integer + * @v size Number of elements + * @ret is_zero Big integer is equal to zero + */ +static inline __attribute__ (( always_inline, pure )) int +bigint_is_zero_raw ( const uint64_t *value0, unsigned int size ) { + const uint64_t *value = value0; + uint64_t value_i; + + do { + value_i = *(value++); + if ( value_i ) + break; + } while ( --size ); + + return ( value_i == 0 ); +} + +/** + * Compare big integers + * + * @v value0 Element 0 of big integer + * @v reference0 Element 0 of reference big integer + * @v size Number of elements + * @ret geq Big integer is greater than or equal to the reference + */ +static inline __attribute__ (( always_inline, pure )) int +bigint_is_geq_raw ( const uint64_t *value0, const uint64_t *reference0, + unsigned int size ) { + const uint64_t *value = ( value0 + size ); + const uint64_t *reference = ( reference0 + size ); + uint64_t value_i; + uint64_t reference_i; + + do { + value_i = *(--value); + reference_i = *(--reference); + if ( value_i != reference_i ) + break; + } while ( --size ); + + return ( value_i >= reference_i ); +} + +/** + * Test if bit is set in big integer + * + * @v value0 Element 0 of big integer + * @v size Number of elements + * @v bit Bit to test + * @ret is_set Bit is set + */ +static inline __attribute__ (( 
always_inline )) int +bigint_bit_is_set_raw ( const uint64_t *value0, unsigned int size, + unsigned int bit ) { + const bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( const void * ) value0 ); + unsigned int index = ( bit / ( 8 * sizeof ( value->element[0] ) ) ); + unsigned int subindex = ( bit % ( 8 * sizeof ( value->element[0] ) ) ); + + return ( !! ( value->element[index] & ( 1UL << subindex ) ) ); +} + +/** + * Find highest bit set in big integer + * + * @v value0 Element 0 of big integer + * @v size Number of elements + * @ret max_bit Highest bit set + 1 (or 0 if no bits set) + */ +static inline __attribute__ (( always_inline )) int +bigint_max_set_bit_raw ( const uint64_t *value0, unsigned int size ) { + const uint64_t *value = ( value0 + size ); + int max_bit = ( 8 * sizeof ( bigint_t ( size ) ) ); + uint64_t value_i; + + do { + value_i = *(--value); + max_bit -= ( 64 - fls ( value_i ) ); + if ( value_i ) + break; + } while ( --size ); + + return max_bit; +} + +/** + * Grow big integer + * + * @v source0 Element 0 of source big integer + * @v source_size Number of elements in source big integer + * @v dest0 Element 0 of destination big integer + * @v dest_size Number of elements in destination big integer + */ +static inline __attribute__ (( always_inline )) void +bigint_grow_raw ( const uint64_t *source0, unsigned int source_size, + uint64_t *dest0, unsigned int dest_size ) { + unsigned int pad_size = ( dest_size - source_size ); + + memcpy ( dest0, source0, sizeof ( bigint_t ( source_size ) ) ); + memset ( ( dest0 + source_size ), 0, sizeof ( bigint_t ( pad_size ) ) ); +} + +/** + * Shrink big integer + * + * @v source0 Element 0 of source big integer + * @v source_size Number of elements in source big integer + * @v dest0 Element 0 of destination big integer + * @v dest_size Number of elements in destination big integer + */ +static inline __attribute__ (( always_inline )) void +bigint_shrink_raw ( const uint64_t *source0, unsigned int 
source_size __unused, + uint64_t *dest0, unsigned int dest_size ) { + + memcpy ( dest0, source0, sizeof ( bigint_t ( dest_size ) ) ); +} + +/** + * Finalise big integer + * + * @v value0 Element 0 of big integer to finalise + * @v size Number of elements + * @v out Output buffer + * @v len Length of output buffer + */ +static inline __attribute__ (( always_inline )) void +bigint_done_raw ( const uint64_t *value0, unsigned int size __unused, + void *out, size_t len ) { + const uint8_t *value_byte = ( ( const void * ) value0 ); + uint8_t *out_byte = ( out + len ); + + /* Copy raw data in reverse order */ + while ( len-- ) + *(--out_byte) = *(value_byte++); +} + +extern void bigint_multiply_raw ( const uint64_t *multiplicand0, + const uint64_t *multiplier0, + uint64_t *value0, unsigned int size ); + +#endif /* _BITS_BIGINT_H */ diff --git a/src/arch/arm64/include/bits/bitops.h b/src/arch/arm64/include/bits/bitops.h new file mode 100644 index 00000000..4350f622 --- /dev/null +++ b/src/arch/arm64/include/bits/bitops.h @@ -0,0 +1,100 @@ +#ifndef _BITS_BITOPS_H +#define _BITS_BITOPS_H + +/** @file + * + * ARM bit operations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Test and set bit atomically + * + * @v bit Bit to set + * @v bits Bit field + * @ret old Old value of bit (zero or non-zero) + */ +static inline __attribute__ (( always_inline )) int +test_and_set_bit ( unsigned int bit, volatile void *bits ) { + unsigned int index = ( bit / 64 ); + unsigned int offset = ( bit % 64 ); + volatile uint64_t *qword = ( ( ( volatile uint64_t * ) bits ) + index ); + uint64_t mask = ( 1UL << offset ); + uint64_t old; + uint64_t new; + uint32_t flag; + + __asm__ __volatile__ ( "\n1:\n\t" + "ldxr %0, %3\n\t" + "orr %1, %0, %4\n\t" + "stxr %w2, %1, %3\n\t" + "tst %w2, %w2\n\t" + "bne 1b\n\t" + : "=&r" ( old ), "=&r" ( new ), "=&r" ( flag ), + "+Q" ( *qword ) + : "r" ( mask ) + : "cc" ); + + return ( !! 
( old & mask ) ); +} + +/** + * Test and clear bit atomically + * + * @v bit Bit to clear + * @v bits Bit field + * @ret old Old value of bit (zero or non-zero) + */ +static inline __attribute__ (( always_inline )) int +test_and_clear_bit ( unsigned int bit, volatile void *bits ) { + unsigned int index = ( bit / 64 ); + unsigned int offset = ( bit % 64 ); + volatile uint64_t *qword = ( ( ( volatile uint64_t * ) bits ) + index ); + uint64_t mask = ( 1UL << offset ); + uint64_t old; + uint64_t new; + uint32_t flag; + + __asm__ __volatile__ ( "\n1:\n\t" + "ldxr %0, %3\n\t" + "bic %1, %0, %4\n\t" + "stxr %w2, %1, %3\n\t" + "tst %w2, %w2\n\t" + "bne 1b\n\t" + : "=&r" ( old ), "=&r" ( new ), "=&r" ( flag ), + "+Q" ( *qword ) + : "r" ( mask ) + : "cc" ); + + return ( !! ( old & mask ) ); +} + +/** + * Set bit atomically + * + * @v bit Bit to set + * @v bits Bit field + */ +static inline __attribute__ (( always_inline )) void +set_bit ( unsigned int bit, volatile void *bits ) { + + test_and_set_bit ( bit, bits ); +} + +/** + * Clear bit atomically + * + * @v bit Bit to clear + * @v bits Bit field + */ +static inline __attribute__ (( always_inline )) void +clear_bit ( unsigned int bit, volatile void *bits ) { + + test_and_clear_bit ( bit, bits ); +} + +#endif /* _BITS_BITOPS_H */ diff --git a/src/arch/arm64/include/bits/byteswap.h b/src/arch/arm64/include/bits/byteswap.h new file mode 100644 index 00000000..169d6c20 --- /dev/null +++ b/src/arch/arm64/include/bits/byteswap.h @@ -0,0 +1,47 @@ +#ifndef _BITS_BYTESWAP_H +#define _BITS_BYTESWAP_H + +/** @file + * + * Byte-order swapping functions + * + */ + +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +static inline __attribute__ (( always_inline, const )) uint16_t +__bswap_variable_16 ( uint16_t x ) { + __asm__ ( "rev16 %0, %1" : "=r" ( x ) : "r" ( x ) ); + return x; +} + +static inline __attribute__ (( always_inline )) void +__bswap_16s ( uint16_t *x ) { + *x = __bswap_variable_16 ( *x ); +} + +static inline 
__attribute__ (( always_inline, const )) uint32_t +__bswap_variable_32 ( uint32_t x ) { + __asm__ ( "rev32 %0, %1" : "=r" ( x ) : "r" ( x ) ); + return x; +} + +static inline __attribute__ (( always_inline )) void +__bswap_32s ( uint32_t *x ) { + *x = __bswap_variable_32 ( *x ); +} + +static inline __attribute__ (( always_inline, const )) uint64_t +__bswap_variable_64 ( uint64_t x ) { + __asm__ ( "rev %0, %1" : "=r" ( x ) : "r" ( x ) ); + return x; +} + +static inline __attribute__ (( always_inline )) void +__bswap_64s ( uint64_t *x ) { + *x = __bswap_variable_64 ( *x ); +} + +#endif diff --git a/src/arch/arm64/include/bits/compiler.h b/src/arch/arm64/include/bits/compiler.h new file mode 100644 index 00000000..3b129c2f --- /dev/null +++ b/src/arch/arm64/include/bits/compiler.h @@ -0,0 +1,16 @@ +#ifndef _BITS_COMPILER_H +#define _BITS_COMPILER_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** Dummy relocation type */ +#define RELOC_TYPE_NONE R_AARCH64_NULL + +#ifndef ASSEMBLY + +#define __asmcall +#define __libgcc + +#endif /* ASSEMBLY */ + +#endif /*_BITS_COMPILER_H */ diff --git a/src/arch/arm64/include/bits/profile.h b/src/arch/arm64/include/bits/profile.h new file mode 100644 index 00000000..62ffa377 --- /dev/null +++ b/src/arch/arm64/include/bits/profile.h @@ -0,0 +1,28 @@ +#ifndef _BITS_PROFILE_H +#define _BITS_PROFILE_H + +/** @file + * + * Profiling + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Get profiling timestamp + * + * @ret timestamp Timestamp + */ +static inline __attribute__ (( always_inline )) uint64_t +profile_timestamp ( void ) { + uint64_t cycles; + + /* Read cycle counter */ + __asm__ __volatile__ ( "mrs %0, CNTVCT_EL0\n\t" : "=r" ( cycles ) ); + return cycles; +} + +#endif /* _BITS_PROFILE_H */ diff --git a/src/arch/arm64/include/bits/stdint.h b/src/arch/arm64/include/bits/stdint.h new file mode 100644 index 00000000..9eb72e9c --- /dev/null +++ b/src/arch/arm64/include/bits/stdint.h @@ -0,0 +1,21 @@ +#ifndef 
_BITS_STDINT_H +#define _BITS_STDINT_H + +typedef __SIZE_TYPE__ size_t; +typedef signed long ssize_t; +typedef signed long off_t; + +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned long long uint64_t; + +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef signed long long int64_t; + +typedef unsigned long physaddr_t; +typedef unsigned long intptr_t; + +#endif /* _BITS_STDINT_H */ diff --git a/src/arch/arm64/include/bits/string.h b/src/arch/arm64/include/bits/string.h new file mode 100644 index 00000000..c05fbe34 --- /dev/null +++ b/src/arch/arm64/include/bits/string.h @@ -0,0 +1,106 @@ +#ifndef BITS_STRING_H +#define BITS_STRING_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * String functions + * + */ + +extern void arm64_bzero ( void *dest, size_t len ); +extern void arm64_memset ( void *dest, size_t len, int character ); +extern void arm64_memcpy ( void *dest, const void *src, size_t len ); +extern void arm64_memmove_forwards ( void *dest, const void *src, size_t len ); +extern void arm64_memmove_backwards ( void *dest, const void *src, size_t len ); +extern void arm64_memmove ( void *dest, const void *src, size_t len ); + +/** + * Fill memory region + * + * @v dest Destination region + * @v character Fill character + * @v len Length + * @ret dest Destination region + */ +static inline __attribute__ (( always_inline )) void * +memset ( void *dest, int character, size_t len ) { + + /* Allow gcc to generate inline "stX xzr" instructions for + * small, constant lengths. + */ + if ( __builtin_constant_p ( character ) && ( character == 0 ) && + __builtin_constant_p ( len ) && ( len <= 64 ) ) { + __builtin_memset ( dest, 0, len ); + return dest; + } + + /* For zeroing larger or non-constant lengths, use the + * optimised variable-length zeroing code. 
+ */ + if ( __builtin_constant_p ( character ) && ( character == 0 ) ) { + arm64_bzero ( dest, len ); + return dest; + } + + /* Not necessarily zeroing: use basic variable-length code */ + arm64_memset ( dest, len, character ); + return dest; +} + +/** + * Copy memory region + * + * @v dest Destination region + * @v src Source region + * @v len Length + * @ret dest Destination region + */ +static inline __attribute__ (( always_inline )) void * +memcpy ( void *dest, const void *src, size_t len ) { + + /* Allow gcc to generate inline "ldX"/"stX" instructions for + * small, constant lengths. + */ + if ( __builtin_constant_p ( len ) && ( len <= 64 ) ) { + __builtin_memcpy ( dest, src, len ); + return dest; + } + + /* Otherwise, use variable-length code */ + arm64_memcpy ( dest, src, len ); + return dest; +} + +/** + * Copy (possibly overlapping) memory region + * + * @v dest Destination region + * @v src Source region + * @v len Length + * @ret dest Destination region + */ +static inline __attribute__ (( always_inline )) void * +memmove ( void *dest, const void *src, size_t len ) { + ssize_t offset = ( dest - src ); + + /* If required direction of copy is known at build time, then + * use the appropriate forwards/backwards copy directly. + */ + if ( __builtin_constant_p ( offset ) ) { + if ( offset <= 0 ) { + arm64_memmove_forwards ( dest, src, len ); + return dest; + } else { + arm64_memmove_backwards ( dest, src, len ); + return dest; + } + } + + /* Otherwise, use ambidirectional copy */ + arm64_memmove ( dest, src, len ); + return dest; +} + +#endif /* BITS_STRING_H */ diff --git a/src/arch/arm64/include/bits/strings.h b/src/arch/arm64/include/bits/strings.h new file mode 100644 index 00000000..d5340f48 --- /dev/null +++ b/src/arch/arm64/include/bits/strings.h @@ -0,0 +1,69 @@ +#ifndef _BITS_STRINGS_H +#define _BITS_STRINGS_H + +/** @file + * + * String functions + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Find first (i.e. 
least significant) set bit + * + * @v value Value + * @ret lsb Least significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __ffsll ( long long value ){ + unsigned long long bits = value; + unsigned long long lsb; + unsigned int lz; + + /* Extract least significant set bit */ + lsb = ( bits & -bits ); + + /* Count number of leading zeroes before LSB */ + __asm__ ( "clz %0, %1" : "=r" ( lz ) : "r" ( lsb ) ); + + return ( 64 - lz ); +} + +/** + * Find first (i.e. least significant) set bit + * + * @v value Value + * @ret lsb Least significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __ffsl ( long value ) { + + return __ffsll ( value ); +} + +/** + * Find last (i.e. most significant) set bit + * + * @v value Value + * @ret msb Most significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __flsll ( long long value ){ + unsigned int lz; + + /* Count number of leading zeroes */ + __asm__ ( "clz %0, %1" : "=r" ( lz ) : "r" ( value ) ); + + return ( 64 - lz ); +} + +/** + * Find last (i.e. 
most significant) set bit + * + * @v value Value + * @ret msb Most significant bit set in value (LSB=1), or zero + */ +static inline __attribute__ (( always_inline )) int __flsl ( long value ) { + + return __flsll ( value ); +} + +#endif /* _BITS_STRINGS_H */ diff --git a/src/arch/arm64/include/bits/tcpip.h b/src/arch/arm64/include/bits/tcpip.h new file mode 100644 index 00000000..68686534 --- /dev/null +++ b/src/arch/arm64/include/bits/tcpip.h @@ -0,0 +1,15 @@ +#ifndef _BITS_TCPIP_H +#define _BITS_TCPIP_H + +/** @file + * + * Transport-network layer interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern uint16_t tcpip_continue_chksum ( uint16_t sum, const void *data, + size_t len ); + +#endif /* _BITS_TCPIP_H */ diff --git a/src/arch/arm64/include/efi/ipxe/dhcp_arch.h b/src/arch/arm64/include/efi/ipxe/dhcp_arch.h new file mode 100644 index 00000000..bb26aae4 --- /dev/null +++ b/src/arch/arm64/include/efi/ipxe/dhcp_arch.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +#ifndef _DHCP_ARCH_H +#define _DHCP_ARCH_H + +/** @file + * + * Architecture-specific DHCP options + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#define DHCP_ARCH_CLIENT_ARCHITECTURE DHCP_CLIENT_ARCHITECTURE_ARM64 + +#define DHCP_ARCH_CLIENT_NDI 1 /* UNDI */ , 3, 10 /* v3.10 */ + +#endif diff --git a/src/arch/arm64/include/gdbmach.h b/src/arch/arm64/include/gdbmach.h new file mode 100644 index 00000000..cd152eed --- /dev/null +++ b/src/arch/arm64/include/gdbmach.h @@ -0,0 +1,45 @@ +#ifndef GDBMACH_H +#define GDBMACH_H + +/** @file + * + * GDB architecture specifics + * + * This file declares functions for manipulating the machine state and + * debugging context. + * + */ + +#include + +typedef unsigned long gdbreg_t; + +/* Register snapshot */ +enum { + /* Not yet implemented */ + GDBMACH_NREGS, +}; + +#define GDBMACH_SIZEOF_REGS ( GDBMACH_NREGS * sizeof ( gdbreg_t ) ) + +static inline void gdbmach_set_pc ( gdbreg_t *regs, gdbreg_t pc ) { + /* Not yet implemented */ + ( void ) regs; + ( void ) pc; +} + +static inline void gdbmach_set_single_step ( gdbreg_t *regs, int step ) { + /* Not yet implemented */ + ( void ) regs; + ( void ) step; +} + +static inline void gdbmach_breakpoint ( void ) { + /* Not yet implemented */ +} + +extern int gdbmach_set_breakpoint ( int type, unsigned long addr, size_t len, + int enable ); +extern void gdbmach_init ( void ); + +#endif /* GDBMACH_H */ diff --git a/src/arch/arm64/include/limits.h b/src/arch/arm64/include/limits.h new file mode 100644 index 00000000..8cf87b47 --- /dev/null +++ b/src/arch/arm64/include/limits.h @@ -0,0 +1,59 @@ +#ifndef LIMITS_H +#define LIMITS_H 1 + +/* Number of bits in a `char' */ +#define CHAR_BIT 8 + +/* Minimum and maximum values a `signed char' can hold */ +#define SCHAR_MIN (-128) +#define SCHAR_MAX 127 + +/* Maximum value an `unsigned char' can hold. (Minimum is 0.) 
*/ +#define UCHAR_MAX 255 + +/* Minimum and maximum values a `char' can hold */ +#define CHAR_MIN SCHAR_MIN +#define CHAR_MAX SCHAR_MAX + +/* Minimum and maximum values a `signed short int' can hold */ +#define SHRT_MIN (-32768) +#define SHRT_MAX 32767 + +/* Maximum value an `unsigned short' can hold. (Minimum is 0.) */ +#define USHRT_MAX 65535 + + +/* Minimum and maximum values a `signed int' can hold */ +#define INT_MIN (-INT_MAX - 1) +#define INT_MAX 2147483647 + +/* Maximum value an `unsigned int' can hold. (Minimum is 0.) */ +#define UINT_MAX 4294967295U + + +/* Minimum and maximum values a `signed int' can hold */ +#define INT_MAX 2147483647 +#define INT_MIN (-INT_MAX - 1) + + +/* Maximum value an `unsigned int' can hold. (Minimum is 0.) */ +#define UINT_MAX 4294967295U + + +/* Minimum and maximum values a `signed long' can hold */ +#define LONG_MAX 9223372036854775807L +#define LONG_MIN (-LONG_MAX - 1L) + +/* Maximum value an `unsigned long' can hold. (Minimum is 0.) */ +#define ULONG_MAX 18446744073709551615UL + +/* Minimum and maximum values a `signed long long' can hold */ +#define LLONG_MAX 9223372036854775807LL +#define LLONG_MIN (-LLONG_MAX - 1LL) + + +/* Maximum value an `unsigned long long' can hold. (Minimum is 0.) 
*/ +#define ULLONG_MAX 18446744073709551615ULL + + +#endif /* LIMITS_H */ diff --git a/src/arch/arm64/include/setjmp.h b/src/arch/arm64/include/setjmp.h new file mode 100644 index 00000000..85a7a9ca --- /dev/null +++ b/src/arch/arm64/include/setjmp.h @@ -0,0 +1,44 @@ +#ifndef _SETJMP_H +#define _SETJMP_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** A jump buffer */ +typedef struct { + /** Saved x19 */ + uint64_t x19; + /** Saved x20 */ + uint64_t x20; + /** Saved x21 */ + uint64_t x21; + /** Saved x22 */ + uint64_t x22; + /** Saved x23 */ + uint64_t x23; + /** Saved x24 */ + uint64_t x24; + /** Saved x25 */ + uint64_t x25; + /** Saved x26 */ + uint64_t x26; + /** Saved x27 */ + uint64_t x27; + /** Saved x28 */ + uint64_t x28; + /** Saved frame pointer (x29) */ + uint64_t x29; + /** Saved link register (x30) */ + uint64_t x30; + /** Saved stack pointer (x31) */ + uint64_t sp; +} jmp_buf[1]; + +extern int __asmcall __attribute__ (( returns_twice )) +setjmp ( jmp_buf env ); + +extern void __asmcall __attribute__ (( noreturn )) +longjmp ( jmp_buf env, int val ); + +#endif /* _SETJMP_H */ diff --git a/src/arch/i386/include/bits/hyperv.h b/src/arch/i386/include/bits/hyperv.h new file mode 100644 index 00000000..0ba58afb --- /dev/null +++ b/src/arch/i386/include/bits/hyperv.h @@ -0,0 +1,49 @@ +#ifndef _BITS_HYPERV_H +#define _BITS_HYPERV_H + +/** @file + * + * Hyper-V interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** + * Issue hypercall + * + * @v hv Hyper-V hypervisor + * @v code Call code + * @v in Input parameters + * @v out Output parameters + * @ret status Status code + */ +static inline __attribute__ (( always_inline )) int +hv_call ( struct hv_hypervisor *hv, unsigned int code, const void *in, + void *out ) { + void *hypercall = hv->hypercall; + uint32_t in_phys; + uint32_t out_phys; + uint32_t discard_ecx; + uint32_t discard_edx; + uint16_t result; + + in_phys = ( ( __builtin_constant_p ( in ) && 
( in == NULL ) ) + ? 0 : virt_to_phys ( in ) ); + out_phys = ( ( __builtin_constant_p ( out ) && ( out == NULL ) ) + ? 0 : virt_to_phys ( out ) ); + __asm__ __volatile__ ( "call *%9" + : "=a" ( result ), "=c" ( discard_ecx ), + "=d" ( discard_edx ) + : "d" ( 0 ), "a" ( code ), + "b" ( 0 ), "c" ( in_phys ), + "D" ( 0 ), "S" ( out_phys ), + "m" ( hypercall ) ); + return result; +} + +#endif /* _BITS_HYPERV_H */ diff --git a/src/arch/i386/include/ipxe/msr.h b/src/arch/i386/include/ipxe/msr.h new file mode 100644 index 00000000..5705318f --- /dev/null +++ b/src/arch/i386/include/ipxe/msr.h @@ -0,0 +1,38 @@ +#ifndef _IPXE_MSR_H +#define _IPXE_MSR_H + +/** @file + * + * Model-specific registers + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Read model-specific register + * + * @v msr Model-specific register + * @ret value Value + */ +static inline __attribute__ (( always_inline )) uint64_t +rdmsr ( unsigned int msr ) { + uint64_t value; + + __asm__ __volatile__ ( "rdmsr" : "=A" ( value ) : "c" ( msr ) ); + return value; +} + +/** + * Write model-specific register + * + * @v msr Model-specific register + * @v value Value + */ +static inline __attribute__ (( always_inline )) void +wrmsr ( unsigned int msr, uint64_t value ) { + + __asm__ __volatile__ ( "wrmsr" : : "c" ( msr ), "A" ( value ) ); +} + +#endif /* _IPXE_MSR_H */ diff --git a/src/arch/i386/tests/gdbstub_test.S b/src/arch/i386/tests/gdbstub_test.S new file mode 100644 index 00000000..739b0527 --- /dev/null +++ b/src/arch/i386/tests/gdbstub_test.S @@ -0,0 +1,54 @@ + .arch i386 + + .section ".data", "aw", @progbits +watch_me: + .long 0xfeedbeef + + .section ".text", "ax", @progbits + .code32 +gdbstub_test: + /* 1. Read registers test */ + movl $0xea010203, %eax + movl $0xeb040506, %ebx + movl $0xec070809, %ecx + movl $0xed0a0b0c, %edx + movl $0x510d0e0f, %esi + movl $0xd1102030, %edi + int $3 + + /* 2. Write registers test */ + int $3 + + /* 3. 
Read memory test */ + subl $8, %esp + movl $0x11223344, 4(%esp) + movw $0x5566, 2(%esp) + movb $0x77, (%esp) + int $3 + + /* 4. Write memory test */ + int $3 + addl $8, %esp + + /* 5. Step test */ + int $3 + nop + + /* 6. Access watch test */ + movl $0x600d0000, %ecx + movl watch_me, %eax + movl $0xbad00000, %ecx + int $3 + movl $0x600d0001, %ecx + movl %eax, watch_me + movl $0xbad00001, %ecx + int $3 + + /* 7. Write watch test */ + movl $0x600d0002, %ecx + movl %eax, watch_me + movl $0xbad00002, %ecx + int $3 + +1: + jmp 1b diff --git a/src/arch/i386/tests/gdbstub_test.gdb b/src/arch/i386/tests/gdbstub_test.gdb new file mode 100755 index 00000000..bcfa07da --- /dev/null +++ b/src/arch/i386/tests/gdbstub_test.gdb @@ -0,0 +1,116 @@ +#!/usr/bin/gdb -x +# Test suite for GDB remote debugging +# Run: +# make bin/ipxe.hd.tmp +# make +# gdb +# (gdb) target remote :TCPPORT +# OR +# (gdb) target remote udp:IP:UDPPORT +# (gdb) source tests/gdbstub_test.gdb + +define ipxe_load_symbols + file bin/ipxe.hd.tmp +end + +define ipxe_assert + if $arg0 != $arg1 + echo FAIL $arg2\n + else + echo PASS $arg2\n + end +end + +define ipxe_start_tests + jump gdbstub_test +end + +define ipxe_test_regs_read + ipxe_assert $eax 0xea010203 "ipxe_test_regs_read eax" + ipxe_assert $ebx 0xeb040506 "ipxe_test_regs_read ebx" + ipxe_assert $ecx 0xec070809 "ipxe_test_regs_read ecx" + ipxe_assert $edx 0xed0a0b0c "ipxe_test_regs_read edx" + ipxe_assert $esi 0x510d0e0f "ipxe_test_regs_read esi" + ipxe_assert $edi 0xd1102030 "ipxe_test_regs_read edi" +end + +define ipxe_test_regs_write + set $eax = 0xea112233 + set $ebx = 0xeb445566 + set $ecx = 0xec778899 + set $edx = 0xedaabbcc + set $esi = 0x51ddeeff + set $edi = 0xd1010203 + c + ipxe_assert $eax 0xea112233 "ipxe_test_regs_write eax" + ipxe_assert $ebx 0xeb445566 "ipxe_test_regs_write ebx" + ipxe_assert $ecx 0xec778899 "ipxe_test_regs_write ecx" + ipxe_assert $edx 0xedaabbcc "ipxe_test_regs_write edx" + ipxe_assert $esi 0x51ddeeff "ipxe_test_regs_write 
esi" + ipxe_assert $edi 0xd1010203 "ipxe_test_regs_write edi" + + # This assumes segment selectors are always 0x10 or 0x8 (for code). + ipxe_assert $cs 0x08 "ipxe_test_regs_write cs" + ipxe_assert $ds 0x10 "ipxe_test_regs_write ds" +end + +define ipxe_test_mem_read + c + ipxe_assert ({int}($esp+4)) 0x11223344 "ipxe_test_mem_read int" + ipxe_assert ({short}($esp+2)) 0x5566 "ipxe_test_mem_read short" + ipxe_assert ({char}($esp)) 0x77 "ipxe_test_mem_read char" +end + +define ipxe_test_mem_write + set ({int}($esp+4)) = 0xaabbccdd + set ({short}($esp+2)) = 0xeeff + set ({char}($esp)) = 0x99 + c + ipxe_assert ({int}($esp+4)) 0xaabbccdd "ipxe_test_mem_write int" + ipxe_assert ({short}($esp+2)) (short)0xeeff "ipxe_test_mem_write short" + ipxe_assert ({char}($esp)) (char)0x99 "ipxe_test_mem_write char" +end + +define ipxe_test_step + c + si + ipxe_assert ({char}($eip-1)) (char)0x90 "ipxe_test_step" # nop = 0x90 +end + +define ipxe_test_awatch + awatch watch_me + + c + ipxe_assert $ecx 0x600d0000 "ipxe_test_awatch read" + if $ecx == 0x600d0000 + c + end + + c + ipxe_assert $ecx 0x600d0001 "ipxe_test_awatch write" + if $ecx == 0x600d0001 + c + end + + delete +end + +define ipxe_test_watch + watch watch_me + c + ipxe_assert $ecx 0x600d0002 "ipxe_test_watch" + if $ecx == 0x600d0002 + c + end + delete +end + +ipxe_load_symbols +ipxe_start_tests +ipxe_test_regs_read +ipxe_test_regs_write +ipxe_test_mem_read +ipxe_test_mem_write +ipxe_test_step +ipxe_test_awatch +ipxe_test_watch diff --git a/src/arch/x86/Makefile.pcbios b/src/arch/x86/Makefile.pcbios new file mode 100644 index 00000000..c44eefc1 --- /dev/null +++ b/src/arch/x86/Makefile.pcbios @@ -0,0 +1,132 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# BIOS-specific directories containing source files +# +SRCDIRS += arch/x86/drivers/net + +# The i386 linker script +# +LDSCRIPT = arch/x86/scripts/pcbios.lds + +# Stop ld from complaining about our customised linker script +# +LDFLAGS += -N --no-check-sections + +# 
Prefix always starts at address zero +# +LDFLAGS += --section-start=.prefix=0 + +# Media types. +# +MEDIA += rom +MEDIA += mrom +MEDIA += pcirom +MEDIA += isarom +MEDIA += pxe +MEDIA += kpxe +MEDIA += kkpxe +MEDIA += kkkpxe +MEDIA += lkrn +MEDIA += dsk +MEDIA += nbi +MEDIA += hd +MEDIA += raw +MEDIA += exe + +# Padding rules +# +PAD_rom = $(PERL) $(PADIMG) --blksize=512 --byte=0xff +PAD_mrom = $(PAD_rom) +PAD_pcirom = $(PAD_rom) +PAD_isarom = $(PAD_rom) +PAD_dsk = $(PERL) $(PADIMG) --blksize=512 +PAD_hd = $(PERL) $(PADIMG) --blksize=32768 +PAD_exe = $(PERL) $(PADIMG) --blksize=512 + +# Finalisation rules +# +FINALISE_rom = $(PERL) $(FIXROM) +FINALISE_mrom = $(FINALISE_rom) +FINALISE_pcirom = $(FINALISE_rom) +FINALISE_isarom = $(FINALISE_rom) + +# Use $(ROMS) rather than $(DRIVERS) for "allroms", "allmroms", etc. +# +LIST_NAME_rom := ROMS +LIST_NAME_mrom := ROMS +LIST_NAME_pcirom := ROMS +LIST_NAME_isarom := ROMS + +# Locations of isolinux files +# +SYSLINUX_DIR_LIST := \ + /usr/lib/syslinux \ + /usr/lib/syslinux/bios \ + /usr/lib/syslinux/modules/bios \ + /usr/share/syslinux \ + /usr/share/syslinux/bios \ + /usr/share/syslinux/modules/bios \ + /usr/local/share/syslinux \ + /usr/local/share/syslinux/bios \ + /usr/local/share/syslinux/modules/bios \ + /usr/lib/ISOLINUX +ISOLINUX_BIN_LIST := \ + $(ISOLINUX_BIN) \ + $(patsubst %,%/isolinux.bin,$(SYSLINUX_DIR_LIST)) +LDLINUX_C32_LIST := \ + $(LDLINUX_C32) \ + $(patsubst %,%/ldlinux.c32,$(SYSLINUX_DIR_LIST)) +ISOLINUX_BIN = $(firstword $(wildcard $(ISOLINUX_BIN_LIST))) +LDLINUX_C32 = $(firstword $(wildcard $(LDLINUX_C32_LIST))) + +# rule to make a non-emulation ISO boot image +NON_AUTO_MEDIA += iso +%iso: %lkrn util/geniso + $(QM)$(ECHO) " [GENISO] $@" + $(Q)ISOLINUX_BIN=$(ISOLINUX_BIN) LDLINUX_C32=$(LDLINUX_C32) \ + VERSION="$(VERSION)" bash util/geniso -o $@ $< + +# rule to make a floppy emulation ISO boot image +NON_AUTO_MEDIA += liso +%liso: %lkrn util/geniso + $(QM)$(ECHO) " [GENISO] $@" + $(Q)VERSION="$(VERSION)" 
bash util/geniso -l -o $@ $< + +# rule to make a syslinux floppy image (mountable, bootable) +NON_AUTO_MEDIA += sdsk +%sdsk: %lkrn util/gensdsk + $(QM)$(ECHO) " [GENSDSK] $@" + $(Q)bash util/gensdsk $@ $< + +# rule to write disk images to /dev/fd0 +NON_AUTO_MEDIA += fd0 +%fd0 : %dsk + $(QM)$(ECHO) " [DD] $@" + $(Q)dd if=$< bs=512 conv=sync of=/dev/fd0 + $(Q)sync + +# Special target for building Master Boot Record binary +$(BIN)/mbr.tmp : $(BIN)/mbr.o + $(QM)$(ECHO) " [LD] $@" + $(Q)$(LD) $(LDFLAGS) -o $@ -e mbr $< + +# rule to make a USB disk image +$(BIN)/usbdisk.tmp : $(BIN)/usbdisk.o + $(QM)$(ECHO) " [LD] $@" + $(Q)$(LD) $(LDFLAGS) -o $@ -e mbr $< + +NON_AUTO_MEDIA += usb +%usb: $(BIN)/usbdisk.bin %hd + $(QM)$(ECHO) " [FINISH] $@" + $(Q)cat $^ > $@ + +NON_AUTO_MEDIA += vhd +%vhd: %usb + $(QM)$(ECHO) " [FINISH] $@" + $(Q)$(QEMUIMG) convert -f raw -O vpc $< $@ + +# Padded floppy image (e.g. for iLO) +NON_AUTO_MEDIA += pdsk +%pdsk : %dsk + $(Q)cp $< $@ + $(Q)$(PADIMG) --blksize=1474560 $@ diff --git a/src/arch/x86/core/basemem_packet.c b/src/arch/x86/core/basemem_packet.c new file mode 100644 index 00000000..9f5fbf33 --- /dev/null +++ b/src/arch/x86/core/basemem_packet.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Packet buffer in base memory. Used by various components which + * need to pass packets to and from external real-mode code. + * + */ + +#include + +#undef basemem_packet +char __bss16_array ( basemem_packet, [BASEMEM_PACKET_LEN] ); diff --git a/src/arch/x86/core/cachedhcp.c b/src/arch/x86/core/cachedhcp.c new file mode 100644 index 00000000..dffafe3c --- /dev/null +++ b/src/arch/x86/core/cachedhcp.c @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2013 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Cached DHCP packet + * + */ + +/** Cached DHCPACK physical address + * + * This can be set by the prefix. 
+ */ +uint32_t __bss16 ( cached_dhcpack_phys ); +#define cached_dhcpack_phys __use_data16 ( cached_dhcpack_phys ) + +/** Colour for debug messages */ +#define colour &cached_dhcpack_phys + +/** Cached DHCPACK */ +static struct dhcp_packet *cached_dhcpack; + +/** + * Cached DHCPACK initialisation function + * + */ +static void cachedhcp_init ( void ) { + struct dhcp_packet *dhcppkt; + struct dhcp_packet *tmp; + struct dhcphdr *dhcphdr; + size_t max_len; + size_t len; + + /* Do nothing if no cached DHCPACK is present */ + if ( ! cached_dhcpack_phys ) { + DBGC ( colour, "CACHEDHCP found no cached DHCPACK\n" ); + return; + } + + /* No reliable way to determine length before parsing packet; + * start by assuming maximum length permitted by PXE. + */ + max_len = sizeof ( BOOTPLAYER_t ); + + /* Allocate and populate DHCP packet */ + dhcppkt = zalloc ( sizeof ( *dhcppkt ) + max_len ); + if ( ! dhcppkt ) { + DBGC ( colour, "CACHEDHCP could not allocate copy\n" ); + return; + } + dhcphdr = ( ( ( void * ) dhcppkt ) + sizeof ( *dhcppkt ) ); + copy_from_user ( dhcphdr, phys_to_user ( cached_dhcpack_phys ), 0, + max_len ); + dhcppkt_init ( dhcppkt, dhcphdr, max_len ); + + /* Shrink packet to required length. If reallocation fails, + * just continue to use the original packet and waste the + * unused space. 
+ */ + len = dhcppkt_len ( dhcppkt ); + assert ( len <= max_len ); + tmp = realloc ( dhcppkt, ( sizeof ( *dhcppkt ) + len ) ); + if ( tmp ) + dhcppkt = tmp; + + /* Reinitialise packet at new address */ + dhcphdr = ( ( ( void * ) dhcppkt ) + sizeof ( *dhcppkt ) ); + dhcppkt_init ( dhcppkt, dhcphdr, len ); + + /* Store as cached DHCPACK, and mark original copy as consumed */ + DBGC ( colour, "CACHEDHCP found cached DHCPACK at %08x+%zx\n", + cached_dhcpack_phys, len ); + cached_dhcpack = dhcppkt; + cached_dhcpack_phys = 0; +} + +/** + * Cached DHCPACK startup function + * + */ +static void cachedhcp_startup ( void ) { + + /* If cached DHCP packet was not claimed by any network device + * during startup, then free it. + */ + if ( cached_dhcpack ) { + DBGC ( colour, "CACHEDHCP freeing unclaimed cached DHCPACK\n" ); + dhcppkt_put ( cached_dhcpack ); + cached_dhcpack = NULL; + } +} + +/** Cached DHCPACK initialisation function */ +struct init_fn cachedhcp_init_fn __init_fn ( INIT_NORMAL ) = { + .initialise = cachedhcp_init, +}; + +/** Cached DHCPACK startup function */ +struct startup_fn cachedhcp_startup_fn __startup_fn ( STARTUP_LATE ) = { + .name = "cachedhcp", + .startup = cachedhcp_startup, +}; + +/** + * Apply cached DHCPACK to network device, if applicable + * + * @v netdev Network device + * @ret rc Return status code + */ +static int cachedhcp_probe ( struct net_device *netdev ) { + struct ll_protocol *ll_protocol = netdev->ll_protocol; + int rc; + + /* Do nothing unless we have a cached DHCPACK */ + if ( ! cached_dhcpack ) + return 0; + + /* Do nothing unless cached DHCPACK's MAC address matches this + * network device. 
+ */ + if ( memcmp ( netdev->ll_addr, cached_dhcpack->dhcphdr->chaddr, + ll_protocol->ll_addr_len ) != 0 ) { + DBGC ( colour, "CACHEDHCP cached DHCPACK does not match %s\n", + netdev->name ); + return 0; + } + DBGC ( colour, "CACHEDHCP cached DHCPACK is for %s\n", netdev->name ); + + /* Register as DHCP settings for this network device */ + if ( ( rc = register_settings ( &cached_dhcpack->settings, + netdev_settings ( netdev ), + DHCP_SETTINGS_NAME ) ) != 0 ) { + DBGC ( colour, "CACHEDHCP could not register settings: %s\n", + strerror ( rc ) ); + return rc; + } + + /* Claim cached DHCPACK */ + dhcppkt_put ( cached_dhcpack ); + cached_dhcpack = NULL; + + return 0; +} + +/** Cached DHCP packet network device driver */ +struct net_driver cachedhcp_driver __net_driver = { + .name = "cachedhcp", + .probe = cachedhcp_probe, +}; diff --git a/src/arch/x86/core/dumpregs.c b/src/arch/x86/core/dumpregs.c new file mode 100644 index 00000000..a5108ea1 --- /dev/null +++ b/src/arch/x86/core/dumpregs.c @@ -0,0 +1,20 @@ +#include +#include + +void __asmcall _dump_regs ( struct i386_all_regs *ix86 ) { + + __asm__ __volatile__ ( + TEXT16_CODE ( ".globl dump_regs\n\t" + "\ndump_regs:\n\t" + VIRT_CALL ( _dump_regs ) + "ret\n\t" ) : ); + + printf ( "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" + "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" + "CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x\n", + ix86->regs.eax, ix86->regs.ebx, ix86->regs.ecx, + ix86->regs.edx, ix86->regs.esi, ix86->regs.edi, + ix86->regs.ebp, ix86->regs.esp, + ix86->segs.cs, ix86->segs.ss, ix86->segs.ds, + ix86->segs.es, ix86->segs.fs, ix86->segs.gs ); +} diff --git a/src/arch/x86/core/gdbmach.c b/src/arch/x86/core/gdbmach.c new file mode 100644 index 00000000..af6abfed --- /dev/null +++ b/src/arch/x86/core/gdbmach.c @@ -0,0 +1,251 @@ +/* + * Copyright (C) 2008 Stefan Hajnoczi . + * Copyright (C) 2016 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * GDB architecture-specific bits for x86 + * + */ + +/** Number of hardware breakpoints */ +#define NUM_HWBP 4 + +/** Debug register 7: Global breakpoint enable */ +#define DR7_G( bp ) ( 2 << ( 2 * (bp) ) ) + +/** Debug register 7: Global exact breakpoint enable */ +#define DR7_GE ( 1 << 9 ) + +/** Debug register 7: Break on data writes */ +#define DR7_RWLEN_WRITE 0x11110000 + +/** Debug register 7: Break on data access */ +#define DR7_RWLEN_ACCESS 0x33330000 + +/** Debug register 7: One-byte length */ +#define DR7_RWLEN_1 0x00000000 + +/** Debug register 7: Two-byte length */ +#define DR7_RWLEN_2 0x44440000 + +/** Debug register 7: Four-byte length */ +#define DR7_RWLEN_4 0xcccc0000 + +/** Debug register 7: Eight-byte length */ +#define DR7_RWLEN_8 0x88880000 + +/** Debug register 7: Breakpoint R/W and length mask */ +#define DR7_RWLEN_MASK( bp ) ( 0xf0000 << ( 4 * (bp) ) ) + +/** Hardware breakpoint 
addresses (debug registers 0-3) */ +static unsigned long dr[NUM_HWBP]; + +/** Active value of debug register 7 */ +static unsigned long dr7 = DR7_GE; + +/** + * Update debug registers + * + */ +static void gdbmach_update ( void ) { + + /* Set debug registers */ + __asm__ __volatile__ ( "mov %0, %%dr0" : : "r" ( dr[0] ) ); + __asm__ __volatile__ ( "mov %0, %%dr1" : : "r" ( dr[1] ) ); + __asm__ __volatile__ ( "mov %0, %%dr2" : : "r" ( dr[2] ) ); + __asm__ __volatile__ ( "mov %0, %%dr3" : : "r" ( dr[3] ) ); + __asm__ __volatile__ ( "mov %0, %%dr7" : : "r" ( dr7 ) ); +} + +/** + * Find reusable or available hardware breakpoint + * + * @v addr Linear address + * @v rwlen Control bits + * @ret bp Hardware breakpoint, or negative error + */ +static int gdbmach_find ( unsigned long addr, unsigned int rwlen ) { + unsigned int i; + int bp = -ENOENT; + + /* Look for a reusable or available breakpoint */ + for ( i = 0 ; i < NUM_HWBP ; i++ ) { + + /* If breakpoint is not enabled, then it is available */ + if ( ! ( dr7 & DR7_G ( i ) ) ) { + bp = i; + continue; + } + + /* If breakpoint is enabled and has the same address + * and control bits, then reuse it. 
+ */ + if ( ( dr[i] == addr ) && + ( ( ( dr7 ^ rwlen ) & DR7_RWLEN_MASK ( i ) ) == 0 ) ) { + bp = i; + break; + } + } + + return bp; +} + +/** + * Set hardware breakpoint + * + * @v type GDB breakpoint type + * @v addr Virtual address + * @v len Length + * @v enable Enable (not disable) breakpoint + * @ret rc Return status code + */ +int gdbmach_set_breakpoint ( int type, unsigned long addr, size_t len, + int enable ) { + unsigned int rwlen; + unsigned long mask; + int bp; + + /* Parse breakpoint type */ + switch ( type ) { + case GDBMACH_WATCH: + rwlen = DR7_RWLEN_WRITE; + break; + case GDBMACH_AWATCH: + rwlen = DR7_RWLEN_ACCESS; + break; + default: + return -ENOTSUP; + } + + /* Parse breakpoint length */ + switch ( len ) { + case 1: + rwlen |= DR7_RWLEN_1; + break; + case 2: + rwlen |= DR7_RWLEN_2; + break; + case 4: + rwlen |= DR7_RWLEN_4; + break; + case 8: + rwlen |= DR7_RWLEN_8; + break; + default: + return -ENOTSUP; + } + + /* Convert to linear address */ + if ( sizeof ( physaddr_t ) <= sizeof ( uint32_t ) ) + addr = virt_to_phys ( ( void * ) addr ); + + /* Find reusable or available hardware breakpoint */ + bp = gdbmach_find ( addr, rwlen ); + if ( bp < 0 ) + return ( enable ? -ENOBUFS : 0 ); + + /* Configure this breakpoint */ + DBGC ( &dr[0], "GDB bp %d at %p+%zx type %d (%sabled)\n", + bp, ( ( void * ) addr ), len, type, ( enable ? 
"en" : "dis" ) ); + dr[bp] = addr; + mask = DR7_RWLEN_MASK ( bp ); + dr7 = ( ( dr7 & ~mask ) | ( rwlen & mask ) ); + mask = DR7_G ( bp ); + dr7 &= ~mask; + if ( enable ) + dr7 |= mask; + + /* Update debug registers */ + gdbmach_update(); + + return 0; +} + +/** + * Handle exception + * + * @v signo GDB signal number + * @v regs Register dump + */ +__asmcall void gdbmach_handler ( int signo, gdbreg_t *regs ) { + unsigned long dr7_disabled = DR7_GE; + unsigned long dr6_clear = 0; + + /* Temporarily disable breakpoints */ + __asm__ __volatile__ ( "mov %0, %%dr7\n" : : "r" ( dr7_disabled ) ); + + /* Handle exception */ + DBGC ( &dr[0], "GDB signal %d\n", signo ); + DBGC2_HDA ( &dr[0], 0, regs, ( GDBMACH_NREGS * sizeof ( *regs ) ) ); + gdbstub_handler ( signo, regs ); + DBGC ( &dr[0], "GDB signal %d returning\n", signo ); + DBGC2_HDA ( &dr[0], 0, regs, ( GDBMACH_NREGS * sizeof ( *regs ) ) ); + + /* Clear breakpoint status register */ + __asm__ __volatile__ ( "mov %0, %%dr6\n" : : "r" ( dr6_clear ) ); + + /* Re-enable breakpoints */ + __asm__ __volatile__ ( "mov %0, %%dr7\n" : : "r" ( dr7 ) ); +} + +/** + * CPU exception vectors + * + * Note that we cannot intercept anything from INT8 (double fault) + * upwards, since these overlap by default with IRQ0-7. 
+ */ +static void * gdbmach_vectors[] = { + gdbmach_sigfpe, /* Divide by zero */ + gdbmach_sigtrap, /* Debug trap */ + NULL, /* Non-maskable interrupt */ + gdbmach_sigtrap, /* Breakpoint */ + gdbmach_sigstkflt, /* Overflow */ + gdbmach_sigstkflt, /* Bound range exceeded */ + gdbmach_sigill, /* Invalid opcode */ +}; + +/** + * Initialise GDB + */ +void gdbmach_init ( void ) { + unsigned int i; + + /* Hook CPU exception vectors */ + for ( i = 0 ; i < ( sizeof ( gdbmach_vectors ) / + sizeof ( gdbmach_vectors[0] ) ) ; i++ ) { + if ( gdbmach_vectors[i] ) + set_interrupt_vector ( i, gdbmach_vectors[i] ); + } +} diff --git a/src/arch/x86/core/patch_cf.S b/src/arch/x86/core/patch_cf.S new file mode 100644 index 00000000..4365563f --- /dev/null +++ b/src/arch/x86/core/patch_cf.S @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2009 H. Peter Anvin + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .arch i386 + .code16 + +/**************************************************************************** + * Set/clear CF on the stack as appropriate, assumes stack is as it should + * be immediately before IRET + **************************************************************************** + */ + .section ".text16", "ax", @progbits + .globl patch_cf +patch_cf: + pushw %bp + movw %sp, %bp + setc 8(%bp) /* Set/reset CF; clears PF, AF, ZF, SF */ + popw %bp + ret + .size patch_cf, . - patch_cf diff --git a/src/arch/x86/core/pci_autoboot.c b/src/arch/x86/core/pci_autoboot.c new file mode 100644 index 00000000..33759809 --- /dev/null +++ b/src/arch/x86/core/pci_autoboot.c @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2014 Red Hat Inc. + * Alex Williamson + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +uint16_t __bss16 ( autoboot_busdevfn ); +#define autoboot_busdevfn __use_data16 ( autoboot_busdevfn ) + +/** + * Initialise PCI autoboot device + */ +static void pci_autoboot_init ( void ) { + + if ( autoboot_busdevfn ) + set_autoboot_busloc ( BUS_TYPE_PCI, autoboot_busdevfn ); +} + +/** PCI autoboot device initialisation function */ +struct init_fn pci_autoboot_init_fn __init_fn ( INIT_NORMAL ) = { + .initialise = pci_autoboot_init, +}; diff --git a/src/arch/x86/core/pic8259.c b/src/arch/x86/core/pic8259.c new file mode 100644 index 00000000..0a9ea2e0 --- /dev/null +++ b/src/arch/x86/core/pic8259.c @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include + +/** @file + * + * Minimal support for the 8259 Programmable Interrupt Controller + * + */ + +/** + * Send non-specific EOI(s) + * + * @v irq IRQ number + * + * This seems to be inherently unsafe. 
+ */ +static inline void send_nonspecific_eoi ( unsigned int irq ) { + DBG ( "Sending non-specific EOI for IRQ %d\n", irq ); + if ( irq >= IRQ_PIC_CUTOFF ) { + outb ( ICR_EOI_NON_SPECIFIC, PIC2_ICR ); + } + outb ( ICR_EOI_NON_SPECIFIC, PIC1_ICR ); +} + +/** + * Send specific EOI(s) + * + * @v irq IRQ number + */ +static inline void send_specific_eoi ( unsigned int irq ) { + DBG ( "Sending specific EOI for IRQ %d\n", irq ); + if ( irq >= IRQ_PIC_CUTOFF ) { + outb ( ( ICR_EOI_SPECIFIC | ICR_VALUE ( CHAINED_IRQ ) ), + ICR_REG ( CHAINED_IRQ ) ); + } + outb ( ( ICR_EOI_SPECIFIC | ICR_VALUE ( irq ) ), ICR_REG ( irq ) ); +} + +/** + * Send End-Of-Interrupt to the PIC + * + * @v irq IRQ number + */ +void send_eoi ( unsigned int irq ) { + send_specific_eoi ( irq ); +} diff --git a/src/arch/x86/core/pit8254.c b/src/arch/x86/core/pit8254.c new file mode 100644 index 00000000..da209926 --- /dev/null +++ b/src/arch/x86/core/pit8254.c @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * 8254 Programmable Interval Timer + * + */ + +/** + * Delay for a fixed number of timer ticks using the speaker channel + * + * @v ticks Number of timer ticks for which to delay + */ +void pit8254_speaker_delay ( unsigned int ticks ) { + uint8_t spkr; + uint8_t cmd; + uint8_t low; + uint8_t high; + + /* Sanity check */ + assert ( ticks <= 0xffff ); + + /* Disable speaker, set speaker channel gate input high */ + spkr = inb ( PIT8254_SPKR ); + spkr &= ~PIT8254_SPKR_ENABLE; + spkr |= PIT8254_SPKR_GATE; + outb ( spkr, PIT8254_SPKR ); + + /* Program speaker channel to "interrupt" on terminal count */ + cmd = ( PIT8254_CMD_CHANNEL ( PIT8254_CH_SPKR ) | + PIT8254_CMD_ACCESS_LOHI | PIT8254_CMD_OP_TERMINAL | + PIT8254_CMD_BINARY ); + low = ( ( ticks >> 0 ) & 0xff ); + high = ( ( ticks >> 8 ) & 0xff ); + outb ( cmd, PIT8254_CMD ); + outb ( low, PIT8254_DATA ( PIT8254_CH_SPKR ) ); + outb ( high, PIT8254_DATA ( PIT8254_CH_SPKR ) ); + + /* Wait for channel to "interrupt" */ + do { + spkr = inb ( PIT8254_SPKR ); + } while ( ! ( spkr & PIT8254_SPKR_OUT ) ); +} diff --git a/src/arch/x86/core/rdtsc_timer.c b/src/arch/x86/core/rdtsc_timer.c new file mode 100644 index 00000000..bee5f1ca --- /dev/null +++ b/src/arch/x86/core/rdtsc_timer.c @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2008 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * RDTSC timer + * + */ + +#include +#include +#include +#include +#include + +/** Number of microseconds to use for TSC calibration */ +#define TSC_CALIBRATE_US 1024 + +/** TSC increment per microsecond */ +static unsigned long tsc_per_us; + +/** Minimum resolution for scaled TSC timer */ +#define TSC_SCALED_HZ 32 + +/** TSC scale (expressed as a bit shift) + * + * We use this to avoid the need for 64-bit division on 32-bit systems. + */ +static unsigned int tsc_scale; + +/** Number of timer ticks per scaled TSC increment */ +static unsigned long ticks_per_scaled_tsc; + +/** Colour for debug messages */ +#define colour &tsc_per_us + +/** + * Get raw TSC value + * + * @ret tsc Raw TSC value + */ +static inline __always_inline unsigned long rdtsc_raw ( void ) { + unsigned long raw; + + __asm__ __volatile__ ( "rdtsc\n\t" : "=a" ( raw ) : : "edx" ); + return raw; +} + +/** + * Get TSC value, shifted to avoid rollover within a realistic timescale + * + * @ret tsc Scaled TSC value + */ +static inline __always_inline unsigned long rdtsc_scaled ( void ) { + unsigned long scaled; + + __asm__ __volatile__ ( "rdtsc\n\t" + "shrdl %b1, %%edx, %%eax\n\t" + : "=a" ( scaled ) : "c" ( tsc_scale ) : "edx" ); + return scaled; +} + +/** + * Get current system time in ticks + * + * @ret ticks Current time, in ticks + */ +static unsigned long rdtsc_currticks ( void ) { + unsigned long scaled; + + scaled = rdtsc_scaled(); + return ( scaled * ticks_per_scaled_tsc ); +} + +/** + * Delay for a 
fixed number of microseconds + * + * @v usecs Number of microseconds for which to delay + */ +static void rdtsc_udelay ( unsigned long usecs ) { + unsigned long start; + unsigned long elapsed; + unsigned long threshold; + + start = rdtsc_raw(); + threshold = ( usecs * tsc_per_us ); + do { + elapsed = ( rdtsc_raw() - start ); + } while ( elapsed < threshold ); +} + +/** + * Probe RDTSC timer + * + * @ret rc Return status code + */ +static int rdtsc_probe ( void ) { + unsigned long before; + unsigned long after; + unsigned long elapsed; + uint32_t apm; + uint32_t discard_a; + uint32_t discard_b; + uint32_t discard_c; + int rc; + + /* Check that TSC is invariant */ + if ( ( rc = cpuid_supported ( CPUID_APM ) ) != 0 ) { + DBGC ( colour, "RDTSC cannot determine APM features: %s\n", + strerror ( rc ) ); + return rc; + } + cpuid ( CPUID_APM, 0, &discard_a, &discard_b, &discard_c, &apm ); + if ( ! ( apm & CPUID_APM_EDX_TSC_INVARIANT ) ) { + DBGC ( colour, "RDTSC has non-invariant TSC (%#08x)\n", + apm ); + return -ENOTTY; + } + + /* Calibrate udelay() timer via 8254 PIT */ + before = rdtsc_raw(); + pit8254_udelay ( TSC_CALIBRATE_US ); + after = rdtsc_raw(); + elapsed = ( after - before ); + tsc_per_us = ( elapsed / TSC_CALIBRATE_US ); + if ( ! tsc_per_us ) { + DBGC ( colour, "RDTSC has zero TSC per microsecond\n" ); + return -EIO; + } + + /* Calibrate currticks() scaling factor */ + tsc_scale = 31; + ticks_per_scaled_tsc = ( ( 1UL << tsc_scale ) / + ( tsc_per_us * ( 1000000 / TICKS_PER_SEC ) ) ); + while ( ticks_per_scaled_tsc > ( TICKS_PER_SEC / TSC_SCALED_HZ ) ) { + tsc_scale--; + ticks_per_scaled_tsc >>= 1; + } + DBGC ( colour, "RDTSC has %ld tsc per us, %ld ticks per 2^%d tsc\n", + tsc_per_us, ticks_per_scaled_tsc, tsc_scale ); + if ( ! 
ticks_per_scaled_tsc ) { + DBGC ( colour, "RDTSC has zero ticks per TSC\n" ); + return -EIO; + } + + return 0; +} + +/** RDTSC timer */ +struct timer rdtsc_timer __timer ( TIMER_PREFERRED ) = { + .name = "rdtsc", + .probe = rdtsc_probe, + .currticks = rdtsc_currticks, + .udelay = rdtsc_udelay, +}; diff --git a/src/arch/x86/core/relocate.c b/src/arch/x86/core/relocate.c new file mode 100644 index 00000000..765d4656 --- /dev/null +++ b/src/arch/x86/core/relocate.c @@ -0,0 +1,136 @@ +#include +#include + +/* + * Originally by Eric Biederman + * + * Heavily modified by Michael Brown + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* Linker symbols */ +extern char _textdata[]; +extern char _etextdata[]; + +/* within 1MB of 4GB is too close. + * MAX_ADDR is the maximum address we can easily do DMA to. + * + * Not sure where this constraint comes from, but kept it from Eric's + * old code - mcb30 + */ +#define MAX_ADDR (0xfff00000UL) + +/* Preserve alignment to a 4kB page + * + * Required for x86_64, and doesn't hurt for i386. + */ +#define ALIGN 4096 + +/** + * Relocate iPXE + * + * @v ebp Maximum address to use for relocation + * @ret esi Current physical address + * @ret edi New physical address + * @ret ecx Length to copy + * + * This finds a suitable location for iPXE near the top of 32-bit + * address space, and returns the physical address of the new location + * to the prefix in %edi. 
+ */ +__asmcall void relocate ( struct i386_all_regs *ix86 ) { + struct memory_map memmap; + uint32_t start, end, size, padded_size, max; + uint32_t new_start, new_end; + unsigned i; + + /* Get memory map and current location */ + get_memmap ( &memmap ); + start = virt_to_phys ( _textdata ); + end = virt_to_phys ( _etextdata ); + size = ( end - start ); + padded_size = ( size + ALIGN - 1 ); + + DBG ( "Relocate: currently at [%x,%x)\n" + "...need %x bytes for %d-byte alignment\n", + start, end, padded_size, ALIGN ); + + /* Determine maximum usable address */ + max = MAX_ADDR; + if ( ix86->regs.ebp < max ) { + max = ix86->regs.ebp; + DBG ( "Limiting relocation to [0,%x)\n", max ); + } + + /* Walk through the memory map and find the highest address + * below 4GB that iPXE will fit into. + */ + new_end = end; + for ( i = 0 ; i < memmap.count ; i++ ) { + struct memory_region *region = &memmap.regions[i]; + uint32_t r_start, r_end; + + DBG ( "Considering [%llx,%llx)\n", region->start, region->end); + + /* Truncate block to maximum address. This will be + * less than 4GB, which means that we can get away + * with using just 32-bit arithmetic after this stage. + */ + if ( region->start > max ) { + DBG ( "...starts after max=%x\n", max ); + continue; + } + r_start = region->start; + if ( region->end > max ) { + DBG ( "...end truncated to max=%x\n", max ); + r_end = max; + } else { + r_end = region->end; + } + DBG ( "...usable portion is [%x,%x)\n", r_start, r_end ); + + /* If we have rounded down r_end below r_start, skip + * this block. + */ + if ( r_end < r_start ) { + DBG ( "...truncated to negative size\n" ); + continue; + } + + /* Check that there is enough space to fit in iPXE */ + if ( ( r_end - r_start ) < size ) { + DBG ( "...too small (need %x bytes)\n", size ); + continue; + } + + /* If the start address of the iPXE we would + * place in this block is higher than the end address + * of the current highest block, use this block. 
+ * + * Note that this avoids overlaps with the current + * iPXE, as well as choosing the highest of all viable + * blocks. + */ + if ( ( r_end - size ) > new_end ) { + new_end = r_end; + DBG ( "...new best block found.\n" ); + } + } + + /* Calculate new location of iPXE, and align it to the + * required alignment. + */ + new_start = new_end - padded_size; + new_start += ( ( start - new_start ) & ( ALIGN - 1 ) ); + new_end = new_start + size; + + DBG ( "Relocating from [%x,%x) to [%x,%x)\n", + start, end, new_start, new_end ); + + /* Let prefix know what to copy */ + ix86->regs.esi = start; + ix86->regs.edi = new_start; + ix86->regs.ecx = size; +} diff --git a/src/arch/x86/core/runtime.c b/src/arch/x86/core/runtime.c new file mode 100644 index 00000000..f96b23af --- /dev/null +++ b/src/arch/x86/core/runtime.c @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2011 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Command line and initrd passed to iPXE at runtime + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** Command line physical address + * + * This can be set by the prefix. + */ +uint32_t __bss16 ( cmdline_phys ); +#define cmdline_phys __use_data16 ( cmdline_phys ) + +/** initrd physical address + * + * This can be set by the prefix. + */ +uint32_t __bss16 ( initrd_phys ); +#define initrd_phys __use_data16 ( initrd_phys ) + +/** initrd length + * + * This can be set by the prefix. + */ +uint32_t __bss16 ( initrd_len ); +#define initrd_len __use_data16 ( initrd_len ) + +/** Internal copy of the command line */ +static char *cmdline_copy; + +/** Free command line image */ +static void cmdline_image_free ( struct refcnt *refcnt ) { + struct image *image = container_of ( refcnt, struct image, refcnt ); + + DBGC ( image, "RUNTIME freeing command line\n" ); + free ( cmdline_copy ); +} + +/** Embedded script representing the command line */ +static struct image cmdline_image = { + .refcnt = REF_INIT ( cmdline_image_free ), + .name = "", + .type = &script_image_type, +}; + +/** Colour for debug messages */ +#define colour &cmdline_image + +/** + * Strip unwanted cruft from command line + * + * @v cmdline Command line + * @v cruft Initial substring of cruft to strip + */ +static void cmdline_strip ( char *cmdline, const char *cruft ) { + char *strip; + char *strip_end; + + /* Find unwanted cruft, if present */ + if ( ! 
( strip = strstr ( cmdline, cruft ) ) ) + return; + + /* Strip unwanted cruft */ + strip_end = strchr ( strip, ' ' ); + if ( strip_end ) { + *strip_end = '\0'; + DBGC ( colour, "RUNTIME stripping \"%s\"\n", strip ); + strcpy ( strip, ( strip_end + 1 ) ); + } else { + DBGC ( colour, "RUNTIME stripping \"%s\"\n", strip ); + *strip = '\0'; + } +} + +/** + * Initialise command line + * + * @ret rc Return status code + */ +static int cmdline_init ( void ) { + userptr_t cmdline_user; + char *cmdline; + size_t len; + int rc; + + /* Do nothing if no command line was specified */ + if ( ! cmdline_phys ) { + DBGC ( colour, "RUNTIME found no command line\n" ); + return 0; + } + cmdline_user = phys_to_user ( cmdline_phys ); + len = ( strlen_user ( cmdline_user, 0 ) + 1 /* NUL */ ); + + /* Allocate and copy command line */ + cmdline_copy = malloc ( len ); + if ( ! cmdline_copy ) { + DBGC ( colour, "RUNTIME could not allocate %zd bytes for " + "command line\n", len ); + rc = -ENOMEM; + goto err_alloc_cmdline_copy; + } + cmdline = cmdline_copy; + copy_from_user ( cmdline, cmdline_user, 0, len ); + DBGC ( colour, "RUNTIME found command line \"%s\" at %08x\n", + cmdline, cmdline_phys ); + + /* Mark command line as consumed */ + cmdline_phys = 0; + + /* Strip unwanted cruft from the command line */ + cmdline_strip ( cmdline, "BOOT_IMAGE=" ); + cmdline_strip ( cmdline, "initrd=" ); + while ( isspace ( *cmdline ) ) + cmdline++; + DBGC ( colour, "RUNTIME using command line \"%s\"\n", cmdline ); + + /* Prepare and register image */ + cmdline_image.data = virt_to_user ( cmdline ); + cmdline_image.len = strlen ( cmdline ); + if ( cmdline_image.len ) { + if ( ( rc = register_image ( &cmdline_image ) ) != 0 ) { + DBGC ( colour, "RUNTIME could not register command " + "line: %s\n", strerror ( rc ) ); + goto err_register_image; + } + } + + /* Drop our reference to the image */ + image_put ( &cmdline_image ); + + return 0; + + err_register_image: + image_put ( &cmdline_image ); + 
err_alloc_cmdline_copy: + return rc; +} + +/** + * Initialise initrd + * + * @ret rc Return status code + */ +static int initrd_init ( void ) { + struct image *image; + int rc; + + /* Do nothing if no initrd was specified */ + if ( ! initrd_phys ) { + DBGC ( colour, "RUNTIME found no initrd\n" ); + return 0; + } + if ( ! initrd_len ) { + DBGC ( colour, "RUNTIME found empty initrd\n" ); + return 0; + } + DBGC ( colour, "RUNTIME found initrd at [%x,%x)\n", + initrd_phys, ( initrd_phys + initrd_len ) ); + + /* Allocate image */ + image = alloc_image ( NULL ); + if ( ! image ) { + DBGC ( colour, "RUNTIME could not allocate image for " + "initrd\n" ); + rc = -ENOMEM; + goto err_alloc_image; + } + if ( ( rc = image_set_name ( image, "" ) ) != 0 ) { + DBGC ( colour, "RUNTIME could not set image name: %s\n", + strerror ( rc ) ); + goto err_set_name; + } + + /* Allocate and copy initrd content */ + image->data = umalloc ( initrd_len ); + if ( ! image->data ) { + DBGC ( colour, "RUNTIME could not allocate %d bytes for " + "initrd\n", initrd_len ); + rc = -ENOMEM; + goto err_umalloc; + } + image->len = initrd_len; + memcpy_user ( image->data, 0, phys_to_user ( initrd_phys ), 0, + initrd_len ); + + /* Mark initrd as consumed */ + initrd_phys = 0; + + /* Register image */ + if ( ( rc = register_image ( image ) ) != 0 ) { + DBGC ( colour, "RUNTIME could not register initrd: %s\n", + strerror ( rc ) ); + goto err_register_image; + } + + /* Drop our reference to the image */ + image_put ( image ); + + return 0; + + err_register_image: + err_umalloc: + err_set_name: + image_put ( image ); + err_alloc_image: + return rc; +} + +/** + * Initialise command line and initrd + * + */ +static void runtime_init ( void ) { + int rc; + + /* Initialise command line */ + if ( ( rc = cmdline_init() ) != 0 ) { + /* No way to report failure */ + return; + } + + /* Initialise initrd */ + if ( ( rc = initrd_init() ) != 0 ) { + /* No way to report failure */ + return; + } +} + +/** Command line and 
initrd initialisation function */ +struct startup_fn runtime_startup_fn __startup_fn ( STARTUP_NORMAL ) = { + .name = "runtime", + .startup = runtime_init, +}; diff --git a/src/arch/x86/core/stack.S b/src/arch/x86/core/stack.S new file mode 100644 index 00000000..995c397c --- /dev/null +++ b/src/arch/x86/core/stack.S @@ -0,0 +1,21 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .arch i386 + +#ifdef __x86_64__ +#define STACK_SIZE 8192 +#else +#define STACK_SIZE 4096 +#endif + +/**************************************************************************** + * Internal stack + **************************************************************************** + */ + .section ".stack", "aw", @nobits + .align 8 + .globl _stack +_stack: + .space STACK_SIZE + .globl _estack +_estack: diff --git a/src/arch/x86/core/stack16.S b/src/arch/x86/core/stack16.S new file mode 100644 index 00000000..4bc6f081 --- /dev/null +++ b/src/arch/x86/core/stack16.S @@ -0,0 +1,15 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .arch i386 + +/**************************************************************************** + * Internal stack + **************************************************************************** + */ + .section ".stack16", "aw", @nobits + .align 8 + .globl _stack16 +_stack16: + .space 4096 + .globl _estack16 +_estack16: diff --git a/src/arch/x86/core/video_subr.c b/src/arch/x86/core/video_subr.c new file mode 100644 index 00000000..f5cc4cdd --- /dev/null +++ b/src/arch/x86/core/video_subr.c @@ -0,0 +1,113 @@ +/* + * + * modified from linuxbios code + * by Cai Qiang + * + */ + +#include "stddef.h" +#include "string.h" +#include +#include +#include +#include "vga.h" +#include + +/* Set default console usage if applicable */ +#if ! 
( defined ( CONSOLE_DIRECT_VGA ) && \ + CONSOLE_EXPLICIT ( CONSOLE_DIRECT_VGA ) ) +#undef CONSOLE_DIRECT_VGA +#define CONSOLE_DIRECT_VGA ( CONSOLE_USAGE_ALL & ~CONSOLE_USAGE_LOG ) +#endif + +struct console_driver vga_console __console_driver; + +static char *vidmem; /* The video buffer */ +static int video_line, video_col; + +#define VIDBUFFER 0xB8000 + +static void memsetw(void *s, int c, unsigned int n) +{ + unsigned int i; + u16 *ss = (u16 *) s; + + for (i = 0; i < n; i++) { + ss[i] = ( u16 ) c; + } +} + +static void video_init(void) +{ + static int inited=0; + + vidmem = (char *)phys_to_virt(VIDBUFFER); + + if (!inited) { + video_line = 0; + video_col = 0; + + memsetw(vidmem, VGA_ATTR_CLR_WHT, 2*1024); // + + inited=1; + } +} + +static void video_scroll(void) +{ + int i; + + memmove(vidmem, vidmem + COLS * 2, (LINES - 1) * COLS * 2); + for (i = (LINES - 1) * COLS * 2; i < LINES * COLS * 2; i += 2) + vidmem[i] = ' '; +} + +static void vga_putc(int byte) +{ + if (byte == '\n') { + video_line++; + video_col = 0; + + } else if (byte == '\r') { + video_col = 0; + + } else if (byte == '\b') { + video_col--; + + } else if (byte == '\t') { + video_col += 4; + + } else if (byte == '\a') { + //beep + //beep(500); + + } else { + vidmem[((video_col + (video_line *COLS)) * 2)] = byte; + vidmem[((video_col + (video_line *COLS)) * 2) +1] = VGA_ATTR_CLR_WHT; + video_col++; + } + if (video_col < 0) { + video_col = 0; + } + if (video_col >= COLS) { + video_line++; + video_col = 0; + } + if (video_line >= LINES) { + video_scroll(); + video_line--; + } + // move the cursor + write_crtc((video_col + (video_line *COLS)) >> 8, CRTC_CURSOR_HI); + write_crtc((video_col + (video_line *COLS)) & 0x0ff, CRTC_CURSOR_LO); +} + +struct console_driver vga_console __console_driver = { + .putchar = vga_putc, + .disabled = CONSOLE_DISABLED, + .usage = CONSOLE_DIRECT_VGA, +}; + +struct init_fn video_init_fn __init_fn ( INIT_EARLY ) = { + .initialise = video_init, +}; diff --git 
a/src/arch/x86/core/vram_settings.c b/src/arch/x86/core/vram_settings.c new file mode 100644 index 00000000..9c169b40 --- /dev/null +++ b/src/arch/x86/core/vram_settings.c @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** @file + * + * Video RAM dump + * + */ + +/** Video RAM base address */ +#define VRAM_BASE 0xb8000 + +/** Video RAM length */ +#define VRAM_LEN \ + ( 80 /* columns */ * 25 /* rows */ * 2 /* bytes per character */ ) + +/** + * Fetch video RAM setting + * + * @v data Buffer to fill with setting data + * @v len Length of buffer + * @ret len Length of setting data, or negative error + */ +static int vram_fetch ( void *data, size_t len ) { + userptr_t vram = phys_to_user ( VRAM_BASE ); + + /* Copy video RAM */ + if ( len > VRAM_LEN ) + len = VRAM_LEN; + copy_from_user ( data, vram, 0, len ); + + return VRAM_LEN; +} + +/** Video RAM setting */ +const struct setting vram_setting __setting ( SETTING_MISC, vram ) = { + .name = "vram", + .description = "Video RAM", + .type = &setting_type_base64, + .scope = &builtin_scope, +}; + +/** Video RAM built-in setting */ +struct builtin_setting vram_builtin_setting __builtin_setting = { + .setting = &vram_setting, + .fetch = vram_fetch, +}; diff --git a/src/arch/x86/core/x86_uart.c b/src/arch/x86/core/x86_uart.c new file mode 100644 index 00000000..e455775b --- /dev/null +++ b/src/arch/x86/core/x86_uart.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * 16550-compatible UART + * + */ + +#include +#include + +/** UART port bases */ +static uint16_t uart_base[] = { + [COM1] = 0x3f8, + [COM2] = 0x2f8, + [COM3] = 0x3e8, + [COM4] = 0x2e8, +}; + +/** + * Select UART port + * + * @v uart UART + * @v port Port number, or 0 to disable + * @ret rc Return status code + */ +int uart_select ( struct uart *uart, unsigned int port ) { + int rc; + + /* Set new UART base */ + if ( port >= ( sizeof ( uart_base ) / sizeof ( uart_base[0] ) ) ) { + rc = -ENODEV; + goto err; + } + uart->base = ( ( void * ) ( intptr_t ) uart_base[port] ); + + /* Check that UART exists */ + if ( ( rc = uart_exists ( uart ) ) != 0 ) + goto err; + + return 0; + + err: + uart->base = NULL; + return rc; +} diff --git a/src/arch/x86/drivers/hyperv/hyperv.c b/src/arch/x86/drivers/hyperv/hyperv.c new file mode 100644 index 00000000..1903d1db --- /dev/null +++ b/src/arch/x86/drivers/hyperv/hyperv.c @@ -0,0 +1,820 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Hyper-V driver + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyperv.h" + +/** Maximum time to wait for a message response + * + * This is a policy decision. + */ +#define HV_MESSAGE_MAX_WAIT_MS 1000 + +/** Hyper-V timer frequency (fixed 10Mhz) */ +#define HV_TIMER_HZ 10000000 + +/** Hyper-V timer scale factor (used to avoid 64-bit division) */ +#define HV_TIMER_SHIFT 18 + +/** + * Convert a Hyper-V status code to an iPXE status code + * + * @v status Hyper-V status code + * @ret rc iPXE status code (before negation) + */ +#define EHV( status ) EPLATFORM ( EINFO_EPLATFORM, (status) ) + +/** + * Allocate zeroed pages + * + * @v hv Hyper-V hypervisor + * @v ... Page addresses to fill in, terminated by NULL + * @ret rc Return status code + */ +__attribute__ (( sentinel )) int +hv_alloc_pages ( struct hv_hypervisor *hv, ... ) { + va_list args; + void **page; + int i; + + /* Allocate and zero pages */ + va_start ( args, hv ); + for ( i = 0 ; ( ( page = va_arg ( args, void ** ) ) != NULL ); i++ ) { + *page = malloc_dma ( PAGE_SIZE, PAGE_SIZE ); + if ( ! 
*page ) + goto err_alloc; + memset ( *page, 0, PAGE_SIZE ); + } + va_end ( args ); + + return 0; + + err_alloc: + va_end ( args ); + va_start ( args, hv ); + for ( ; i >= 0 ; i-- ) { + page = va_arg ( args, void ** ); + free_dma ( *page, PAGE_SIZE ); + } + va_end ( args ); + return -ENOMEM; +} + +/** + * Free pages + * + * @v hv Hyper-V hypervisor + * @v ... Page addresses, terminated by NULL + */ +__attribute__ (( sentinel )) void +hv_free_pages ( struct hv_hypervisor *hv, ... ) { + va_list args; + void *page; + + va_start ( args, hv ); + while ( ( page = va_arg ( args, void * ) ) != NULL ) + free_dma ( page, PAGE_SIZE ); + va_end ( args ); +} + +/** + * Allocate message buffer + * + * @v hv Hyper-V hypervisor + * @ret rc Return status code + */ +static int hv_alloc_message ( struct hv_hypervisor *hv ) { + + /* Allocate buffer. Must be aligned to at least 8 bytes and + * must not cross a page boundary, so align on its own size. + */ + hv->message = malloc_dma ( sizeof ( *hv->message ), + sizeof ( *hv->message ) ); + if ( ! hv->message ) + return -ENOMEM; + + return 0; +} + +/** + * Free message buffer + * + * @v hv Hyper-V hypervisor + */ +static void hv_free_message ( struct hv_hypervisor *hv ) { + + /* Free buffer */ + free_dma ( hv->message, sizeof ( *hv->message ) ); +} + +/** + * Check whether or not we are running in Hyper-V + * + * @ret rc Return status code + */ +static int hv_check_hv ( void ) { + struct x86_features features; + uint32_t interface_id; + uint32_t discard_ebx; + uint32_t discard_ecx; + uint32_t discard_edx; + + /* Check for presence of a hypervisor (not necessarily Hyper-V) */ + x86_features ( &features ); + if ( ! 
( features.intel.ecx & CPUID_FEATURES_INTEL_ECX_HYPERVISOR ) ) { + DBGC ( HV_INTERFACE_ID, "HV not running in a hypervisor\n" ); + return -ENODEV; + } + + /* Check that hypervisor is Hyper-V */ + cpuid ( HV_CPUID_INTERFACE_ID, 0, &interface_id, &discard_ebx, + &discard_ecx, &discard_edx ); + if ( interface_id != HV_INTERFACE_ID ) { + DBGC ( HV_INTERFACE_ID, "HV not running in Hyper-V (interface " + "ID %#08x)\n", interface_id ); + return -ENODEV; + } + + return 0; +} + +/** + * Check required features + * + * @v hv Hyper-V hypervisor + * @ret rc Return status code + */ +static int hv_check_features ( struct hv_hypervisor *hv ) { + uint32_t available; + uint32_t permissions; + uint32_t discard_ecx; + uint32_t discard_edx; + + /* Check that required features and privileges are available */ + cpuid ( HV_CPUID_FEATURES, 0, &available, &permissions, &discard_ecx, + &discard_edx ); + if ( ! ( available & HV_FEATURES_AVAIL_HYPERCALL_MSR ) ) { + DBGC ( hv, "HV %p has no hypercall MSRs (features %08x:%08x)\n", + hv, available, permissions ); + return -ENODEV; + } + if ( ! ( available & HV_FEATURES_AVAIL_SYNIC_MSR ) ) { + DBGC ( hv, "HV %p has no SynIC MSRs (features %08x:%08x)\n", + hv, available, permissions ); + return -ENODEV; + } + if ( ! ( permissions & HV_FEATURES_PERM_POST_MESSAGES ) ) { + DBGC ( hv, "HV %p cannot post messages (features %08x:%08x)\n", + hv, available, permissions ); + return -EACCES; + } + if ( ! ( permissions & HV_FEATURES_PERM_SIGNAL_EVENTS ) ) { + DBGC ( hv, "HV %p cannot signal events (features %08x:%08x)", + hv, available, permissions ); + return -EACCES; + } + + return 0; +} + +/** + * Check that Gen 2 UEFI firmware is not running + * + * @v hv Hyper-V hypervisor + * @ret rc Return status code + * + * We must not steal ownership from the Gen 2 UEFI firmware, since + * doing so will cause an immediate crash. Avoid this by checking for + * the guest OS identity known to be used by the Gen 2 UEFI firmware. 
+ */ +static int hv_check_uefi ( struct hv_hypervisor *hv ) { + uint64_t guest_os_id; + + /* Check for UEFI firmware's guest OS identity */ + guest_os_id = rdmsr ( HV_X64_MSR_GUEST_OS_ID ); + if ( guest_os_id == HV_GUEST_OS_ID_UEFI ) { + DBGC ( hv, "HV %p is owned by UEFI firmware\n", hv ); + return -ENOTSUP; + } + + return 0; +} + +/** + * Map hypercall page + * + * @v hv Hyper-V hypervisor + */ +static void hv_map_hypercall ( struct hv_hypervisor *hv ) { + union { + struct { + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + } __attribute__ (( packed )); + char text[ 13 /* "bbbbccccdddd" + NUL */ ]; + } vendor_id; + uint32_t build; + uint32_t version; + uint32_t discard_eax; + uint32_t discard_ecx; + uint32_t discard_edx; + uint64_t guest_os_id; + uint64_t hypercall; + + /* Report guest OS identity */ + guest_os_id = rdmsr ( HV_X64_MSR_GUEST_OS_ID ); + if ( guest_os_id != 0 ) { + DBGC ( hv, "HV %p guest OS ID MSR was %#08llx\n", + hv, guest_os_id ); + } + guest_os_id = HV_GUEST_OS_ID_IPXE; + DBGC2 ( hv, "HV %p guest OS ID MSR is %#08llx\n", hv, guest_os_id ); + wrmsr ( HV_X64_MSR_GUEST_OS_ID, guest_os_id ); + + /* Get hypervisor system identity (for debugging) */ + cpuid ( HV_CPUID_VENDOR_ID, 0, &discard_eax, &vendor_id.ebx, + &vendor_id.ecx, &vendor_id.edx ); + vendor_id.text[ sizeof ( vendor_id.text ) - 1 ] = '\0'; + cpuid ( HV_CPUID_HYPERVISOR_ID, 0, &build, &version, &discard_ecx, + &discard_edx ); + DBGC ( hv, "HV %p detected \"%s\" version %d.%d build %d\n", hv, + vendor_id.text, ( version >> 16 ), ( version & 0xffff ), build ); + + /* Map hypercall page */ + hypercall = rdmsr ( HV_X64_MSR_HYPERCALL ); + hypercall &= ( PAGE_SIZE - 1 ); + hypercall |= ( virt_to_phys ( hv->hypercall ) | HV_HYPERCALL_ENABLE ); + DBGC2 ( hv, "HV %p hypercall MSR is %#08llx\n", hv, hypercall ); + wrmsr ( HV_X64_MSR_HYPERCALL, hypercall ); +} + +/** + * Unmap hypercall page + * + * @v hv Hyper-V hypervisor + */ +static void hv_unmap_hypercall ( struct hv_hypervisor *hv ) { + 
uint64_t hypercall; + uint64_t guest_os_id; + + /* Unmap the hypercall page */ + hypercall = rdmsr ( HV_X64_MSR_HYPERCALL ); + hypercall &= ( ( PAGE_SIZE - 1 ) & ~HV_HYPERCALL_ENABLE ); + DBGC2 ( hv, "HV %p hypercall MSR is %#08llx\n", hv, hypercall ); + wrmsr ( HV_X64_MSR_HYPERCALL, hypercall ); + + /* Reset the guest OS identity */ + guest_os_id = 0; + DBGC2 ( hv, "HV %p guest OS ID MSR is %#08llx\n", hv, guest_os_id ); + wrmsr ( HV_X64_MSR_GUEST_OS_ID, guest_os_id ); +} + +/** + * Map synthetic interrupt controller + * + * @v hv Hyper-V hypervisor + */ +static void hv_map_synic ( struct hv_hypervisor *hv ) { + uint64_t simp; + uint64_t siefp; + uint64_t scontrol; + + /* Zero SynIC message and event pages */ + memset ( hv->synic.message, 0, PAGE_SIZE ); + memset ( hv->synic.event, 0, PAGE_SIZE ); + + /* Map SynIC message page */ + simp = rdmsr ( HV_X64_MSR_SIMP ); + simp &= ( PAGE_SIZE - 1 ); + simp |= ( virt_to_phys ( hv->synic.message ) | HV_SIMP_ENABLE ); + DBGC2 ( hv, "HV %p SIMP MSR is %#08llx\n", hv, simp ); + wrmsr ( HV_X64_MSR_SIMP, simp ); + + /* Map SynIC event page */ + siefp = rdmsr ( HV_X64_MSR_SIEFP ); + siefp &= ( PAGE_SIZE - 1 ); + siefp |= ( virt_to_phys ( hv->synic.event ) | HV_SIEFP_ENABLE ); + DBGC2 ( hv, "HV %p SIEFP MSR is %#08llx\n", hv, siefp ); + wrmsr ( HV_X64_MSR_SIEFP, siefp ); + + /* Enable SynIC */ + scontrol = rdmsr ( HV_X64_MSR_SCONTROL ); + scontrol |= HV_SCONTROL_ENABLE; + DBGC2 ( hv, "HV %p SCONTROL MSR is %#08llx\n", hv, scontrol ); + wrmsr ( HV_X64_MSR_SCONTROL, scontrol ); +} + +/** + * Unmap synthetic interrupt controller, leaving SCONTROL untouched + * + * @v hv Hyper-V hypervisor + */ +static void hv_unmap_synic_no_scontrol ( struct hv_hypervisor *hv ) { + uint64_t siefp; + uint64_t simp; + + /* Unmap SynIC event page */ + siefp = rdmsr ( HV_X64_MSR_SIEFP ); + siefp &= ( ( PAGE_SIZE - 1 ) & ~HV_SIEFP_ENABLE ); + DBGC2 ( hv, "HV %p SIEFP MSR is %#08llx\n", hv, siefp ); + wrmsr ( HV_X64_MSR_SIEFP, siefp ); + + /* Unmap SynIC 
message page */ + simp = rdmsr ( HV_X64_MSR_SIMP ); + simp &= ( ( PAGE_SIZE - 1 ) & ~HV_SIMP_ENABLE ); + DBGC2 ( hv, "HV %p SIMP MSR is %#08llx\n", hv, simp ); + wrmsr ( HV_X64_MSR_SIMP, simp ); +} + +/** + * Unmap synthetic interrupt controller + * + * @v hv Hyper-V hypervisor + */ +static void hv_unmap_synic ( struct hv_hypervisor *hv ) { + uint64_t scontrol; + + /* Disable SynIC */ + scontrol = rdmsr ( HV_X64_MSR_SCONTROL ); + scontrol &= ~HV_SCONTROL_ENABLE; + DBGC2 ( hv, "HV %p SCONTROL MSR is %#08llx\n", hv, scontrol ); + wrmsr ( HV_X64_MSR_SCONTROL, scontrol ); + + /* Unmap SynIC event and message pages */ + hv_unmap_synic_no_scontrol ( hv ); +} + +/** + * Enable synthetic interrupt + * + * @v hv Hyper-V hypervisor + * @v sintx Synthetic interrupt number + */ +void hv_enable_sint ( struct hv_hypervisor *hv, unsigned int sintx ) { + unsigned long msr = HV_X64_MSR_SINT ( sintx ); + uint64_t sint; + + /* Enable synthetic interrupt + * + * We have to enable the interrupt, otherwise messages will + * not be delivered (even though the documentation implies + * that polling for messages is possible). We enable AutoEOI + * and hook the interrupt to the obsolete IRQ13 (FPU + * exception) vector, which will be implemented as a no-op. 
+ */ + sint = rdmsr ( msr ); + sint &= ~( HV_SINT_MASKED | HV_SINT_VECTOR_MASK ); + sint |= ( HV_SINT_AUTO_EOI | + HV_SINT_VECTOR ( IRQ_INT ( 13 /* See comment above */ ) ) ); + DBGC2 ( hv, "HV %p SINT%d MSR is %#08llx\n", hv, sintx, sint ); + wrmsr ( msr, sint ); +} + +/** + * Disable synthetic interrupt + * + * @v hv Hyper-V hypervisor + * @v sintx Synthetic interrupt number + */ +void hv_disable_sint ( struct hv_hypervisor *hv, unsigned int sintx ) { + unsigned long msr = HV_X64_MSR_SINT ( sintx ); + uint64_t sint; + + /* Do nothing if interrupt is already disabled */ + sint = rdmsr ( msr ); + if ( sint & HV_SINT_MASKED ) + return; + + /* Disable synthetic interrupt */ + sint &= ~HV_SINT_AUTO_EOI; + sint |= HV_SINT_MASKED; + DBGC2 ( hv, "HV %p SINT%d MSR is %#08llx\n", hv, sintx, sint ); + wrmsr ( msr, sint ); +} + +/** + * Post message + * + * @v hv Hyper-V hypervisor + * @v id Connection ID + * @v type Message type + * @v data Message + * @v len Length of message + * @ret rc Return status code + */ +int hv_post_message ( struct hv_hypervisor *hv, unsigned int id, + unsigned int type, const void *data, size_t len ) { + struct hv_post_message *msg = &hv->message->posted; + int status; + int rc; + + /* Sanity check */ + assert ( len <= sizeof ( msg->data ) ); + + /* Construct message */ + memset ( msg, 0, sizeof ( *msg ) ); + msg->id = cpu_to_le32 ( id ); + msg->type = cpu_to_le32 ( type ); + msg->len = cpu_to_le32 ( len ); + memcpy ( msg->data, data, len ); + DBGC2 ( hv, "HV %p connection %d posting message type %#08x:\n", + hv, id, type ); + DBGC2_HDA ( hv, 0, msg->data, len ); + + /* Post message */ + if ( ( status = hv_call ( hv, HV_POST_MESSAGE, msg, NULL ) ) != 0 ) { + rc = -EHV ( status ); + DBGC ( hv, "HV %p could not post message to %#08x: %s\n", + hv, id, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Wait for received message + * + * @v hv Hyper-V hypervisor + * @v sintx Synthetic interrupt number + * @ret rc Return status code + */ 
+int hv_wait_for_message ( struct hv_hypervisor *hv, unsigned int sintx ) { + struct hv_message *msg = &hv->message->received; + struct hv_message *src = &hv->synic.message[sintx]; + unsigned int retries; + size_t len; + + /* Wait for message to arrive */ + for ( retries = 0 ; retries < HV_MESSAGE_MAX_WAIT_MS ; retries++ ) { + + /* Check for message */ + if ( src->type ) { + + /* Copy message */ + memset ( msg, 0, sizeof ( *msg ) ); + len = src->len; + assert ( len <= sizeof ( *msg ) ); + memcpy ( msg, src, + ( offsetof ( typeof ( *msg ), data ) + len ) ); + DBGC2 ( hv, "HV %p SINT%d received message type " + "%#08x:\n", hv, sintx, + le32_to_cpu ( msg->type ) ); + DBGC2_HDA ( hv, 0, msg->data, len ); + + /* Consume message */ + src->type = 0; + + return 0; + } + + /* Trigger message delivery */ + wrmsr ( HV_X64_MSR_EOM, 0 ); + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( hv, "HV %p SINT%d timed out waiting for message\n", + hv, sintx ); + return -ETIMEDOUT; +} + +/** + * Signal event + * + * @v hv Hyper-V hypervisor + * @v id Connection ID + * @v flag Flag number + * @ret rc Return status code + */ +int hv_signal_event ( struct hv_hypervisor *hv, unsigned int id, + unsigned int flag ) { + struct hv_signal_event *event = &hv->message->signalled; + int status; + int rc; + + /* Construct event */ + memset ( event, 0, sizeof ( *event ) ); + event->id = cpu_to_le32 ( id ); + event->flag = cpu_to_le16 ( flag ); + + /* Signal event */ + if ( ( status = hv_call ( hv, HV_SIGNAL_EVENT, event, NULL ) ) != 0 ) { + rc = -EHV ( status ); + DBGC ( hv, "HV %p could not signal event to %#08x: %s\n", + hv, id, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Probe root device + * + * @v rootdev Root device + * @ret rc Return status code + */ +static int hv_probe ( struct root_device *rootdev ) { + struct hv_hypervisor *hv; + int rc; + + /* Check we are running in Hyper-V */ + if ( ( rc = hv_check_hv() ) != 0 ) + goto err_check_hv; + + /* Allocate and initialise 
structure */ + hv = zalloc ( sizeof ( *hv ) ); + if ( ! hv ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Check features */ + if ( ( rc = hv_check_features ( hv ) ) != 0 ) + goto err_check_features; + + /* Check that Gen 2 UEFI firmware is not running */ + if ( ( rc = hv_check_uefi ( hv ) ) != 0 ) + goto err_check_uefi; + + /* Allocate pages */ + if ( ( rc = hv_alloc_pages ( hv, &hv->hypercall, &hv->synic.message, + &hv->synic.event, NULL ) ) != 0 ) + goto err_alloc_pages; + + /* Allocate message buffer */ + if ( ( rc = hv_alloc_message ( hv ) ) != 0 ) + goto err_alloc_message; + + /* Map hypercall page */ + hv_map_hypercall ( hv ); + + /* Map synthetic interrupt controller */ + hv_map_synic ( hv ); + + /* Probe Hyper-V devices */ + if ( ( rc = vmbus_probe ( hv, &rootdev->dev ) ) != 0 ) + goto err_vmbus_probe; + + rootdev_set_drvdata ( rootdev, hv ); + return 0; + + vmbus_remove ( hv, &rootdev->dev ); + err_vmbus_probe: + hv_unmap_synic ( hv ); + hv_unmap_hypercall ( hv ); + hv_free_message ( hv ); + err_alloc_message: + hv_free_pages ( hv, hv->hypercall, hv->synic.message, hv->synic.event, + NULL ); + err_alloc_pages: + err_check_uefi: + err_check_features: + free ( hv ); + err_alloc: + err_check_hv: + return rc; +} + +/** + * Remove root device + * + * @v rootdev Root device + */ +static void hv_remove ( struct root_device *rootdev ) { + struct hv_hypervisor *hv = rootdev_get_drvdata ( rootdev ); + + vmbus_remove ( hv, &rootdev->dev ); + hv_unmap_synic ( hv ); + hv_unmap_hypercall ( hv ); + hv_free_message ( hv ); + hv_free_pages ( hv, hv->hypercall, hv->synic.message, hv->synic.event, + NULL ); + free ( hv ); + rootdev_set_drvdata ( rootdev, NULL ); +} + +/** Hyper-V root device driver */ +static struct root_driver hv_root_driver = { + .probe = hv_probe, + .remove = hv_remove, +}; + +/** Hyper-V root device */ +struct root_device hv_root_device __root_device = { + .dev = { .name = "Hyper-V" }, + .driver = &hv_root_driver, +}; + +/** + * Quiesce system + * + */ 
+static void hv_quiesce ( void ) { + struct hv_hypervisor *hv = rootdev_get_drvdata ( &hv_root_device ); + unsigned int i; + + /* Do nothing if we are not running in Hyper-V */ + if ( ! hv ) + return; + + /* The "enlightened" portions of the Windows Server 2016 boot + * process will not cleanly take ownership of an active + * Hyper-V connection. Experimentation shows that the minimum + * requirement is that we disable the SynIC message page + * (i.e. zero the SIMP MSR). + * + * We cannot perform a full shutdown of the Hyper-V + * connection. Experimentation shows that if we disable the + * SynIC (i.e. zero the SCONTROL MSR) then Windows Server 2016 + * will enter an indefinite wait loop. + * + * Attempt to create a safe handover environment by resetting + * all MSRs except for SCONTROL. + * + * Note that we do not shut down our VMBus devices, since we + * may need to unquiesce the system and continue operation. + */ + + /* Disable all synthetic interrupts */ + for ( i = 0 ; i <= HV_SINT_MAX ; i++ ) + hv_disable_sint ( hv, i ); + + /* Unmap synthetic interrupt controller, leaving SCONTROL + * enabled (see above). + */ + hv_unmap_synic_no_scontrol ( hv ); + + /* Unmap hypercall page */ + hv_unmap_hypercall ( hv ); + + DBGC ( hv, "HV %p quiesced\n", hv ); +} + +/** + * Unquiesce system + * + */ +static void hv_unquiesce ( void ) { + struct hv_hypervisor *hv = rootdev_get_drvdata ( &hv_root_device ); + uint64_t simp; + int rc; + + /* Do nothing if we are not running in Hyper-V */ + if ( ! hv ) + return; + + /* Experimentation shows that the "enlightened" portions of + * Windows Server 2016 will break our Hyper-V connection at + * some point during a SAN boot. Surprisingly it does not + * change the guest OS ID MSR, but it does leave the SynIC + * message page disabled. + * + * Our own explicit quiescing procedure will also disable the + * SynIC message page. 
We can therefore use the SynIC message + * page enable bit as a heuristic to determine when we need to + * reestablish our Hyper-V connection. + */ + simp = rdmsr ( HV_X64_MSR_SIMP ); + if ( simp & HV_SIMP_ENABLE ) + return; + + /* Remap hypercall page */ + hv_map_hypercall ( hv ); + + /* Remap synthetic interrupt controller */ + hv_map_synic ( hv ); + + /* Reset Hyper-V devices */ + if ( ( rc = vmbus_reset ( hv, &hv_root_device.dev ) ) != 0 ) { + DBGC ( hv, "HV %p could not unquiesce: %s\n", + hv, strerror ( rc ) ); + /* Nothing we can do */ + return; + } +} + +/** Hyper-V quiescer */ +struct quiescer hv_quiescer __quiescer = { + .quiesce = hv_quiesce, + .unquiesce = hv_unquiesce, +}; + +/** + * Probe timer + * + * @ret rc Return status code + */ +static int hv_timer_probe ( void ) { + uint32_t available; + uint32_t discard_ebx; + uint32_t discard_ecx; + uint32_t discard_edx; + int rc; + + /* Check we are running in Hyper-V */ + if ( ( rc = hv_check_hv() ) != 0 ) + return rc; + + /* Check for available reference counter */ + cpuid ( HV_CPUID_FEATURES, 0, &available, &discard_ebx, &discard_ecx, + &discard_edx ); + if ( ! ( available & HV_FEATURES_AVAIL_TIME_REF_COUNT_MSR ) ) { + DBGC ( HV_INTERFACE_ID, "HV has no time reference counter\n" ); + return -ENODEV; + } + + return 0; +} + +/** + * Get current system time in ticks + * + * @ret ticks Current time, in ticks + */ +static unsigned long hv_currticks ( void ) { + + /* Calculate time using a combination of bit shifts and + * multiplication (to avoid a 64-bit division). 
+ */ + return ( ( rdmsr ( HV_X64_MSR_TIME_REF_COUNT ) >> HV_TIMER_SHIFT ) * + ( TICKS_PER_SEC / ( HV_TIMER_HZ >> HV_TIMER_SHIFT ) ) ); +} + +/** + * Delay for a fixed number of microseconds + * + * @v usecs Number of microseconds for which to delay + */ +static void hv_udelay ( unsigned long usecs ) { + uint32_t start; + uint32_t elapsed; + uint32_t threshold; + + /* Spin until specified number of 10MHz ticks have elapsed */ + start = rdmsr ( HV_X64_MSR_TIME_REF_COUNT ); + threshold = ( usecs * ( HV_TIMER_HZ / 1000000 ) ); + do { + elapsed = ( rdmsr ( HV_X64_MSR_TIME_REF_COUNT ) - start ); + } while ( elapsed < threshold ); +} + +/** Hyper-V timer */ +struct timer hv_timer __timer ( TIMER_PREFERRED ) = { + .name = "Hyper-V", + .probe = hv_timer_probe, + .currticks = hv_currticks, + .udelay = hv_udelay, +}; + +/* Drag in objects via hv_root_device */ +REQUIRING_SYMBOL ( hv_root_device ); + +/* Drag in netvsc driver */ +REQUIRE_OBJECT ( netvsc ); diff --git a/src/arch/x86/drivers/hyperv/hyperv.h b/src/arch/x86/drivers/hyperv/hyperv.h new file mode 100644 index 00000000..08031fc6 --- /dev/null +++ b/src/arch/x86/drivers/hyperv/hyperv.h @@ -0,0 +1,63 @@ +#ifndef _HYPERV_H +#define _HYPERV_H + +/** @file + * + * Hyper-V driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** Get vendor identification */ +#define HV_CPUID_VENDOR_ID 0x40000000UL + +/** Get interface identification */ +#define HV_CPUID_INTERFACE_ID 0x40000001UL + +/** Get hypervisor identification */ +#define HV_CPUID_HYPERVISOR_ID 0x40000002UL + +/** Get hypervisor features */ +#define HV_CPUID_FEATURES 0x40000003UL + +/** Time reference counter MSR is available */ +#define HV_FEATURES_AVAIL_TIME_REF_COUNT_MSR 0x00000002UL + +/** SynIC MSRs are available */ +#define HV_FEATURES_AVAIL_SYNIC_MSR 0x00000004UL + +/** Hypercall MSRs are available */ +#define HV_FEATURES_AVAIL_HYPERCALL_MSR 0x00000020UL + +/** Guest may post messages */ +#define HV_FEATURES_PERM_POST_MESSAGES 0x00000010UL + +/** Guest 
may signal events */ +#define HV_FEATURES_PERM_SIGNAL_EVENTS 0x00000020UL + +/** Guest OS identity MSR */ +#define HV_X64_MSR_GUEST_OS_ID 0x40000000UL + +/** Hypercall page MSR */ +#define HV_X64_MSR_HYPERCALL 0x40000001UL + +/** Time reference MSR */ +#define HV_X64_MSR_TIME_REF_COUNT 0x40000020UL + +/** SynIC control MSR */ +#define HV_X64_MSR_SCONTROL 0x40000080UL + +/** SynIC event flags page MSR */ +#define HV_X64_MSR_SIEFP 0x40000082UL + +/** SynIC message page MSR */ +#define HV_X64_MSR_SIMP 0x40000083UL + +/** SynIC end of message MSR */ +#define HV_X64_MSR_EOM 0x40000084UL + +/** SynIC interrupt source MSRs */ +#define HV_X64_MSR_SINT(x) ( 0x40000090UL + (x) ) + +#endif /* _HYPERV_H */ diff --git a/src/arch/x86/drivers/net/undi.c b/src/arch/x86/drivers/net/undi.c new file mode 100644 index 00000000..87c93c3b --- /dev/null +++ b/src/arch/x86/drivers/net/undi.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * UNDI PCI driver + * + */ + +/** + * Find UNDI ROM for PCI device + * + * @v pci PCI device + * @ret undirom UNDI ROM, or NULL + * + * Try to find a driver for this device. 
Try an exact match on the + * ROM address first, then fall back to a vendor/device ID match only + */ +static struct undi_rom * undipci_find_rom ( struct pci_device *pci ) { + struct undi_rom *undirom; + unsigned long rombase; + + rombase = pci_bar_start ( pci, PCI_ROM_ADDRESS ); + undirom = undirom_find_pci ( pci->vendor, pci->device, rombase ); + if ( ! undirom ) + undirom = undirom_find_pci ( pci->vendor, pci->device, 0 ); + return undirom; +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @v id PCI ID + * @ret rc Return status code + */ +static int undipci_probe ( struct pci_device *pci ) { + struct undi_device *undi; + struct undi_rom *undirom; + int rc; + + /* Allocate UNDI device structure */ + undi = zalloc ( sizeof ( *undi ) ); + if ( ! undi ) + return -ENOMEM; + pci_set_drvdata ( pci, undi ); + + /* Find/create our pixie */ + if ( preloaded_undi.pci_busdevfn == pci->busdevfn ) { + /* Claim preloaded UNDI device */ + DBGC ( undi, "UNDI %p using preloaded UNDI device\n", undi ); + memcpy ( undi, &preloaded_undi, sizeof ( *undi ) ); + memset ( &preloaded_undi, 0, sizeof ( preloaded_undi ) ); + } else { + /* Find UNDI ROM for PCI device */ + if ( ! 
( undirom = undipci_find_rom ( pci ) ) ) { + rc = -ENODEV; + goto err_find_rom; + } + + /* Call UNDI ROM loader to create pixie */ + if ( ( rc = undi_load_pci ( undi, undirom, + pci->busdevfn ) ) != 0 ) { + goto err_load_pci; + } + } + + /* Create network device */ + if ( ( rc = undinet_probe ( undi, &pci->dev ) ) != 0 ) + goto err_undinet_probe; + + return 0; + + err_undinet_probe: + undi_unload ( undi ); + err_find_rom: + err_load_pci: + free ( undi ); + pci_set_drvdata ( pci, NULL ); + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void undipci_remove ( struct pci_device *pci ) { + struct undi_device *undi = pci_get_drvdata ( pci ); + + undinet_remove ( undi ); + undi_unload ( undi ); + free ( undi ); + pci_set_drvdata ( pci, NULL ); +} + +static struct pci_device_id undipci_nics[] = { + PCI_ROM ( 0xffff, 0xffff, "undipci", "UNDI (PCI)", 0 ), +}; + +struct pci_driver undipci_driver __pci_driver_fallback = { + .ids = undipci_nics, + .id_count = ( sizeof ( undipci_nics ) / sizeof ( undipci_nics[0] ) ), + .class = PCI_CLASS_ID ( PCI_CLASS_NETWORK, PCI_ANY_ID, PCI_ANY_ID ), + .probe = undipci_probe, + .remove = undipci_remove, +}; diff --git a/src/arch/x86/drivers/net/undiisr.S b/src/arch/x86/drivers/net/undiisr.S new file mode 100644 index 00000000..2428d1f5 --- /dev/null +++ b/src/arch/x86/drivers/net/undiisr.S @@ -0,0 +1,87 @@ +FILE_LICENCE ( GPL2_OR_LATER ) + +#define PXENV_UNDI_ISR 0x0014 +#define PXENV_UNDI_ISR_IN_START 1 +#define PXENV_UNDI_ISR_OUT_OURS 0 +#define PXENV_UNDI_ISR_OUT_NOT_OURS 1 + +#define IRQ_PIC_CUTOFF 8 +#define ICR_EOI_NON_SPECIFIC 0x20 +#define PIC1_ICR 0x20 +#define PIC2_ICR 0xa0 + + .text + .arch i386 + .code16 + + .section ".text16", "ax", @progbits + .globl undiisr +undiisr: + + /* Preserve registers */ + pushw %ds + pushw %es + pushw %fs + pushw %gs + pushfl + pushal + + /* Set up our segment registers */ + movw %cs:rm_ds, %ax + movw %ax, %ds + + /* Check that we have an UNDI entry point */ + cmpw 
$0, undinet_entry_point + je chain + + /* Issue UNDI API call */ + movw %ax, %es + movw $undinet_params, %di + movw $PXENV_UNDI_ISR, %bx + movw $PXENV_UNDI_ISR_IN_START, funcflag + pushw %es + pushw %di + pushw %bx + lcall *undinet_entry_point + cli /* Just in case */ + addw $6, %sp + cmpw $PXENV_UNDI_ISR_OUT_OURS, funcflag + jne eoi + +trig: /* Record interrupt occurence */ + incb undiisr_trigger_count + +eoi: /* Send EOI */ + movb $ICR_EOI_NON_SPECIFIC, %al + cmpb $IRQ_PIC_CUTOFF, undiisr_irq + jb 1f + outb %al, $PIC2_ICR +1: outb %al, $PIC1_ICR + jmp exit + +chain: /* Chain to next handler */ + pushfw + lcall *undiisr_next_handler + +exit: /* Restore registers and return */ + cli + popal + movzwl %sp, %esp + addr32 movl -20(%esp), %esp /* %esp isn't restored by popal */ + popfl + popw %gs + popw %fs + popw %es + popw %ds + iret + + .section ".data16", "aw", @progbits +undinet_params: +status: .word 0 +funcflag: .word 0 +bufferlength: .word 0 +framelength: .word 0 +frameheaderlength: .word 0 +frame: .word 0, 0 +prottype: .byte 0 +pkttype: .byte 0 diff --git a/src/arch/x86/drivers/net/undiload.c b/src/arch/x86/drivers/net/undiload.c new file mode 100644 index 00000000..492dae4b --- /dev/null +++ b/src/arch/x86/drivers/net/undiload.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * UNDI load/unload + * + */ + +/* Disambiguate the various error causes */ +#define EINFO_EUNDILOAD \ + __einfo_uniqify ( EINFO_EPLATFORM, 0x01, \ + "UNDI loader error" ) +#define EUNDILOAD( status ) EPLATFORM ( EINFO_EUNDILOAD, status ) + +/** Parameter block for calling UNDI loader */ +static struct s_UNDI_LOADER __bss16 ( undi_loader ); +#define undi_loader __use_data16 ( undi_loader ) + +/** UNDI loader entry point */ +static SEGOFF16_t __bss16 ( undi_loader_entry ); +#define undi_loader_entry __use_data16 ( undi_loader_entry ) + +/** + * Call UNDI loader to create a pixie + * + * @v undi UNDI device + * @v undirom UNDI ROM + * @ret rc Return status code + */ +int undi_load ( struct undi_device *undi, struct undi_rom *undirom ) { + struct s_PXE ppxe; + unsigned int fbms_seg; + uint16_t exit; + int rc; + + /* Only one UNDI instance may be loaded at any given time */ + if ( undi_loader_entry.segment ) { + DBG ( "UNDI %p cannot load multiple instances\n", undi ); + rc = -EBUSY; + goto err_multiple; + } + + /* Set up START_UNDI parameters */ + memset ( &undi_loader, 0, sizeof ( undi_loader ) ); + undi_loader.AX = undi->pci_busdevfn; + undi_loader.BX = undi->isapnp_csn; + undi_loader.DX = undi->isapnp_read_port; + undi_loader.ES = BIOS_SEG; + undi_loader.DI = find_pnp_bios(); + + /* Allocate base memory for PXE stack */ + undi->restore_fbms = 
get_fbms(); + fbms_seg = ( undi->restore_fbms << 6 ); + fbms_seg -= ( ( undirom->code_size + 0x0f ) >> 4 ); + undi_loader.UNDI_CS = fbms_seg; + fbms_seg -= ( ( undirom->data_size + 0x0f ) >> 4 ); + undi_loader.UNDI_DS = fbms_seg; + undi->fbms = ( fbms_seg >> 6 ); + set_fbms ( undi->fbms ); + DBGC ( undi, "UNDI %p allocated [%d,%d) kB of base memory\n", + undi, undi->fbms, undi->restore_fbms ); + + /* Debug info */ + DBGC ( undi, "UNDI %p loading ROM %p to CS %04x:%04zx DS %04x:%04zx " + "for ", undi, undirom, undi_loader.UNDI_CS, undirom->code_size, + undi_loader.UNDI_DS, undirom->data_size ); + if ( undi->pci_busdevfn != UNDI_NO_PCI_BUSDEVFN ) { + unsigned int bus = ( undi->pci_busdevfn >> 8 ); + unsigned int devfn = ( undi->pci_busdevfn & 0xff ); + DBGC ( undi, "PCI %02x:%02x.%x\n", + bus, PCI_SLOT ( devfn ), PCI_FUNC ( devfn ) ); + } + if ( undi->isapnp_csn != UNDI_NO_ISAPNP_CSN ) { + DBGC ( undi, "ISAPnP(%04x) CSN %04x\n", + undi->isapnp_read_port, undi->isapnp_csn ); + } + + /* Call loader */ + undi_loader_entry = undirom->loader_entry; + __asm__ __volatile__ ( REAL_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + "pushw %%ds\n\t" + "pushw %%ax\n\t" + "lcall *undi_loader_entry\n\t" + "popl %%ebp\n\t" /* discard */ + "popl %%ebp\n\t" /* gcc bug */ ) + : "=a" ( exit ) + : "a" ( __from_data16 ( &undi_loader ) ) + : "ebx", "ecx", "edx", "esi", "edi" ); + if ( exit != PXENV_EXIT_SUCCESS ) { + rc = -EUNDILOAD ( undi_loader.Status ); + DBGC ( undi, "UNDI %p loader failed: %s\n", + undi, strerror ( rc ) ); + goto err_loader; + } + + /* Populate PXE device structure */ + undi->pxenv = undi_loader.PXENVptr; + undi->ppxe = undi_loader.PXEptr; + copy_from_real ( &ppxe, undi->ppxe.segment, undi->ppxe.offset, + sizeof ( ppxe ) ); + undi->entry = ppxe.EntryPointSP; + DBGC ( undi, "UNDI %p loaded PXENV+ %04x:%04x !PXE %04x:%04x " + "entry %04x:%04x\n", undi, undi->pxenv.segment, + undi->pxenv.offset, undi->ppxe.segment, undi->ppxe.offset, + undi->entry.segment, undi->entry.offset ); 
+ + return 0; + + err_loader: + set_fbms ( undi->restore_fbms ); + memset ( &undi_loader_entry, 0, sizeof ( undi_loader_entry ) ); + err_multiple: + return rc; +} + +/** + * Unload a pixie + * + * @v undi UNDI device + * @ret rc Return status code + * + * Erases the PXENV+ and !PXE signatures, and frees the used base + * memory (if possible). + */ +int undi_unload ( struct undi_device *undi ) { + static uint32_t dead = 0xdeaddead; + + DBGC ( undi, "UNDI %p unloading\n", undi ); + + /* Clear entry point */ + memset ( &undi_loader_entry, 0, sizeof ( undi_loader_entry ) ); + + /* Erase signatures */ + if ( undi->pxenv.segment ) + put_real ( dead, undi->pxenv.segment, undi->pxenv.offset ); + if ( undi->ppxe.segment ) + put_real ( dead, undi->ppxe.segment, undi->ppxe.offset ); + + /* Free base memory, if possible */ + if ( undi->fbms == get_fbms() ) { + DBGC ( undi, "UNDI %p freeing [%d,%d) kB of base memory\n", + undi, undi->fbms, undi->restore_fbms ); + set_fbms ( undi->restore_fbms ); + return 0; + } else { + DBGC ( undi, "UNDI %p leaking [%d,%d) kB of base memory\n", + undi, undi->fbms, undi->restore_fbms ); + return -EBUSY; + } +} diff --git a/src/arch/x86/drivers/net/undinet.c b/src/arch/x86/drivers/net/undinet.c new file mode 100644 index 00000000..9b7d6d84 --- /dev/null +++ b/src/arch/x86/drivers/net/undinet.c @@ -0,0 +1,1074 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * UNDI network device driver + * + */ + +/** An UNDI NIC */ +struct undi_nic { + /** Device supports IRQs */ + int irq_supported; + /** Assigned IRQ number */ + unsigned int irq; + /** Currently processing ISR */ + int isr_processing; + /** Bug workarounds */ + int hacks; +}; + +/* Disambiguate the various error causes */ +#define EINFO_EPXECALL \ + __einfo_uniqify ( EINFO_EPLATFORM, 0x01, \ + "External PXE API error" ) +#define EPXECALL( status ) EPLATFORM ( EINFO_EPXECALL, status ) + +/** + * @defgroup undi_hacks UNDI workarounds + * @{ + */ + +/** Work around Etherboot 5.4 bugs */ +#define UNDI_HACK_EB54 0x0001 + +/** @} */ + +/** Maximum number of times to retry PXENV_UNDI_INITIALIZE */ +#define UNDI_INITIALIZE_RETRY_MAX 10 + +/** Delay between retries of PXENV_UNDI_INITIALIZE */ +#define UNDI_INITIALIZE_RETRY_DELAY_MS 200 + +/** Maximum number of received packets per poll */ +#define UNDI_RX_QUOTA 4 + +/** Alignment of received frame payload */ +#define UNDI_RX_ALIGN 16 + +static void undinet_close ( struct net_device *netdev ); + +/** + * UNDI parameter block + * + * Used as the parameter block for all UNDI API calls. Resides in + * base memory. + */ +static union u_PXENV_ANY __bss16 ( undinet_params ); +#define undinet_params __use_data16 ( undinet_params ) + +/** + * UNDI entry point + * + * Used as the indirection vector for all UNDI API calls. Resides in + * base memory. 
+ */ +SEGOFF16_t __bss16 ( undinet_entry_point ); +#define undinet_entry_point __use_data16 ( undinet_entry_point ) + +/** IRQ profiler */ +static struct profiler undinet_irq_profiler __profiler = + { .name = "undinet.irq" }; + +/** Receive profiler */ +static struct profiler undinet_rx_profiler __profiler = + { .name = "undinet.rx" }; + +/** A PXE API call breakdown profiler */ +struct undinet_profiler { + /** Total time spent performing REAL_CALL() */ + struct profiler total; + /** Time spent transitioning to real mode */ + struct profiler p2r; + /** Time spent in external code */ + struct profiler ext; + /** Time spent transitioning back to protected mode */ + struct profiler r2p; +}; + +/** PXENV_UNDI_TRANSMIT profiler */ +static struct undinet_profiler undinet_tx_profiler __profiler = { + { .name = "undinet.tx" }, + { .name = "undinet.tx_p2r" }, + { .name = "undinet.tx_ext" }, + { .name = "undinet.tx_r2p" }, +}; + +/** PXENV_UNDI_ISR profiler + * + * Note that this profiler will not see calls to + * PXENV_UNDI_ISR_IN_START, which are handled by the UNDI ISR and do + * not go via undinet_call(). + */ +static struct undinet_profiler undinet_isr_profiler __profiler = { + { .name = "undinet.isr" }, + { .name = "undinet.isr_p2r" }, + { .name = "undinet.isr_ext" }, + { .name = "undinet.isr_r2p" }, +}; + +/** PXE unknown API call profiler + * + * This profiler can be used to measure the overhead of a dummy PXE + * API call. 
+ */ +static struct undinet_profiler undinet_unknown_profiler __profiler = { + { .name = "undinet.unknown" }, + { .name = "undinet.unknown_p2r" }, + { .name = "undinet.unknown_ext" }, + { .name = "undinet.unknown_r2p" }, +}; + +/** Miscellaneous PXE API call profiler */ +static struct undinet_profiler undinet_misc_profiler __profiler = { + { .name = "undinet.misc" }, + { .name = "undinet.misc_p2r" }, + { .name = "undinet.misc_ext" }, + { .name = "undinet.misc_r2p" }, +}; + +/***************************************************************************** + * + * UNDI API call + * + ***************************************************************************** + */ + +/** + * Name PXE API call + * + * @v function API call number + * @ret name API call name + */ +static inline __attribute__ (( always_inline )) const char * +undinet_function_name ( unsigned int function ) { + switch ( function ) { + case PXENV_START_UNDI: + return "PXENV_START_UNDI"; + case PXENV_STOP_UNDI: + return "PXENV_STOP_UNDI"; + case PXENV_UNDI_STARTUP: + return "PXENV_UNDI_STARTUP"; + case PXENV_UNDI_CLEANUP: + return "PXENV_UNDI_CLEANUP"; + case PXENV_UNDI_INITIALIZE: + return "PXENV_UNDI_INITIALIZE"; + case PXENV_UNDI_RESET_ADAPTER: + return "PXENV_UNDI_RESET_ADAPTER"; + case PXENV_UNDI_SHUTDOWN: + return "PXENV_UNDI_SHUTDOWN"; + case PXENV_UNDI_OPEN: + return "PXENV_UNDI_OPEN"; + case PXENV_UNDI_CLOSE: + return "PXENV_UNDI_CLOSE"; + case PXENV_UNDI_TRANSMIT: + return "PXENV_UNDI_TRANSMIT"; + case PXENV_UNDI_SET_MCAST_ADDRESS: + return "PXENV_UNDI_SET_MCAST_ADDRESS"; + case PXENV_UNDI_SET_STATION_ADDRESS: + return "PXENV_UNDI_SET_STATION_ADDRESS"; + case PXENV_UNDI_SET_PACKET_FILTER: + return "PXENV_UNDI_SET_PACKET_FILTER"; + case PXENV_UNDI_GET_INFORMATION: + return "PXENV_UNDI_GET_INFORMATION"; + case PXENV_UNDI_GET_STATISTICS: + return "PXENV_UNDI_GET_STATISTICS"; + case PXENV_UNDI_CLEAR_STATISTICS: + return "PXENV_UNDI_CLEAR_STATISTICS"; + case PXENV_UNDI_INITIATE_DIAGS: + return 
"PXENV_UNDI_INITIATE_DIAGS"; + case PXENV_UNDI_FORCE_INTERRUPT: + return "PXENV_UNDI_FORCE_INTERRUPT"; + case PXENV_UNDI_GET_MCAST_ADDRESS: + return "PXENV_UNDI_GET_MCAST_ADDRESS"; + case PXENV_UNDI_GET_NIC_TYPE: + return "PXENV_UNDI_GET_NIC_TYPE"; + case PXENV_UNDI_GET_IFACE_INFO: + return "PXENV_UNDI_GET_IFACE_INFO"; + /* + * Duplicate case value; this is a bug in the PXE specification. + * + * case PXENV_UNDI_GET_STATE: + * return "PXENV_UNDI_GET_STATE"; + */ + case PXENV_UNDI_ISR: + return "PXENV_UNDI_ISR"; + case PXENV_GET_CACHED_INFO: + return "PXENV_GET_CACHED_INFO"; + default: + return "UNKNOWN API CALL"; + } +} + +/** + * Determine applicable profiler pair (for debugging) + * + * @v function API call number + * @ret profiler Profiler + */ +static struct undinet_profiler * undinet_profiler ( unsigned int function ) { + + /* Determine applicable profiler */ + switch ( function ) { + case PXENV_UNDI_TRANSMIT: + return &undinet_tx_profiler; + case PXENV_UNDI_ISR: + return &undinet_isr_profiler; + case PXENV_UNKNOWN: + return &undinet_unknown_profiler; + default: + return &undinet_misc_profiler; + } +} + +/** + * Issue UNDI API call + * + * @v undinic UNDI NIC + * @v function API call number + * @v params PXE parameter block + * @v params_len Length of PXE parameter block + * @ret rc Return status code + */ +static int undinet_call ( struct undi_nic *undinic, unsigned int function, + void *params, size_t params_len ) { + struct undinet_profiler *profiler = undinet_profiler ( function ); + PXENV_EXIT_t exit; + uint32_t before; + uint32_t started; + uint32_t stopped; + uint32_t after; + int discard_D; + int rc; + + /* Copy parameter block and entry point */ + assert ( params_len <= sizeof ( undinet_params ) ); + memcpy ( &undinet_params, params, params_len ); + + /* Call real-mode entry point. This calling convention will + * work with both the !PXE and the PXENV+ entry points. 
+ */ + profile_start ( &profiler->total ); + __asm__ __volatile__ ( REAL_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + "rdtsc\n\t" + "pushl %%eax\n\t" + "pushw %%es\n\t" + "pushw %%di\n\t" + "pushw %%bx\n\t" + "lcall *undinet_entry_point\n\t" + "movw %%ax, %%bx\n\t" + "rdtsc\n\t" + "addw $6, %%sp\n\t" + "popl %%edx\n\t" + "popl %%ebp\n\t" /* gcc bug */ ) + : "=a" ( stopped ), "=d" ( started ), + "=b" ( exit ), "=D" ( discard_D ) + : "b" ( function ), + "D" ( __from_data16 ( &undinet_params ) ) + : "ecx", "esi" ); + profile_stop ( &profiler->total ); + before = profile_started ( &profiler->total ); + after = profile_stopped ( &profiler->total ); + profile_start_at ( &profiler->p2r, before ); + profile_stop_at ( &profiler->p2r, started ); + profile_start_at ( &profiler->ext, started ); + profile_stop_at ( &profiler->ext, stopped ); + profile_start_at ( &profiler->r2p, stopped ); + profile_stop_at ( &profiler->r2p, after ); + + /* Determine return status code based on PXENV_EXIT and + * PXENV_STATUS + */ + rc = ( ( exit == PXENV_EXIT_SUCCESS ) ? + 0 : -EPXECALL ( undinet_params.Status ) ); + + /* If anything goes wrong, print as much debug information as + * it's possible to give. 
+ */ + if ( rc != 0 ) { + SEGOFF16_t rm_params = { + .segment = rm_ds, + .offset = __from_data16 ( &undinet_params ), + }; + + DBGC ( undinic, "UNDINIC %p %s failed: %s\n", undinic, + undinet_function_name ( function ), strerror ( rc ) ); + DBGC ( undinic, "UNDINIC %p parameters at %04x:%04x length " + "%#02zx, entry point at %04x:%04x\n", undinic, + rm_params.segment, rm_params.offset, params_len, + undinet_entry_point.segment, + undinet_entry_point.offset ); + DBGC ( undinic, "UNDINIC %p parameters provided:\n", undinic ); + DBGC_HDA ( undinic, rm_params, params, params_len ); + DBGC ( undinic, "UNDINIC %p parameters returned:\n", undinic ); + DBGC_HDA ( undinic, rm_params, &undinet_params, params_len ); + } + + /* Copy parameter block back */ + memcpy ( params, &undinet_params, params_len ); + + return rc; +} + +/***************************************************************************** + * + * UNDI interrupt service routine + * + ***************************************************************************** + */ + +/** + * UNDI interrupt service routine + * + * The UNDI ISR increments a counter (@c trigger_count) and exits. 
+ */ +extern void undiisr ( void ); + +/** IRQ number */ +uint8_t __data16 ( undiisr_irq ); +#define undiisr_irq __use_data16 ( undiisr_irq ) + +/** IRQ chain vector */ +struct segoff __data16 ( undiisr_next_handler ); +#define undiisr_next_handler __use_data16 ( undiisr_next_handler ) + +/** IRQ trigger count */ +volatile uint8_t __data16 ( undiisr_trigger_count ) = 0; +#define undiisr_trigger_count __use_data16 ( undiisr_trigger_count ) + +/** Last observed trigger count */ +static unsigned int last_trigger_count = 0; + +/** + * Hook UNDI interrupt service routine + * + * @v irq IRQ number + */ +static void undinet_hook_isr ( unsigned int irq ) { + + assert ( irq <= IRQ_MAX ); + assert ( undiisr_irq == 0 ); + + undiisr_irq = irq; + hook_bios_interrupt ( IRQ_INT ( irq ), ( ( intptr_t ) undiisr ), + &undiisr_next_handler ); +} + +/** + * Unhook UNDI interrupt service routine + * + * @v irq IRQ number + */ +static void undinet_unhook_isr ( unsigned int irq ) { + + assert ( irq <= IRQ_MAX ); + + unhook_bios_interrupt ( IRQ_INT ( irq ), ( ( intptr_t ) undiisr ), + &undiisr_next_handler ); + undiisr_irq = 0; +} + +/** + * Test to see if UNDI ISR has been triggered + * + * @ret triggered ISR has been triggered since last check + */ +static int undinet_isr_triggered ( void ) { + unsigned int this_trigger_count; + + /* Read trigger_count. 
Do this only once; it is volatile */ + this_trigger_count = undiisr_trigger_count; + + if ( this_trigger_count == last_trigger_count ) { + /* Not triggered */ + return 0; + } else { + /* Triggered */ + last_trigger_count = this_trigger_count; + return 1; + } +} + +/***************************************************************************** + * + * UNDI network device interface + * + ***************************************************************************** + */ + +/** UNDI transmit buffer descriptor */ +static struct s_PXENV_UNDI_TBD __data16 ( undinet_tbd ); +#define undinet_tbd __use_data16 ( undinet_tbd ) + +/** UNDI transmit destination address */ +static uint8_t __data16_array ( undinet_destaddr, [ETH_ALEN] ); +#define undinet_destaddr __use_data16 ( undinet_destaddr ) + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int undinet_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct undi_nic *undinic = netdev->priv; + struct s_PXENV_UNDI_TRANSMIT undi_transmit; + const void *ll_dest; + const void *ll_source; + uint16_t net_proto; + unsigned int flags; + uint8_t protocol; + size_t len; + int rc; + + /* Technically, we ought to make sure that the previous + * transmission has completed before we re-use the buffer. + * However, many PXE stacks (including at least some Intel PXE + * stacks and Etherboot 5.4) fail to generate TX completions. + * In practice this won't be a problem, since our TX datapath + * has a very low packet volume and we can get away with + * assuming that a TX will be complete by the time we want to + * transmit the next packet. + */ + + /* Some PXE stacks are unable to cope with P_UNKNOWN, and will + * always try to prepend a link-layer header. Work around + * these stacks by stripping the existing link-layer header + * and allowing the PXE stack to (re)construct the link-layer + * header itself. 
+ */ + if ( ( rc = eth_pull ( netdev, iobuf, &ll_dest, &ll_source, + &net_proto, &flags ) ) != 0 ) { + DBGC ( undinic, "UNDINIC %p could not strip Ethernet header: " + "%s\n", undinic, strerror ( rc ) ); + return rc; + } + memcpy ( undinet_destaddr, ll_dest, sizeof ( undinet_destaddr ) ); + switch ( net_proto ) { + case htons ( ETH_P_IP ) : + protocol = P_IP; + break; + case htons ( ETH_P_ARP ) : + protocol = P_ARP; + break; + case htons ( ETH_P_RARP ) : + protocol = P_RARP; + break; + default: + /* Unknown protocol; restore the original link-layer header */ + iob_push ( iobuf, sizeof ( struct ethhdr ) ); + protocol = P_UNKNOWN; + break; + } + + /* Copy packet to UNDI I/O buffer */ + len = iob_len ( iobuf ); + if ( len > sizeof ( basemem_packet ) ) + len = sizeof ( basemem_packet ); + memcpy ( &basemem_packet, iobuf->data, len ); + + /* Create PXENV_UNDI_TRANSMIT data structure */ + memset ( &undi_transmit, 0, sizeof ( undi_transmit ) ); + undi_transmit.Protocol = protocol; + undi_transmit.XmitFlag = ( ( flags & LL_BROADCAST ) ? + XMT_BROADCAST : XMT_DESTADDR ); + undi_transmit.DestAddr.segment = rm_ds; + undi_transmit.DestAddr.offset = __from_data16 ( &undinet_destaddr ); + undi_transmit.TBD.segment = rm_ds; + undi_transmit.TBD.offset = __from_data16 ( &undinet_tbd ); + + /* Create PXENV_UNDI_TBD data structure */ + undinet_tbd.ImmedLength = len; + undinet_tbd.Xmit.segment = rm_ds; + undinet_tbd.Xmit.offset = __from_data16 ( basemem_packet ); + + /* Issue PXE API call */ + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_TRANSMIT, &undi_transmit, + sizeof ( undi_transmit ) ) ) != 0 ) + goto done; + + /* Free I/O buffer */ + netdev_tx_complete ( netdev, iobuf ); + done: + return rc; +} + +/** + * Poll for received packets + * + * @v netdev Network device + * + * Fun, fun, fun. UNDI drivers don't use polling; they use + * interrupts. We therefore cheat and pretend that an interrupt has + * occurred every time undinet_poll() is called. 
This isn't too much + * of a hack; PCI devices share IRQs and so the first thing that a + * proper ISR should do is call PXENV_UNDI_ISR to determine whether or + * not the UNDI NIC generated the interrupt; there is no harm done by + * spurious calls to PXENV_UNDI_ISR. Similarly, we wouldn't be + * handling them any more rapidly than the usual rate of + * undinet_poll() being called even if we did implement a full ISR. + * So it should work. Ha! + * + * Addendum (21/10/03). Some cards don't play nicely with this trick, + * so instead of doing it the easy way we have to go to all the hassle + * of installing a genuine interrupt service routine and dealing with + * the wonderful 8259 Programmable Interrupt Controller. Joy. + * + * Addendum (10/07/07). When doing things such as iSCSI boot, in + * which we have to co-operate with a running OS, we can't get away + * with the "ISR-just-increments-a-counter-and-returns" trick at all, + * because it involves tying up the PIC for far too long, and other + * interrupt-dependent components (e.g. local disks) start breaking. + * We therefore implement a "proper" ISR which calls PXENV_UNDI_ISR + * from within interrupt context in order to deassert the device + * interrupt, and sends EOI if applicable. + */ +static void undinet_poll ( struct net_device *netdev ) { + struct undi_nic *undinic = netdev->priv; + struct s_PXENV_UNDI_ISR undi_isr; + struct io_buffer *iobuf = NULL; + unsigned int quota = UNDI_RX_QUOTA; + size_t len; + size_t reserve_len; + size_t frag_len; + size_t max_frag_len; + int rc; + + if ( ! undinic->isr_processing ) { + /* Allow interrupt to occur. Do this even if + * interrupts are not known to be supported, since + * some cards erroneously report that they do not + * support interrupts. + */ + if ( ! 
undinet_isr_triggered() ) { + /* Allow interrupt to occur */ + profile_start ( &undinet_irq_profiler ); + __asm__ __volatile__ ( "sti\n\t" + "nop\n\t" + "nop\n\t" + "cli\n\t" ); + profile_stop ( &undinet_irq_profiler ); + + /* If interrupts are known to be supported, + * then do nothing on this poll; wait for the + * interrupt to be triggered. + */ + if ( undinic->irq_supported ) + return; + } + + /* Start ISR processing */ + undinic->isr_processing = 1; + undi_isr.FuncFlag = PXENV_UNDI_ISR_IN_PROCESS; + } else { + /* Continue ISR processing */ + undi_isr.FuncFlag = PXENV_UNDI_ISR_IN_GET_NEXT; + } + + /* Run through the ISR loop */ + while ( quota ) { + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_ISR, &undi_isr, + sizeof ( undi_isr ) ) ) != 0 ) { + netdev_rx_err ( netdev, NULL, rc ); + break; + } + switch ( undi_isr.FuncFlag ) { + case PXENV_UNDI_ISR_OUT_TRANSMIT: + /* We don't care about transmit completions */ + break; + case PXENV_UNDI_ISR_OUT_RECEIVE: + /* Packet fragment received */ + profile_start ( &undinet_rx_profiler ); + len = undi_isr.FrameLength; + frag_len = undi_isr.BufferLength; + reserve_len = ( -undi_isr.FrameHeaderLength & + ( UNDI_RX_ALIGN - 1 ) ); + if ( ( len == 0 ) || ( len < frag_len ) ) { + /* Don't laugh. VMWare does it. */ + DBGC ( undinic, "UNDINIC %p reported insane " + "fragment (%zd of %zd bytes)\n", + undinic, frag_len, len ); + netdev_rx_err ( netdev, NULL, -EINVAL ); + break; + } + if ( ! iobuf ) { + iobuf = alloc_iob ( reserve_len + len ); + if ( ! 
iobuf ) { + DBGC ( undinic, "UNDINIC %p could not " + "allocate %zd bytes for RX " + "buffer\n", undinic, len ); + /* Fragment will be dropped */ + netdev_rx_err ( netdev, NULL, -ENOMEM ); + goto done; + } + iob_reserve ( iobuf, reserve_len ); + } + max_frag_len = iob_tailroom ( iobuf ); + if ( frag_len > max_frag_len ) { + DBGC ( undinic, "UNDINIC %p fragment too big " + "(%zd+%zd does not fit into %zd)\n", + undinic, iob_len ( iobuf ), frag_len, + ( iob_len ( iobuf ) + max_frag_len ) ); + frag_len = max_frag_len; + } + copy_from_real ( iob_put ( iobuf, frag_len ), + undi_isr.Frame.segment, + undi_isr.Frame.offset, frag_len ); + if ( iob_len ( iobuf ) == len ) { + /* Whole packet received; deliver it */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + quota--; + /* Etherboot 5.4 fails to return all packets + * under mild load; pretend it retriggered. + */ + if ( undinic->hacks & UNDI_HACK_EB54 ) + --last_trigger_count; + } + profile_stop ( &undinet_rx_profiler ); + break; + case PXENV_UNDI_ISR_OUT_DONE: + /* Processing complete */ + undinic->isr_processing = 0; + goto done; + default: + /* Should never happen. VMWare does it routinely. 
*/ + DBGC ( undinic, "UNDINIC %p ISR returned invalid " + "FuncFlag %04x\n", undinic, undi_isr.FuncFlag ); + undinic->isr_processing = 0; + goto done; + } + undi_isr.FuncFlag = PXENV_UNDI_ISR_IN_GET_NEXT; + } + + done: + if ( iobuf ) { + DBGC ( undinic, "UNDINIC %p returned incomplete packet " + "(%zd of %zd)\n", undinic, iob_len ( iobuf ), + ( iob_len ( iobuf ) + iob_tailroom ( iobuf ) ) ); + netdev_rx_err ( netdev, iobuf, -EINVAL ); + } +} + +/** + * Open NIC + * + * @v netdev Net device + * @ret rc Return status code + */ +static int undinet_open ( struct net_device *netdev ) { + struct undi_nic *undinic = netdev->priv; + struct s_PXENV_UNDI_SET_STATION_ADDRESS undi_set_address; + struct s_PXENV_UNDI_OPEN undi_open; + int rc; + + /* Hook interrupt service routine and enable interrupt if applicable */ + if ( undinic->irq ) { + undinet_hook_isr ( undinic->irq ); + enable_irq ( undinic->irq ); + send_eoi ( undinic->irq ); + } + + /* Set station address. Required for some PXE stacks; will + * spuriously fail on others. Ignore failures. We only ever + * use it to set the MAC address to the card's permanent value + * anyway. + */ + memcpy ( undi_set_address.StationAddress, netdev->ll_addr, + sizeof ( undi_set_address.StationAddress ) ); + undinet_call ( undinic, PXENV_UNDI_SET_STATION_ADDRESS, + &undi_set_address, sizeof ( undi_set_address ) ); + + /* Open NIC. We ask for promiscuous operation, since it's the + * only way to ask for all multicast addresses. On any + * switched network, it shouldn't really make a difference to + * performance. 
+ */ + memset ( &undi_open, 0, sizeof ( undi_open ) ); + undi_open.PktFilter = ( FLTR_DIRECTED | FLTR_BRDCST | FLTR_PRMSCS ); + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_OPEN, &undi_open, + sizeof ( undi_open ) ) ) != 0 ) + goto err; + + DBGC ( undinic, "UNDINIC %p opened\n", undinic ); + return 0; + + err: + undinet_close ( netdev ); + return rc; +} + +/** + * Close NIC + * + * @v netdev Net device + */ +static void undinet_close ( struct net_device *netdev ) { + struct undi_nic *undinic = netdev->priv; + struct s_PXENV_UNDI_ISR undi_isr; + struct s_PXENV_UNDI_CLOSE undi_close; + int rc; + + /* Ensure ISR has exited cleanly */ + while ( undinic->isr_processing ) { + undi_isr.FuncFlag = PXENV_UNDI_ISR_IN_GET_NEXT; + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_ISR, &undi_isr, + sizeof ( undi_isr ) ) ) != 0 ) + break; + switch ( undi_isr.FuncFlag ) { + case PXENV_UNDI_ISR_OUT_TRANSMIT: + case PXENV_UNDI_ISR_OUT_RECEIVE: + /* Continue draining */ + break; + default: + /* Stop processing */ + undinic->isr_processing = 0; + break; + } + } + + /* Close NIC */ + undinet_call ( undinic, PXENV_UNDI_CLOSE, &undi_close, + sizeof ( undi_close ) ); + + /* Disable interrupt and unhook ISR if applicable */ + if ( undinic->irq ) { + disable_irq ( undinic->irq ); + undinet_unhook_isr ( undinic->irq ); + } + + DBGC ( undinic, "UNDINIC %p closed\n", undinic ); +} + +/** + * Enable/disable interrupts + * + * @v netdev Net device + * @v enable Interrupts should be enabled + */ +static void undinet_irq ( struct net_device *netdev, int enable ) { + struct undi_nic *undinic = netdev->priv; + + /* Cannot support interrupts yet */ + DBGC ( undinic, "UNDINIC %p cannot %s interrupts\n", + undinic, ( enable ? 
"enable" : "disable" ) ); +} + +/** UNDI network device operations */ +static struct net_device_operations undinet_operations = { + .open = undinet_open, + .close = undinet_close, + .transmit = undinet_transmit, + .poll = undinet_poll, + .irq = undinet_irq, +}; + +/** A device with broken support for generating interrupts */ +struct undinet_irq_broken { + /** PCI vendor ID */ + uint16_t pci_vendor; + /** PCI device ID */ + uint16_t pci_device; + /** PCI subsystem vendor ID */ + uint16_t pci_subsys_vendor; + /** PCI subsystem ID */ + uint16_t pci_subsys; +}; + +/** + * List of devices with broken support for generating interrupts + * + * Some PXE stacks are known to claim that IRQs are supported, but + * then never generate interrupts. No satisfactory solution has been + * found to this problem; the workaround is to add the PCI vendor and + * device IDs to this list. This is something of a hack, since it + * will generate false positives for identical devices with a working + * PXE stack (e.g. those that have been reflashed with iPXE), but it's + * an improvement on the current situation. 
+ */ +static const struct undinet_irq_broken undinet_irq_broken_list[] = { + /* HP XX70x laptops */ + { 0x8086, 0x1502, PCI_ANY_ID, PCI_ANY_ID }, + { 0x8086, 0x1503, PCI_ANY_ID, PCI_ANY_ID }, + /* HP 745 G3 laptop */ + { 0x14e4, 0x1687, PCI_ANY_ID, PCI_ANY_ID }, +}; + +/** + * Check for devices with broken support for generating interrupts + * + * @v desc Device description + * @ret irq_is_broken Interrupt support is broken; no interrupts are generated + */ +static int undinet_irq_is_broken ( struct device_description *desc ) { + const struct undinet_irq_broken *broken; + struct pci_device pci; + uint16_t subsys_vendor; + uint16_t subsys; + unsigned int i; + + /* Ignore non-PCI devices */ + if ( desc->bus_type != BUS_TYPE_PCI ) + return 0; + + /* Read subsystem IDs */ + pci_init ( &pci, desc->location ); + pci_read_config_word ( &pci, PCI_SUBSYSTEM_VENDOR_ID, &subsys_vendor ); + pci_read_config_word ( &pci, PCI_SUBSYSTEM_ID, &subsys ); + + /* Check for a match against the broken device list */ + for ( i = 0 ; i < ( sizeof ( undinet_irq_broken_list ) / + sizeof ( undinet_irq_broken_list[0] ) ) ; i++ ) { + broken = &undinet_irq_broken_list[i]; + if ( ( broken->pci_vendor == desc->vendor ) && + ( broken->pci_device == desc->device ) && + ( ( broken->pci_subsys_vendor == subsys_vendor ) || + ( broken->pci_subsys_vendor == PCI_ANY_ID ) ) && + ( ( broken->pci_subsys == subsys ) || + ( broken->pci_subsys == PCI_ANY_ID ) ) ) { + return 1; + } + } + return 0; +} + +/** + * Probe UNDI device + * + * @v undi UNDI device + * @v dev Underlying generic device + * @ret rc Return status code + */ +int undinet_probe ( struct undi_device *undi, struct device *dev ) { + struct net_device *netdev; + struct undi_nic *undinic; + struct s_PXENV_START_UNDI start_undi; + struct s_PXENV_UNDI_STARTUP undi_startup; + struct s_PXENV_UNDI_INITIALIZE undi_init; + struct s_PXENV_UNDI_GET_INFORMATION undi_info; + struct s_PXENV_UNDI_GET_IFACE_INFO undi_iface; + struct s_PXENV_UNDI_SHUTDOWN 
undi_shutdown; + struct s_PXENV_UNDI_CLEANUP undi_cleanup; + struct s_PXENV_STOP_UNDI stop_undi; + unsigned int retry; + int rc; + + /* Allocate net device */ + netdev = alloc_etherdev ( sizeof ( *undinic ) ); + if ( ! netdev ) + return -ENOMEM; + netdev_init ( netdev, &undinet_operations ); + undinic = netdev->priv; + undi_set_drvdata ( undi, netdev ); + netdev->dev = dev; + memset ( undinic, 0, sizeof ( *undinic ) ); + undinet_entry_point = undi->entry; + DBGC ( undinic, "UNDINIC %p using UNDI %p\n", undinic, undi ); + + /* Hook in UNDI stack */ + if ( ! ( undi->flags & UNDI_FL_STARTED ) ) { + memset ( &start_undi, 0, sizeof ( start_undi ) ); + start_undi.AX = undi->pci_busdevfn; + start_undi.BX = undi->isapnp_csn; + start_undi.DX = undi->isapnp_read_port; + start_undi.ES = BIOS_SEG; + start_undi.DI = find_pnp_bios(); + if ( ( rc = undinet_call ( undinic, PXENV_START_UNDI, + &start_undi, + sizeof ( start_undi ) ) ) != 0 ) + goto err_start_undi; + } + undi->flags |= UNDI_FL_STARTED; + + /* Bring up UNDI stack */ + if ( ! ( undi->flags & UNDI_FL_INITIALIZED ) ) { + memset ( &undi_startup, 0, sizeof ( undi_startup ) ); + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_STARTUP, + &undi_startup, + sizeof ( undi_startup ) ) ) != 0 ) + goto err_undi_startup; + /* On some PXE stacks, PXENV_UNDI_INITIALIZE may fail + * due to a transient condition (e.g. media test + * failing because the link has only just come out of + * reset). We may therefore need to retry this call + * several times. 
+ */ + for ( retry = 0 ; ; ) { + memset ( &undi_init, 0, sizeof ( undi_init ) ); + if ( ( rc = undinet_call ( undinic, + PXENV_UNDI_INITIALIZE, + &undi_init, + sizeof ( undi_init ) ) ) ==0) + break; + if ( ++retry > UNDI_INITIALIZE_RETRY_MAX ) + goto err_undi_initialize; + DBGC ( undinic, "UNDINIC %p retrying " + "PXENV_UNDI_INITIALIZE (retry %d)\n", + undinic, retry ); + /* Delay to allow link to settle if necessary */ + mdelay ( UNDI_INITIALIZE_RETRY_DELAY_MS ); + } + } + undi->flags |= UNDI_FL_INITIALIZED; + + /* Get device information */ + memset ( &undi_info, 0, sizeof ( undi_info ) ); + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_GET_INFORMATION, + &undi_info, sizeof ( undi_info ) ) ) != 0 ) + goto err_undi_get_information; + memcpy ( netdev->hw_addr, undi_info.PermNodeAddress, ETH_ALEN ); + memcpy ( netdev->ll_addr, undi_info.CurrentNodeAddress, ETH_ALEN ); + undinic->irq = undi_info.IntNumber; + if ( undinic->irq > IRQ_MAX ) { + DBGC ( undinic, "UNDINIC %p ignoring invalid IRQ %d\n", + undinic, undinic->irq ); + undinic->irq = 0; + } + DBGC ( undinic, "UNDINIC %p has MAC address %s and IRQ %d\n", + undinic, eth_ntoa ( netdev->hw_addr ), undinic->irq ); + + /* Get interface information */ + memset ( &undi_iface, 0, sizeof ( undi_iface ) ); + if ( ( rc = undinet_call ( undinic, PXENV_UNDI_GET_IFACE_INFO, + &undi_iface, sizeof ( undi_iface ) ) ) != 0 ) + goto err_undi_get_iface_info; + DBGC ( undinic, "UNDINIC %p has type %s, speed %d, flags %08x\n", + undinic, undi_iface.IfaceType, undi_iface.LinkSpeed, + undi_iface.ServiceFlags ); + if ( ( undi_iface.ServiceFlags & SUPPORTED_IRQ ) && + ( undinic->irq != 0 ) ) { + undinic->irq_supported = 1; + } + DBGC ( undinic, "UNDINIC %p using %s mode\n", undinic, + ( undinic->irq_supported ? 
"interrupt" : "polling" ) ); + if ( strncmp ( ( ( char * ) undi_iface.IfaceType ), "Etherboot", + sizeof ( undi_iface.IfaceType ) ) == 0 ) { + DBGC ( undinic, "UNDINIC %p Etherboot 5.4 workaround enabled\n", + undinic ); + undinic->hacks |= UNDI_HACK_EB54; + } + if ( undinet_irq_is_broken ( &dev->desc ) ) { + DBGC ( undinic, "UNDINIC %p forcing polling mode due to " + "broken interrupts\n", undinic ); + undinic->irq_supported = 0; + } + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + /* Mark as link up; we don't handle link state */ + netdev_link_up ( netdev ); + + DBGC ( undinic, "UNDINIC %p added\n", undinic ); + return 0; + + err_register: + err_undi_get_iface_info: + err_undi_get_information: + err_undi_initialize: + /* Shut down UNDI stack */ + memset ( &undi_shutdown, 0, sizeof ( undi_shutdown ) ); + undinet_call ( undinic, PXENV_UNDI_SHUTDOWN, &undi_shutdown, + sizeof ( undi_shutdown ) ); + memset ( &undi_cleanup, 0, sizeof ( undi_cleanup ) ); + undinet_call ( undinic, PXENV_UNDI_CLEANUP, &undi_cleanup, + sizeof ( undi_cleanup ) ); + undi->flags &= ~UNDI_FL_INITIALIZED; + err_undi_startup: + /* Unhook UNDI stack */ + memset ( &stop_undi, 0, sizeof ( stop_undi ) ); + undinet_call ( undinic, PXENV_STOP_UNDI, &stop_undi, + sizeof ( stop_undi ) ); + undi->flags &= ~UNDI_FL_STARTED; + err_start_undi: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + undi_set_drvdata ( undi, NULL ); + return rc; +} + +/** + * Remove UNDI device + * + * @v undi UNDI device + */ +void undinet_remove ( struct undi_device *undi ) { + struct net_device *netdev = undi_get_drvdata ( undi ); + struct undi_nic *undinic = netdev->priv; + struct s_PXENV_UNDI_SHUTDOWN undi_shutdown; + struct s_PXENV_UNDI_CLEANUP undi_cleanup; + struct s_PXENV_STOP_UNDI stop_undi; + + /* Unregister net device */ + unregister_netdev ( netdev ); + + /* If we are preparing for an OS boot, or if we cannot exit + * via the PXE stack, then shut down 
the PXE stack. + */ + if ( ! ( undi->flags & UNDI_FL_KEEP_ALL ) ) { + + /* Shut down UNDI stack */ + memset ( &undi_shutdown, 0, sizeof ( undi_shutdown ) ); + undinet_call ( undinic, PXENV_UNDI_SHUTDOWN, + &undi_shutdown, sizeof ( undi_shutdown ) ); + memset ( &undi_cleanup, 0, sizeof ( undi_cleanup ) ); + undinet_call ( undinic, PXENV_UNDI_CLEANUP, + &undi_cleanup, sizeof ( undi_cleanup ) ); + undi->flags &= ~UNDI_FL_INITIALIZED; + + /* Unhook UNDI stack */ + memset ( &stop_undi, 0, sizeof ( stop_undi ) ); + undinet_call ( undinic, PXENV_STOP_UNDI, &stop_undi, + sizeof ( stop_undi ) ); + undi->flags &= ~UNDI_FL_STARTED; + } + + /* Clear entry point */ + memset ( &undinet_entry_point, 0, sizeof ( undinet_entry_point ) ); + + /* Free network device */ + netdev_nullify ( netdev ); + netdev_put ( netdev ); + + DBGC ( undinic, "UNDINIC %p removed\n", undinic ); +} diff --git a/src/arch/x86/drivers/net/undionly.c b/src/arch/x86/drivers/net/undionly.c new file mode 100644 index 00000000..89837221 --- /dev/null +++ b/src/arch/x86/drivers/net/undionly.c @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * "Pure" UNDI driver + * + * This is the UNDI driver without explicit support for PCI or any + * other bus type. It is capable only of using the preloaded UNDI + * device. It must not be combined in an image with any other + * drivers. + * + * If you want a PXE-loadable image that contains only the UNDI + * driver, build "bin/undionly.kpxe". + * + * If you want any other image format, or any other drivers in + * addition to the UNDI driver, build e.g. "bin/undi.dsk". + */ + +/** UNDI root bus device */ +static struct device undibus_dev; + +/** + * Probe UNDI root bus + * + * @v rootdev UNDI bus root device + * + * Scans the UNDI bus for devices and registers all devices it can + * find. + */ +static int undibus_probe ( struct root_device *rootdev ) { + struct undi_device *undi = &preloaded_undi; + struct device *dev = &undibus_dev; + int rc; + + /* Check for a valie preloaded UNDI device */ + if ( ! 
undi->entry.segment ) { + DBG ( "No preloaded UNDI device found!\n" ); + return -ENODEV; + } + + /* Add to device hierarchy */ + dev->driver_name = "undionly"; + if ( undi->pci_busdevfn != UNDI_NO_PCI_BUSDEVFN ) { + dev->desc.bus_type = BUS_TYPE_PCI; + dev->desc.location = undi->pci_busdevfn; + dev->desc.vendor = undi->pci_vendor; + dev->desc.device = undi->pci_device; + snprintf ( dev->name, sizeof ( dev->name ), + "0000:%02x:%02x.%x", PCI_BUS ( undi->pci_busdevfn ), + PCI_SLOT ( undi->pci_busdevfn ), + PCI_FUNC ( undi->pci_busdevfn ) ); + } else if ( undi->isapnp_csn != UNDI_NO_ISAPNP_CSN ) { + dev->desc.bus_type = BUS_TYPE_ISAPNP; + snprintf ( dev->name, sizeof ( dev->name ), "ISAPNP" ); + } + dev->parent = &rootdev->dev; + list_add ( &dev->siblings, &rootdev->dev.children); + INIT_LIST_HEAD ( &dev->children ); + + /* Create network device */ + if ( ( rc = undinet_probe ( undi, dev ) ) != 0 ) + goto err; + + return 0; + + err: + list_del ( &dev->siblings ); + return rc; +} + +/** + * Remove UNDI root bus + * + * @v rootdev UNDI bus root device + */ +static void undibus_remove ( struct root_device *rootdev __unused ) { + struct undi_device *undi = &preloaded_undi; + struct device *dev = &undibus_dev; + + undinet_remove ( undi ); + list_del ( &dev->siblings ); +} + +/** UNDI bus root device driver */ +static struct root_driver undi_root_driver = { + .probe = undibus_probe, + .remove = undibus_remove, +}; + +/** UNDI bus root device */ +struct root_device undi_root_device __root_device = { + .dev = { .name = "UNDI" }, + .driver = &undi_root_driver, +}; + +/** + * Prepare for exit + * + * @v booting System is shutting down for OS boot + */ +static void undionly_shutdown ( int booting ) { + /* If we are shutting down to boot an OS, clear the "keep PXE + * stack" flag. 
+ */ + if ( booting ) + preloaded_undi.flags &= ~UNDI_FL_KEEP_ALL; +} + +struct startup_fn startup_undionly __startup_fn ( STARTUP_LATE ) = { + .name = "undionly", + .shutdown = undionly_shutdown, +}; diff --git a/src/arch/x86/drivers/net/undipreload.c b/src/arch/x86/drivers/net/undipreload.c new file mode 100644 index 00000000..fca77184 --- /dev/null +++ b/src/arch/x86/drivers/net/undipreload.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** @file + * + * Preloaded UNDI stack + * + */ + +/** + * Preloaded UNDI device + * + * This is the UNDI device that was present when Etherboot started + * execution (i.e. when loading a .kpxe image). The first driver to + * claim this device must zero out this data structure. 
+ */ +struct undi_device __data16 ( preloaded_undi ); diff --git a/src/arch/x86/drivers/net/undirom.c b/src/arch/x86/drivers/net/undirom.c new file mode 100644 index 00000000..257b1241 --- /dev/null +++ b/src/arch/x86/drivers/net/undirom.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * UNDI expansion ROMs + * + */ + +/** List of all UNDI ROMs */ +static LIST_HEAD ( undiroms ); + +/** + * Parse PXE ROM ID structure + * + * @v undirom UNDI ROM + * @v pxeromid Offset within ROM to PXE ROM ID structure + * @ret rc Return status code + */ +static int undirom_parse_pxeromid ( struct undi_rom *undirom, + unsigned int pxeromid ) { + struct undi_rom_id undi_rom_id; + unsigned int undiloader; + + DBGC ( undirom, "UNDIROM %p has PXE ROM ID at %04x:%04x\n", undirom, + undirom->rom_segment, pxeromid ); + + /* Read PXE ROM ID structure and verify */ + copy_from_real ( &undi_rom_id, undirom->rom_segment, pxeromid, + sizeof ( undi_rom_id ) ); + if ( undi_rom_id.Signature != UNDI_ROM_ID_SIGNATURE ) { + DBGC ( undirom, "UNDIROM %p has bad PXE ROM ID signature " + "%08x\n", undirom, undi_rom_id.Signature ); + return -EINVAL; + } + + /* Check for UNDI loader */ + undiloader = undi_rom_id.UNDILoader; + if ( ! 
undiloader ) { + DBGC ( undirom, "UNDIROM %p has no UNDI loader\n", undirom ); + return -EINVAL; + } + + /* Fill in UNDI ROM loader fields */ + undirom->loader_entry.segment = undirom->rom_segment; + undirom->loader_entry.offset = undiloader; + undirom->code_size = undi_rom_id.CodeSize; + undirom->data_size = undi_rom_id.DataSize; + + DBGC ( undirom, "UNDIROM %p has UNDI loader at %04x:%04x " + "(code %04zx data %04zx)\n", undirom, + undirom->loader_entry.segment, undirom->loader_entry.offset, + undirom->code_size, undirom->data_size ); + return 0; +} + +/** + * Parse PCI expansion header + * + * @v undirom UNDI ROM + * @v pcirheader Offset within ROM to PCI expansion header + */ +static int undirom_parse_pcirheader ( struct undi_rom *undirom, + unsigned int pcirheader ) { + struct pcir_header pcir_header; + + DBGC ( undirom, "UNDIROM %p has PCI expansion header at %04x:%04x\n", + undirom, undirom->rom_segment, pcirheader ); + + /* Read PCI expansion header and verify */ + copy_from_real ( &pcir_header, undirom->rom_segment, pcirheader, + sizeof ( pcir_header ) ); + if ( pcir_header.signature != PCIR_SIGNATURE ) { + DBGC ( undirom, "UNDIROM %p has bad PCI expansion header " + "signature %08x\n", undirom, pcir_header.signature ); + return -EINVAL; + } + + /* Fill in UNDI ROM PCI device fields */ + undirom->bus_type = PCI_NIC; + undirom->bus_id.pci.vendor_id = pcir_header.vendor_id; + undirom->bus_id.pci.device_id = pcir_header.device_id; + + DBGC ( undirom, "UNDIROM %p is for PCI devices %04x:%04x\n", undirom, + undirom->bus_id.pci.vendor_id, undirom->bus_id.pci.device_id ); + return 0; + +} + +/** + * Probe UNDI ROM + * + * @v rom_segment ROM segment address + * @ret rc Return status code + */ +static int undirom_probe ( unsigned int rom_segment ) { + struct undi_rom *undirom = NULL; + struct undi_rom_header romheader; + size_t rom_len; + unsigned int pxeromid; + unsigned int pcirheader; + int rc; + + /* Read expansion ROM header and verify */ + copy_from_real ( 
&romheader, rom_segment, 0, sizeof ( romheader ) ); + if ( romheader.Signature != ROM_SIGNATURE ) { + rc = -EINVAL; + goto err; + } + rom_len = ( romheader.ROMLength * 512 ); + + /* Allocate memory for UNDI ROM */ + undirom = zalloc ( sizeof ( *undirom ) ); + if ( ! undirom ) { + DBG ( "Could not allocate UNDI ROM structure\n" ); + rc = -ENOMEM; + goto err; + } + DBGC ( undirom, "UNDIROM %p trying expansion ROM at %04x:0000 " + "(%zdkB)\n", undirom, rom_segment, ( rom_len / 1024 ) ); + undirom->rom_segment = rom_segment; + + /* Check for and parse PXE ROM ID */ + pxeromid = romheader.PXEROMID; + if ( ! pxeromid ) { + DBGC ( undirom, "UNDIROM %p has no PXE ROM ID\n", undirom ); + rc = -EINVAL; + goto err; + } + if ( pxeromid > rom_len ) { + DBGC ( undirom, "UNDIROM %p PXE ROM ID outside ROM\n", + undirom ); + rc = -EINVAL; + goto err; + } + if ( ( rc = undirom_parse_pxeromid ( undirom, pxeromid ) ) != 0 ) + goto err; + + /* Parse PCIR header, if present */ + pcirheader = romheader.PCIRHeader; + if ( pcirheader ) + undirom_parse_pcirheader ( undirom, pcirheader ); + + /* Add to UNDI ROM list and return */ + DBGC ( undirom, "UNDIROM %p registered\n", undirom ); + list_add_tail ( &undirom->list, &undiroms ); + return 0; + + err: + free ( undirom ); + return rc; +} + +/** + * Create UNDI ROMs for all possible expansion ROMs + * + * @ret + */ +static void undirom_probe_all_roms ( void ) { + static int probed = 0; + unsigned int rom_segment; + + /* Perform probe only once */ + if ( probed ) + return; + + DBG ( "Scanning for PXE expansion ROMs\n" ); + + /* Scan through expansion ROM region at 512 byte intervals */ + for ( rom_segment = 0xc000 ; rom_segment < 0x10000 ; + rom_segment += 0x20 ) { + undirom_probe ( rom_segment ); + } + + probed = 1; +} + +/** + * Find UNDI ROM for PCI device + * + * @v vendor_id PCI vendor ID + * @v device_id PCI device ID + * @v rombase ROM base address, or 0 for any + * @ret undirom UNDI ROM, or NULL + */ +struct undi_rom * undirom_find_pci 
( unsigned int vendor_id, + unsigned int device_id, + unsigned int rombase ) { + struct undi_rom *undirom; + + undirom_probe_all_roms(); + + list_for_each_entry ( undirom, &undiroms, list ) { + if ( undirom->bus_type != PCI_NIC ) + continue; + if ( undirom->bus_id.pci.vendor_id != vendor_id ) + continue; + if ( undirom->bus_id.pci.device_id != device_id ) + continue; + if ( rombase && ( ( undirom->rom_segment << 4 ) != rombase ) ) + continue; + DBGC ( undirom, "UNDIROM %p matched PCI %04x:%04x (%08x)\n", + undirom, vendor_id, device_id, rombase ); + return undirom; + } + + DBG ( "No UNDI ROM matched PCI %04x:%04x (%08x)\n", + vendor_id, device_id, rombase ); + return NULL; +} diff --git a/src/arch/x86/drivers/xen/hvm.c b/src/arch/x86/drivers/xen/hvm.c new file mode 100644 index 00000000..311f343c --- /dev/null +++ b/src/arch/x86/drivers/xen/hvm.c @@ -0,0 +1,505 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hvm.h" + +/** @file + * + * Xen HVM driver + * + */ + +/** + * Get CPUID base + * + * @v hvm HVM device + * @ret rc Return status code + */ +static int hvm_cpuid_base ( struct hvm_device *hvm ) { + struct { + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + } __attribute__ (( packed )) signature; + uint32_t base; + uint32_t version; + uint32_t discard_eax; + uint32_t discard_ebx; + uint32_t discard_ecx; + uint32_t discard_edx; + + /* Scan for magic signature */ + for ( base = HVM_CPUID_MIN ; base <= HVM_CPUID_MAX ; + base += HVM_CPUID_STEP ) { + cpuid ( base, 0, &discard_eax, &signature.ebx, &signature.ecx, + &signature.edx ); + if ( memcmp ( &signature, HVM_CPUID_MAGIC, + sizeof ( signature ) ) == 0 ) { + hvm->cpuid_base = base; + cpuid ( ( base + HVM_CPUID_VERSION ), 0, &version, + &discard_ebx, &discard_ecx, &discard_edx ); + DBGC2 ( hvm, "HVM using CPUID base %#08x (v%d.%d)\n", + base, ( version >> 16 ), ( version & 0xffff ) ); + return 0; + } + } + + DBGC ( hvm, "HVM could not find hypervisor\n" ); + return -ENODEV; +} + +/** + * Map hypercall page(s) + * + * @v hvm HVM device + * @ret rc Return status code + */ +static int hvm_map_hypercall ( struct hvm_device *hvm ) { + uint32_t pages; + uint32_t msr; + uint32_t discard_ecx; + uint32_t discard_edx; + physaddr_t hypercall_phys; + uint32_t version; + static xen_extraversion_t extraversion; + int xenrc; + int rc; + + /* Get number of hypercall pages and MSR to use */ + cpuid ( ( hvm->cpuid_base + HVM_CPUID_PAGES ), 0, &pages, &msr, + &discard_ecx, &discard_edx ); + + /* Allocate pages */ + hvm->hypercall_len = ( pages * PAGE_SIZE ); + hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE ); + if ( ! 
hvm->xen.hypercall ) { + DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n", + pages ); + return -ENOMEM; + } + hypercall_phys = virt_to_phys ( hvm->xen.hypercall ); + DBGC2 ( hvm, "HVM hypercall page(s) at [%#08lx,%#08lx) via MSR %#08x\n", + hypercall_phys, ( hypercall_phys + hvm->hypercall_len ), msr ); + + /* Write to MSR */ + wrmsr ( msr, hypercall_phys ); + + /* Check that hypercall mechanism is working */ + version = xenver_version ( &hvm->xen ); + if ( ( xenrc = xenver_extraversion ( &hvm->xen, &extraversion ) ) != 0){ + rc = -EXEN ( xenrc ); + DBGC ( hvm, "HVM could not get extraversion: %s\n", + strerror ( rc ) ); + return rc; + } + DBGC2 ( hvm, "HVM found Xen version %d.%d%s\n", + ( version >> 16 ), ( version & 0xffff ) , extraversion ); + + return 0; +} + +/** + * Unmap hypercall page(s) + * + * @v hvm HVM device + */ +static void hvm_unmap_hypercall ( struct hvm_device *hvm ) { + + /* Free pages */ + free_dma ( hvm->xen.hypercall, hvm->hypercall_len ); +} + +/** + * Allocate and map MMIO space + * + * @v hvm HVM device + * @v space Source mapping space + * @v len Length (must be a multiple of PAGE_SIZE) + * @ret mmio MMIO space address, or NULL on error + */ +static void * hvm_ioremap ( struct hvm_device *hvm, unsigned int space, + size_t len ) { + struct xen_add_to_physmap add; + struct xen_remove_from_physmap remove; + unsigned int pages = ( len / PAGE_SIZE ); + physaddr_t mmio_phys; + unsigned int i; + void *mmio; + int xenrc; + int rc; + + /* Sanity check */ + assert ( ( len % PAGE_SIZE ) == 0 ); + + /* Check for available space */ + if ( ( hvm->mmio_offset + len ) > hvm->mmio_len ) { + DBGC ( hvm, "HVM could not allocate %zd bytes of MMIO space " + "(%zd of %zd remaining)\n", len, + ( hvm->mmio_len - hvm->mmio_offset ), hvm->mmio_len ); + goto err_no_space; + } + + /* Map this space */ + mmio = pci_ioremap ( hvm->pci, ( hvm->mmio + hvm->mmio_offset ), len ); + if ( ! 
mmio ) { + DBGC ( hvm, "HVM could not map MMIO space [%08lx,%08lx)\n", + ( hvm->mmio + hvm->mmio_offset ), + ( hvm->mmio + hvm->mmio_offset + len ) ); + goto err_ioremap; + } + mmio_phys = virt_to_phys ( mmio ); + + /* Add to physical address space */ + for ( i = 0 ; i < pages ; i++ ) { + add.domid = DOMID_SELF; + add.idx = i; + add.space = space; + add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i ); + if ( ( xenrc = xenmem_add_to_physmap ( &hvm->xen, &add ) ) !=0){ + rc = -EXEN ( xenrc ); + DBGC ( hvm, "HVM could not add space %d idx %d at " + "[%08lx,%08lx): %s\n", space, i, + ( mmio_phys + ( i * PAGE_SIZE ) ), + ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ), + strerror ( rc ) ); + goto err_add_to_physmap; + } + } + + /* Update offset */ + hvm->mmio_offset += len; + + return mmio; + + i = pages; + err_add_to_physmap: + for ( i-- ; ( signed int ) i >= 0 ; i-- ) { + remove.domid = DOMID_SELF; + add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i ); + xenmem_remove_from_physmap ( &hvm->xen, &remove ); + } + iounmap ( mmio ); + err_ioremap: + err_no_space: + return NULL; +} + +/** + * Unmap MMIO space + * + * @v hvm HVM device + * @v mmio MMIO space address + * @v len Length (must be a multiple of PAGE_SIZE) + */ +static void hvm_iounmap ( struct hvm_device *hvm, void *mmio, size_t len ) { + struct xen_remove_from_physmap remove; + physaddr_t mmio_phys = virt_to_phys ( mmio ); + unsigned int pages = ( len / PAGE_SIZE ); + unsigned int i; + int xenrc; + int rc; + + /* Unmap this space */ + iounmap ( mmio ); + + /* Remove from physical address space */ + for ( i = 0 ; i < pages ; i++ ) { + remove.domid = DOMID_SELF; + remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i ); + if ( ( xenrc = xenmem_remove_from_physmap ( &hvm->xen, + &remove ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( hvm, "HVM could not remove space [%08lx,%08lx): " + "%s\n", ( mmio_phys + ( i * PAGE_SIZE ) ), + ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ), + strerror ( rc ) ); + /* Nothing we can do about this */ + } + } +} + 
+/** + * Map shared info page + * + * @v hvm HVM device + * @ret rc Return status code + */ +static int hvm_map_shared_info ( struct hvm_device *hvm ) { + physaddr_t shared_info_phys; + int rc; + + /* Map shared info page */ + hvm->xen.shared = hvm_ioremap ( hvm, XENMAPSPACE_shared_info, + PAGE_SIZE ); + if ( ! hvm->xen.shared ) { + rc = -ENOMEM; + goto err_alloc; + } + shared_info_phys = virt_to_phys ( hvm->xen.shared ); + DBGC2 ( hvm, "HVM shared info page at [%#08lx,%#08lx)\n", + shared_info_phys, ( shared_info_phys + PAGE_SIZE ) ); + + /* Sanity check */ + DBGC2 ( hvm, "HVM wallclock time is %d\n", + readl ( &hvm->xen.shared->wc_sec ) ); + + return 0; + + hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE ); + err_alloc: + return rc; +} + +/** + * Unmap shared info page + * + * @v hvm HVM device + */ +static void hvm_unmap_shared_info ( struct hvm_device *hvm ) { + + /* Unmap shared info page */ + hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE ); +} + +/** + * Map grant table + * + * @v hvm HVM device + * @ret rc Return status code + */ +static int hvm_map_grant ( struct hvm_device *hvm ) { + physaddr_t grant_phys; + int rc; + + /* Initialise grant table */ + if ( ( rc = xengrant_init ( &hvm->xen ) ) != 0 ) { + DBGC ( hvm, "HVM could not initialise grant table: %s\n", + strerror ( rc ) ); + return rc; + } + + /* Map grant table */ + hvm->xen.grant.table = hvm_ioremap ( hvm, XENMAPSPACE_grant_table, + hvm->xen.grant.len ); + if ( ! 
hvm->xen.grant.table ) + return -ENODEV; + + grant_phys = virt_to_phys ( hvm->xen.grant.table ); + DBGC2 ( hvm, "HVM mapped grant table at [%08lx,%08lx)\n", + grant_phys, ( grant_phys + hvm->xen.grant.len ) ); + return 0; +} + +/** + * Unmap grant table + * + * @v hvm HVM device + */ +static void hvm_unmap_grant ( struct hvm_device *hvm ) { + + /* Unmap grant table */ + hvm_iounmap ( hvm, hvm->xen.grant.table, hvm->xen.grant.len ); +} + +/** + * Map XenStore + * + * @v hvm HVM device + * @ret rc Return status code + */ +static int hvm_map_xenstore ( struct hvm_device *hvm ) { + uint64_t xenstore_evtchn; + uint64_t xenstore_pfn; + physaddr_t xenstore_phys; + char *name; + int xenrc; + int rc; + + /* Get XenStore event channel */ + if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_EVTCHN, + &xenstore_evtchn ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( hvm, "HVM could not get XenStore event channel: %s\n", + strerror ( rc ) ); + return rc; + } + hvm->xen.store.port = xenstore_evtchn; + + /* Get XenStore PFN */ + if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_PFN, + &xenstore_pfn ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( hvm, "HVM could not get XenStore PFN: %s\n", + strerror ( rc ) ); + return rc; + } + xenstore_phys = ( xenstore_pfn * PAGE_SIZE ); + + /* Map XenStore */ + hvm->xen.store.intf = pci_ioremap ( hvm->pci, xenstore_phys, + PAGE_SIZE ); + if ( ! 
hvm->xen.store.intf ) { + DBGC ( hvm, "HVM could not map XenStore at [%08lx,%08lx)\n", + xenstore_phys, ( xenstore_phys + PAGE_SIZE ) ); + return -ENODEV; + } + DBGC2 ( hvm, "HVM mapped XenStore at [%08lx,%08lx) with event port " + "%d\n", xenstore_phys, ( xenstore_phys + PAGE_SIZE ), + hvm->xen.store.port ); + + /* Check that XenStore is working */ + if ( ( rc = xenstore_read ( &hvm->xen, &name, "name", NULL ) ) != 0 ) { + DBGC ( hvm, "HVM could not read domain name: %s\n", + strerror ( rc ) ); + return rc; + } + DBGC2 ( hvm, "HVM running in domain \"%s\"\n", name ); + free ( name ); + + return 0; +} + +/** + * Unmap XenStore + * + * @v hvm HVM device + */ +static void hvm_unmap_xenstore ( struct hvm_device *hvm ) { + + /* Unmap XenStore */ + iounmap ( hvm->xen.store.intf ); +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int hvm_probe ( struct pci_device *pci ) { + struct hvm_device *hvm; + int rc; + + /* Allocate and initialise structure */ + hvm = zalloc ( sizeof ( *hvm ) ); + if ( ! 
hvm ) { + rc = -ENOMEM; + goto err_alloc; + } + hvm->pci = pci; + hvm->mmio = pci_bar_start ( pci, HVM_MMIO_BAR ); + hvm->mmio_len = pci_bar_size ( pci, HVM_MMIO_BAR ); + DBGC2 ( hvm, "HVM has MMIO space [%08lx,%08lx)\n", + hvm->mmio, ( hvm->mmio + hvm->mmio_len ) ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Attach to hypervisor */ + if ( ( rc = hvm_cpuid_base ( hvm ) ) != 0 ) + goto err_cpuid_base; + if ( ( rc = hvm_map_hypercall ( hvm ) ) != 0 ) + goto err_map_hypercall; + if ( ( rc = hvm_map_shared_info ( hvm ) ) != 0 ) + goto err_map_shared_info; + if ( ( rc = hvm_map_grant ( hvm ) ) != 0 ) + goto err_map_grant; + if ( ( rc = hvm_map_xenstore ( hvm ) ) != 0 ) + goto err_map_xenstore; + + /* Probe Xen devices */ + if ( ( rc = xenbus_probe ( &hvm->xen, &pci->dev ) ) != 0 ) { + DBGC ( hvm, "HVM could not probe Xen bus: %s\n", + strerror ( rc ) ); + goto err_xenbus_probe; + } + + pci_set_drvdata ( pci, hvm ); + return 0; + + xenbus_remove ( &hvm->xen, &pci->dev ); + err_xenbus_probe: + hvm_unmap_xenstore ( hvm ); + err_map_xenstore: + hvm_unmap_grant ( hvm ); + err_map_grant: + hvm_unmap_shared_info ( hvm ); + err_map_shared_info: + hvm_unmap_hypercall ( hvm ); + err_map_hypercall: + err_cpuid_base: + free ( hvm ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void hvm_remove ( struct pci_device *pci ) { + struct hvm_device *hvm = pci_get_drvdata ( pci ); + + xenbus_remove ( &hvm->xen, &pci->dev ); + hvm_unmap_xenstore ( hvm ); + hvm_unmap_grant ( hvm ); + hvm_unmap_shared_info ( hvm ); + hvm_unmap_hypercall ( hvm ); + free ( hvm ); +} + +/** PCI device IDs */ +static struct pci_device_id hvm_ids[] = { + PCI_ROM ( 0x5853, 0x0001, "hvm", "hvm", 0 ), + PCI_ROM ( 0x5853, 0x0002, "hvm2", "hvm2", 0 ), +}; + +/** PCI driver */ +struct pci_driver hvm_driver __pci_driver = { + .ids = hvm_ids, + .id_count = ( sizeof ( hvm_ids ) / sizeof ( hvm_ids[0] ) ), + .probe = hvm_probe, + .remove = hvm_remove, 
+}; + +/* Drag in objects via hvm_driver */ +REQUIRING_SYMBOL ( hvm_driver ); + +/* Drag in netfront driver */ +REQUIRE_OBJECT ( netfront ); diff --git a/src/arch/x86/drivers/xen/hvm.h b/src/arch/x86/drivers/xen/hvm.h new file mode 100644 index 00000000..88e49081 --- /dev/null +++ b/src/arch/x86/drivers/xen/hvm.h @@ -0,0 +1,77 @@ +#ifndef _HVM_H +#define _HVM_H + +/** @file + * + * Xen HVM driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** Minimum CPUID base */ +#define HVM_CPUID_MIN 0x40000000UL + +/** Maximum CPUID base */ +#define HVM_CPUID_MAX 0x4000ff00UL + +/** Increment between CPUID bases */ +#define HVM_CPUID_STEP 0x00000100UL + +/** Magic signature */ +#define HVM_CPUID_MAGIC "XenVMMXenVMM" + +/** Get Xen version */ +#define HVM_CPUID_VERSION 1 + +/** Get number of hypercall pages */ +#define HVM_CPUID_PAGES 2 + +/** PCI MMIO BAR */ +#define HVM_MMIO_BAR PCI_BASE_ADDRESS_1 + +/** A Xen HVM device */ +struct hvm_device { + /** Xen hypervisor */ + struct xen_hypervisor xen; + /** PCI device */ + struct pci_device *pci; + /** CPUID base */ + uint32_t cpuid_base; + /** Length of hypercall table */ + size_t hypercall_len; + /** MMIO base address */ + unsigned long mmio; + /** Current offset within MMIO address space */ + size_t mmio_offset; + /** Length of MMIO address space */ + size_t mmio_len; +}; + +/** + * Get HVM parameter value + * + * @v xen Xen hypervisor + * @v index Parameter index + * @v value Value to fill in + * @ret xenrc Xen status code + */ +static inline int xen_hvm_get_param ( struct xen_hypervisor *xen, + unsigned int index, uint64_t *value ) { + struct xen_hvm_param param; + int xenrc; + + param.domid = DOMID_SELF; + param.index = index; + xenrc = xen_hypercall_2 ( xen, __HYPERVISOR_hvm_op, HVMOP_get_param, + virt_to_phys ( ¶m ) ); + *value = param.value; + return xenrc; +} + +#endif /* _HVM_H */ diff --git a/src/arch/x86/hci/commands/pxe_cmd.c b/src/arch/x86/hci/commands/pxe_cmd.c 
new file mode 100644 index 00000000..473b97f9 --- /dev/null +++ b/src/arch/x86/hci/commands/pxe_cmd.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2010 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +#include +#include +#include +#include +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * PXE commands + * + */ + +/** "startpxe" options */ +struct startpxe_options {}; + +/** "startpxe" option list */ +static struct option_descriptor startpxe_opts[] = {}; + +/** + * "startpxe" payload + * + * @v netdev Network device + * @v opts Command options + * @ret rc Return status code + */ +static int startpxe_payload ( struct net_device *netdev, + struct startpxe_options *opts __unused ) { + + if ( netdev_is_open ( netdev ) ) + pxe_activate ( netdev ); + + return 0; +} + +/** "startpxe" command descriptor */ +static struct ifcommon_command_descriptor startpxe_cmd = + IFCOMMON_COMMAND_DESC ( struct startpxe_options, startpxe_opts, + 0, MAX_ARGUMENTS, "[]", + startpxe_payload, 0 ); + +/** + * The "startpxe" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int startpxe_exec ( int argc, char **argv ) { + return ifcommon_exec ( argc, argv, &startpxe_cmd ); +} + +/** "stoppxe" options */ +struct stoppxe_options {}; + +/** "stoppxe" option list */ +static struct option_descriptor stoppxe_opts[] = {}; + +/** "stoppxe" command descriptor */ +static struct command_descriptor stoppxe_cmd = + COMMAND_DESC ( struct stoppxe_options, stoppxe_opts, 0, 0, NULL ); + +/** + * The "stoppxe" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int stoppxe_exec ( int argc __unused, char **argv __unused ) { + struct stoppxe_options opts; + int rc; + + /* Parse options */ + if ( ( rc = parse_options ( argc, argv, &stoppxe_cmd, &opts ) ) != 0 ) + return rc; + + pxe_deactivate(); + + return 0; +} + +/** PXE commands */ +struct command pxe_commands[] __command = { + { + .name = "startpxe", + .exec = startpxe_exec, + }, + { + .name = "stoppxe", + .exec = stoppxe_exec, + }, +}; diff --git a/src/arch/x86/image/bootsector.c b/src/arch/x86/image/bootsector.c 
new file mode 100644 index 00000000..67dad04f --- /dev/null +++ b/src/arch/x86/image/bootsector.c @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * x86 bootsector image format + * + */ + +#include +#include +#include +#include +#include + +/** Vector for storing original INT 18 handler + * + * We do not chain to this vector, so there is no need to place it in + * .text16. + */ +static struct segoff int18_vector; + +/** Vector for storing original INT 19 handler + * + * We do not chain to this vector, so there is no need to place it in + * .text16. 
+ */ +static struct segoff int19_vector; + +/** Restart point for INT 18 or 19 */ +extern void bootsector_exec_fail ( void ); + +/** + * Jump to preloaded bootsector + * + * @v segment Real-mode segment + * @v offset Real-mode offset + * @v drive Drive number to pass to boot sector + * @ret rc Return status code + */ +int call_bootsector ( unsigned int segment, unsigned int offset, + unsigned int drive ) { + int discard_b, discard_D, discard_d; + + /* Reset console, since boot sector will probably use it */ + console_reset(); + + DBG ( "Booting from boot sector at %04x:%04x\n", segment, offset ); + + /* Hook INTs 18 and 19 to capture failure paths */ + hook_bios_interrupt ( 0x18, ( intptr_t ) bootsector_exec_fail, + &int18_vector ); + hook_bios_interrupt ( 0x19, ( intptr_t ) bootsector_exec_fail, + &int19_vector ); + + /* Boot the loaded sector + * + * We assume that the boot sector may completely destroy our + * real-mode stack, so we preserve everything we need in + * static storage. + */ + __asm__ __volatile__ ( REAL_CODE ( /* Save return address off-stack */ + "popw %%cs:saved_retaddr\n\t" + /* Save stack pointer */ + "movw %%ss, %%ax\n\t" + "movw %%ax, %%cs:saved_ss\n\t" + "movw %%sp, %%cs:saved_sp\n\t" + /* Save frame pointer (gcc bug) */ + "movl %%ebp, %%cs:saved_ebp\n\t" + /* Prepare jump to boot sector */ + "pushw %%bx\n\t" + "pushw %%di\n\t" + /* Clear all registers */ + "xorl %%eax, %%eax\n\t" + "xorl %%ebx, %%ebx\n\t" + "xorl %%ecx, %%ecx\n\t" + /* %edx contains drive number */ + "xorl %%esi, %%esi\n\t" + "xorl %%edi, %%edi\n\t" + "xorl %%ebp, %%ebp\n\t" + "movw %%ax, %%ds\n\t" + "movw %%ax, %%es\n\t" + "movw %%ax, %%fs\n\t" + "movw %%ax, %%gs\n\t" + /* Jump to boot sector */ + "sti\n\t" + "lret\n\t" + /* Preserved variables */ + "\nsaved_ebp: .long 0\n\t" + "\nsaved_ss: .word 0\n\t" + "\nsaved_sp: .word 0\n\t" + "\nsaved_retaddr: .word 0\n\t" + /* Boot failure return point */ + "\nbootsector_exec_fail:\n\t" + /* Restore frame pointer (gcc bug) */ + 
"movl %%cs:saved_ebp, %%ebp\n\t" + /* Restore stack pointer */ + "movw %%cs:saved_ss, %%ax\n\t" + "movw %%ax, %%ss\n\t" + "movw %%cs:saved_sp, %%sp\n\t" + /* Return via saved address */ + "jmp *%%cs:saved_retaddr\n\t" ) + : "=b" ( discard_b ), "=D" ( discard_D ), + "=d" ( discard_d ) + : "b" ( segment ), "D" ( offset ), + "d" ( drive ) + : "eax", "ecx", "esi" ); + + DBG ( "Booted disk returned via INT 18 or 19\n" ); + + /* Unhook INTs 18 and 19 */ + unhook_bios_interrupt ( 0x18, ( intptr_t ) bootsector_exec_fail, + &int18_vector ); + unhook_bios_interrupt ( 0x19, ( intptr_t ) bootsector_exec_fail, + &int19_vector ); + + return -ECANCELED; +} diff --git a/src/arch/x86/image/bzimage.c b/src/arch/x86/image/bzimage.c new file mode 100644 index 00000000..51498bf9 --- /dev/null +++ b/src/arch/x86/image/bzimage.c @@ -0,0 +1,673 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Linux bzImage image format + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +FEATURE ( FEATURE_IMAGE, "bzImage", DHCP_EB_FEATURE_BZIMAGE, 1 ); + +/** + * bzImage context + */ +struct bzimage_context { + /** Boot protocol version */ + unsigned int version; + /** Real-mode kernel portion load segment address */ + unsigned int rm_kernel_seg; + /** Real-mode kernel portion load address */ + userptr_t rm_kernel; + /** Real-mode kernel portion file size */ + size_t rm_filesz; + /** Real-mode heap top (offset from rm_kernel) */ + size_t rm_heap; + /** Command line (offset from rm_kernel) */ + size_t rm_cmdline; + /** Command line maximum length */ + size_t cmdline_size; + /** Real-mode kernel portion total memory size */ + size_t rm_memsz; + /** Non-real-mode kernel portion load address */ + userptr_t pm_kernel; + /** Non-real-mode kernel portion file and memory size */ + size_t pm_sz; + /** Video mode */ + unsigned int vid_mode; + /** Memory limit */ + uint64_t mem_limit; + /** Initrd address */ + physaddr_t ramdisk_image; + /** Initrd size */ + physaddr_t ramdisk_size; + + /** Command line magic block */ + struct bzimage_cmdline cmdline_magic; + /** bzImage header */ + struct bzimage_header bzhdr; +}; + +/** + * Parse bzImage header + * + * @v image bzImage file + * @v bzimg bzImage context + * @v src bzImage to parse + * @ret rc Return status code + */ +static int bzimage_parse_header ( struct image *image, + struct bzimage_context *bzimg, + userptr_t src ) { + unsigned int syssize; + int is_bzimage; + + /* Sanity check */ + if ( image->len < ( BZI_HDR_OFFSET + sizeof ( bzimg->bzhdr ) ) ) { + DBGC ( image, "bzImage %p too short for kernel header\n", + image ); + return -ENOEXEC; + } + + /* Read in header structures */ + memset ( bzimg, 0, sizeof ( *bzimg ) ); + copy_from_user ( 
&bzimg->cmdline_magic, src, BZI_CMDLINE_OFFSET, + sizeof ( bzimg->cmdline_magic ) ); + copy_from_user ( &bzimg->bzhdr, src, BZI_HDR_OFFSET, + sizeof ( bzimg->bzhdr ) ); + + /* Calculate size of real-mode portion */ + bzimg->rm_filesz = ( ( ( bzimg->bzhdr.setup_sects ? + bzimg->bzhdr.setup_sects : 4 ) + 1 ) << 9 ); + if ( bzimg->rm_filesz > image->len ) { + DBGC ( image, "bzImage %p too short for %zd byte of setup\n", + image, bzimg->rm_filesz ); + return -ENOEXEC; + } + bzimg->rm_memsz = BZI_ASSUMED_RM_SIZE; + + /* Calculate size of protected-mode portion */ + bzimg->pm_sz = ( image->len - bzimg->rm_filesz ); + syssize = ( ( bzimg->pm_sz + 15 ) / 16 ); + + /* Check for signatures and determine version */ + if ( bzimg->bzhdr.boot_flag != BZI_BOOT_FLAG ) { + DBGC ( image, "bzImage %p missing 55AA signature\n", image ); + return -ENOEXEC; + } + if ( bzimg->bzhdr.header == BZI_SIGNATURE ) { + /* 2.00+ */ + bzimg->version = bzimg->bzhdr.version; + } else { + /* Pre-2.00. Check that the syssize field is correct, + * as a guard against accepting arbitrary binary data, + * since the 55AA check is pretty lax. Note that the + * syssize field is unreliable for protocols between + * 2.00 and 2.03 inclusive, so we should not always + * check this field. + */ + bzimg->version = 0x0100; + if ( bzimg->bzhdr.syssize != syssize ) { + DBGC ( image, "bzImage %p bad syssize %x (expected " + "%x)\n", image, bzimg->bzhdr.syssize, syssize ); + return -ENOEXEC; + } + } + + /* Determine image type */ + is_bzimage = ( ( bzimg->version >= 0x0200 ) ? + ( bzimg->bzhdr.loadflags & BZI_LOAD_HIGH ) : 0 ); + + /* Calculate load address of real-mode portion */ + bzimg->rm_kernel_seg = ( is_bzimage ? 
0x1000 : 0x9000 ); + bzimg->rm_kernel = real_to_user ( bzimg->rm_kernel_seg, 0 ); + + /* Allow space for the stack and heap */ + bzimg->rm_memsz += BZI_STACK_SIZE; + bzimg->rm_heap = bzimg->rm_memsz; + + /* Allow space for the command line */ + bzimg->rm_cmdline = bzimg->rm_memsz; + bzimg->rm_memsz += BZI_CMDLINE_SIZE; + + /* Calculate load address of protected-mode portion */ + bzimg->pm_kernel = phys_to_user ( is_bzimage ? BZI_LOAD_HIGH_ADDR + : BZI_LOAD_LOW_ADDR ); + + /* Extract video mode */ + bzimg->vid_mode = bzimg->bzhdr.vid_mode; + + /* Extract memory limit */ + bzimg->mem_limit = ( ( bzimg->version >= 0x0203 ) ? + bzimg->bzhdr.initrd_addr_max : BZI_INITRD_MAX ); + + /* Extract command line size */ + bzimg->cmdline_size = ( ( bzimg->version >= 0x0206 ) ? + bzimg->bzhdr.cmdline_size : BZI_CMDLINE_SIZE ); + + DBGC ( image, "bzImage %p version %04x RM %#lx+%#zx PM %#lx+%#zx " + "cmdlen %zd\n", image, bzimg->version, + user_to_phys ( bzimg->rm_kernel, 0 ), bzimg->rm_filesz, + user_to_phys ( bzimg->pm_kernel, 0 ), bzimg->pm_sz, + bzimg->cmdline_size ); + + return 0; +} + +/** + * Update bzImage header in loaded kernel + * + * @v image bzImage file + * @v bzimg bzImage context + * @v dst bzImage to update + */ +static void bzimage_update_header ( struct image *image, + struct bzimage_context *bzimg, + userptr_t dst ) { + + /* Set loader type */ + if ( bzimg->version >= 0x0200 ) + bzimg->bzhdr.type_of_loader = BZI_LOADER_TYPE_IPXE; + + /* Set heap end pointer */ + if ( bzimg->version >= 0x0201 ) { + bzimg->bzhdr.heap_end_ptr = ( bzimg->rm_heap - 0x200 ); + bzimg->bzhdr.loadflags |= BZI_CAN_USE_HEAP; + } + + /* Set command line */ + if ( bzimg->version >= 0x0202 ) { + bzimg->bzhdr.cmd_line_ptr = user_to_phys ( bzimg->rm_kernel, + bzimg->rm_cmdline ); + } else { + bzimg->cmdline_magic.magic = BZI_CMDLINE_MAGIC; + bzimg->cmdline_magic.offset = bzimg->rm_cmdline; + if ( bzimg->version >= 0x0200 ) + bzimg->bzhdr.setup_move_size = bzimg->rm_memsz; + } + + /* Set video 
mode */ + bzimg->bzhdr.vid_mode = bzimg->vid_mode; + + /* Set initrd address */ + if ( bzimg->version >= 0x0200 ) { + bzimg->bzhdr.ramdisk_image = bzimg->ramdisk_image; + bzimg->bzhdr.ramdisk_size = bzimg->ramdisk_size; + } + + /* Write out header structures */ + copy_to_user ( dst, BZI_CMDLINE_OFFSET, &bzimg->cmdline_magic, + sizeof ( bzimg->cmdline_magic ) ); + copy_to_user ( dst, BZI_HDR_OFFSET, &bzimg->bzhdr, + sizeof ( bzimg->bzhdr ) ); + + DBGC ( image, "bzImage %p vidmode %d\n", image, bzimg->vid_mode ); +} + +/** + * Parse kernel command line for bootloader parameters + * + * @v image bzImage file + * @v bzimg bzImage context + * @v cmdline Kernel command line + * @ret rc Return status code + */ +static int bzimage_parse_cmdline ( struct image *image, + struct bzimage_context *bzimg, + const char *cmdline ) { + char *vga; + char *mem; + + /* Look for "vga=" */ + if ( ( vga = strstr ( cmdline, "vga=" ) ) ) { + vga += 4; + if ( strcmp ( vga, "normal" ) == 0 ) { + bzimg->vid_mode = BZI_VID_MODE_NORMAL; + } else if ( strcmp ( vga, "ext" ) == 0 ) { + bzimg->vid_mode = BZI_VID_MODE_EXT; + } else if ( strcmp ( vga, "ask" ) == 0 ) { + bzimg->vid_mode = BZI_VID_MODE_ASK; + } else { + bzimg->vid_mode = strtoul ( vga, &vga, 0 ); + if ( *vga && ( *vga != ' ' ) ) { + DBGC ( image, "bzImage %p strange \"vga=\"" + "terminator '%c'\n", image, *vga ); + } + } + } + + /* Look for "mem=" */ + if ( ( mem = strstr ( cmdline, "mem=" ) ) ) { + mem += 4; + bzimg->mem_limit = strtoul ( mem, &mem, 0 ); + switch ( *mem ) { + case 'G': + case 'g': + bzimg->mem_limit <<= 10; + /* Fall through */ + case 'M': + case 'm': + bzimg->mem_limit <<= 10; + /* Fall through */ + case 'K': + case 'k': + bzimg->mem_limit <<= 10; + break; + case '\0': + case ' ': + break; + default: + DBGC ( image, "bzImage %p strange \"mem=\" " + "terminator '%c'\n", image, *mem ); + break; + } + bzimg->mem_limit -= 1; + } + + return 0; +} + +/** + * Set command line + * + * @v image bzImage image + * @v bzimg 
bzImage context + * @v cmdline Kernel command line + */ +static void bzimage_set_cmdline ( struct image *image, + struct bzimage_context *bzimg, + const char *cmdline ) { + size_t cmdline_len; + + /* Copy command line down to real-mode portion */ + cmdline_len = ( strlen ( cmdline ) + 1 ); + if ( cmdline_len > bzimg->cmdline_size ) + cmdline_len = bzimg->cmdline_size; + copy_to_user ( bzimg->rm_kernel, bzimg->rm_cmdline, + cmdline, cmdline_len ); + DBGC ( image, "bzImage %p command line \"%s\"\n", image, cmdline ); +} + +/** + * Parse standalone image command line for cpio parameters + * + * @v image bzImage file + * @v cpio CPIO header + * @v cmdline Command line + */ +static void bzimage_parse_cpio_cmdline ( struct image *image, + struct cpio_header *cpio, + const char *cmdline ) { + char *arg; + char *end; + unsigned int mode; + + /* Look for "mode=" */ + if ( ( arg = strstr ( cmdline, "mode=" ) ) ) { + arg += 5; + mode = strtoul ( arg, &end, 8 /* Octal for file mode */ ); + if ( *end && ( *end != ' ' ) ) { + DBGC ( image, "bzImage %p strange \"mode=\"" + "terminator '%c'\n", image, *end ); + } + cpio_set_field ( cpio->c_mode, ( 0100000 | mode ) ); + } +} + +/** + * Align initrd length + * + * @v len Length + * @ret len Length rounded up to INITRD_ALIGN + */ +static inline size_t bzimage_align ( size_t len ) { + + return ( ( len + INITRD_ALIGN - 1 ) & ~( INITRD_ALIGN - 1 ) ); +} + +/** + * Load initrd + * + * @v image bzImage image + * @v initrd initrd image + * @v address Address at which to load, or UNULL + * @ret len Length of loaded image, excluding zero-padding + */ +static size_t bzimage_load_initrd ( struct image *image, + struct image *initrd, + userptr_t address ) { + char *filename = initrd->cmdline; + char *cmdline; + struct cpio_header cpio; + size_t offset; + size_t name_len; + size_t pad_len; + + /* Do not include kernel image itself as an initrd */ + if ( initrd == image ) + return 0; + + /* Create cpio header for non-prebuilt images */ + if ( 
filename && filename[0] ) { + cmdline = strchr ( filename, ' ' ); + name_len = ( ( cmdline ? ( ( size_t ) ( cmdline - filename ) ) + : strlen ( filename ) ) + 1 /* NUL */ ); + memset ( &cpio, '0', sizeof ( cpio ) ); + memcpy ( cpio.c_magic, CPIO_MAGIC, sizeof ( cpio.c_magic ) ); + cpio_set_field ( cpio.c_mode, 0100644 ); + cpio_set_field ( cpio.c_nlink, 1 ); + cpio_set_field ( cpio.c_filesize, initrd->len ); + cpio_set_field ( cpio.c_namesize, name_len ); + if ( cmdline ) { + bzimage_parse_cpio_cmdline ( image, &cpio, + ( cmdline + 1 /* ' ' */ )); + } + offset = ( ( sizeof ( cpio ) + name_len + 0x03 ) & ~0x03 ); + } else { + offset = 0; + name_len = 0; + } + + /* Copy in initrd image body (and cpio header if applicable) */ + if ( address ) { + memmove_user ( address, offset, initrd->data, 0, initrd->len ); + if ( offset ) { + memset_user ( address, 0, 0, offset ); + copy_to_user ( address, 0, &cpio, sizeof ( cpio ) ); + copy_to_user ( address, sizeof ( cpio ), filename, + ( name_len - 1 /* NUL (or space) */ ) ); + } + DBGC ( image, "bzImage %p initrd %p [%#08lx,%#08lx,%#08lx)" + "%s%s\n", image, initrd, user_to_phys ( address, 0 ), + user_to_phys ( address, offset ), + user_to_phys ( address, ( offset + initrd->len ) ), + ( filename ? " " : "" ), ( filename ? 
filename : "" ) ); + DBGC2_MD5A ( image, user_to_phys ( address, offset ), + user_to_virt ( address, offset ), initrd->len ); + } + offset += initrd->len; + + /* Zero-pad to next INITRD_ALIGN boundary */ + pad_len = ( ( -offset ) & ( INITRD_ALIGN - 1 ) ); + if ( address ) + memset_user ( address, offset, 0, pad_len ); + + return offset; +} + +/** + * Check that initrds can be loaded + * + * @v image bzImage image + * @v bzimg bzImage context + * @ret rc Return status code + */ +static int bzimage_check_initrds ( struct image *image, + struct bzimage_context *bzimg ) { + struct image *initrd; + userptr_t bottom; + size_t len = 0; + int rc; + + /* Calculate total loaded length of initrds */ + for_each_image ( initrd ) { + + /* Skip kernel */ + if ( initrd == image ) + continue; + + /* Calculate length */ + len += bzimage_load_initrd ( image, initrd, UNULL ); + len = bzimage_align ( len ); + + DBGC ( image, "bzImage %p initrd %p from [%#08lx,%#08lx)%s%s\n", + image, initrd, user_to_phys ( initrd->data, 0 ), + user_to_phys ( initrd->data, initrd->len ), + ( initrd->cmdline ? " " : "" ), + ( initrd->cmdline ? initrd->cmdline : "" ) ); + DBGC2_MD5A ( image, user_to_phys ( initrd->data, 0 ), + user_to_virt ( initrd->data, 0 ), initrd->len ); + } + + /* Calculate lowest usable address */ + bottom = userptr_add ( bzimg->pm_kernel, bzimg->pm_sz ); + + /* Check that total length fits within space available for + * reshuffling. This is a conservative check, since CPIO + * headers are not present during reshuffling, but this + * doesn't hurt and keeps the code simple. 
+ */ + if ( ( rc = initrd_reshuffle_check ( len, bottom ) ) != 0 ) { + DBGC ( image, "bzImage %p failed reshuffle check: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Check that total length fits within kernel's memory limit */ + if ( user_to_phys ( bottom, len ) > bzimg->mem_limit ) { + DBGC ( image, "bzImage %p not enough space for initrds\n", + image ); + return -ENOBUFS; + } + + return 0; +} + +/** + * Load initrds, if any + * + * @v image bzImage image + * @v bzimg bzImage context + */ +static void bzimage_load_initrds ( struct image *image, + struct bzimage_context *bzimg ) { + struct image *initrd; + struct image *highest = NULL; + struct image *other; + userptr_t top; + userptr_t dest; + size_t offset; + size_t len; + + /* Reshuffle initrds into desired order */ + initrd_reshuffle ( userptr_add ( bzimg->pm_kernel, bzimg->pm_sz ) ); + + /* Find highest initrd */ + for_each_image ( initrd ) { + if ( ( highest == NULL ) || + ( userptr_sub ( initrd->data, highest->data ) > 0 ) ) { + highest = initrd; + } + } + + /* Do nothing if there are no initrds */ + if ( ! highest ) + return; + + /* Find highest usable address */ + top = userptr_add ( highest->data, bzimage_align ( highest->len ) ); + if ( user_to_phys ( top, -1 ) > bzimg->mem_limit ) { + top = phys_to_user ( ( bzimg->mem_limit + 1 ) & + ~( INITRD_ALIGN - 1 ) ); + } + DBGC ( image, "bzImage %p loading initrds from %#08lx downwards\n", + image, user_to_phys ( top, -1 ) ); + + /* Load initrds in order */ + for_each_image ( initrd ) { + + /* Calculate cumulative length of following + * initrds (including padding). + */ + offset = 0; + for_each_image ( other ) { + if ( other == initrd ) + offset = 0; + offset += bzimage_load_initrd ( image, other, UNULL ); + offset = bzimage_align ( offset ); + } + + /* Load initrd at this address */ + dest = userptr_add ( top, -offset ); + len = bzimage_load_initrd ( image, initrd, dest ); + + /* Record initrd location */ + if ( ! 
bzimg->ramdisk_image ) + bzimg->ramdisk_image = user_to_phys ( dest, 0 ); + bzimg->ramdisk_size = ( user_to_phys ( dest, len ) - + bzimg->ramdisk_image ); + } + DBGC ( image, "bzImage %p initrds at [%#08lx,%#08lx)\n", + image, bzimg->ramdisk_image, + ( bzimg->ramdisk_image + bzimg->ramdisk_size ) ); +} + +/** + * Execute bzImage image + * + * @v image bzImage image + * @ret rc Return status code + */ +static int bzimage_exec ( struct image *image ) { + struct bzimage_context bzimg; + const char *cmdline = ( image->cmdline ? image->cmdline : "" ); + int rc; + + /* Read and parse header from image */ + if ( ( rc = bzimage_parse_header ( image, &bzimg, + image->data ) ) != 0 ) + return rc; + + /* Prepare segments */ + if ( ( rc = prep_segment ( bzimg.rm_kernel, bzimg.rm_filesz, + bzimg.rm_memsz ) ) != 0 ) { + DBGC ( image, "bzImage %p could not prepare RM segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + if ( ( rc = prep_segment ( bzimg.pm_kernel, bzimg.pm_sz, + bzimg.pm_sz ) ) != 0 ) { + DBGC ( image, "bzImage %p could not prepare PM segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Parse command line for bootloader parameters */ + if ( ( rc = bzimage_parse_cmdline ( image, &bzimg, cmdline ) ) != 0) + return rc; + + /* Check that initrds can be loaded */ + if ( ( rc = bzimage_check_initrds ( image, &bzimg ) ) != 0 ) + return rc; + + /* Remove kernel from image list (without invalidating image pointer) */ + unregister_image ( image_get ( image ) ); + + /* Load segments */ + memcpy_user ( bzimg.rm_kernel, 0, image->data, + 0, bzimg.rm_filesz ); + memcpy_user ( bzimg.pm_kernel, 0, image->data, + bzimg.rm_filesz, bzimg.pm_sz ); + + /* Store command line */ + bzimage_set_cmdline ( image, &bzimg, cmdline ); + + /* Prepare for exiting. Must do this before loading initrds, + * since loading the initrds will corrupt the external heap. 
+ */ + shutdown_boot(); + + /* Load any initrds */ + bzimage_load_initrds ( image, &bzimg ); + + /* Update kernel header */ + bzimage_update_header ( image, &bzimg, bzimg.rm_kernel ); + + DBGC ( image, "bzImage %p jumping to RM kernel at %04x:0000 " + "(stack %04x:%04zx)\n", image, ( bzimg.rm_kernel_seg + 0x20 ), + bzimg.rm_kernel_seg, bzimg.rm_heap ); + + /* Jump to the kernel */ + __asm__ __volatile__ ( REAL_CODE ( "movw %w0, %%ds\n\t" + "movw %w0, %%es\n\t" + "movw %w0, %%fs\n\t" + "movw %w0, %%gs\n\t" + "movw %w0, %%ss\n\t" + "movw %w1, %%sp\n\t" + "pushw %w2\n\t" + "pushw $0\n\t" + "lret\n\t" ) + : : "R" ( bzimg.rm_kernel_seg ), + "R" ( bzimg.rm_heap ), + "R" ( bzimg.rm_kernel_seg + 0x20 ) ); + + /* There is no way for the image to return, since we provide + * no return address. + */ + assert ( 0 ); + + return -ECANCELED; /* -EIMPOSSIBLE */ +} + +/** + * Probe bzImage image + * + * @v image bzImage file + * @ret rc Return status code + */ +int bzimage_probe ( struct image *image ) { + struct bzimage_context bzimg; + int rc; + + /* Read and parse header from image */ + if ( ( rc = bzimage_parse_header ( image, &bzimg, + image->data ) ) != 0 ) + return rc; + + return 0; +} + +/** Linux bzImage image type */ +struct image_type bzimage_image_type __image_type ( PROBE_NORMAL ) = { + .name = "bzImage", + .probe = bzimage_probe, + .exec = bzimage_exec, +}; diff --git a/src/arch/x86/image/com32.c b/src/arch/x86/image/com32.c new file mode 100644 index 00000000..6f0e6604 --- /dev/null +++ b/src/arch/x86/image/com32.c @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2008 Daniel Verkamp . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * @file + * + * SYSLINUX COM32 image format + * + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * Execute COMBOOT image + * + * @v image COM32 image + * @ret rc Return status code + */ +static int com32_exec_loop ( struct image *image ) { + struct memory_map memmap; + unsigned int i; + int state; + uint32_t avail_mem_top; + + state = rmsetjmp ( comboot_return ); + + switch ( state ) { + case 0: /* First time through; invoke COM32 program */ + + /* Get memory map */ + get_memmap ( &memmap ); + + /* Find end of block covering COM32 image loading area */ + for ( i = 0, avail_mem_top = 0 ; i < memmap.count ; i++ ) { + if ( (memmap.regions[i].start <= COM32_START_PHYS) && + (memmap.regions[i].end > COM32_START_PHYS + image->len) ) { + avail_mem_top = memmap.regions[i].end; + break; + } + } + + DBGC ( image, "COM32 %p: available memory top = 0x%x\n", + image, avail_mem_top ); + + assert ( avail_mem_top != 0 ); + + /* Hook COMBOOT API interrupts */ + hook_comboot_interrupts(); + + /* Unregister image, so that a "boot" command doesn't + * throw us into an execution loop. We never + * reregister ourselves; COMBOOT images expect to be + * removed on exit. 
+ */ + unregister_image ( image ); + + __asm__ __volatile__ ( PHYS_CODE ( + /* Preserve registers */ + "pushal\n\t" + /* Preserve stack pointer */ + "subl $4, %k0\n\t" + "movl %%esp, (%k0)\n\t" + /* Switch to COM32 stack */ + "movl %k0, %%esp\n\t" + /* Enable interrupts */ + "sti\n\t" + /* Construct stack frame */ + "pushl %k1\n\t" + "pushl %k2\n\t" + "pushl %k3\n\t" + "pushl %k4\n\t" + "pushl %k5\n\t" + "pushl %k6\n\t" + "pushl $6\n\t" + /* Call COM32 entry point */ + "movl %k7, %k0\n\t" + "call *%k0\n\t" + /* Disable interrupts */ + "cli\n\t" + /* Restore stack pointer */ + "movl 28(%%esp), %%esp\n\t" + /* Restore registers */ + "popal\n\t" ) + : + : "r" ( avail_mem_top ), + "r" ( virt_to_phys ( com32_cfarcall_wrapper ) ), + "r" ( virt_to_phys ( com32_farcall_wrapper ) ), + "r" ( get_fbms() * 1024 - ( COM32_BOUNCE_SEG << 4 ) ), + "i" ( COM32_BOUNCE_SEG << 4 ), + "r" ( virt_to_phys ( com32_intcall_wrapper ) ), + "r" ( virt_to_phys ( image->cmdline ? + image->cmdline : "" ) ), + "i" ( COM32_START_PHYS ) + : "memory" ); + DBGC ( image, "COM32 %p: returned\n", image ); + break; + + case COMBOOT_EXIT: + DBGC ( image, "COM32 %p: exited\n", image ); + break; + + case COMBOOT_EXIT_RUN_KERNEL: + assert ( image->replacement ); + DBGC ( image, "COM32 %p: exited to run kernel %s\n", + image, image->replacement->name ); + break; + + case COMBOOT_EXIT_COMMAND: + DBGC ( image, "COM32 %p: exited after executing command\n", + image ); + break; + + default: + assert ( 0 ); + break; + } + + unhook_comboot_interrupts(); + comboot_force_text_mode(); + + return 0; +} + +/** + * Check image name extension + * + * @v image COM32 image + * @ret rc Return status code + */ +static int com32_identify ( struct image *image ) { + const char *ext; + static const uint8_t magic[] = { 0xB8, 0xFF, 0x4C, 0xCD, 0x21 }; + uint8_t buf[5]; + + if ( image->len >= 5 ) { + /* Check for magic number + * mov eax,21cd4cffh + * B8 FF 4C CD 21 + */ + copy_from_user ( buf, image->data, 0, sizeof(buf) ); + if ( 
! memcmp ( buf, magic, sizeof(buf) ) ) { + DBGC ( image, "COM32 %p: found magic number\n", + image ); + return 0; + } + } + + /* Magic number not found; check filename extension */ + + ext = strrchr( image->name, '.' ); + + if ( ! ext ) { + DBGC ( image, "COM32 %p: no extension\n", + image ); + return -ENOEXEC; + } + + ++ext; + + if ( strcasecmp( ext, "c32" ) ) { + DBGC ( image, "COM32 %p: unrecognized extension %s\n", + image, ext ); + return -ENOEXEC; + } + + return 0; +} + + +/** + * Load COM32 image into memory + * @v image COM32 image + * @ret rc Return status code + */ +static int com32_load_image ( struct image *image ) { + size_t filesz, memsz; + userptr_t buffer; + int rc; + + filesz = image->len; + memsz = filesz; + buffer = phys_to_user ( COM32_START_PHYS ); + if ( ( rc = prep_segment ( buffer, filesz, memsz ) ) != 0 ) { + DBGC ( image, "COM32 %p: could not prepare segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Copy image to segment */ + memcpy_user ( buffer, 0, image->data, 0, filesz ); + + return 0; +} + +/** + * Prepare COM32 low memory bounce buffer + * @v image COM32 image + * @ret rc Return status code + */ +static int com32_prepare_bounce_buffer ( struct image * image ) { + unsigned int seg; + userptr_t seg_userptr; + size_t filesz, memsz; + int rc; + + seg = COM32_BOUNCE_SEG; + seg_userptr = real_to_user ( seg, 0 ); + + /* Ensure the entire 64k segment is free */ + memsz = 0xFFFF; + filesz = 0; + + /* Prepare, verify, and load the real-mode segment */ + if ( ( rc = prep_segment ( seg_userptr, filesz, memsz ) ) != 0 ) { + DBGC ( image, "COM32 %p: could not prepare bounce buffer segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Probe COM32 image + * + * @v image COM32 image + * @ret rc Return status code + */ +static int com32_probe ( struct image *image ) { + int rc; + + DBGC ( image, "COM32 %p: name '%s'\n", image, image->name ); + + /* Check if this is a COMBOOT image */ + if ( ( rc = 
com32_identify ( image ) ) != 0 ) { + return rc; + } + + return 0; +} + +/** + * Execute COMBOOT image + * + * @v image COM32 image + * @ret rc Return status code + */ +static int com32_exec ( struct image *image ) { + int rc; + + /* Load image */ + if ( ( rc = com32_load_image ( image ) ) != 0 ) { + return rc; + } + + /* Prepare bounce buffer segment */ + if ( ( rc = com32_prepare_bounce_buffer ( image ) ) != 0 ) { + return rc; + } + + /* Reset console */ + console_reset(); + + return com32_exec_loop ( image ); +} + +/** SYSLINUX COM32 image type */ +struct image_type com32_image_type __image_type ( PROBE_NORMAL ) = { + .name = "COM32", + .probe = com32_probe, + .exec = com32_exec, +}; diff --git a/src/arch/x86/image/comboot.c b/src/arch/x86/image/comboot.c new file mode 100644 index 00000000..9a847f0f --- /dev/null +++ b/src/arch/x86/image/comboot.c @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2008 Daniel Verkamp . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * @file + * + * SYSLINUX COMBOOT (16-bit) image format + * + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +FEATURE ( FEATURE_IMAGE, "COMBOOT", DHCP_EB_FEATURE_COMBOOT, 1 ); + +/** + * COMBOOT PSP, copied to offset 0 of code segment + */ +struct comboot_psp { + /** INT 20 instruction, executed if COMBOOT image returns with RET */ + uint16_t int20; + /** Segment of first non-free paragraph of memory */ + uint16_t first_non_free_para; +}; + +/** Offset in PSP of command line */ +#define COMBOOT_PSP_CMDLINE_OFFSET 0x81 + +/** Maximum length of command line in PSP + * (127 bytes minus space and CR) */ +#define COMBOOT_MAX_CMDLINE_LEN 125 + + +/** + * Copy command line to PSP + * + * @v image COMBOOT image + */ +static void comboot_copy_cmdline ( struct image * image, userptr_t seg_userptr ) { + const char *cmdline = ( image->cmdline ? 
image->cmdline : "" ); + int cmdline_len = strlen ( cmdline ); + if( cmdline_len > COMBOOT_MAX_CMDLINE_LEN ) + cmdline_len = COMBOOT_MAX_CMDLINE_LEN; + uint8_t len_byte = cmdline_len; + char spc = ' ', cr = '\r'; + + /* Copy length to byte before command line */ + copy_to_user ( seg_userptr, COMBOOT_PSP_CMDLINE_OFFSET - 1, + &len_byte, 1 ); + + /* Command line starts with space */ + copy_to_user ( seg_userptr, + COMBOOT_PSP_CMDLINE_OFFSET, + &spc, 1 ); + + /* Copy command line */ + copy_to_user ( seg_userptr, + COMBOOT_PSP_CMDLINE_OFFSET + 1, + cmdline, cmdline_len ); + + /* Command line ends with CR */ + copy_to_user ( seg_userptr, + COMBOOT_PSP_CMDLINE_OFFSET + cmdline_len + 1, + &cr, 1 ); +} + +/** + * Initialize PSP + * + * @v image COMBOOT image + * @v seg_userptr segment to initialize + */ +static void comboot_init_psp ( struct image * image, userptr_t seg_userptr ) { + struct comboot_psp psp; + + /* Fill PSP */ + + /* INT 20h instruction, byte order reversed */ + psp.int20 = 0x20CD; + + /* get_fbms() returns BIOS free base memory counter, which is in + * kilobytes; x * 1024 / 16 == x * 64 == x << 6 */ + psp.first_non_free_para = get_fbms() << 6; + + DBGC ( image, "COMBOOT %p: first non-free paragraph = 0x%x\n", + image, psp.first_non_free_para ); + + /* Copy the PSP to offset 0 of segment. + * The rest of the PSP was already zeroed by + * comboot_prepare_segment. 
*/ + copy_to_user ( seg_userptr, 0, &psp, sizeof( psp ) ); + + /* Copy the command line to the PSP */ + comboot_copy_cmdline ( image, seg_userptr ); +} + +/** + * Execute COMBOOT image + * + * @v image COMBOOT image + * @ret rc Return status code + */ +static int comboot_exec_loop ( struct image *image ) { + userptr_t seg_userptr = real_to_user ( COMBOOT_PSP_SEG, 0 ); + int state; + + state = rmsetjmp ( comboot_return ); + + switch ( state ) { + case 0: /* First time through; invoke COMBOOT program */ + + /* Initialize PSP */ + comboot_init_psp ( image, seg_userptr ); + + /* Hook COMBOOT API interrupts */ + hook_comboot_interrupts(); + + DBGC ( image, "executing 16-bit COMBOOT image at %4x:0100\n", + COMBOOT_PSP_SEG ); + + /* Unregister image, so that a "boot" command doesn't + * throw us into an execution loop. We never + * reregister ourselves; COMBOOT images expect to be + * removed on exit. + */ + unregister_image ( image ); + + /* Store stack segment at 0x38 and stack pointer at 0x3A + * in the PSP and jump to the image */ + __asm__ __volatile__ ( + REAL_CODE ( /* Save return address with segment on old stack */ + "popw %%ax\n\t" + "pushw %%cs\n\t" + "pushw %%ax\n\t" + /* Set DS=ES=segment with image */ + "movw %w0, %%ds\n\t" + "movw %w0, %%es\n\t" + /* Set SS:SP to new stack (end of image segment) */ + "movw %w0, %%ss\n\t" + "xor %%sp, %%sp\n\t" + "pushw $0\n\t" + "pushw %w0\n\t" + "pushw $0x100\n\t" + /* Zero registers (some COM files assume GP regs are 0) */ + "xorw %%ax, %%ax\n\t" + "xorw %%bx, %%bx\n\t" + "xorw %%cx, %%cx\n\t" + "xorw %%dx, %%dx\n\t" + "xorw %%si, %%si\n\t" + "xorw %%di, %%di\n\t" + "xorw %%bp, %%bp\n\t" + "lret\n\t" ) + : : "r" ( COMBOOT_PSP_SEG ) : "eax" ); + DBGC ( image, "COMBOOT %p: returned\n", image ); + break; + + case COMBOOT_EXIT: + DBGC ( image, "COMBOOT %p: exited\n", image ); + break; + + case COMBOOT_EXIT_RUN_KERNEL: + assert ( image->replacement ); + DBGC ( image, "COMBOOT %p: exited to run kernel %s\n", + image, 
image->replacement->name ); + break; + + case COMBOOT_EXIT_COMMAND: + DBGC ( image, "COMBOOT %p: exited after executing command\n", + image ); + break; + + default: + assert ( 0 ); + break; + } + + unhook_comboot_interrupts(); + comboot_force_text_mode(); + + return 0; +} + +/** + * Check image name extension + * + * @v image COMBOOT image + * @ret rc Return status code + */ +static int comboot_identify ( struct image *image ) { + const char *ext; + + ext = strrchr( image->name, '.' ); + + if ( ! ext ) { + DBGC ( image, "COMBOOT %p: no extension\n", + image ); + return -ENOEXEC; + } + + ++ext; + + if ( strcasecmp( ext, "cbt" ) ) { + DBGC ( image, "COMBOOT %p: unrecognized extension %s\n", + image, ext ); + return -ENOEXEC; + } + + return 0; +} + +/** + * Load COMBOOT image into memory, preparing a segment and returning it + * @v image COMBOOT image + * @ret rc Return status code + */ +static int comboot_prepare_segment ( struct image *image ) +{ + userptr_t seg_userptr; + size_t filesz, memsz; + int rc; + + /* Load image in segment */ + seg_userptr = real_to_user ( COMBOOT_PSP_SEG, 0 ); + + /* Allow etra 0x100 bytes before image for PSP */ + filesz = image->len + 0x100; + + /* Ensure the entire 64k segment is free */ + memsz = 0xFFFF; + + /* Prepare, verify, and load the real-mode segment */ + if ( ( rc = prep_segment ( seg_userptr, filesz, memsz ) ) != 0 ) { + DBGC ( image, "COMBOOT %p: could not prepare segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Zero PSP */ + memset_user ( seg_userptr, 0, 0, 0x100 ); + + /* Copy image to segment:0100 */ + memcpy_user ( seg_userptr, 0x100, image->data, 0, image->len ); + + return 0; +} + +/** + * Probe COMBOOT image + * + * @v image COMBOOT image + * @ret rc Return status code + */ +static int comboot_probe ( struct image *image ) { + int rc; + + DBGC ( image, "COMBOOT %p: name '%s'\n", + image, image->name ); + + /* Check if this is a COMBOOT image */ + if ( ( rc = comboot_identify ( image ) ) != 0 ) { + 
+ return rc; + } + + return 0; +} + +/** + * Execute COMBOOT image + * + * @v image COMBOOT image + * @ret rc Return status code + */ +static int comboot_exec ( struct image *image ) { + int rc; + + /* Sanity check for filesize */ + if( image->len >= 0xFF00 ) { + DBGC( image, "COMBOOT %p: image too large\n", + image ); + return -ENOEXEC; + } + + /* Prepare segment and load image */ + if ( ( rc = comboot_prepare_segment ( image ) ) != 0 ) { + return rc; + } + + /* Reset console */ + console_reset(); + + return comboot_exec_loop ( image ); +} + +/** SYSLINUX COMBOOT (16-bit) image type */ +struct image_type comboot_image_type __image_type ( PROBE_NORMAL ) = { + .name = "COMBOOT", + .probe = comboot_probe, + .exec = comboot_exec, +}; diff --git a/src/arch/x86/image/elfboot.c b/src/arch/x86/image/elfboot.c new file mode 100644 index 00000000..dc356892 --- /dev/null +++ b/src/arch/x86/image/elfboot.c @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2008 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** + * @file + * + * ELF bootable image + * + */ + +FEATURE ( FEATURE_IMAGE, "ELF", DHCP_EB_FEATURE_ELF, 1 ); + +/** + * Execute ELF image + * + * @v image ELF image + * @ret rc Return status code + */ +static int elfboot_exec ( struct image *image ) { + physaddr_t entry; + physaddr_t max; + int rc; + + /* Load the image using core ELF support */ + if ( ( rc = elf_load ( image, &entry, &max ) ) != 0 ) { + DBGC ( image, "ELF %p could not load: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* An ELF image has no callback interface, so we need to shut + * down before invoking it. + */ + shutdown_boot(); + + /* Jump to OS with flat physical addressing */ + DBGC ( image, "ELF %p starting execution at %lx\n", image, entry ); + __asm__ __volatile__ ( PHYS_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + "call *%%edi\n\t" + "popl %%ebp\n\t" /* gcc bug */ ) + : : "D" ( entry ) + : "eax", "ebx", "ecx", "edx", "esi", "memory" ); + + DBGC ( image, "ELF %p returned\n", image ); + + /* It isn't safe to continue after calling shutdown() */ + while ( 1 ) {} + + return -ECANCELED; /* -EIMPOSSIBLE, anyone? 
*/ +} + +/** + * Check that ELF segment uses flat physical addressing + * + * @v image ELF file + * @v phdr ELF program header + * @v dest Destination address + * @ret rc Return status code + */ +static int elfboot_check_segment ( struct image *image, Elf_Phdr *phdr, + physaddr_t dest ) { + + /* Check that ELF segment uses flat physical addressing */ + if ( phdr->p_vaddr != dest ) { + DBGC ( image, "ELF %p uses virtual addressing (phys %x, " + "virt %x)\n", image, phdr->p_paddr, phdr->p_vaddr ); + return -ENOEXEC; + } + + return 0; +} + +/** + * Probe ELF image + * + * @v image ELF file + * @ret rc Return status code + */ +static int elfboot_probe ( struct image *image ) { + Elf32_Ehdr ehdr; + static const uint8_t e_ident[] = { + [EI_MAG0] = ELFMAG0, + [EI_MAG1] = ELFMAG1, + [EI_MAG2] = ELFMAG2, + [EI_MAG3] = ELFMAG3, + [EI_CLASS] = ELFCLASS32, + [EI_DATA] = ELFDATA2LSB, + [EI_VERSION] = EV_CURRENT, + }; + physaddr_t entry; + physaddr_t max; + int rc; + + /* Read ELF header */ + copy_from_user ( &ehdr, image->data, 0, sizeof ( ehdr ) ); + if ( memcmp ( ehdr.e_ident, e_ident, sizeof ( e_ident ) ) != 0 ) { + DBGC ( image, "Invalid ELF identifier\n" ); + return -ENOEXEC; + } + + /* Check that this image uses flat physical addressing */ + if ( ( rc = elf_segments ( image, &ehdr, elfboot_check_segment, + &entry, &max ) ) != 0 ) { + DBGC ( image, "Unloadable ELF image\n" ); + return rc; + } + + return 0; +} + +/** ELF image type */ +struct image_type elfboot_image_type __image_type ( PROBE_NORMAL ) = { + .name = "ELF", + .probe = elfboot_probe, + .exec = elfboot_exec, +}; diff --git a/src/arch/x86/image/initrd.c b/src/arch/x86/image/initrd.c new file mode 100644 index 00000000..8f6366d3 --- /dev/null +++ b/src/arch/x86/image/initrd.c @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2012 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * Initial ramdisk (initrd) reshuffling + * + */ + +/** Maximum address available for initrd */ +userptr_t initrd_top; + +/** Minimum address available for initrd */ +userptr_t initrd_bottom; + +/** + * Squash initrds as high as possible in memory + * + * @v top Highest possible address + * @ret used Lowest address used by initrds + */ +static userptr_t initrd_squash_high ( userptr_t top ) { + userptr_t current = top; + struct image *initrd; + struct image *highest; + size_t len; + + /* Squash up any initrds already within or below the region */ + while ( 1 ) { + + /* Find the highest image not yet in its final position */ + highest = NULL; + for_each_image ( initrd ) { + if ( ( userptr_sub ( initrd->data, current ) < 0 ) && + ( ( highest == NULL ) || + ( userptr_sub ( initrd->data, + highest->data ) > 0 ) ) ) { + highest = initrd; + } + } + if ( ! 
highest ) + break; + + /* Move this image to its final position */ + len = ( ( highest->len + INITRD_ALIGN - 1 ) & + ~( INITRD_ALIGN - 1 ) ); + current = userptr_sub ( current, len ); + DBGC ( &images, "INITRD squashing %s [%#08lx,%#08lx)->" + "[%#08lx,%#08lx)\n", highest->name, + user_to_phys ( highest->data, 0 ), + user_to_phys ( highest->data, highest->len ), + user_to_phys ( current, 0 ), + user_to_phys ( current, highest->len ) ); + memmove_user ( current, 0, highest->data, 0, highest->len ); + highest->data = current; + } + + /* Copy any remaining initrds (e.g. embedded images) to the region */ + for_each_image ( initrd ) { + if ( userptr_sub ( initrd->data, top ) >= 0 ) { + len = ( ( initrd->len + INITRD_ALIGN - 1 ) & + ~( INITRD_ALIGN - 1 ) ); + current = userptr_sub ( current, len ); + DBGC ( &images, "INITRD copying %s [%#08lx,%#08lx)->" + "[%#08lx,%#08lx)\n", initrd->name, + user_to_phys ( initrd->data, 0 ), + user_to_phys ( initrd->data, initrd->len ), + user_to_phys ( current, 0 ), + user_to_phys ( current, initrd->len ) ); + memcpy_user ( current, 0, initrd->data, 0, + initrd->len ); + initrd->data = current; + } + } + + return current; +} + +/** + * Swap position of two adjacent initrds + * + * @v low Lower initrd + * @v high Higher initrd + * @v free Free space + * @v free_len Length of free space + */ +static void initrd_swap ( struct image *low, struct image *high, + userptr_t free, size_t free_len ) { + size_t len = 0; + size_t frag_len; + size_t new_len; + + DBGC ( &images, "INITRD swapping %s [%#08lx,%#08lx)<->[%#08lx,%#08lx) " + "%s\n", low->name, user_to_phys ( low->data, 0 ), + user_to_phys ( low->data, low->len ), + user_to_phys ( high->data, 0 ), + user_to_phys ( high->data, high->len ), high->name ); + + /* Round down length of free space */ + free_len &= ~( INITRD_ALIGN - 1 ); + assert ( free_len > 0 ); + + /* Swap image data */ + while ( len < high->len ) { + + /* Calculate maximum fragment length */ + frag_len = ( high->len - len ); + 
if ( frag_len > free_len ) + frag_len = free_len; + new_len = ( ( len + frag_len + INITRD_ALIGN - 1 ) & + ~( INITRD_ALIGN - 1 ) ); + + /* Swap fragments */ + memcpy_user ( free, 0, high->data, len, frag_len ); + memmove_user ( low->data, new_len, low->data, len, low->len ); + memcpy_user ( low->data, len, free, 0, frag_len ); + len = new_len; + } + + /* Adjust data pointers */ + high->data = low->data; + low->data = userptr_add ( low->data, len ); +} + +/** + * Swap position of any two adjacent initrds not currently in the correct order + * + * @v free Free space + * @v free_len Length of free space + * @ret swapped A pair of initrds was swapped + */ +static int initrd_swap_any ( userptr_t free, size_t free_len ) { + struct image *low; + struct image *high; + size_t padded_len; + userptr_t adjacent; + + /* Find any pair of initrds that can be swapped */ + for_each_image ( low ) { + + /* Calculate location of adjacent image (if any) */ + padded_len = ( ( low->len + INITRD_ALIGN - 1 ) & + ~( INITRD_ALIGN - 1 ) ); + adjacent = userptr_add ( low->data, padded_len ); + + /* Search for adjacent image */ + for_each_image ( high ) { + + /* If we have found the adjacent image, swap and exit */ + if ( high->data == adjacent ) { + initrd_swap ( low, high, free, free_len ); + return 1; + } + + /* Stop search if all remaining potential + * adjacent images are already in the correct + * order. + */ + if ( high == low ) + break; + } + } + + /* Nothing swapped */ + return 0; +} + +/** + * Dump initrd locations (for debug) + * + */ +static void initrd_dump ( void ) { + struct image *initrd; + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return; + + /* Dump initrd locations */ + for_each_image ( initrd ) { + DBGC ( &images, "INITRD %s at [%#08lx,%#08lx)\n", + initrd->name, user_to_phys ( initrd->data, 0 ), + user_to_phys ( initrd->data, initrd->len ) ); + DBGC2_MD5A ( &images, user_to_phys ( initrd->data, 0 ), + user_to_virt ( initrd->data, 0 ), initrd->len ); + } +} + +/** + * Reshuffle initrds into desired order at top of memory + * + * @v bottom Lowest address available for initrds + * + * After this function returns, the initrds have been rearranged in + * memory and the external heap structures will have been corrupted. + * Reshuffling must therefore take place immediately prior to jumping + * to the loaded OS kernel; no further execution within iPXE is + * permitted. + */ +void initrd_reshuffle ( userptr_t bottom ) { + userptr_t top; + userptr_t used; + userptr_t free; + size_t free_len; + + /* Calculate limits of available space for initrds */ + top = initrd_top; + if ( userptr_sub ( initrd_bottom, bottom ) > 0 ) + bottom = initrd_bottom; + + /* Debug */ + DBGC ( &images, "INITRD region [%#08lx,%#08lx)\n", + user_to_phys ( bottom, 0 ), user_to_phys ( top, 0 ) ); + initrd_dump(); + + /* Squash initrds as high as possible in memory */ + used = initrd_squash_high ( top ); + + /* Calculate available free space */ + free = bottom; + free_len = userptr_sub ( used, free ); + + /* Bubble-sort initrds into desired order */ + while ( initrd_swap_any ( free, free_len ) ) {} + + /* Debug */ + initrd_dump(); +} + +/** + * Check that there is enough space to reshuffle initrds + * + * @v len Total length of initrds (including padding) + * @v bottom Lowest address available for initrds + * @ret rc Return status code + */ +int initrd_reshuffle_check ( size_t len, userptr_t bottom ) { + userptr_t top; + size_t available; + + /* Calculate limits of available space for initrds */ + top = initrd_top; + if ( userptr_sub ( initrd_bottom, bottom ) > 0 ) + bottom = initrd_bottom; + available = 
userptr_sub ( top, bottom ); + + /* Allow for a sensible minimum amount of free space */ + len += INITRD_MIN_FREE_LEN; + + /* Check for available space */ + return ( ( len < available ) ? 0 : -ENOBUFS ); +} + +/** + * initrd startup function + * + */ +static void initrd_startup ( void ) { + size_t len; + + /* Record largest memory block available. Do this after any + * allocations made during driver startup (e.g. large host + * memory blocks for Infiniband devices, which may still be in + * use at the time of rearranging if a SAN device is hooked) + * but before any allocations for downloaded images (which we + * can safely reuse when rearranging). + */ + len = largest_memblock ( &initrd_bottom ); + initrd_top = userptr_add ( initrd_bottom, len ); +} + +/** initrd startup function */ +struct startup_fn startup_initrd __startup_fn ( STARTUP_LATE ) = { + .name = "initrd", + .startup = initrd_startup, +}; diff --git a/src/arch/x86/image/multiboot.c b/src/arch/x86/image/multiboot.c new file mode 100644 index 00000000..0c85df70 --- /dev/null +++ b/src/arch/x86/image/multiboot.c @@ -0,0 +1,492 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Multiboot image format + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +FEATURE ( FEATURE_IMAGE, "MBOOT", DHCP_EB_FEATURE_MULTIBOOT, 1 ); + +/** + * Maximum number of modules we will allow for + * + * If this has bitten you: sorry. I did have a perfect scheme with a + * dynamically allocated list of modules on the protected-mode stack, + * but it was incompatible with some broken OSes that can only access + * low memory at boot time (even though we kindly set up 4GB flat + * physical addressing as per the multiboot specification. + * + */ +#define MAX_MODULES 8 + +/** + * Maximum combined length of command lines + * + * Again; sorry. Some broken OSes zero out any non-base memory that + * isn't part of the loaded module set, so we can't just use + * virt_to_phys(cmdline) to point to the command lines, even though + * this would comply with the Multiboot spec. + */ +#define MB_MAX_CMDLINE 512 + +/** Multiboot flags that we support */ +#define MB_SUPPORTED_FLAGS ( MB_FLAG_PGALIGN | MB_FLAG_MEMMAP | \ + MB_FLAG_VIDMODE | MB_FLAG_RAW ) + +/** Compulsory feature multiboot flags */ +#define MB_COMPULSORY_FLAGS 0x0000ffff + +/** Optional feature multiboot flags */ +#define MB_OPTIONAL_FLAGS 0xffff0000 + +/** + * Multiboot flags that we don't support + * + * We only care about the compulsory feature flags (bits 0-15); we are + * allowed to ignore the optional feature flags. 
+ */ +#define MB_UNSUPPORTED_FLAGS ( MB_COMPULSORY_FLAGS & ~MB_SUPPORTED_FLAGS ) + +/** A multiboot header descriptor */ +struct multiboot_header_info { + /** The actual multiboot header */ + struct multiboot_header mb; + /** Offset of header within the multiboot image */ + size_t offset; +}; + +/** Multiboot module command lines */ +static char __bss16_array ( mb_cmdlines, [MB_MAX_CMDLINE] ); +#define mb_cmdlines __use_data16 ( mb_cmdlines ) + +/** Offset within module command lines */ +static unsigned int mb_cmdline_offset; + +/** + * Build multiboot memory map + * + * @v image Multiboot image + * @v mbinfo Multiboot information structure + * @v mbmemmap Multiboot memory map + * @v limit Maxmimum number of memory map entries + */ +static void multiboot_build_memmap ( struct image *image, + struct multiboot_info *mbinfo, + struct multiboot_memory_map *mbmemmap, + unsigned int limit ) { + struct memory_map memmap; + unsigned int i; + + /* Get memory map */ + get_memmap ( &memmap ); + + /* Translate into multiboot format */ + memset ( mbmemmap, 0, sizeof ( *mbmemmap ) ); + for ( i = 0 ; i < memmap.count ; i++ ) { + if ( i >= limit ) { + DBGC ( image, "MULTIBOOT %p limit of %d memmap " + "entries reached\n", image, limit ); + break; + } + mbmemmap[i].size = ( sizeof ( mbmemmap[i] ) - + sizeof ( mbmemmap[i].size ) ); + mbmemmap[i].base_addr = memmap.regions[i].start; + mbmemmap[i].length = ( memmap.regions[i].end - + memmap.regions[i].start ); + mbmemmap[i].type = MBMEM_RAM; + mbinfo->mmap_length += sizeof ( mbmemmap[i] ); + if ( memmap.regions[i].start == 0 ) + mbinfo->mem_lower = ( memmap.regions[i].end / 1024 ); + if ( memmap.regions[i].start == 0x100000 ) + mbinfo->mem_upper = ( ( memmap.regions[i].end - + 0x100000 ) / 1024 ); + } +} + +/** + * Add command line in base memory + * + * @v image Image + * @ret physaddr Physical address of command line + */ +static physaddr_t multiboot_add_cmdline ( struct image *image ) { + char *mb_cmdline = ( mb_cmdlines + 
mb_cmdline_offset ); + size_t remaining = ( sizeof ( mb_cmdlines ) - mb_cmdline_offset ); + char *buf = mb_cmdline; + size_t len; + + /* Copy image URI to base memory buffer as start of command line */ + len = ( format_uri ( image->uri, buf, remaining ) + 1 /* NUL */ ); + if ( len > remaining ) + len = remaining; + mb_cmdline_offset += len; + buf += len; + remaining -= len; + + /* Copy command line to base memory buffer, if present */ + if ( image->cmdline ) { + mb_cmdline_offset--; /* Strip NUL */ + buf--; + remaining++; + len = ( snprintf ( buf, remaining, " %s", + image->cmdline ) + 1 /* NUL */ ); + if ( len > remaining ) + len = remaining; + mb_cmdline_offset += len; + } + + return virt_to_phys ( mb_cmdline ); +} + +/** + * Add multiboot modules + * + * @v image Multiboot image + * @v start Start address for modules + * @v mbinfo Multiboot information structure + * @v modules Multiboot module list + * @ret rc Return status code + */ +static int multiboot_add_modules ( struct image *image, physaddr_t start, + struct multiboot_info *mbinfo, + struct multiboot_module *modules, + unsigned int limit ) { + struct image *module_image; + struct multiboot_module *module; + int rc; + + /* Add each image as a multiboot module */ + for_each_image ( module_image ) { + + if ( mbinfo->mods_count >= limit ) { + DBGC ( image, "MULTIBOOT %p limit of %d modules " + "reached\n", image, limit ); + break; + } + + /* Do not include kernel image itself as a module */ + if ( module_image == image ) + continue; + + /* Page-align the module */ + start = ( ( start + 0xfff ) & ~0xfff ); + + /* Prepare segment */ + if ( ( rc = prep_segment ( phys_to_user ( start ), + module_image->len, + module_image->len ) ) != 0 ) { + DBGC ( image, "MULTIBOOT %p could not prepare module " + "%s: %s\n", image, module_image->name, + strerror ( rc ) ); + return rc; + } + + /* Copy module */ + memcpy_user ( phys_to_user ( start ), 0, + module_image->data, 0, module_image->len ); + + /* Add module to list */ + 
module = &modules[mbinfo->mods_count++]; + module->mod_start = start; + module->mod_end = ( start + module_image->len ); + module->string = multiboot_add_cmdline ( module_image ); + module->reserved = 0; + DBGC ( image, "MULTIBOOT %p module %s is [%x,%x)\n", + image, module_image->name, module->mod_start, + module->mod_end ); + start += module_image->len; + } + + return 0; +} + +/** + * The multiboot information structure + * + * Kept in base memory because some OSes won't find it elsewhere, + * along with the other structures belonging to the Multiboot + * information table. + */ +static struct multiboot_info __bss16 ( mbinfo ); +#define mbinfo __use_data16 ( mbinfo ) + +/** The multiboot bootloader name */ +static char __bss16_array ( mb_bootloader_name, [32] ); +#define mb_bootloader_name __use_data16 ( mb_bootloader_name ) + +/** The multiboot memory map */ +static struct multiboot_memory_map + __bss16_array ( mbmemmap, [MAX_MEMORY_REGIONS] ); +#define mbmemmap __use_data16 ( mbmemmap ) + +/** The multiboot module list */ +static struct multiboot_module __bss16_array ( mbmodules, [MAX_MODULES] ); +#define mbmodules __use_data16 ( mbmodules ) + +/** + * Find multiboot header + * + * @v image Multiboot file + * @v hdr Multiboot header descriptor to fill in + * @ret rc Return status code + */ +static int multiboot_find_header ( struct image *image, + struct multiboot_header_info *hdr ) { + uint32_t buf[64]; + size_t offset; + unsigned int buf_idx; + uint32_t checksum; + + /* Scan through first 8kB of image file 256 bytes at a time. + * (Use the buffering to avoid the overhead of a + * copy_from_user() for every dword.) 
+ */ + for ( offset = 0 ; offset < 8192 ; offset += sizeof ( buf[0] ) ) { + /* Check for end of image */ + if ( offset > image->len ) + break; + /* Refill buffer if applicable */ + buf_idx = ( ( offset % sizeof ( buf ) ) / sizeof ( buf[0] ) ); + if ( buf_idx == 0 ) { + copy_from_user ( buf, image->data, offset, + sizeof ( buf ) ); + } + /* Check signature */ + if ( buf[buf_idx] != MULTIBOOT_HEADER_MAGIC ) + continue; + /* Copy header and verify checksum */ + copy_from_user ( &hdr->mb, image->data, offset, + sizeof ( hdr->mb ) ); + checksum = ( hdr->mb.magic + hdr->mb.flags + + hdr->mb.checksum ); + if ( checksum != 0 ) + continue; + /* Record offset of multiboot header and return */ + hdr->offset = offset; + return 0; + } + + /* No multiboot header found */ + return -ENOEXEC; +} + +/** + * Load raw multiboot image into memory + * + * @v image Multiboot file + * @v hdr Multiboot header descriptor + * @ret entry Entry point + * @ret max Maximum used address + * @ret rc Return status code + */ +static int multiboot_load_raw ( struct image *image, + struct multiboot_header_info *hdr, + physaddr_t *entry, physaddr_t *max ) { + size_t offset; + size_t filesz; + size_t memsz; + userptr_t buffer; + int rc; + + /* Sanity check */ + if ( ! ( hdr->mb.flags & MB_FLAG_RAW ) ) { + DBGC ( image, "MULTIBOOT %p is not flagged as a raw image\n", + image ); + return -EINVAL; + } + + /* Verify and prepare segment */ + offset = ( hdr->offset - hdr->mb.header_addr + hdr->mb.load_addr ); + filesz = ( hdr->mb.load_end_addr ? + ( hdr->mb.load_end_addr - hdr->mb.load_addr ) : + ( image->len - offset ) ); + memsz = ( hdr->mb.bss_end_addr ? 
+ ( hdr->mb.bss_end_addr - hdr->mb.load_addr ) : filesz ); + buffer = phys_to_user ( hdr->mb.load_addr ); + if ( ( rc = prep_segment ( buffer, filesz, memsz ) ) != 0 ) { + DBGC ( image, "MULTIBOOT %p could not prepare segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Copy image to segment */ + memcpy_user ( buffer, 0, image->data, offset, filesz ); + + /* Record execution entry point and maximum used address */ + *entry = hdr->mb.entry_addr; + *max = ( hdr->mb.load_addr + memsz ); + + return 0; +} + +/** + * Load ELF multiboot image into memory + * + * @v image Multiboot file + * @ret entry Entry point + * @ret max Maximum used address + * @ret rc Return status code + */ +static int multiboot_load_elf ( struct image *image, physaddr_t *entry, + physaddr_t *max ) { + int rc; + + /* Load ELF image*/ + if ( ( rc = elf_load ( image, entry, max ) ) != 0 ) { + DBGC ( image, "MULTIBOOT %p ELF image failed to load: %s\n", + image, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Execute multiboot image + * + * @v image Multiboot image + * @ret rc Return status code + */ +static int multiboot_exec ( struct image *image ) { + struct multiboot_header_info hdr; + physaddr_t entry; + physaddr_t max; + int rc; + + /* Locate multiboot header, if present */ + if ( ( rc = multiboot_find_header ( image, &hdr ) ) != 0 ) { + DBGC ( image, "MULTIBOOT %p has no multiboot header\n", + image ); + return rc; + } + + /* Abort if we detect flags that we cannot support */ + if ( hdr.mb.flags & MB_UNSUPPORTED_FLAGS ) { + DBGC ( image, "MULTIBOOT %p flags %08x not supported\n", + image, ( hdr.mb.flags & MB_UNSUPPORTED_FLAGS ) ); + return -ENOTSUP; + } + + /* There is technically a bit MB_FLAG_RAW to indicate whether + * this is an ELF or a raw image. In practice, grub will use + * the ELF header if present, and Solaris relies on this + * behaviour. 
+ */ + if ( ( ( rc = multiboot_load_elf ( image, &entry, &max ) ) != 0 ) && + ( ( rc = multiboot_load_raw ( image, &hdr, &entry, &max ) ) != 0 )) + return rc; + + /* Populate multiboot information structure */ + memset ( &mbinfo, 0, sizeof ( mbinfo ) ); + mbinfo.flags = ( MBI_FLAG_LOADER | MBI_FLAG_MEM | MBI_FLAG_MMAP | + MBI_FLAG_CMDLINE | MBI_FLAG_MODS ); + mb_cmdline_offset = 0; + mbinfo.cmdline = multiboot_add_cmdline ( image ); + mbinfo.mods_addr = virt_to_phys ( mbmodules ); + mbinfo.mmap_addr = virt_to_phys ( mbmemmap ); + snprintf ( mb_bootloader_name, sizeof ( mb_bootloader_name ), + "iPXE %s", product_version ); + mbinfo.boot_loader_name = virt_to_phys ( mb_bootloader_name ); + if ( ( rc = multiboot_add_modules ( image, max, &mbinfo, mbmodules, + ( sizeof ( mbmodules ) / + sizeof ( mbmodules[0] ) ) ) ) !=0) + return rc; + + /* Multiboot images may not return and have no callback + * interface, so shut everything down prior to booting the OS. + */ + shutdown_boot(); + + /* Build memory map after unhiding bootloader memory regions as part of + * shutting everything down. + */ + multiboot_build_memmap ( image, &mbinfo, mbmemmap, + ( sizeof(mbmemmap) / sizeof(mbmemmap[0]) ) ); + + /* Jump to OS with flat physical addressing */ + DBGC ( image, "MULTIBOOT %p starting execution at %lx\n", + image, entry ); + __asm__ __volatile__ ( PHYS_CODE ( "pushl %%ebp\n\t" + "call *%%edi\n\t" + "popl %%ebp\n\t" ) + : : "a" ( MULTIBOOT_BOOTLOADER_MAGIC ), + "b" ( virt_to_phys ( &mbinfo ) ), + "D" ( entry ) + : "ecx", "edx", "esi", "memory" ); + + DBGC ( image, "MULTIBOOT %p returned\n", image ); + + /* It isn't safe to continue after calling shutdown() */ + while ( 1 ) {} + + return -ECANCELED; /* -EIMPOSSIBLE, anyone? 
*/ +} + +/** + * Probe multiboot image + * + * @v image Multiboot file + * @ret rc Return status code + */ +static int multiboot_probe ( struct image *image ) { + struct multiboot_header_info hdr; + int rc; + + /* Locate multiboot header, if present */ + if ( ( rc = multiboot_find_header ( image, &hdr ) ) != 0 ) { + DBGC ( image, "MULTIBOOT %p has no multiboot header\n", + image ); + return rc; + } + DBGC ( image, "MULTIBOOT %p found header with flags %08x\n", + image, hdr.mb.flags ); + + return 0; +} + +/** Multiboot image type */ +struct image_type multiboot_image_type __image_type ( PROBE_MULTIBOOT ) = { + .name = "Multiboot", + .probe = multiboot_probe, + .exec = multiboot_exec, +}; diff --git a/src/arch/x86/image/nbi.c b/src/arch/x86/image/nbi.c new file mode 100644 index 00000000..b691bee2 --- /dev/null +++ b/src/arch/x86/image/nbi.c @@ -0,0 +1,427 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * NBI image format. + * + * The Net Boot Image format is defined by the "Draft Net Boot Image + * Proposal 0.3" by Jamie Honan, Gero Kuhlmann and Ken Yap. It is now + * considered to be a legacy format, but it still included because a + * large amount of software (e.g. nymph, LTSP) makes use of NBI files. + * + * Etherboot does not implement the INT 78 callback interface + * described by the NBI specification. For a callback interface on + * x86 architecture, use PXE. + * + */ + +FEATURE ( FEATURE_IMAGE, "NBI", DHCP_EB_FEATURE_NBI, 1 ); + +/** + * An NBI image header + * + * Note that the length field uses a peculiar encoding; use the + * NBI_LENGTH() macro to decode the actual header length. 
+ * + */ +struct imgheader { + unsigned long magic; /**< Magic number (NBI_MAGIC) */ + union { + unsigned char length; /**< Nibble-coded header length */ + unsigned long flags; /**< Image flags */ + }; + segoff_t location; /**< 16-bit seg:off header location */ + union { + segoff_t segoff; /**< 16-bit seg:off entry point */ + unsigned long linear; /**< 32-bit entry point */ + } execaddr; +} __attribute__ (( packed )); + +/** NBI magic number */ +#define NBI_MAGIC 0x1B031336UL + +/* Interpretation of the "length" fields */ +#define NBI_NONVENDOR_LENGTH(len) ( ( (len) & 0x0f ) << 2 ) +#define NBI_VENDOR_LENGTH(len) ( ( (len) & 0xf0 ) >> 2 ) +#define NBI_LENGTH(len) ( NBI_NONVENDOR_LENGTH(len) + NBI_VENDOR_LENGTH(len) ) + +/* Interpretation of the "flags" fields */ +#define NBI_PROGRAM_RETURNS(flags) ( (flags) & ( 1 << 8 ) ) +#define NBI_LINEAR_EXEC_ADDR(flags) ( (flags) & ( 1 << 31 ) ) + +/** NBI header length */ +#define NBI_HEADER_LENGTH 512 + +/** + * An NBI segment header + * + * Note that the length field uses a peculiar encoding; use the + * NBI_LENGTH() macro to decode the actual header length. 
+ * + */ +struct segheader { + unsigned char length; /**< Nibble-coded header length */ + unsigned char vendortag; /**< Vendor-defined private tag */ + unsigned char reserved; + unsigned char flags; /**< Segment flags */ + unsigned long loadaddr; /**< Load address */ + unsigned long imglength; /**< Segment length in NBI file */ + unsigned long memlength; /**< Segment length in memory */ +}; + +/* Interpretation of the "flags" fields */ +#define NBI_LOADADDR_FLAGS(flags) ( (flags) & 0x03 ) +#define NBI_LOADADDR_ABS 0x00 +#define NBI_LOADADDR_AFTER 0x01 +#define NBI_LOADADDR_END 0x02 +#define NBI_LOADADDR_BEFORE 0x03 +#define NBI_LAST_SEGHEADER(flags) ( (flags) & ( 1 << 2 ) ) + +/* Define a type for passing info to a loaded program */ +struct ebinfo { + uint8_t major, minor; /* Version */ + uint16_t flags; /* Bit flags */ +}; + +/** + * Prepare a segment for an NBI image + * + * @v image NBI image + * @v offset Offset within NBI image + * @v filesz Length of initialised-data portion of the segment + * @v memsz Total length of the segment + * @v src Source for initialised data + * @ret rc Return status code + */ +static int nbi_prepare_segment ( struct image *image, size_t offset __unused, + userptr_t dest, size_t filesz, size_t memsz ){ + int rc; + + if ( ( rc = prep_segment ( dest, filesz, memsz ) ) != 0 ) { + DBGC ( image, "NBI %p could not prepare segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Load a segment for an NBI image + * + * @v image NBI image + * @v offset Offset within NBI image + * @v filesz Length of initialised-data portion of the segment + * @v memsz Total length of the segment + * @v src Source for initialised data + * @ret rc Return status code + */ +static int nbi_load_segment ( struct image *image, size_t offset, + userptr_t dest, size_t filesz, + size_t memsz __unused ) { + memcpy_user ( dest, 0, image->data, offset, filesz ); + return 0; +} + +/** + * Process segments of an NBI image + * + * @v image 
NBI image + * @v imgheader Image header information + * @v process Function to call for each segment + * @ret rc Return status code + */ +static int nbi_process_segments ( struct image *image, + struct imgheader *imgheader, + int ( * process ) ( struct image *image, + size_t offset, + userptr_t dest, + size_t filesz, + size_t memsz ) ) { + struct segheader sh; + size_t offset = 0; + size_t sh_off; + userptr_t dest; + size_t filesz; + size_t memsz; + int rc; + + /* Copy image header to target location */ + dest = real_to_user ( imgheader->location.segment, + imgheader->location.offset ); + filesz = memsz = NBI_HEADER_LENGTH; + if ( ( rc = process ( image, offset, dest, filesz, memsz ) ) != 0 ) + return rc; + offset += filesz; + + /* Process segments in turn */ + sh_off = NBI_LENGTH ( imgheader->length ); + do { + /* Read segment header */ + copy_from_user ( &sh, image->data, sh_off, sizeof ( sh ) ); + if ( sh.length == 0 ) { + /* Avoid infinite loop? */ + DBGC ( image, "NBI %p invalid segheader length 0\n", + image ); + return -ENOEXEC; + } + + /* Calculate segment load address */ + switch ( NBI_LOADADDR_FLAGS ( sh.flags ) ) { + case NBI_LOADADDR_ABS: + dest = phys_to_user ( sh.loadaddr ); + break; + case NBI_LOADADDR_AFTER: + dest = userptr_add ( dest, memsz + sh.loadaddr ); + break; + case NBI_LOADADDR_BEFORE: + dest = userptr_add ( dest, -sh.loadaddr ); + break; + case NBI_LOADADDR_END: + /* Not correct according to the spec, but + * maintains backwards compatibility with + * previous versions of Etherboot. 
+ */ + dest = phys_to_user ( ( extmemsize() + 1024 ) * 1024 + - sh.loadaddr ); + break; + default: + /* Cannot be reached */ + assert ( 0 ); + } + + /* Process this segment */ + filesz = sh.imglength; + memsz = sh.memlength; + if ( ( offset + filesz ) > image->len ) { + DBGC ( image, "NBI %p segment outside file\n", image ); + return -ENOEXEC; + } + if ( ( rc = process ( image, offset, dest, + filesz, memsz ) ) != 0 ) { + return rc; + } + offset += filesz; + + /* Next segheader */ + sh_off += NBI_LENGTH ( sh.length ); + if ( sh_off >= NBI_HEADER_LENGTH ) { + DBGC ( image, "NBI %p header overflow\n", image ); + return -ENOEXEC; + } + + } while ( ! NBI_LAST_SEGHEADER ( sh.flags ) ); + + if ( offset != image->len ) { + DBGC ( image, "NBI %p length wrong (file %zd, metadata %zd)\n", + image, image->len, offset ); + return -ENOEXEC; + } + + return 0; +} + +/** + * Boot a 16-bit NBI image + * + * @v imgheader Image header information + * @ret rc Return status code, if image returns + */ +static int nbi_boot16 ( struct image *image, struct imgheader *imgheader ) { + int discard_D, discard_S, discard_b; + int32_t rc; + + DBGC ( image, "NBI %p executing 16-bit image at %04x:%04x\n", image, + imgheader->execaddr.segoff.segment, + imgheader->execaddr.segoff.offset ); + + __asm__ __volatile__ ( + REAL_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + "pushw %%ds\n\t" /* far pointer to bootp data */ + "pushw %%bx\n\t" + "pushl %%esi\n\t" /* location */ + "pushw %%cs\n\t" /* lcall execaddr */ + "call 1f\n\t" + "jmp 2f\n\t" + "\n1:\n\t" + "pushl %%edi\n\t" + "lret\n\t" + "\n2:\n\t" + "addw $8,%%sp\n\t" /* clean up stack */ + "popl %%ebp\n\t" /* gcc bug */ ) + : "=a" ( rc ), "=D" ( discard_D ), "=S" ( discard_S ), + "=b" ( discard_b ) + : "D" ( imgheader->execaddr.segoff ), + "S" ( imgheader->location ), + "b" ( __from_data16 ( basemem_packet ) ) + : "ecx", "edx" ); + + return rc; +} + +/** + * Boot a 32-bit NBI image + * + * @v imgheader Image header information + * @ret rc Return status 
code, if image returns + */ +static int nbi_boot32 ( struct image *image, struct imgheader *imgheader ) { + struct ebinfo loaderinfo = { + product_major_version, product_minor_version, + 0 + }; + int discard_D, discard_S, discard_b; + int32_t rc; + + DBGC ( image, "NBI %p executing 32-bit image at %lx\n", + image, imgheader->execaddr.linear ); + + /* Jump to OS with flat physical addressing */ + __asm__ __volatile__ ( + PHYS_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + "pushl %%ebx\n\t" /* bootp data */ + "pushl %%esi\n\t" /* imgheader */ + "pushl %%eax\n\t" /* loaderinfo */ + "call *%%edi\n\t" + "addl $12, %%esp\n\t" /* clean up stack */ + "popl %%ebp\n\t" /* gcc bug */ ) + : "=a" ( rc ), "=D" ( discard_D ), "=S" ( discard_S ), + "=b" ( discard_b ) + : "D" ( imgheader->execaddr.linear ), + "S" ( ( imgheader->location.segment << 4 ) + + imgheader->location.offset ), + "b" ( virt_to_phys ( basemem_packet ) ), + "a" ( virt_to_phys ( &loaderinfo ) ) + : "ecx", "edx", "memory" ); + + return rc; +} + +/** + * Prepare DHCP parameter block for NBI image + * + * @v image NBI image + * @ret rc Return status code + */ +static int nbi_prepare_dhcp ( struct image *image ) { + struct net_device *boot_netdev; + int rc; + + boot_netdev = last_opened_netdev(); + if ( ! 
boot_netdev ) { + DBGC ( image, "NBI %p could not identify a network device\n", + image ); + return -ENODEV; + } + + if ( ( rc = create_fakedhcpack ( boot_netdev, basemem_packet, + sizeof ( basemem_packet ) ) ) != 0 ) { + DBGC ( image, "NBI %p failed to build DHCP packet\n", image ); + return rc; + } + + return 0; +} + +/** + * Execute a loaded NBI image + * + * @v image NBI image + * @ret rc Return status code + */ +static int nbi_exec ( struct image *image ) { + struct imgheader imgheader; + int may_return; + int rc; + + /* Retrieve image header */ + copy_from_user ( &imgheader, image->data, 0, sizeof ( imgheader ) ); + + DBGC ( image, "NBI %p placing header at %hx:%hx\n", image, + imgheader.location.segment, imgheader.location.offset ); + + /* NBI files can have overlaps between segments; the bss of + * one segment may overlap the initialised data of another. I + * assume this is a design flaw, but there are images out + * there that we need to work with. We therefore do two + * passes: first to initialise the segments, then to copy the + * data. This avoids zeroing out already-copied data. + */ + if ( ( rc = nbi_process_segments ( image, &imgheader, + nbi_prepare_segment ) ) != 0 ) + return rc; + if ( ( rc = nbi_process_segments ( image, &imgheader, + nbi_load_segment ) ) != 0 ) + return rc; + + /* Prepare DHCP option block */ + if ( ( rc = nbi_prepare_dhcp ( image ) ) != 0 ) + return rc; + + /* Shut down now if NBI image will not return */ + may_return = NBI_PROGRAM_RETURNS ( imgheader.flags ); + if ( ! may_return ) + shutdown_boot(); + + /* Execute NBI image */ + if ( NBI_LINEAR_EXEC_ADDR ( imgheader.flags ) ) { + rc = nbi_boot32 ( image, &imgheader ); + } else { + rc = nbi_boot16 ( image, &imgheader ); + } + + if ( ! 
may_return ) { + /* Cannot continue after shutdown() called */ + DBGC ( image, "NBI %p returned %d from non-returnable image\n", + image, rc ); + while ( 1 ) {} + } + + DBGC ( image, "NBI %p returned %d\n", image, rc ); + + return rc; +} + +/** + * Probe NBI image + * + * @v image NBI image + * @ret rc Return status code + */ +static int nbi_probe ( struct image *image ) { + struct imgheader imgheader; + + /* If we don't have enough data give up */ + if ( image->len < NBI_HEADER_LENGTH ) { + DBGC ( image, "NBI %p too short for an NBI image\n", image ); + return -ENOEXEC; + } + + /* Check image header */ + copy_from_user ( &imgheader, image->data, 0, sizeof ( imgheader ) ); + if ( imgheader.magic != NBI_MAGIC ) { + DBGC ( image, "NBI %p has no NBI signature\n", image ); + return -ENOEXEC; + } + + return 0; +} + +/** NBI image type */ +struct image_type nbi_image_type __image_type ( PROBE_NORMAL ) = { + .name = "NBI", + .probe = nbi_probe, + .exec = nbi_exec, +}; diff --git a/src/arch/x86/image/pxe_image.c b/src/arch/x86/image/pxe_image.c new file mode 100644 index 00000000..b6bcb18b --- /dev/null +++ b/src/arch/x86/image/pxe_image.c @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * PXE image format + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +FEATURE ( FEATURE_IMAGE, "PXE", DHCP_EB_FEATURE_PXE, 1 ); + +/** PXE command line */ +const char *pxe_cmdline; + +/** + * Execute PXE image + * + * @v image PXE image + * @ret rc Return status code + */ +static int pxe_exec ( struct image *image ) { + userptr_t buffer = real_to_user ( 0, 0x7c00 ); + struct net_device *netdev; + int rc; + + /* Verify and prepare segment */ + if ( ( rc = prep_segment ( buffer, image->len, image->len ) ) != 0 ) { + DBGC ( image, "IMAGE %p could not prepare segment: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Copy image to segment */ + memcpy_user ( buffer, 0, image->data, 0, image->len ); + + /* Arbitrarily pick the most recently opened network device */ + if ( ( netdev = last_opened_netdev() ) == NULL ) { + DBGC ( image, "IMAGE %p could not locate PXE net device\n", + image ); + return -ENODEV; + } + netdev_get ( netdev ); + + /* Activate PXE */ + pxe_activate ( netdev ); + + /* Construct fake DHCP packets */ + pxe_fake_cached_info(); + + /* Set PXE command line */ + pxe_cmdline = image->cmdline; + + /* Reset console since PXE NBP will probably use it */ + console_reset(); + + /* Disable IRQ, if applicable */ + if ( netdev_irq_supported ( netdev ) && netdev->dev->desc.irq ) + disable_irq ( netdev->dev->desc.irq ); + + /* Start PXE NBP */ + rc = pxe_start_nbp(); + + /* Clear PXE command line */ + pxe_cmdline = NULL; + + /* Deactivate PXE */ + pxe_deactivate(); + + /* Try to reopen network device. Ignore errors, since the NBP + * may have called PXENV_STOP_UNDI. 
+ */ + netdev_open ( netdev ); + netdev_put ( netdev ); + + return rc; +} + +/** + * Probe PXE image + * + * @v image PXE file + * @ret rc Return status code + */ +int pxe_probe ( struct image *image ) { + + /* Images too large to fit in base memory cannot be PXE + * images. We include this check to help prevent unrecognised + * images from being marked as PXE images, since PXE images + * have no signature we can check against. + */ + if ( image->len > ( 0xa0000 - 0x7c00 ) ) + return -ENOEXEC; + + /* Rejecting zero-length images is also useful, since these + * end up looking to the user like bugs in iPXE. + */ + if ( ! image->len ) + return -ENOEXEC; + + return 0; +} + +/** + * Probe PXE image (with rejection of potential EFI images) + * + * @v image PXE file + * @ret rc Return status code + */ +int pxe_probe_no_mz ( struct image *image ) { + uint16_t magic; + int rc; + + /* Probe PXE image */ + if ( ( rc = pxe_probe ( image ) ) != 0 ) + return rc; + + /* Reject image with an "MZ" signature which may indicate an + * EFI image incorrectly handed out to a BIOS system. + */ + if ( image->len >= sizeof ( magic ) ) { + copy_from_user ( &magic, image->data, 0, sizeof ( magic ) ); + if ( magic == cpu_to_le16 ( EFI_IMAGE_DOS_SIGNATURE ) ) { + DBGC ( image, "IMAGE %p may be an EFI image\n", + image ); + return -ENOTTY; + } + } + + return 0; +} + +/** PXE image type */ +struct image_type pxe_image_type[] __image_type ( PROBE_PXE ) = { + { + .name = "PXE-NBP", + .probe = pxe_probe_no_mz, + .exec = pxe_exec, + }, + { + .name = "PXE-NBP (may be EFI?)", + .probe = pxe_probe, + .exec = pxe_exec, + }, +}; diff --git a/src/arch/x86/image/sdi.c b/src/arch/x86/image/sdi.c new file mode 100644 index 00000000..fa2d0b73 --- /dev/null +++ b/src/arch/x86/image/sdi.c @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2012 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * System Deployment Image (SDI) + * + * Based on the MSDN article "RAM boot using SDI in Windows XP + * Embedded with Service Pack 1", available at the time of writing + * from: + * + * http://msdn.microsoft.com/en-us/library/ms838543.aspx + */ + +FEATURE ( FEATURE_IMAGE, "SDI", DHCP_EB_FEATURE_SDI, 1 ); + +/** + * Parse SDI image header + * + * @v image SDI file + * @v sdi SDI header to fill in + * @ret rc Return status code + */ +static int sdi_parse_header ( struct image *image, struct sdi_header *sdi ) { + + /* Sanity check */ + if ( image->len < sizeof ( *sdi ) ) { + DBGC ( image, "SDI %p too short for SDI header\n", image ); + return -ENOEXEC; + } + + /* Read in header */ + copy_from_user ( sdi, image->data, 0, sizeof ( *sdi ) ); + + /* Check signature */ + if ( sdi->magic != SDI_MAGIC ) { + DBGC ( image, "SDI %p is not an SDI image\n", image ); + return -ENOEXEC; + } + + return 0; +} + 
+/** + * Execute SDI image + * + * @v image SDI file + * @ret rc Return status code + */ +static int sdi_exec ( struct image *image ) { + struct sdi_header sdi; + uint32_t sdiptr; + int rc; + + /* Parse image header */ + if ( ( rc = sdi_parse_header ( image, &sdi ) ) != 0 ) + return rc; + + /* Check that image is bootable */ + if ( sdi.boot_size == 0 ) { + DBGC ( image, "SDI %p is not bootable\n", image ); + return -ENOTTY; + } + DBGC ( image, "SDI %p image at %08lx+%08zx\n", + image, user_to_phys ( image->data, 0 ), image->len ); + DBGC ( image, "SDI %p boot code at %08lx+%llx\n", image, + user_to_phys ( image->data, sdi.boot_offset ), sdi.boot_size ); + + /* Copy boot code */ + memcpy_user ( real_to_user ( SDI_BOOT_SEG, SDI_BOOT_OFF ), 0, + image->data, sdi.boot_offset, sdi.boot_size ); + + /* Jump to boot code */ + sdiptr = ( user_to_phys ( image->data, 0 ) | SDI_WTF ); + __asm__ __volatile__ ( REAL_CODE ( "ljmp %0, %1\n\t" ) + : : "i" ( SDI_BOOT_SEG ), + "i" ( SDI_BOOT_OFF ), + "d" ( sdiptr ) ); + + /* There is no way for the image to return, since we provide + * no return address. 
+ */ + assert ( 0 ); + + return -ECANCELED; /* -EIMPOSSIBLE */ +} + +/** + * Probe SDI image + * + * @v image SDI file + * @ret rc Return status code + */ +static int sdi_probe ( struct image *image ) { + struct sdi_header sdi; + int rc; + + /* Parse image */ + if ( ( rc = sdi_parse_header ( image, &sdi ) ) != 0 ) + return rc; + + return 0; +} + +/** SDI image type */ +struct image_type sdi_image_type __image_type ( PROBE_NORMAL ) = { + .name = "SDI", + .probe = sdi_probe, + .exec = sdi_exec, +}; diff --git a/src/arch/x86/include/basemem.h b/src/arch/x86/include/basemem.h new file mode 100644 index 00000000..01c2ea91 --- /dev/null +++ b/src/arch/x86/include/basemem.h @@ -0,0 +1,35 @@ +#ifndef _BASEMEM_H +#define _BASEMEM_H + +/** @file + * + * Base memory allocation + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** + * Read the BIOS free base memory counter + * + * @ret fbms Free base memory counter (in kB) + */ +static inline unsigned int get_fbms ( void ) { + uint16_t fbms; + + get_real ( fbms, BDA_SEG, BDA_FBMS ); + return fbms; +} + +extern void set_fbms ( unsigned int new_fbms ); + +/* Actually in hidemem.c, but putting it here avoids polluting the + * architecture-independent include/hidemem.h. 
+ */ +extern void hide_basemem ( void ); + +#endif /* _BASEMEM_H */ diff --git a/src/arch/x86/include/basemem_packet.h b/src/arch/x86/include/basemem_packet.h new file mode 100644 index 00000000..def6dee3 --- /dev/null +++ b/src/arch/x86/include/basemem_packet.h @@ -0,0 +1,15 @@ +#ifndef BASEMEM_PACKET_H +#define BASEMEM_PACKET_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** Maximum length of base memory packet buffer */ +#define BASEMEM_PACKET_LEN 1514 + +/** Base memory packet buffer */ +extern char __bss16_array ( basemem_packet, [BASEMEM_PACKET_LEN] ); +#define basemem_packet __use_data16 ( basemem_packet ) + +#endif /* BASEMEM_PACKET_H */ diff --git a/src/arch/x86/include/bios.h b/src/arch/x86/include/bios.h new file mode 100644 index 00000000..14e7acbc --- /dev/null +++ b/src/arch/x86/include/bios.h @@ -0,0 +1,17 @@ +#ifndef BIOS_H +#define BIOS_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#define BDA_SEG 0x0040 +#define BDA_EBDA 0x000e +#define BDA_EQUIPMENT_WORD 0x0010 +#define BDA_FBMS 0x0013 +#define BDA_TICKS 0x006c +#define BDA_MIDNIGHT 0x0070 +#define BDA_REBOOT 0x0072 +#define BDA_REBOOT_WARM 0x1234 +#define BDA_NUM_DRIVES 0x0075 +#define BDA_CHAR_HEIGHT 0x0085 + +#endif /* BIOS_H */ diff --git a/src/arch/x86/include/bios_disks.h b/src/arch/x86/include/bios_disks.h new file mode 100644 index 00000000..0dd7c4eb --- /dev/null +++ b/src/arch/x86/include/bios_disks.h @@ -0,0 +1,69 @@ +#ifndef BIOS_DISKS_H +#define BIOS_DISKS_H + +#include "dev.h" + +/* + * Constants + * + */ + +#define BIOS_DISK_MAX_NAME_LEN 6 + +struct bios_disk_sector { + char data[512]; +}; + +/* + * The location of a BIOS disk + * + */ +struct bios_disk_loc { + uint8_t drive; +}; + +/* + * A physical BIOS disk device + * + */ +struct bios_disk_device { + char name[BIOS_DISK_MAX_NAME_LEN]; + uint8_t drive; + uint8_t type; +}; + +/* + * A BIOS disk driver, with a valid device ID range and naming + * function. 
+ * + */ +struct bios_disk_driver { + void ( *fill_drive_name ) ( char *buf, uint8_t drive ); + uint8_t min_drive; + uint8_t max_drive; +}; + +/* + * Define a BIOS disk driver + * + */ +#define BIOS_DISK_DRIVER( _name, _fill_drive_name, _min_drive, _max_drive ) \ + static struct bios_disk_driver _name = { \ + .fill_drive_name = _fill_drive_name, \ + .min_drive = _min_drive, \ + .max_drive = _max_drive, \ + } + +/* + * Functions in bios_disks.c + * + */ + + +/* + * bios_disk bus global definition + * + */ +extern struct bus_driver bios_disk_driver; + +#endif /* BIOS_DISKS_H */ diff --git a/src/arch/x86/include/biosint.h b/src/arch/x86/include/biosint.h new file mode 100644 index 00000000..f47116f7 --- /dev/null +++ b/src/arch/x86/include/biosint.h @@ -0,0 +1,34 @@ +#ifndef BIOSINT_H +#define BIOSINT_H + +/** + * @file BIOS interrupts + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +struct segoff; + +/** + * Hooked interrupt count + * + * At exit, after unhooking all possible interrupts, this counter + * should be examined. If it is non-zero, it means that we failed to + * unhook at least one interrupt vector, and so must not free up the + * memory we are using. (Note that this also implies that we should + * re-hook INT 15 in order to hide ourselves from the memory map). 
+ */ +extern uint16_t __text16 ( hooked_bios_interrupts ); +#define hooked_bios_interrupts __use_text16 ( hooked_bios_interrupts ) + +extern void hook_bios_interrupt ( unsigned int interrupt, unsigned int handler, + struct segoff *chain_vector ); +extern int unhook_bios_interrupt ( unsigned int interrupt, + unsigned int handler, + struct segoff *chain_vector ); +extern void check_bios_interrupts ( void ); + +#endif /* BIOSINT_H */ diff --git a/src/arch/x86/include/bits/acpi.h b/src/arch/x86/include/bits/acpi.h new file mode 100644 index 00000000..a6ff9080 --- /dev/null +++ b/src/arch/x86/include/bits/acpi.h @@ -0,0 +1,14 @@ +#ifndef _BITS_ACPI_H +#define _BITS_ACPI_H + +/** @file + * + * x86-specific ACPI API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_ACPI_H */ diff --git a/src/arch/x86/include/bits/bitops.h b/src/arch/x86/include/bits/bitops.h new file mode 100644 index 00000000..17dcf102 --- /dev/null +++ b/src/arch/x86/include/bits/bitops.h @@ -0,0 +1,94 @@ +#ifndef _BITS_BITOPS_H +#define _BITS_BITOPS_H + +/** @file + * + * x86 bit operations + * + * We perform atomic bit set and bit clear operations using "lock bts" + * and "lock btr". We use the output constraint to inform the + * compiler that any memory from the start of the bit field up to and + * including the byte containing the bit may be modified. (This is + * overkill but shouldn't matter in practice since we're unlikely to + * subsequently read other bits from the same bit field.) 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Set bit atomically + * + * @v bit Bit to set + * @v bits Bit field + */ +static inline __attribute__ (( always_inline )) void +set_bit ( unsigned int bit, volatile void *bits ) { + volatile struct { + uint8_t byte[ ( bit / 8 ) + 1 ]; + } *bytes = bits; + + __asm__ __volatile__ ( "lock bts %1, %0" + : "+m" ( *bytes ) : "Ir" ( bit ) ); +} + +/** + * Clear bit atomically + * + * @v bit Bit to set + * @v bits Bit field + */ +static inline __attribute__ (( always_inline )) void +clear_bit ( unsigned int bit, volatile void *bits ) { + volatile struct { + uint8_t byte[ ( bit / 8 ) + 1 ]; + } *bytes = bits; + + __asm__ __volatile__ ( "lock btr %1, %0" + : "+m" ( *bytes ) : "Ir" ( bit ) ); +} + +/** + * Test and set bit atomically + * + * @v bit Bit to set + * @v bits Bit field + * @ret old Old value of bit (zero or non-zero) + */ +static inline __attribute__ (( always_inline )) int +test_and_set_bit ( unsigned int bit, volatile void *bits ) { + volatile struct { + uint8_t byte[ ( bit / 8 ) + 1 ]; + } *bytes = bits; + int old; + + __asm__ __volatile__ ( "lock bts %2, %0\n\t" + "sbb %1, %1\n\t" + : "+m" ( *bytes ), "=r" ( old ) + : "Ir" ( bit ) ); + return old; +} + +/** + * Test and clear bit atomically + * + * @v bit Bit to set + * @v bits Bit field + * @ret old Old value of bit (zero or non-zero) + */ +static inline __attribute__ (( always_inline )) int +test_and_clear_bit ( unsigned int bit, volatile void *bits ) { + volatile struct { + uint8_t byte[ ( bit / 8 ) + 1 ]; + } *bytes = bits; + int old; + + __asm__ __volatile__ ( "lock btr %2, %0\n\t" + "sbb %1, %1\n\t" + : "+m" ( *bytes ), "=r" ( old ) + : "Ir" ( bit ) ); + return old; +} + +#endif /* _BITS_BITOPS_H */ diff --git a/src/arch/x86/include/bits/endian.h b/src/arch/x86/include/bits/endian.h new file mode 100644 index 00000000..85718cfd --- /dev/null +++ b/src/arch/x86/include/bits/endian.h @@ -0,0 +1,8 @@ +#ifndef _BITS_ENDIAN_H +#define 
_BITS_ENDIAN_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#define __BYTE_ORDER __LITTLE_ENDIAN + +#endif /* _BITS_ENDIAN_H */ diff --git a/src/arch/x86/include/bits/entropy.h b/src/arch/x86/include/bits/entropy.h new file mode 100644 index 00000000..5ac7fcd2 --- /dev/null +++ b/src/arch/x86/include/bits/entropy.h @@ -0,0 +1,14 @@ +#ifndef _BITS_ENTROPY_H +#define _BITS_ENTROPY_H + +/** @file + * + * x86-specific entropy API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_ENTROPY_H */ diff --git a/src/arch/x86/include/bits/iomap.h b/src/arch/x86/include/bits/iomap.h new file mode 100644 index 00000000..d6fff257 --- /dev/null +++ b/src/arch/x86/include/bits/iomap.h @@ -0,0 +1,14 @@ +#ifndef _BITS_IOMAP_H +#define _BITS_IOMAP_H + +/** @file + * + * x86-specific I/O mapping API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_IOMAP_H */ diff --git a/src/arch/x86/include/bits/nap.h b/src/arch/x86/include/bits/nap.h new file mode 100644 index 00000000..7103b94c --- /dev/null +++ b/src/arch/x86/include/bits/nap.h @@ -0,0 +1,15 @@ +#ifndef _BITS_NAP_H +#define _BITS_NAP_H + +/** @file + * + * x86-specific CPU sleeping API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +#endif /* _BITS_MAP_H */ diff --git a/src/arch/x86/include/bits/reboot.h b/src/arch/x86/include/bits/reboot.h new file mode 100644 index 00000000..e702dd3d --- /dev/null +++ b/src/arch/x86/include/bits/reboot.h @@ -0,0 +1,14 @@ +#ifndef _BITS_REBOOT_H +#define _BITS_REBOOT_H + +/** @file + * + * x86-specific reboot API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_REBOOT_H */ diff --git a/src/arch/x86/include/bits/sanboot.h b/src/arch/x86/include/bits/sanboot.h new file mode 100644 index 00000000..1b9924e6 --- /dev/null +++ b/src/arch/x86/include/bits/sanboot.h @@ -0,0 +1,14 @@ +#ifndef _BITS_SANBOOT_H 
+#define _BITS_SANBOOT_H + +/** @file + * + * x86-specific sanboot API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_SANBOOT_H */ diff --git a/src/arch/x86/include/bits/smbios.h b/src/arch/x86/include/bits/smbios.h new file mode 100644 index 00000000..9977c87a --- /dev/null +++ b/src/arch/x86/include/bits/smbios.h @@ -0,0 +1,14 @@ +#ifndef _BITS_SMBIOS_H +#define _BITS_SMBIOS_H + +/** @file + * + * x86-specific SMBIOS API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_SMBIOS_H */ diff --git a/src/arch/x86/include/bits/time.h b/src/arch/x86/include/bits/time.h new file mode 100644 index 00000000..556d96f6 --- /dev/null +++ b/src/arch/x86/include/bits/time.h @@ -0,0 +1,14 @@ +#ifndef _BITS_TIME_H +#define _BITS_TIME_H + +/** @file + * + * x86-specific time API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_TIME_H */ diff --git a/src/arch/x86/include/bits/uaccess.h b/src/arch/x86/include/bits/uaccess.h new file mode 100644 index 00000000..e9e7e5af --- /dev/null +++ b/src/arch/x86/include/bits/uaccess.h @@ -0,0 +1,14 @@ +#ifndef _BITS_UACCESS_H +#define _BITS_UACCESS_H + +/** @file + * + * x86-specific user access API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_UACCESS_H */ diff --git a/src/arch/x86/include/bits/uart.h b/src/arch/x86/include/bits/uart.h new file mode 100644 index 00000000..e09cd3f4 --- /dev/null +++ b/src/arch/x86/include/bits/uart.h @@ -0,0 +1,41 @@ +#ifndef _BITS_UART_H +#define _BITS_UART_H + +/** @file + * + * 16550-compatible UART + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** + * Write to UART register + * + * @v uart UART + * @v addr Register address + * @v data Data + */ +static inline __attribute__ (( always_inline )) void +uart_write ( struct uart *uart, unsigned int addr, uint8_t data ) { + outb ( 
data, ( uart->base + addr ) ); +} + +/** + * Read from UART register + * + * @v uart UART + * @v addr Register address + * @ret data Data + */ +static inline __attribute__ (( always_inline )) uint8_t +uart_read ( struct uart *uart, unsigned int addr ) { + return inb ( uart->base + addr ); +} + +extern int uart_select ( struct uart *uart, unsigned int port ); + +#endif /* _BITS_UART_H */ diff --git a/src/arch/x86/include/bits/umalloc.h b/src/arch/x86/include/bits/umalloc.h new file mode 100644 index 00000000..5d1f554d --- /dev/null +++ b/src/arch/x86/include/bits/umalloc.h @@ -0,0 +1,14 @@ +#ifndef _BITS_UMALLOC_H +#define _BITS_UMALLOC_H + +/** @file + * + * x86-specific user memory allocation API implementations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#endif /* _BITS_UMALLOC_H */ diff --git a/src/arch/x86/include/bits/xen.h b/src/arch/x86/include/bits/xen.h new file mode 100644 index 00000000..3433cea1 --- /dev/null +++ b/src/arch/x86/include/bits/xen.h @@ -0,0 +1,164 @@ +#ifndef _BITS_XEN_H +#define _BITS_XEN_H + +/** @file + * + * Xen interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* Hypercall registers */ +#ifdef __x86_64__ +#define XEN_REG1 "rdi" +#define XEN_REG2 "rsi" +#define XEN_REG3 "rdx" +#define XEN_REG4 "r10" +#define XEN_REG5 "r8" +#else +#define XEN_REG1 "ebx" +#define XEN_REG2 "ecx" +#define XEN_REG3 "edx" +#define XEN_REG4 "esi" +#define XEN_REG5 "edi" +#endif + +/** A hypercall entry point */ +struct xen_hypercall { + /** Code generated by hypervisor */ + uint8_t code[32]; +} __attribute__ (( packed )); + +/** + * Issue hypercall with one argument + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_1 ( struct xen_hypervisor *xen, unsigned int hypercall, + unsigned long arg1 ) { + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + unsigned long retval; 
+ + __asm__ __volatile__ ( "call *%2" + : "=a" ( retval ), "+r" ( reg1 ) + : "r" ( &xen->hypercall[hypercall] ) + : XEN_REG2, XEN_REG3, XEN_REG4, XEN_REG5, + "memory" ); + return retval; +} + +/** + * Issue hypercall with two arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_2 ( struct xen_hypervisor *xen, unsigned int hypercall, + unsigned long arg1, unsigned long arg2 ) { + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + unsigned long retval; + + __asm__ __volatile__ ( "call *%3" + : "=a" ( retval ), "+r" ( reg1 ), "+r" ( reg2 ) + : "r" ( &xen->hypercall[hypercall] ) + : XEN_REG3, XEN_REG4, XEN_REG5, "memory" ); + return retval; +} + +/** + * Issue hypercall with three arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @v arg3 Third argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_3 ( struct xen_hypervisor *xen, unsigned int hypercall, + unsigned long arg1, unsigned long arg2, unsigned long arg3 ) { + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + register unsigned long reg3 asm ( XEN_REG3 ) = arg3; + unsigned long retval; + + __asm__ __volatile__ ( "call *%4" + : "=a" ( retval ), "+r" ( reg1 ), "+r" ( reg2 ), + "+r" ( reg3 ) + : "r" ( &xen->hypercall[hypercall] ) + : XEN_REG4, XEN_REG5, "memory" ); + return retval; +} + +/** + * Issue hypercall with four arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @v arg3 Third argument + * @v arg4 Fourth argument + * @ret retval Return value + */ +static inline __attribute__ (( 
always_inline )) unsigned long +xen_hypercall_4 ( struct xen_hypervisor *xen, unsigned int hypercall, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4 ) { + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + register unsigned long reg3 asm ( XEN_REG3 ) = arg3; + register unsigned long reg4 asm ( XEN_REG4 ) = arg4; + unsigned long retval; + + __asm__ __volatile__ ( "call *%5" + : "=a" ( retval ), "+r" ( reg1 ), "+r" ( reg2 ), + "+r" ( reg3 ), "+r" ( reg4 ) + : "r" ( &xen->hypercall[hypercall] ) + : XEN_REG5, "memory" ); + return retval; +} + +/** + * Issue hypercall with five arguments + * + * @v xen Xen hypervisor + * @v hypercall Hypercall number + * @v arg1 First argument + * @v arg2 Second argument + * @v arg3 Third argument + * @v arg4 Fourth argument + * @v arg5 Fifth argument + * @ret retval Return value + */ +static inline __attribute__ (( always_inline )) unsigned long +xen_hypercall_5 ( struct xen_hypervisor *xen, unsigned int hypercall, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5 ) { + register unsigned long reg1 asm ( XEN_REG1 ) = arg1; + register unsigned long reg2 asm ( XEN_REG2 ) = arg2; + register unsigned long reg3 asm ( XEN_REG3 ) = arg3; + register unsigned long reg4 asm ( XEN_REG4 ) = arg4; + register unsigned long reg5 asm ( XEN_REG5 ) = arg5; + unsigned long retval; + + __asm__ __volatile__ ( "call *%6" + : "=a" ( retval ), "+r" ( reg1 ), "+r" ( reg2 ), + "+r" ( reg3 ), "+r" ( reg4 ), "+r" ( reg5 ) + : "r" ( &xen->hypercall[hypercall] ) + : "memory" ); + return retval; +} + +#endif /* _BITS_XEN_H */ diff --git a/src/arch/x86/include/bochs.h b/src/arch/x86/include/bochs.h new file mode 100644 index 00000000..9d090fc1 --- /dev/null +++ b/src/arch/x86/include/bochs.h @@ -0,0 +1,34 @@ +#ifndef BOCHS_H +#define BOCHS_H + +/** @file + * + * bochs breakpoints + * + * This file defines @c bochsbp, the 
magic breakpoint instruction that + * is incredibly useful when debugging under bochs. This file should + * never be included in production code. + * + * Use the pseudo-instruction @c bochsbp in assembly code, or the + * bochsbp() function in C code. + * + */ + +#ifdef ASSEMBLY + +/* Breakpoint for when debugging under bochs */ +#define bochsbp xchgw %bx, %bx +#define BOCHSBP bochsbp + +#else /* ASSEMBLY */ + +/** Breakpoint for when debugging under bochs */ +static inline void bochsbp ( void ) { + __asm__ __volatile__ ( "xchgw %bx, %bx" ); +} + +#endif /* ASSEMBLY */ + +#warning "bochs.h should not be included into production code" + +#endif /* BOCHS_H */ diff --git a/src/arch/x86/include/bootsector.h b/src/arch/x86/include/bootsector.h new file mode 100644 index 00000000..c5d35aae --- /dev/null +++ b/src/arch/x86/include/bootsector.h @@ -0,0 +1,14 @@ +#ifndef _BOOTSECTOR_H +#define _BOOTSECTOR_H + +/** @file + * + * x86 bootsector image format + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern int call_bootsector ( unsigned int segment, unsigned int offset, + unsigned int drive ); + +#endif /* _BOOTSECTOR_H */ diff --git a/src/arch/x86/include/bzimage.h b/src/arch/x86/include/bzimage.h new file mode 100644 index 00000000..4933ce5b --- /dev/null +++ b/src/arch/x86/include/bzimage.h @@ -0,0 +1,142 @@ +#ifndef _BZIMAGE_H +#define _BZIMAGE_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * A bzImage header + * + * As documented in Documentation/i386/boot.txt + */ +struct bzimage_header { + /** The size of the setup in sectors + * + * If this field contains 0, assume it contains 4. 
+ */ + uint8_t setup_sects; + /** If set, the root is mounted readonly */ + uint16_t root_flags; + /** DO NOT USE - for bootsect.S use only */ + uint16_t syssize; + /** DO NOT USE - obsolete */ + uint16_t swap_dev; + /** DO NOT USE - for bootsect.S use only */ + uint16_t ram_size; + /** Video mode control */ + uint16_t vid_mode; + /** Default root device number */ + uint16_t root_dev; + /** 0xAA55 magic number */ + uint16_t boot_flag; + /** Jump instruction */ + uint16_t jump; + /** Magic signature "HdrS" */ + uint32_t header; + /** Boot protocol version supported */ + uint16_t version; + /** Boot loader hook (see below) */ + uint32_t realmode_swtch; + /** The load-low segment (0x1000) (obsolete) */ + uint16_t start_sys; + /** Pointer to kernel version string */ + uint16_t kernel_version; + /** Boot loader identifier */ + uint8_t type_of_loader; + /** Boot protocol option flags */ + uint8_t loadflags; + /** Move to high memory size (used with hooks) */ + uint16_t setup_move_size; + /** Boot loader hook (see below) */ + uint32_t code32_start; + /** initrd load address (set by boot loader) */ + uint32_t ramdisk_image; + /** initrd size (set by boot loader) */ + uint32_t ramdisk_size; + /** DO NOT USE - for bootsect.S use only */ + uint32_t bootsect_kludge; + /** Free memory after setup end */ + uint16_t heap_end_ptr; + /** Unused */ + uint16_t pad1; + /** 32-bit pointer to the kernel command line */ + uint32_t cmd_line_ptr; + /** Highest legal initrd address */ + uint32_t initrd_addr_max; + /** Physical addr alignment required for kernel */ + uint32_t kernel_alignment; + /** Whether kernel is relocatable or not */ + uint8_t relocatable_kernel; + /** Unused */ + uint8_t pad2[3]; + /** Maximum size of the kernel command line */ + uint32_t cmdline_size; +} __attribute__ (( packed )); + +/** Offset of bzImage header within kernel image */ +#define BZI_HDR_OFFSET 0x1f1 + +/** bzImage boot flag value */ +#define BZI_BOOT_FLAG 0xaa55 + +/** bzImage magic signature value */ 
+#define BZI_SIGNATURE 0x53726448 + +/** bzImage boot loader identifier for Etherboot */ +#define BZI_LOADER_TYPE_ETHERBOOT 0x40 + +/** bzImage boot loader identifier for iPXE + * + * We advertise ourselves as Etherboot version 6. + */ +#define BZI_LOADER_TYPE_IPXE ( BZI_LOADER_TYPE_ETHERBOOT | 0x06 ) + +/** bzImage "load high" flag */ +#define BZI_LOAD_HIGH 0x01 + +/** Load address for high-loaded kernels */ +#define BZI_LOAD_HIGH_ADDR 0x100000 + +/** Load address for low-loaded kernels */ +#define BZI_LOAD_LOW_ADDR 0x10000 + +/** bzImage "kernel can use heap" flag */ +#define BZI_CAN_USE_HEAP 0x80 + +/** bzImage special video mode "normal" */ +#define BZI_VID_MODE_NORMAL 0xffff + +/** bzImage special video mode "ext" */ +#define BZI_VID_MODE_EXT 0xfffe + +/** bzImage special video mode "ask" */ +#define BZI_VID_MODE_ASK 0xfffd + +/** bzImage maximum initrd address for versions < 2.03 */ +#define BZI_INITRD_MAX 0x37ffffff + +/** bzImage command-line structure used by older kernels */ +struct bzimage_cmdline { + /** Magic signature */ + uint16_t magic; + /** Offset to command line */ + uint16_t offset; +} __attribute__ (( packed )); + +/** Offset of bzImage command-line structure within kernel image */ +#define BZI_CMDLINE_OFFSET 0x20 + +/** bzImage command line present magic marker value */ +#define BZI_CMDLINE_MAGIC 0xa33f + +/** Assumed size of real-mode portion (including .bss) */ +#define BZI_ASSUMED_RM_SIZE 0x8000 + +/** Amount of stack space to provide */ +#define BZI_STACK_SIZE 0x1000 + +/** Maximum size of command line */ +#define BZI_CMDLINE_SIZE 0x7ff + +#endif /* _BZIMAGE_H */ diff --git a/src/arch/x86/include/comboot.h b/src/arch/x86/include/comboot.h new file mode 100644 index 00000000..69c6ef02 --- /dev/null +++ b/src/arch/x86/include/comboot.h @@ -0,0 +1,130 @@ +#ifndef COMBOOT_H +#define COMBOOT_H + +/** + * @file + * + * SYSLINUX COMBOOT + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include + +/** Segment used for COMBOOT PSP and 
image */ +#define COMBOOT_PSP_SEG 0x07C0 + +/** Entry point address of COM32 images */ +#define COM32_START_PHYS 0x101000 + +/** COM32 bounce buffer segment */ +#define COM32_BOUNCE_SEG 0x07C0 + +/** Size of SYSLINUX file block in bytes */ +#define COMBOOT_FILE_BLOCKSZ 512 + +/** COMBOOT feature flags (INT 22h AX=15h) */ +#define COMBOOT_FEATURE_LOCAL_BOOT (1 << 0) +#define COMBOOT_FEATURE_IDLE_LOOP (1 << 1) + +/** Maximum number of shuffle descriptors for + * shuffle and boot functions + * (INT 22h AX=0012h, 001Ah, 001Bh) + */ +#define COMBOOT_MAX_SHUFFLE_DESCRIPTORS 682 + +typedef union { + uint32_t l; + uint16_t w[2]; + uint8_t b[4]; +} com32_reg32_t; + +typedef struct { + uint16_t gs; /* Offset 0 */ + uint16_t fs; /* Offset 2 */ + uint16_t es; /* Offset 4 */ + uint16_t ds; /* Offset 6 */ + + com32_reg32_t edi; /* Offset 8 */ + com32_reg32_t esi; /* Offset 12 */ + com32_reg32_t ebp; /* Offset 16 */ + com32_reg32_t _unused_esp; /* Offset 20 */ + com32_reg32_t ebx; /* Offset 24 */ + com32_reg32_t edx; /* Offset 28 */ + com32_reg32_t ecx; /* Offset 32 */ + com32_reg32_t eax; /* Offset 36 */ + + com32_reg32_t eflags; /* Offset 40 */ +} com32sys_t; + +typedef struct { + uint32_t eax; /* Offset 0 */ + uint32_t ecx; /* Offset 4 */ + uint32_t edx; /* Offset 8 */ + uint32_t ebx; /* Offset 12 */ + uint32_t esp; /* Offset 16 */ + uint32_t ebp; /* Offset 20 */ + uint32_t esi; /* Offset 24 */ + uint32_t edi; /* Offset 28 */ + + uint32_t eip; /* Offset 32 */ +} syslinux_pm_regs; + +typedef struct { + uint16_t es; /* Offset 0 */ + uint16_t _unused_cs; /* Offset 2 */ + uint16_t ds; /* Offset 4 */ + uint16_t ss; /* Offset 6 */ + uint16_t fs; /* Offset 8 */ + uint16_t gs; /* Offset 10 */ + + uint32_t eax; /* Offset 12 */ + uint32_t ecx; /* Offset 16 */ + uint32_t edx; /* Offset 20 */ + uint32_t ebx; /* Offset 24 */ + uint32_t esp; /* Offset 28 */ + uint32_t ebp; /* Offset 32 */ + uint32_t esi; /* Offset 36 */ + uint32_t edi; /* Offset 40 */ + + uint16_t ip; /* Offset 44 */ + 
uint16_t cs; /* Offset 46 */ +} syslinux_rm_regs; + +typedef struct { + uint32_t dest; + uint32_t src; + uint32_t len; +} comboot_shuffle_descriptor; + +extern void hook_comboot_interrupts ( ); +extern void unhook_comboot_interrupts ( ); + +/* These are not the correct prototypes, but it doens't matter, + * as we only ever get the address of these functions; + * they are only called from COM32 code running in PHYS_CODE + */ +extern void com32_intcall_wrapper ( ); +extern void com32_farcall_wrapper ( ); +extern void com32_cfarcall_wrapper ( ); + +/* Resolve a hostname to an (IPv4) address */ +extern int comboot_resolv ( const char *name, struct in_addr *address ); + +/* setjmp/longjmp context buffer used to return after loading an image */ +extern rmjmp_buf comboot_return; + +#define COMBOOT_EXIT 1 +#define COMBOOT_EXIT_RUN_KERNEL 2 +#define COMBOOT_EXIT_COMMAND 3 + +extern void comboot_force_text_mode ( void ); + +#define COMBOOT_VIDEO_GRAPHICS 0x01 +#define COMBOOT_VIDEO_NONSTANDARD 0x02 +#define COMBOOT_VIDEO_VESA 0x04 +#define COMBOOT_VIDEO_NOTEXT 0x08 + +#endif diff --git a/src/arch/x86/include/fakee820.h b/src/arch/x86/include/fakee820.h new file mode 100644 index 00000000..552b1e48 --- /dev/null +++ b/src/arch/x86/include/fakee820.h @@ -0,0 +1,9 @@ +#ifndef _FAKEE820_H +#define _FAKEE820_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern void fake_e820 ( void ); +extern void unfake_e820 ( void ); + +#endif /* _FAKEE820_H */ diff --git a/src/arch/x86/include/initrd.h b/src/arch/x86/include/initrd.h new file mode 100644 index 00000000..ddb3e5a4 --- /dev/null +++ b/src/arch/x86/include/initrd.h @@ -0,0 +1,30 @@ +#ifndef _INITRD_H +#define _INITRD_H + +/** @file + * + * Initial ramdisk (initrd) reshuffling + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** Minimum alignment for initrds + * + * Some versions of Linux complain about initrds that are not + * page-aligned. 
+ */ +#define INITRD_ALIGN 4096 + +/** Minimum free space required to reshuffle initrds + * + * Chosen to avoid absurdly long reshuffling times + */ +#define INITRD_MIN_FREE_LEN ( 512 * 1024 ) + +extern void initrd_reshuffle ( userptr_t bottom ); +extern int initrd_reshuffle_check ( size_t len, userptr_t bottom ); + +#endif /* _INITRD_H */ diff --git a/src/arch/x86/include/int13.h b/src/arch/x86/include/int13.h new file mode 100644 index 00000000..f82a583c --- /dev/null +++ b/src/arch/x86/include/int13.h @@ -0,0 +1,333 @@ +#ifndef INT13_H +#define INT13_H + +/** @file + * + * INT 13 emulation + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** + * @defgroup int13ops INT 13 operation codes + * @{ + */ + +/** Reset disk system */ +#define INT13_RESET 0x00 +/** Get status of last operation */ +#define INT13_GET_LAST_STATUS 0x01 +/** Read sectors */ +#define INT13_READ_SECTORS 0x02 +/** Write sectors */ +#define INT13_WRITE_SECTORS 0x03 +/** Get drive parameters */ +#define INT13_GET_PARAMETERS 0x08 +/** Get disk type */ +#define INT13_GET_DISK_TYPE 0x15 +/** Extensions installation check */ +#define INT13_EXTENSION_CHECK 0x41 +/** Extended read */ +#define INT13_EXTENDED_READ 0x42 +/** Extended write */ +#define INT13_EXTENDED_WRITE 0x43 +/** Verify sectors */ +#define INT13_EXTENDED_VERIFY 0x44 +/** Extended seek */ +#define INT13_EXTENDED_SEEK 0x47 +/** Get extended drive parameters */ +#define INT13_GET_EXTENDED_PARAMETERS 0x48 +/** Get CD-ROM status / terminate emulation */ +#define INT13_CDROM_STATUS_TERMINATE 0x4b +/** Read CD-ROM boot catalog */ +#define INT13_CDROM_READ_BOOT_CATALOG 0x4d + +/** @} */ + +/** + * @defgroup int13status INT 13 status codes + * @{ + */ + +/** Operation completed successfully */ +#define INT13_STATUS_SUCCESS 0x00 +/** Invalid function or parameter */ +#define INT13_STATUS_INVALID 0x01 +/** Read error */ +#define INT13_STATUS_READ_ERROR 0x04 +/** Reset failed */ +#define 
INT13_STATUS_RESET_FAILED 0x05 +/** Write error */ +#define INT13_STATUS_WRITE_ERROR 0xcc + +/** @} */ + +/** Block size for non-extended INT 13 calls */ +#define INT13_BLKSIZE 512 + +/** @defgroup int13fddtype INT 13 floppy disk drive types + * @{ + */ + +/** 360K */ +#define INT13_FDD_TYPE_360K 0x01 +/** 1.2M */ +#define INT13_FDD_TYPE_1M2 0x02 +/** 720K */ +#define INT13_FDD_TYPE_720K 0x03 +/** 1.44M */ +#define INT13_FDD_TYPE_1M44 0x04 + +/** An INT 13 disk address packet */ +struct int13_disk_address { + /** Size of the packet, in bytes */ + uint8_t bufsize; + /** Reserved */ + uint8_t reserved_a; + /** Block count */ + uint8_t count; + /** Reserved */ + uint8_t reserved_b; + /** Data buffer */ + struct segoff buffer; + /** Starting block number */ + uint64_t lba; + /** Data buffer (EDD 3.0+ only) */ + uint64_t buffer_phys; + /** Block count (EDD 4.0+ only) */ + uint32_t long_count; + /** Reserved */ + uint32_t reserved_c; +} __attribute__ (( packed )); + +/** INT 13 disk parameters */ +struct int13_disk_parameters { + /** Size of this structure */ + uint16_t bufsize; + /** Flags */ + uint16_t flags; + /** Number of cylinders */ + uint32_t cylinders; + /** Number of heads */ + uint32_t heads; + /** Number of sectors per track */ + uint32_t sectors_per_track; + /** Total number of sectors on drive */ + uint64_t sectors; + /** Bytes per sector */ + uint16_t sector_size; + /** Device parameter table extension */ + struct segoff dpte; + /** Device path information */ + struct edd_device_path_information dpi; +} __attribute__ (( packed )); + +/** + * @defgroup int13types INT 13 disk types + * @{ + */ + +/** No such drive */ +#define INT13_DISK_TYPE_NONE 0x00 +/** Floppy without change-line support */ +#define INT13_DISK_TYPE_FDD 0x01 +/** Floppy with change-line support */ +#define INT13_DISK_TYPE_FDD_CL 0x02 +/** Hard disk */ +#define INT13_DISK_TYPE_HDD 0x03 + +/** @} */ + +/** + * @defgroup int13flags INT 13 disk parameter flags + * @{ + */ + +/** DMA boundary 
errors handled transparently */ +#define INT13_FL_DMA_TRANSPARENT 0x01 +/** CHS information is valid */ +#define INT13_FL_CHS_VALID 0x02 +/** Removable drive */ +#define INT13_FL_REMOVABLE 0x04 +/** Write with verify supported */ +#define INT13_FL_VERIFIABLE 0x08 +/** Has change-line supported (valid only for removable drives) */ +#define INT13_FL_CHANGE_LINE 0x10 +/** Drive can be locked (valid only for removable drives) */ +#define INT13_FL_LOCKABLE 0x20 +/** CHS is max possible, not current media (valid only for removable drives) */ +#define INT13_FL_CHS_MAX 0x40 + +/** @} */ + +/** + * @defgroup int13exts INT 13 extension flags + * @{ + */ + +/** Extended disk access functions supported */ +#define INT13_EXTENSION_LINEAR 0x01 +/** Removable drive functions supported */ +#define INT13_EXTENSION_REMOVABLE 0x02 +/** EDD functions supported */ +#define INT13_EXTENSION_EDD 0x04 +/** 64-bit extensions are present */ +#define INT13_EXTENSION_64BIT 0x08 + +/** @} */ + +/** + * @defgroup int13vers INT 13 extension versions + * @{ + */ + +/** INT13 extensions version 1.x */ +#define INT13_EXTENSION_VER_1_X 0x01 +/** INT13 extensions version 2.0 (EDD-1.0) */ +#define INT13_EXTENSION_VER_2_0 0x20 +/** INT13 extensions version 2.1 (EDD-1.1) */ +#define INT13_EXTENSION_VER_2_1 0x21 +/** INT13 extensions version 3.0 (EDD-3.0) */ +#define INT13_EXTENSION_VER_3_0 0x30 + +/** @} */ + +/** Maximum number of sectors for which CHS geometry is allowed to be valid + * + * This number is taken from the EDD specification. 
+ */ +#define INT13_MAX_CHS_SECTORS 15482880 + +/** Bootable CD-ROM specification packet */ +struct int13_cdrom_specification { + /** Size of packet in bytes */ + uint8_t size; + /** Boot media type */ + uint8_t media_type; + /** Drive number */ + uint8_t drive; + /** CD-ROM controller number */ + uint8_t controller; + /** LBA of disk image to emulate */ + uint32_t lba; + /** Device specification */ + uint16_t device; + /** Segment of 3K buffer for caching CD-ROM reads */ + uint16_t cache_segment; + /** Load segment for initial boot image */ + uint16_t load_segment; + /** Number of 512-byte sectors to load */ + uint16_t load_sectors; + /** Low 8 bits of cylinder number */ + uint8_t cyl; + /** Sector number, plus high 2 bits of cylinder number */ + uint8_t cyl_sector; + /** Head number */ + uint8_t head; +} __attribute__ (( packed )); + +/** Bootable CD-ROM boot catalog command packet */ +struct int13_cdrom_boot_catalog_command { + /** Size of packet in bytes */ + uint8_t size; + /** Number of sectors of boot catalog to read */ + uint8_t count; + /** Buffer for boot catalog */ + uint32_t buffer; + /** First sector in boot catalog to transfer */ + uint16_t start; +} __attribute__ (( packed )); + +/** A C/H/S address within a partition table entry */ +struct partition_chs { + /** Head number */ + uint8_t head; + /** Sector number, plus high 2 bits of cylinder number */ + uint8_t cyl_sector; + /** Low 8 bits of cylinder number */ + uint8_t cyl; +} __attribute__ (( packed )); + +#define PART_HEAD(chs) ( (chs).head ) +#define PART_SECTOR(chs) ( (chs).cyl_sector & 0x3f ) +#define PART_CYLINDER(chs) ( (chs).cyl | ( ( (chs).cyl_sector & 0xc0 ) << 2 ) ) + +/** A partition table entry within the MBR */ +struct partition_table_entry { + /** Bootable flag */ + uint8_t bootable; + /** C/H/S start address */ + struct partition_chs chs_start; + /** System indicator (partition type) */ + uint8_t type; + /** C/H/S end address */ + struct partition_chs chs_end; + /** Linear start 
address */ + uint32_t start; + /** Linear length */ + uint32_t length; +} __attribute__ (( packed )); + +/** A Master Boot Record */ +struct master_boot_record { + /** Code area */ + uint8_t code[440]; + /** Disk signature */ + uint32_t signature; + /** Padding */ + uint8_t pad[2]; + /** Partition table */ + struct partition_table_entry partitions[4]; + /** 0x55aa MBR signature */ + uint16_t magic; +} __attribute__ (( packed )); + +/** MBR magic signature */ +#define INT13_MBR_MAGIC 0xaa55 + +/** A floppy disk geometry */ +struct int13_fdd_geometry { + /** Number of tracks */ + uint8_t tracks; + /** Number of heads and sectors per track */ + uint8_t heads_spt; +}; + +/** Define a floppy disk geometry */ +#define INT13_FDD_GEOMETRY( cylinders, heads, sectors ) \ + { \ + .tracks = (cylinders), \ + .heads_spt = ( ( (heads) << 6 ) | (sectors) ), \ + } + +/** Get floppy disk number of cylinders */ +#define INT13_FDD_CYLINDERS( geometry ) ( (geometry)->tracks ) + +/** Get floppy disk number of heads */ +#define INT13_FDD_HEADS( geometry ) ( (geometry)->heads_spt >> 6 ) + +/** Get floppy disk number of sectors per track */ +#define INT13_FDD_SECTORS( geometry ) ( (geometry)->heads_spt & 0x3f ) + +/** A floppy drive parameter table */ +struct int13_fdd_parameters { + uint8_t step_rate__head_unload; + uint8_t head_load__ndma; + uint8_t motor_off_delay; + uint8_t bytes_per_sector; + uint8_t sectors_per_track; + uint8_t gap_length; + uint8_t data_length; + uint8_t format_gap_length; + uint8_t format_filler; + uint8_t head_settle_time; + uint8_t motor_start_time; +} __attribute__ (( packed )); + +#endif /* INT13_H */ diff --git a/src/arch/x86/include/ipxe/acpipwr.h b/src/arch/x86/include/ipxe/acpipwr.h new file mode 100644 index 00000000..93da0942 --- /dev/null +++ b/src/arch/x86/include/ipxe/acpipwr.h @@ -0,0 +1,14 @@ +#ifndef _IPXE_ACPIPWR_H +#define _IPXE_ACPIPWR_H + +/** @file + * + * ACPI power off + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern int 
acpi_poweroff ( void ); + +#endif /* _IPXE_ACPIPWR_H */ diff --git a/src/arch/x86/include/ipxe/apm.h b/src/arch/x86/include/ipxe/apm.h new file mode 100644 index 00000000..21d913ac --- /dev/null +++ b/src/arch/x86/include/ipxe/apm.h @@ -0,0 +1,14 @@ +#ifndef _IPXE_APM_H +#define _IPXE_APM_H + +/** @file + * + * Advanced Power Management + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern int apm_poweroff ( void ); + +#endif /* _IPXE_APM_H */ diff --git a/src/arch/x86/include/ipxe/bios_nap.h b/src/arch/x86/include/ipxe/bios_nap.h new file mode 100644 index 00000000..c9b82c1e --- /dev/null +++ b/src/arch/x86/include/ipxe/bios_nap.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_BIOS_NAP_H +#define _IPXE_BIOS_NAP_H + +/** @file + * + * BIOS CPU sleeping + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef NAP_PCBIOS +#define NAP_PREFIX_pcbios +#else +#define NAP_PREFIX_pcbios __pcbios_ +#endif + +#endif /* _IPXE_BIOS_NAP_H */ diff --git a/src/arch/x86/include/ipxe/bios_reboot.h b/src/arch/x86/include/ipxe/bios_reboot.h new file mode 100644 index 00000000..3f6df907 --- /dev/null +++ b/src/arch/x86/include/ipxe/bios_reboot.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_BIOS_REBOOT_H +#define _IPXE_BIOS_REBOOT_H + +/** @file + * + * Standard PC-BIOS reboot mechanism + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef REBOOT_PCBIOS +#define REBOOT_PREFIX_pcbios +#else +#define REBOOT_PREFIX_pcbios __pcbios_ +#endif + +#endif /* _IPXE_BIOS_REBOOT_H */ diff --git a/src/arch/x86/include/ipxe/bios_sanboot.h b/src/arch/x86/include/ipxe/bios_sanboot.h new file mode 100644 index 00000000..85d69803 --- /dev/null +++ b/src/arch/x86/include/ipxe/bios_sanboot.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_BIOS_SANBOOT_H +#define _IPXE_BIOS_SANBOOT_H + +/** @file + * + * Standard PC-BIOS sanboot interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef SANBOOT_PCBIOS +#define SANBOOT_PREFIX_pcbios +#else +#define SANBOOT_PREFIX_pcbios __pcbios_ +#endif + +#endif /* 
_IPXE_BIOS_SANBOOT_H */ diff --git a/src/arch/x86/include/ipxe/bios_smbios.h b/src/arch/x86/include/ipxe/bios_smbios.h new file mode 100644 index 00000000..9f7f9c8f --- /dev/null +++ b/src/arch/x86/include/ipxe/bios_smbios.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_BIOS_SMBIOS_H +#define _IPXE_BIOS_SMBIOS_H + +/** @file + * + * Standard PC-BIOS SMBIOS interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef SMBIOS_PCBIOS +#define SMBIOS_PREFIX_pcbios +#else +#define SMBIOS_PREFIX_pcbios __pcbios_ +#endif + +#endif /* _IPXE_BIOS_SMBIOS_H */ diff --git a/src/arch/x86/include/ipxe/errno/pcbios.h b/src/arch/x86/include/ipxe/errno/pcbios.h new file mode 100644 index 00000000..6312adaa --- /dev/null +++ b/src/arch/x86/include/ipxe/errno/pcbios.h @@ -0,0 +1,115 @@ +#ifndef _IPXE_ERRNO_PCBIOS_H +#define _IPXE_ERRNO_PCBIOS_H + +/** + * @file + * + * PC-BIOS platform error codes + * + * We use the PXE-specified error codes as the platform error codes + * for the PC-BIOS platform. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Convert platform error code to platform component of iPXE error code + * + * @v platform Platform error code + * @ret errno Platform component of iPXE error code + */ +#define PLATFORM_TO_ERRNO( platform ) ( (platform) & 0xff ) + +/** + * Convert iPXE error code to platform error code + * + * @v errno iPXE error code + * @ret platform Platform error code + */ +#define ERRNO_TO_PLATFORM( errno ) ( (errno) & 0xff ) + +/* Platform-specific error codes */ +#define PLATFORM_ENOERR PXENV_STATUS_SUCCESS +#define PLATFORM_E2BIG PXENV_STATUS_BAD_FUNC +#define PLATFORM_EACCES PXENV_STATUS_TFTP_ACCESS_VIOLATION +#define PLATFORM_EADDRINUSE PXENV_STATUS_UDP_OPEN +#define PLATFORM_EADDRNOTAVAIL PXENV_STATUS_UDP_OPEN +#define PLATFORM_EAFNOSUPPORT PXENV_STATUS_UNSUPPORTED +#define PLATFORM_EAGAIN PXENV_STATUS_FAILURE +#define PLATFORM_EALREADY PXENV_STATUS_UDP_OPEN +#define PLATFORM_EBADF PXENV_STATUS_TFTP_CLOSED +#define 
PLATFORM_EBADMSG PXENV_STATUS_FAILURE +#define PLATFORM_EBUSY PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_ECANCELED PXENV_STATUS_BINL_CANCELED_BY_KEYSTROKE +#define PLATFORM_ECHILD PXENV_STATUS_TFTP_FILE_NOT_FOUND +#define PLATFORM_ECONNABORTED PXENV_STATUS_TFTP_CANNOT_READ_FROM_CONNECTION +#define PLATFORM_ECONNREFUSED PXENV_STATUS_TFTP_CANNOT_OPEN_CONNECTION +#define PLATFORM_ECONNRESET PXENV_STATUS_TFTP_CANNOT_READ_FROM_CONNECTION +#define PLATFORM_EDEADLK PXENV_STATUS_FAILURE +#define PLATFORM_EDESTADDRREQ PXENV_STATUS_BAD_FUNC +#define PLATFORM_EDOM PXENV_STATUS_FAILURE +#define PLATFORM_EDQUOT PXENV_STATUS_FAILURE +#define PLATFORM_EEXIST PXENV_STATUS_FAILURE +#define PLATFORM_EFAULT PXENV_STATUS_MCOPY_PROBLEM +#define PLATFORM_EFBIG PXENV_STATUS_MCOPY_PROBLEM +#define PLATFORM_EHOSTUNREACH PXENV_STATUS_ARP_TIMEOUT +#define PLATFORM_EIDRM PXENV_STATUS_FAILURE +#define PLATFORM_EILSEQ PXENV_STATUS_FAILURE +#define PLATFORM_EINPROGRESS PXENV_STATUS_FAILURE +#define PLATFORM_EINTR PXENV_STATUS_FAILURE +#define PLATFORM_EINVAL PXENV_STATUS_BAD_FUNC +#define PLATFORM_EIO PXENV_STATUS_TFTP_CANNOT_READ_FROM_CONNECTION +#define PLATFORM_EISCONN PXENV_STATUS_UDP_OPEN +#define PLATFORM_EISDIR PXENV_STATUS_FAILURE +#define PLATFORM_ELOOP PXENV_STATUS_FAILURE +#define PLATFORM_EMFILE PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_EMLINK PXENV_STATUS_FAILURE +#define PLATFORM_EMSGSIZE PXENV_STATUS_BAD_FUNC +#define PLATFORM_EMULTIHOP PXENV_STATUS_FAILURE +#define PLATFORM_ENAMETOOLONG PXENV_STATUS_FAILURE +#define PLATFORM_ENETDOWN PXENV_STATUS_ARP_TIMEOUT +#define PLATFORM_ENETRESET PXENV_STATUS_FAILURE +#define PLATFORM_ENETUNREACH PXENV_STATUS_ARP_TIMEOUT +#define PLATFORM_ENFILE PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_ENOBUFS PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_ENODATA PXENV_STATUS_FAILURE +#define PLATFORM_ENODEV PXENV_STATUS_TFTP_FILE_NOT_FOUND +#define PLATFORM_ENOENT PXENV_STATUS_TFTP_FILE_NOT_FOUND +#define PLATFORM_ENOEXEC 
PXENV_STATUS_FAILURE +#define PLATFORM_ENOLCK PXENV_STATUS_FAILURE +#define PLATFORM_ENOLINK PXENV_STATUS_FAILURE +#define PLATFORM_ENOMEM PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_ENOMSG PXENV_STATUS_FAILURE +#define PLATFORM_ENOPROTOOPT PXENV_STATUS_UNSUPPORTED +#define PLATFORM_ENOSPC PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_ENOSR PXENV_STATUS_OUT_OF_RESOURCES +#define PLATFORM_ENOSTR PXENV_STATUS_FAILURE +#define PLATFORM_ENOSYS PXENV_STATUS_UNSUPPORTED +#define PLATFORM_ENOTCONN PXENV_STATUS_FAILURE +#define PLATFORM_ENOTDIR PXENV_STATUS_FAILURE +#define PLATFORM_ENOTEMPTY PXENV_STATUS_FAILURE +#define PLATFORM_ENOTSOCK PXENV_STATUS_FAILURE +#define PLATFORM_ENOTSUP PXENV_STATUS_UNSUPPORTED +#define PLATFORM_ENOTTY PXENV_STATUS_FAILURE +#define PLATFORM_ENXIO PXENV_STATUS_TFTP_FILE_NOT_FOUND +#define PLATFORM_EOPNOTSUPP PXENV_STATUS_UNSUPPORTED +#define PLATFORM_EOVERFLOW PXENV_STATUS_FAILURE +#define PLATFORM_EPERM PXENV_STATUS_TFTP_ACCESS_VIOLATION +#define PLATFORM_EPIPE PXENV_STATUS_FAILURE +#define PLATFORM_EPROTO PXENV_STATUS_FAILURE +#define PLATFORM_EPROTONOSUPPORT PXENV_STATUS_UNSUPPORTED +#define PLATFORM_EPROTOTYPE PXENV_STATUS_FAILURE +#define PLATFORM_ERANGE PXENV_STATUS_FAILURE +#define PLATFORM_EROFS PXENV_STATUS_FAILURE +#define PLATFORM_ESPIPE PXENV_STATUS_FAILURE +#define PLATFORM_ESRCH PXENV_STATUS_TFTP_FILE_NOT_FOUND +#define PLATFORM_ESTALE PXENV_STATUS_FAILURE +#define PLATFORM_ETIME PXENV_STATUS_FAILURE +#define PLATFORM_ETIMEDOUT PXENV_STATUS_TFTP_READ_TIMEOUT +#define PLATFORM_ETXTBSY PXENV_STATUS_FAILURE +#define PLATFORM_EWOULDBLOCK PXENV_STATUS_TFTP_OPEN +#define PLATFORM_EXDEV PXENV_STATUS_FAILURE + +#endif /* _IPXE_ERRNO_PCBIOS_H */ diff --git a/src/arch/x86/include/ipxe/guestrpc.h b/src/arch/x86/include/ipxe/guestrpc.h new file mode 100644 index 00000000..bc3d8550 --- /dev/null +++ b/src/arch/x86/include/ipxe/guestrpc.h @@ -0,0 +1,68 @@ +#ifndef _IPXE_GUESTRPC_H +#define _IPXE_GUESTRPC_H + +/** @file + * + * VMware 
GuestRPC mechanism + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** GuestRPC magic number */ +#define GUESTRPC_MAGIC 0x49435052 /* "RPCI" */ + +/** Open RPC channel */ +#define GUESTRPC_OPEN 0x00 + +/** Open RPC channel success status */ +#define GUESTRPC_OPEN_SUCCESS 0x00010000 + +/** Send RPC command length */ +#define GUESTRPC_COMMAND_LEN 0x01 + +/** Send RPC command length success status */ +#define GUESTRPC_COMMAND_LEN_SUCCESS 0x00810000 + +/** Send RPC command data */ +#define GUESTRPC_COMMAND_DATA 0x02 + +/** Send RPC command data success status */ +#define GUESTRPC_COMMAND_DATA_SUCCESS 0x00010000 + +/** Receive RPC reply length */ +#define GUESTRPC_REPLY_LEN 0x03 + +/** Receive RPC reply length success status */ +#define GUESTRPC_REPLY_LEN_SUCCESS 0x00830000 + +/** Receive RPC reply data */ +#define GUESTRPC_REPLY_DATA 0x04 + +/** Receive RPC reply data success status */ +#define GUESTRPC_REPLY_DATA_SUCCESS 0x00010000 + +/** Finish receiving RPC reply */ +#define GUESTRPC_REPLY_FINISH 0x05 + +/** Finish receiving RPC reply success status */ +#define GUESTRPC_REPLY_FINISH_SUCCESS 0x00010000 + +/** Close RPC channel */ +#define GUESTRPC_CLOSE 0x06 + +/** Close RPC channel success status */ +#define GUESTRPC_CLOSE_SUCCESS 0x00010000 + +/** RPC command success status */ +#define GUESTRPC_SUCCESS 0x2031 /* "1 " */ + +extern int guestrpc_open ( void ); +extern void guestrpc_close ( int channel ); +extern int guestrpc_command ( int channel, const char *command, char *reply, + size_t reply_len ); + +#endif /* _IPXE_GUESTRPC_H */ diff --git a/src/arch/x86/include/ipxe/iomap_pages.h b/src/arch/x86/include/ipxe/iomap_pages.h new file mode 100644 index 00000000..18e0a300 --- /dev/null +++ b/src/arch/x86/include/ipxe/iomap_pages.h @@ -0,0 +1,24 @@ +#ifndef _IPXE_IOMAP_PAGES_H +#define _IPXE_IOMAP_PAGES_H + +/** @file + * + * I/O mapping API using page tables + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef IOMAP_PAGES 
+#define IOMAP_PREFIX_pages +#else +#define IOMAP_PREFIX_pages __pages_ +#endif + +static inline __always_inline unsigned long +IOMAP_INLINE ( pages, io_to_bus ) ( volatile const void *io_addr ) { + /* Not easy to do; just return the CPU address for debugging purposes */ + return ( ( intptr_t ) io_addr ); +} + +#endif /* _IPXE_IOMAP_PAGES_H */ diff --git a/src/arch/x86/include/ipxe/memtop_umalloc.h b/src/arch/x86/include/ipxe/memtop_umalloc.h new file mode 100644 index 00000000..dee055d1 --- /dev/null +++ b/src/arch/x86/include/ipxe/memtop_umalloc.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_MEMTOP_UMALLOC_H +#define _IPXE_MEMTOP_UMALLOC_H + +/** @file + * + * External memory allocation + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef UMALLOC_MEMTOP +#define UMALLOC_PREFIX_memtop +#else +#define UMALLOC_PREFIX_memtop __memtop_ +#endif + +#endif /* _IPXE_MEMTOP_UMALLOC_H */ diff --git a/src/arch/x86/include/ipxe/pit8254.h b/src/arch/x86/include/ipxe/pit8254.h new file mode 100644 index 00000000..00b0ab16 --- /dev/null +++ b/src/arch/x86/include/ipxe/pit8254.h @@ -0,0 +1,81 @@ +#ifndef _IPXE_PIT8254_H +#define _IPXE_PIT8254_H + +/** @file + * + * 8254 Programmable Interval Timer + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** IRQ0 channel */ +#define PIT8254_CH_IRQ0 0 + +/** PC speaker channel */ +#define PIT8254_CH_SPKR 2 + +/** Timer frequency (1.193182MHz) */ +#define PIT8254_HZ 1193182UL + +/** Data port */ +#define PIT8254_DATA(channel) ( 0x40 + (channel) ) + +/** Mode/command register */ +#define PIT8254_CMD 0x43 + +/** Select channel */ +#define PIT8254_CMD_CHANNEL(channel) ( (channel) << 6 ) + +/** Access modes */ +#define PIT8254_CMD_ACCESS_LATCH 0x00 /**< Latch count value command */ +#define PIT8254_CMD_ACCESS_LO 0x10 /**< Low byte only */ +#define PIT8254_CMD_ACCESS_HI 0x20 /**< High byte only */ +#define PIT8254_CMD_ACCESS_LOHI 0x30 /**< Low-byte, high-byte pair */ + +/* Operating modes */ +#define PIT8254_CMD_OP_TERMINAL 0x00 /**< Interrupt 
on terminal count */ +#define PIT8254_CMD_OP_ONESHOT 0x02 /**< Hardware re-triggerable one-shot */ +#define PIT8254_CMD_OP_RATE 0x04 /**< Rate generator */ +#define PIT8254_CMD_OP_SQUARE 0x06 /**< Square wave generator */ +#define PIT8254_CMD_OP_SWSTROBE 0x08 /**< Software triggered strobe */ +#define PIT8254_CMD_OP_HWSTROBE 0x0a /**< Hardware triggered strobe */ +#define PIT8254_CMD_OP_RATE2 0x0c /**< Rate generator (duplicate) */ +#define PIT8254_CMD_OP_SQUARE2 0x0e /**< Square wave generator (duplicate)*/ + +/** Binary mode */ +#define PIT8254_CMD_BINARY 0x00 + +/** BCD mode */ +#define PIT8254_CMD_BCD 0x01 + +/** PC speaker control register */ +#define PIT8254_SPKR 0x61 + +/** PC speaker channel gate */ +#define PIT8254_SPKR_GATE 0x01 + +/** PC speaker enabled */ +#define PIT8254_SPKR_ENABLE 0x02 + +/** PC speaker channel output */ +#define PIT8254_SPKR_OUT 0x20 + +extern void pit8254_speaker_delay ( unsigned int ticks ); + +/** + * Delay for a fixed number of microseconds + * + * @v usecs Number of microseconds for which to delay + */ +static inline __attribute__ (( always_inline )) void +pit8254_udelay ( unsigned long usecs ) { + + /* Delays are invariably compile-time constants; force the + * multiplication and division to take place at compilation + * time rather than runtime. 
+ */ + pit8254_speaker_delay ( ( usecs * PIT8254_HZ ) / 1000000 ); +} + +#endif /* _IPXE_PIT8254_H */ diff --git a/src/arch/x86/include/ipxe/rsdp.h b/src/arch/x86/include/ipxe/rsdp.h new file mode 100644 index 00000000..7e32c001 --- /dev/null +++ b/src/arch/x86/include/ipxe/rsdp.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_RSDP_H +#define _IPXE_RSDP_H + +/** @file + * + * Standard PC-BIOS ACPI RSDP interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef ACPI_RSDP +#define ACPI_PREFIX_rsdp +#else +#define ACPI_PREFIX_rsdp __rsdp_ +#endif + +#endif /* _IPXE_RSDP_H */ diff --git a/src/arch/x86/include/ipxe/rtc_entropy.h b/src/arch/x86/include/ipxe/rtc_entropy.h new file mode 100644 index 00000000..581abcd3 --- /dev/null +++ b/src/arch/x86/include/ipxe/rtc_entropy.h @@ -0,0 +1,62 @@ +#ifndef _IPXE_RTC_ENTROPY_H +#define _IPXE_RTC_ENTROPY_H + +/** @file + * + * RTC-based entropy source + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#ifdef ENTROPY_RTC +#define ENTROPY_PREFIX_rtc +#else +#define ENTROPY_PREFIX_rtc __rtc_ +#endif + +/** + * min-entropy per sample + * + * @ret min_entropy min-entropy of each sample + */ +static inline __always_inline min_entropy_t +ENTROPY_INLINE ( rtc, min_entropy_per_sample ) ( void ) { + + /* The min-entropy has been measured on several platforms + * using the entropy_sample test code. Modelling the samples + * as independent, and using a confidence level of 99.99%, the + * measurements were as follows: + * + * qemu-kvm : 7.38 bits + * VMware : 7.46 bits + * Physical hardware : 2.67 bits + * + * We choose the lowest of these (2.67 bits) and apply a 50% + * safety margin to allow for some potential non-independence + * of samples. 
+ */ + return MIN_ENTROPY ( 1.3 ); +} + +extern uint8_t rtc_sample ( void ); + +/** + * Get noise sample + * + * @ret noise Noise sample + * @ret rc Return status code + */ +static inline __always_inline int +ENTROPY_INLINE ( rtc, get_noise ) ( noise_sample_t *noise ) { + + /* Get sample */ + *noise = rtc_sample(); + + /* Always successful */ + return 0; +} + +#endif /* _IPXE_RTC_ENTROPY_H */ diff --git a/src/arch/x86/include/ipxe/rtc_time.h b/src/arch/x86/include/ipxe/rtc_time.h new file mode 100644 index 00000000..cb8c7f49 --- /dev/null +++ b/src/arch/x86/include/ipxe/rtc_time.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_RTC_TIME_H +#define _IPXE_RTC_TIME_H + +/** @file + * + * RTC-based time source + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef TIME_RTC +#define TIME_PREFIX_rtc +#else +#define TIME_PREFIX_rtc __rtc_ +#endif + +#endif /* _IPXE_RTC_TIME_H */ diff --git a/src/arch/x86/include/ipxe/vesafb.h b/src/arch/x86/include/ipxe/vesafb.h new file mode 100644 index 00000000..efc8f2cb --- /dev/null +++ b/src/arch/x86/include/ipxe/vesafb.h @@ -0,0 +1,210 @@ +#ifndef _IPXE_VESAFB_H +#define _IPXE_VESAFB_H + +/** @file + * + * VESA frame buffer console + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** INT 10,4f00: return controller information */ +#define VBE_CONTROLLER_INFO 0x4f00 + +/** VBE controller information */ +struct vbe_controller_info { + /** VBE signature */ + uint32_t vbe_signature; + /** VBE minor version */ + uint8_t vbe_minor_version; + /** VBE major version */ + uint8_t vbe_major_version; + /** Pointer to OEM string */ + struct segoff oem_string_ptr; + /** Capabilities of graphics controller */ + uint32_t capabilities; + /** Pointer to video mode list */ + struct segoff video_mode_ptr; + /** Number of 64kB memory blocks */ + uint16_t total_memory; + /** VBE implementation software revision */ + uint16_t oem_software_rev; + /** Pointer to vendor name string */ + struct segoff oem_vendor_name_ptr; + /** Pointer to 
product name string */ + struct segoff oem_product_name_ptr; + /** Pointer to product revision string */ + struct segoff oem_product_rev_ptr; + /** Reserved for VBE implementation scratch area */ + uint8_t reserved[222]; + /* VBE2.0 defines an additional 256-byte data area for + * including the OEM strings inline within the VBE information + * block; we omit this to reduce the amount of base memory + * required for VBE calls. + */ +} __attribute__ (( packed )); + +/** VBE controller information signature */ +#define VBE_CONTROLLER_SIGNATURE \ + ( ( 'V' << 0 ) | ( 'E' << 8 ) | ( 'S' << 16 ) | ( 'A' << 24 ) ) + +/** VBE mode list end marker */ +#define VBE_MODE_END 0xffff + +/** INT 10,4f01: return VBE mode information */ +#define VBE_MODE_INFO 0x4f01 + +/** VBE mode information */ +struct vbe_mode_info { + /** Mode attributes */ + uint16_t mode_attributes; + /** Window A attributes */ + uint8_t win_a_attributes; + /** Window B attributes */ + uint8_t win_b_attributes; + /** Window granularity */ + uint16_t win_granularity; + /** Window size */ + uint16_t win_size; + /** Window A start segment */ + uint16_t win_a_segment; + /** Window B start segment */ + uint16_t win_b_segment; + /** Pointer to window function */ + struct segoff win_func_ptr; + /** Bytes per scan line */ + uint16_t bytes_per_scan_line; + /** Horizontal resolution in pixels or characters */ + uint16_t x_resolution; + /** Vertical resolution in pixels or characters */ + uint16_t y_resolution; + /** Character cell width in pixels */ + uint8_t x_char_size; + /** Character cell height in pixels */ + uint8_t y_char_size; + /** Number of memory planes */ + uint8_t number_of_planes; + /** Bits per pixel */ + uint8_t bits_per_pixel; + /** Number of banks */ + uint8_t number_of_banks; + /** Memory model type */ + uint8_t memory_model; + /** Bank size in kB */ + uint8_t bank_size; + /** Number of images */ + uint8_t number_of_image_pages; + /** Reserved for page function */ + uint8_t reserved_1; + /** Size of 
direct colour red mask in bits */ + uint8_t red_mask_size; + /** Bit position of LSB of red mask */ + uint8_t red_field_position; + /** Size of direct colour green mask in bits */ + uint8_t green_mask_size; + /** Bit position of LSB of green mask */ + uint8_t green_field_position; + /** Size of direct colour blue mask in bits */ + uint8_t blue_mask_size; + /** Bit position of LSB of blue mask */ + uint8_t blue_field_position; + /** Size of direct colour reserved mask in bits */ + uint8_t rsvd_mask_size; + /** Bit position of LSB of reserved mask */ + uint8_t rsvd_field_position; + /** Direct colour mode attributes */ + uint8_t direct_colour_mode_info; + /** Physical address for flat memory frame buffer */ + uint32_t phys_base_ptr; + /** Pointer to start of off-screen memory */ + uint32_t off_screen_mem_offset; + /** Amount of off-screen memory in 1kB units */ + uint16_t off_screen_mem_size; + /** Reserved */ + uint8_t reserved_2[206]; +} __attribute__ (( packed )); + +/** VBE mode attributes */ +enum vbe_mode_attributes { + /** Mode supported in hardware */ + VBE_MODE_ATTR_SUPPORTED = 0x0001, + /** TTY output functions supported by BIOS */ + VBE_MODE_ATTR_TTY = 0x0004, + /** Colour mode */ + VBE_MODE_ATTR_COLOUR = 0x0008, + /** Graphics mode */ + VBE_MODE_ATTR_GRAPHICS = 0x0010, + /** Not a VGA compatible mode */ + VBE_MODE_ATTR_NOT_VGA = 0x0020, + /** VGA compatible windowed memory mode is not available */ + VBE_MODE_ATTR_NOT_WINDOWED = 0x0040, + /** Linear frame buffer mode is available */ + VBE_MODE_ATTR_LINEAR = 0x0080, + /** Double scan mode is available */ + VBE_MODE_ATTR_DOUBLE = 0x0100, + /** Interlaced mode is available */ + VBE_MODE_ATTR_INTERLACED = 0x0200, + /** Hardware triple buffering support */ + VBE_MODE_ATTR_TRIPLE_BUF = 0x0400, + /** Hardware stereoscopic display support */ + VBE_MODE_ATTR_STEREO = 0x0800, + /** Dual display start address support */ + VBE_MODE_ATTR_DUAL = 0x1000, +}; + +/** VBE mode memory models */ +enum vbe_mode_memory_model { 
+ /** Text mode */ + VBE_MODE_MODEL_TEXT = 0x00, + /** CGA graphics mode */ + VBE_MODE_MODEL_CGA = 0x01, + /** Hercules graphics mode */ + VBE_MODE_MODEL_HERCULES = 0x02, + /** Planar mode */ + VBE_MODE_MODEL_PLANAR = 0x03, + /** Packed pixel mode */ + VBE_MODE_MODEL_PACKED_PIXEL = 0x04, + /** Non-chain 4, 256 colour mode */ + VBE_MODE_MODEL_NON_CHAIN_4 = 0x05, + /** Direct colour mode */ + VBE_MODE_MODEL_DIRECT_COLOUR = 0x06, + /** YUV mode */ + VBE_MODE_MODEL_YUV = 0x07, +}; + +/** INT 10,4f02: set VBE mode */ +#define VBE_SET_MODE 0x4f02 + +/** VBE linear frame buffer mode bit */ +#define VBE_MODE_LINEAR 0x4000 + +/** INT 10,1130: get font information */ +#define VBE_GET_FONT 0x1130 + +/** Font sets */ +enum vbe_font_set { + /** 8x14 character font */ + VBE_FONT_8x14 = 0x0200, + /** 8x8 double dot font */ + VBE_FONT_8x8_DOUBLE = 0x0300, + /** 8x8 double dot font (high 128 characters) */ + VBE_FONT_8x8_DOUBLE_HIGH = 0x0400, + /** 9x14 alpha alternate font */ + VBE_FONT_9x14_ALPHA_ALT = 0x0500, + /** 8x16 font */ + VBE_FONT_8x16 = 0x0600, + /** 9x16 alternate font */ + VBE_FONT_9x16_ALT = 0x0700, +}; + +/** INT 10,00: set VGA mode */ +#define VBE_SET_VGA_MODE 0x0000 + +/** INT 10,0f: get VGA mode */ +#define VBE_GET_VGA_MODE 0x0f00 + +#endif /* _IPXE_VESAFB_H */ diff --git a/src/arch/x86/include/ipxe/vmware.h b/src/arch/x86/include/ipxe/vmware.h new file mode 100644 index 00000000..24f60a03 --- /dev/null +++ b/src/arch/x86/include/ipxe/vmware.h @@ -0,0 +1,81 @@ +#ifndef _IPXE_VMWARE_H +#define _IPXE_VMWARE_H + +/** @file + * + * VMware backdoor mechanism + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** VMware backdoor I/O port */ +#define VMW_PORT 0x5658 + +/** VMware backdoor magic value */ +#define VMW_MAGIC 0x564d5868 /* "VMXh" */ + +/** VMware backdoor magic instruction */ +#define VMW_BACKDOOR "inl %%dx, %%eax" + +/** Get VMware version */ +#define VMW_CMD_GET_VERSION 0x0a + +/** Issue GuestRPC command */ +#define VMW_CMD_GUESTRPC 0x1e + 
+/** + * Get VMware version + * + * @ret version VMware version(?) + * @ret magic VMware magic number, if present + * @ret product_type VMware product type + */ +static inline __attribute__ (( always_inline )) void +vmware_cmd_get_version ( uint32_t *version, uint32_t *magic, + uint32_t *product_type ) { + uint32_t discard_d; + + /* Perform backdoor call */ + __asm__ __volatile__ ( VMW_BACKDOOR + : "=a" ( *version ), "=b" ( *magic ), + "=c" ( *product_type ), "=d" ( discard_d ) + : "0" ( VMW_MAGIC ), "1" ( 0 ), + "2" ( VMW_CMD_GET_VERSION ), + "3" ( VMW_PORT ) ); +} + +/** + * Issue GuestRPC command + * + * @v channel Channel number + * @v subcommand GuestRPC subcommand + * @v parameter Subcommand-specific parameter + * @ret edxhi Subcommand-specific result + * @ret ebx Subcommand-specific result + * @ret status Command status + */ +static inline __attribute__ (( always_inline )) uint32_t +vmware_cmd_guestrpc ( int channel, uint16_t subcommand, uint32_t parameter, + uint16_t *edxhi, uint32_t *ebx ) { + uint32_t discard_a; + uint32_t status; + uint32_t edx; + + /* Perform backdoor call */ + __asm__ __volatile__ ( VMW_BACKDOOR + : "=a" ( discard_a ), "=b" ( *ebx ), + "=c" ( status ), "=d" ( edx ) + : "0" ( VMW_MAGIC ), "1" ( parameter ), + "2" ( VMW_CMD_GUESTRPC | ( subcommand << 16 )), + "3" ( VMW_PORT | ( channel << 16 ) ) ); + *edxhi = ( edx >> 16 ); + + return status; +} + +extern int vmware_present ( void ); + +#endif /* _IPXE_VMWARE_H */ diff --git a/src/arch/x86/include/kir.h b/src/arch/x86/include/kir.h new file mode 100644 index 00000000..84633d26 --- /dev/null +++ b/src/arch/x86/include/kir.h @@ -0,0 +1,18 @@ +#ifndef KIR_H +#define KIR_H + +#ifndef KEEP_IT_REAL +#error "kir.h can be used only with -DKEEP_IT_REAL" +#endif + +#ifdef ASSEMBLY + +#define code32 code16gcc + +#else /* ASSEMBLY */ + +__asm__ ( ".code16gcc" ); + +#endif /* ASSEMBLY */ + +#endif /* KIR_H */ diff --git a/src/arch/x86/include/libkir.h b/src/arch/x86/include/libkir.h new file mode 
100644 index 00000000..1f5b1350 --- /dev/null +++ b/src/arch/x86/include/libkir.h @@ -0,0 +1,233 @@ +#ifndef LIBKIR_H +#define LIBKIR_H + +#include "realmode.h" + +#ifndef ASSEMBLY + +/* + * Full API documentation for these functions is in realmode.h. + * + */ + +/* Access to variables in .data16 and .text16 in a way compatible with librm */ +#define __data16( variable ) variable +#define __data16_array( variable, array ) variable array +#define __bss16( variable ) variable +#define __bss16_array( variable, array ) variable array +#define __text16( variable ) variable +#define __text16_array( variable,array ) variable array +#define __use_data16( variable ) variable +#define __use_text16( variable ) variable +#define __from_data16( pointer ) pointer +#define __from_text16( pointer ) pointer + +/* Real-mode data and code segments */ +static inline __attribute__ (( always_inline )) unsigned int _rm_cs ( void ) { + uint16_t cs; + __asm__ __volatile__ ( "movw %%cs, %w0" : "=r" ( cs ) ); + return cs; +} + +static inline __attribute__ (( always_inline )) unsigned int _rm_ds ( void ) { + uint16_t ds; + __asm__ __volatile__ ( "movw %%ds, %w0" : "=r" ( ds ) ); + return ds; +} + +#define rm_cs ( _rm_cs() ) +#define rm_ds ( _rm_ds() ) + +/* Copy to/from base memory */ + +static inline void copy_to_real_libkir ( unsigned int dest_seg, + unsigned int dest_off, + const void *src, size_t n ) { + unsigned int discard_D, discard_S, discard_c; + + __asm__ __volatile__ ( "pushw %%es\n\t" + "movw %3, %%es\n\t" + "rep movsb\n\t" + "popw %%es\n\t" + : "=D" ( discard_D ), "=S" ( discard_S ), + "=c" ( discard_c ) + : "r" ( dest_seg ), "D" ( dest_off ), + "S" ( src ), + "c" ( n ) + : "memory" ); +} + +static inline void copy_from_real_libkir ( void *dest, + unsigned int src_seg, + unsigned int src_off, + size_t n ) { + unsigned int discard_D, discard_S, discard_c; + + __asm__ __volatile__ ( "pushw %%ds\n\t" + "movw %4, %%ds\n\t" + "rep movsb\n\t" + "popw %%ds\n\t" + : "=D" ( discard_D ), 
"=S" ( discard_S ), + "=c" ( discard_c ) + : "D" ( dest ), + "r" ( src_seg ), "S" ( src_off ), + "c" ( n ) + : "memory" ); +} + +#define copy_to_real copy_to_real_libkir +#define copy_from_real copy_from_real_libkir + +/* + * Transfer individual values to/from base memory. There may well be + * a neater way to do this. We have two versions: one for constant + * offsets (where the mov instruction must be of the form "mov + * %es:123, %xx") and one for non-constant offsets (where the mov + * instruction must be of the form "mov %es:(%xx), %yx". If it's + * possible to incorporate both forms into one __asm__ instruction, I + * don't know how to do it. + * + * Ideally, the mov instruction should be "mov%z0"; the "%z0" is meant + * to expand to either "b", "w" or "l" depending on the size of + * operand 0. This would remove the (minor) ambiguity in the mov + * instruction. However, gcc on at least my system barfs with an + * "internal compiler error" when confronted with %z0. + * + */ + +#define put_real_kir_const_off( var, seg, off ) \ + __asm__ ( "movw %w1, %%es\n\t" \ + "mov %0, %%es:%c2\n\t" \ + "pushw %%ds\n\t" /* restore %es */ \ + "popw %%es\n\t" \ + : \ + : "r,r" ( var ), "rm,rm" ( seg ), "i,!r" ( off ) \ + ) + +#define put_real_kir_nonconst_off( var, seg, off ) \ + __asm__ ( "movw %w1, %%es\n\t" \ + "mov %0, %%es:(%2)\n\t" \ + "pushw %%ds\n\t" /* restore %es */ \ + "popw %%es\n\t" \ + : \ + : "r" ( var ), "rm" ( seg ), "r" ( off ) \ + ) + +#define put_real_kir( var, seg, off ) \ + do { \ + if ( __builtin_constant_p ( off ) ) \ + put_real_kir_const_off ( var, seg, off ); \ + else \ + put_real_kir_nonconst_off ( var, seg, off ); \ + } while ( 0 ) + +#define get_real_kir_const_off( var, seg, off ) \ + __asm__ ( "movw %w1, %%es\n\t" \ + "mov %%es:%c2, %0\n\t" \ + "pushw %%ds\n\t" /* restore %es */ \ + "popw %%es\n\t" \ + : "=r,r" ( var ) \ + : "rm,rm" ( seg ), "i,!r" ( off ) \ + ) + +#define get_real_kir_nonconst_off( var, seg, off ) \ + __asm__ ( "movw %w1, 
%%es\n\t" \ + "mov %%es:(%2), %0\n\t" \ + "pushw %%ds\n\t" /* restore %es */ \ + "popw %%es\n\t" \ + : "=r" ( var ) \ + : "rm" ( seg ), "r" ( off ) \ + ) + +#define get_real_kir( var, seg, off ) \ + do { \ + if ( __builtin_constant_p ( off ) ) \ + get_real_kir_const_off ( var, seg, off ); \ + else \ + get_real_kir_nonconst_off ( var, seg, off ); \ + } while ( 0 ) + +#define put_real put_real_kir +#define get_real get_real_kir + +/** + * A pointer to a user buffer + * + * This is actually a struct segoff, but encoded as a uint32_t to + * ensure that gcc passes it around efficiently. + */ +typedef uint32_t userptr_t; + +/** + * Copy data to user buffer + * + * @v buffer User buffer + * @v offset Offset within user buffer + * @v src Source + * @v len Length + */ +static inline __attribute__ (( always_inline )) void +copy_to_user ( userptr_t buffer, off_t offset, const void *src, size_t len ) { + copy_to_real ( ( buffer >> 16 ), ( ( buffer & 0xffff ) + offset ), + src, len ); +} + +/** + * Copy data from user buffer + * + * @v dest Destination + * @v buffer User buffer + * @v offset Offset within user buffer + * @v len Length + */ +static inline __attribute__ (( always_inline )) void +copy_from_user ( void *dest, userptr_t buffer, off_t offset, size_t len ) { + copy_from_real ( dest, ( buffer >> 16 ), + ( ( buffer & 0xffff ) + offset ), len ); +} + +/** + * Convert segment:offset address to user buffer + * + * @v segment Real-mode segment + * @v offset Real-mode offset + * @ret buffer User buffer + */ +static inline __attribute__ (( always_inline )) userptr_t +real_to_user ( unsigned int segment, unsigned int offset ) { + return ( ( segment << 16 ) | offset ); +} + +/** + * Convert virtual address to user buffer + * + * @v virtual Virtual address + * @ret buffer User buffer + * + * This constructs a user buffer from an ordinary pointer. Use it + * when you need to pass a pointer to an internal buffer to a function + * that expects a @c userptr_t. 
+ */ +static inline __attribute__ (( always_inline )) userptr_t +virt_to_user ( void * virtual ) { + return real_to_user ( rm_ds, ( intptr_t ) virtual ); +} + +/* TEXT16_CODE: declare a fragment of code that resides in .text16 */ +#define TEXT16_CODE( asm_code_str ) \ + ".section \".text16\", \"ax\", @progbits\n\t" \ + ".code16\n\t" \ + ".arch i386\n\t" \ + asm_code_str "\n\t" \ + ".code16gcc\n\t" \ + ".previous\n\t" + +/* REAL_CODE: declare a fragment of code that executes in real mode */ +#define REAL_CODE( asm_code_str ) \ + ".code16\n\t" \ + asm_code_str "\n\t" \ + ".code16gcc\n\t" + +#endif /* ASSEMBLY */ + +#endif /* LIBKIR_H */ diff --git a/src/arch/x86/include/librm.h b/src/arch/x86/include/librm.h new file mode 100644 index 00000000..5196d390 --- /dev/null +++ b/src/arch/x86/include/librm.h @@ -0,0 +1,477 @@ +#ifndef LIBRM_H +#define LIBRM_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* Segment selectors as used in our protected-mode GDTs. + * + * Don't change these unless you really know what you're doing. + */ +#define VIRTUAL_CS 0x08 +#define VIRTUAL_DS 0x10 +#define PHYSICAL_CS 0x18 +#define PHYSICAL_DS 0x20 +#define REAL_CS 0x28 +#define REAL_DS 0x30 +#define P2R_DS 0x38 +#define LONG_CS 0x40 + +/* Calculate symbol address within VIRTUAL_CS or VIRTUAL_DS + * + * In a 64-bit build, we set the bases of VIRTUAL_CS and VIRTUAL_DS + * such that truncating a .textdata symbol value to 32 bits gives a + * valid 32-bit virtual address. + * + * The C code is compiled with -mcmodel=kernel and so we must place + * all .textdata symbols within the negative 2GB of the 64-bit address + * space. Consequently, all .textdata symbols will have the MSB set + * after truncation to 32 bits. This means that a straightforward + * R_X86_64_32 relocation record for the symbol will fail, since the + * truncated symbol value will not correctly zero-extend to the + * original 64-bit value. 
+ * + * Using an R_X86_64_32S relocation record would work, but there is no + * (sensible) way to generate these relocation records within 32-bit + * or 16-bit code. + * + * The simplest solution is to generate an R_X86_64_32 relocation + * record with an addend of (-0xffffffff00000000). Since all + * .textdata symbols are within the negative 2GB of the 64-bit address + * space, this addend acts to effectively truncate the symbol to 32 + * bits, thereby matching the semantics of the R_X86_64_32 relocation + * records generated for 32-bit and 16-bit code. + * + * In a 32-bit build, this problem does not exist, and we can just use + * the .textdata symbol values directly. + */ +#ifdef __x86_64__ +#define VIRTUAL(address) ( (address) - 0xffffffff00000000 ) +#else +#define VIRTUAL(address) (address) +#endif + +#ifdef ASSEMBLY + +/** + * Call C function from real-mode code + * + * @v function C function + */ +.macro virtcall function + pushl $VIRTUAL(\function) + call virt_call +.endm + +#else /* ASSEMBLY */ + +#ifdef UACCESS_LIBRM +#define UACCESS_PREFIX_librm +#else +#define UACCESS_PREFIX_librm __librm_ +#endif + +/** + * Call C function from real-mode code + * + * @v function C function + */ +#define VIRT_CALL( function ) \ + "pushl $( " _S2 ( VIRTUAL ( function ) ) " )\n\t" \ + "call virt_call\n\t" + +/* Variables in librm.S */ +extern const unsigned long virt_offset; + +/** + * Convert physical address to user pointer + * + * @v phys_addr Physical address + * @ret userptr User pointer + */ +static inline __always_inline userptr_t +UACCESS_INLINE ( librm, phys_to_user ) ( unsigned long phys_addr ) { + + /* In a 64-bit build, any valid physical address is directly + * usable as a virtual address, since the low 4GB is + * identity-mapped. 
+ */ + if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) + return phys_addr; + + /* In a 32-bit build, subtract virt_offset */ + return ( phys_addr - virt_offset ); +} + +/** + * Convert user buffer to physical address + * + * @v userptr User pointer + * @v offset Offset from user pointer + * @ret phys_addr Physical address + */ +static inline __always_inline unsigned long +UACCESS_INLINE ( librm, user_to_phys ) ( userptr_t userptr, off_t offset ) { + unsigned long addr = ( userptr + offset ); + + /* In a 64-bit build, any virtual address in the low 4GB is + * directly usable as a physical address, since the low 4GB is + * identity-mapped. + */ + if ( ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) && + ( addr <= 0xffffffffUL ) ) + return addr; + + /* In a 32-bit build or in a 64-bit build with a virtual + * address above 4GB: add virt_offset + */ + return ( addr + virt_offset ); +} + +static inline __always_inline userptr_t +UACCESS_INLINE ( librm, virt_to_user ) ( volatile const void *addr ) { + return trivial_virt_to_user ( addr ); +} + +static inline __always_inline void * +UACCESS_INLINE ( librm, user_to_virt ) ( userptr_t userptr, off_t offset ) { + return trivial_user_to_virt ( userptr, offset ); +} + +static inline __always_inline userptr_t +UACCESS_INLINE ( librm, userptr_add ) ( userptr_t userptr, off_t offset ) { + return trivial_userptr_add ( userptr, offset ); +} + +static inline __always_inline off_t +UACCESS_INLINE ( librm, userptr_sub ) ( userptr_t userptr, + userptr_t subtrahend ) { + return trivial_userptr_sub ( userptr, subtrahend ); +} + +static inline __always_inline void +UACCESS_INLINE ( librm, memcpy_user ) ( userptr_t dest, off_t dest_off, + userptr_t src, off_t src_off, + size_t len ) { + trivial_memcpy_user ( dest, dest_off, src, src_off, len ); +} + +static inline __always_inline void +UACCESS_INLINE ( librm, memmove_user ) ( userptr_t dest, off_t dest_off, + userptr_t src, off_t src_off, + size_t len ) { + trivial_memmove_user ( dest, 
dest_off, src, src_off, len ); +} + +static inline __always_inline int +UACCESS_INLINE ( librm, memcmp_user ) ( userptr_t first, off_t first_off, + userptr_t second, off_t second_off, + size_t len ) { + return trivial_memcmp_user ( first, first_off, second, second_off, len); +} + +static inline __always_inline void +UACCESS_INLINE ( librm, memset_user ) ( userptr_t buffer, off_t offset, + int c, size_t len ) { + trivial_memset_user ( buffer, offset, c, len ); +} + +static inline __always_inline size_t +UACCESS_INLINE ( librm, strlen_user ) ( userptr_t buffer, off_t offset ) { + return trivial_strlen_user ( buffer, offset ); +} + +static inline __always_inline off_t +UACCESS_INLINE ( librm, memchr_user ) ( userptr_t buffer, off_t offset, + int c, size_t len ) { + return trivial_memchr_user ( buffer, offset, c, len ); +} + + +/****************************************************************************** + * + * Access to variables in .data16 and .text16 + * + */ + +extern char * const data16; +extern char * const text16; + +#define __data16( variable ) \ + __attribute__ (( section ( ".data16" ) )) \ + _data16_ ## variable __asm__ ( #variable ) + +#define __data16_array( variable, array ) \ + __attribute__ (( section ( ".data16" ) )) \ + _data16_ ## variable array __asm__ ( #variable ) + +#define __bss16( variable ) \ + __attribute__ (( section ( ".bss16" ) )) \ + _data16_ ## variable __asm__ ( #variable ) + +#define __bss16_array( variable, array ) \ + __attribute__ (( section ( ".bss16" ) )) \ + _data16_ ## variable array __asm__ ( #variable ) + +#define __text16( variable ) \ + __attribute__ (( section ( ".text16.data" ) )) \ + _text16_ ## variable __asm__ ( #variable ) + +#define __text16_array( variable, array ) \ + __attribute__ (( section ( ".text16.data" ) )) \ + _text16_ ## variable array __asm__ ( #variable ) + +#define __use_data16( variable ) \ + ( * ( ( typeof ( _data16_ ## variable ) * ) \ + & ( data16 [ ( size_t ) & ( _data16_ ## variable ) ] ) ) ) + 
+#define __use_text16( variable ) \ + ( * ( ( typeof ( _text16_ ## variable ) * ) \ + & ( text16 [ ( size_t ) & ( _text16_ ## variable ) ] ) ) ) + +#define __from_data16( pointer ) \ + ( ( unsigned int ) \ + ( ( ( void * ) (pointer) ) - ( ( void * ) data16 ) ) ) + +#define __from_text16( pointer ) \ + ( ( unsigned int ) \ + ( ( ( void * ) (pointer) ) - ( ( void * ) text16 ) ) ) + +/* Variables in librm.S, present in the normal data segment */ +extern uint16_t rm_sp; +extern uint16_t rm_ss; +extern const uint16_t __text16 ( rm_cs ); +#define rm_cs __use_text16 ( rm_cs ) +extern const uint16_t __text16 ( rm_ds ); +#define rm_ds __use_text16 ( rm_ds ) + +extern uint16_t copy_user_to_rm_stack ( userptr_t data, size_t size ); +extern void remove_user_from_rm_stack ( userptr_t data, size_t size ); + +/* CODE_DEFAULT: restore default .code32/.code64 directive */ +#ifdef __x86_64__ +#define CODE_DEFAULT ".code64" +#else +#define CODE_DEFAULT ".code32" +#endif + +/* LINE_SYMBOL: declare a symbol for the current source code line */ +#define LINE_SYMBOL _S2 ( OBJECT ) "__line_" _S2 ( __LINE__ ) "__%=:" + +/* TEXT16_CODE: declare a fragment of code that resides in .text16 */ +#define TEXT16_CODE( asm_code_str ) \ + ".section \".text16\", \"ax\", @progbits\n\t" \ + "\n" LINE_SYMBOL "\n\t" \ + ".code16\n\t" \ + asm_code_str "\n\t" \ + CODE_DEFAULT "\n\t" \ + ".previous\n\t" + +/* REAL_CODE: declare a fragment of code that executes in real mode */ +#define REAL_CODE( asm_code_str ) \ + "push $1f\n\t" \ + "call real_call\n\t" \ + TEXT16_CODE ( "\n1:\n\t" \ + asm_code_str \ + "\n\t" \ + "ret\n\t" ) + +/* PHYS_CODE: declare a fragment of code that executes in flat physical mode */ +#define PHYS_CODE( asm_code_str ) \ + "push $1f\n\t" \ + "call phys_call\n\t" \ + ".section \".text.phys\", \"ax\", @progbits\n\t"\ + "\n" LINE_SYMBOL "\n\t" \ + ".code32\n\t" \ + "\n1:\n\t" \ + asm_code_str \ + "\n\t" \ + "ret\n\t" \ + CODE_DEFAULT "\n\t" \ + ".previous\n\t" + +/** Number of interrupts 
*/ +#define NUM_INT 256 + +/** A 32-bit interrupt descriptor table register */ +struct idtr32 { + /** Limit */ + uint16_t limit; + /** Base */ + uint32_t base; +} __attribute__ (( packed )); + +/** A 64-bit interrupt descriptor table register */ +struct idtr64 { + /** Limit */ + uint16_t limit; + /** Base */ + uint64_t base; +} __attribute__ (( packed )); + +/** A 32-bit interrupt descriptor table entry */ +struct interrupt32_descriptor { + /** Low 16 bits of address */ + uint16_t low; + /** Code segment */ + uint16_t segment; + /** Unused */ + uint8_t unused; + /** Type and attributes */ + uint8_t attr; + /** High 16 bits of address */ + uint16_t high; +} __attribute__ (( packed )); + +/** A 64-bit interrupt descriptor table entry */ +struct interrupt64_descriptor { + /** Low 16 bits of address */ + uint16_t low; + /** Code segment */ + uint16_t segment; + /** Unused */ + uint8_t unused; + /** Type and attributes */ + uint8_t attr; + /** Middle 16 bits of address */ + uint16_t mid; + /** High 32 bits of address */ + uint32_t high; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** Interrupt descriptor is present */ +#define IDTE_PRESENT 0x80 + +/** Interrupt descriptor 32-bit interrupt gate type */ +#define IDTE_TYPE_IRQ32 0x0e + +/** Interrupt descriptor 64-bit interrupt gate type */ +#define IDTE_TYPE_IRQ64 0x0e + +/** An interrupt vector + * + * Each interrupt vector comprises an eight-byte fragment of code: + * + * 50 pushl %eax (or pushq %rax in long mode) + * b0 xx movb $INT, %al + * e9 xx xx xx xx jmp interrupt_wrapper + */ +struct interrupt_vector { + /** "push" instruction */ + uint8_t push; + /** "movb" instruction */ + uint8_t movb; + /** Interrupt number */ + uint8_t intr; + /** "jmp" instruction */ + uint8_t jmp; + /** Interrupt wrapper address offset */ + uint32_t offset; + /** Next instruction after jump */ + uint8_t next[0]; +} __attribute__ (( packed )); + +/** "push %eax" instruction */ +#define PUSH_INSN 0x50 + +/** 
"movb" instruction */ +#define MOVB_INSN 0xb0 + +/** "jmp" instruction */ +#define JMP_INSN 0xe9 + +/** 32-bit interrupt wrapper stack frame */ +struct interrupt_frame32 { + uint32_t esp; + uint32_t ss; + uint32_t gs; + uint32_t fs; + uint32_t es; + uint32_t ds; + uint32_t ebp; + uint32_t edi; + uint32_t esi; + uint32_t edx; + uint32_t ecx; + uint32_t ebx; + uint32_t eax; + uint32_t eip; + uint32_t cs; + uint32_t eflags; +} __attribute__ (( packed )); + +/** 64-bit interrupt wrapper stack frame */ +struct interrupt_frame64 { + uint64_t r15; + uint64_t r14; + uint64_t r13; + uint64_t r12; + uint64_t r11; + uint64_t r10; + uint64_t r9; + uint64_t r8; + uint64_t rbp; + uint64_t rdi; + uint64_t rsi; + uint64_t rdx; + uint64_t rcx; + uint64_t rbx; + uint64_t rax; + uint64_t rip; + uint64_t cs; + uint64_t rflags; + uint64_t rsp; + uint64_t ss; +} __attribute__ (( packed )); + +extern void set_interrupt_vector ( unsigned int intr, void *vector ); + +/** A page table */ +struct page_table { + /** Page address and flags */ + uint64_t page[512]; +}; + +/** Page flags */ +enum page_flags { + /** Page is present */ + PAGE_P = 0x01, + /** Page is writable */ + PAGE_RW = 0x02, + /** Page is accessible by user code */ + PAGE_US = 0x04, + /** Page-level write-through */ + PAGE_PWT = 0x08, + /** Page-level cache disable */ + PAGE_PCD = 0x10, + /** Page is a large page */ + PAGE_PS = 0x80, + /** Page is the last page in an allocation + * + * This bit is ignored by the hardware. We use it to track + * the size of allocations made by ioremap(). + */ + PAGE_LAST = 0x800, +}; + +/** The I/O space page table */ +extern struct page_table io_pages; + +/** I/O page size + * + * We choose to use 2MB pages for I/O space, to minimise the number of + * page table entries required. + */ +#define IO_PAGE_SIZE 0x200000UL + +/** I/O page base address + * + * We choose to place I/O space immediately above the identity-mapped + * 32-bit address space. 
+ */ +#define IO_BASE ( ( void * ) 0x100000000ULL ) + +#endif /* ASSEMBLY */ + +#endif /* LIBRM_H */ diff --git a/src/arch/x86/include/memsizes.h b/src/arch/x86/include/memsizes.h new file mode 100644 index 00000000..f115f757 --- /dev/null +++ b/src/arch/x86/include/memsizes.h @@ -0,0 +1,19 @@ +#ifndef _MEMSIZES_H +#define _MEMSIZES_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * Get size of base memory from BIOS free base memory counter + * + * @ret basemem Base memory size, in kB + */ +static inline unsigned int basememsize ( void ) { + return get_fbms(); +} + +extern unsigned int extmemsize ( void ); + +#endif /* _MEMSIZES_H */ diff --git a/src/arch/x86/include/multiboot.h b/src/arch/x86/include/multiboot.h new file mode 100644 index 00000000..ae09df6c --- /dev/null +++ b/src/arch/x86/include/multiboot.h @@ -0,0 +1,149 @@ +#ifndef _MULTIBOOT_H +#define _MULTIBOOT_H + +/** + * @file + * + * Multiboot operating systems + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** The magic number for the Multiboot header */ +#define MULTIBOOT_HEADER_MAGIC 0x1BADB002 + +/** Boot modules must be page aligned */ +#define MB_FLAG_PGALIGN 0x00000001 + +/** Memory map must be provided */ +#define MB_FLAG_MEMMAP 0x00000002 + +/** Video mode information must be provided */ +#define MB_FLAG_VIDMODE 0x00000004 + +/** Image is a raw multiboot image (not ELF) */ +#define MB_FLAG_RAW 0x00010000 + +/** + * The magic number passed by a Multiboot-compliant boot loader + * + * Must be passed in register %eax when jumping to the Multiboot OS + * image. 
+ */
+#define MULTIBOOT_BOOTLOADER_MAGIC 0x2BADB002
+
+/** Multiboot information structure mem_* fields are valid */
+#define MBI_FLAG_MEM 0x00000001
+
+/** Multiboot information structure boot_device field is valid */
+#define MBI_FLAG_BOOTDEV 0x00000002
+
+/** Multiboot information structure cmdline field is valid */
+#define MBI_FLAG_CMDLINE 0x00000004
+
+/** Multiboot information structure module fields are valid */
+#define MBI_FLAG_MODS 0x00000008
+
+/** Multiboot information structure a.out symbol table is valid */
+#define MBI_FLAG_AOUT 0x00000010
+
+/** Multiboot information structure ELF section header table is valid */
+#define MBI_FLAG_ELF 0x00000020
+
+/** Multiboot information structure memory map is valid */
+#define MBI_FLAG_MMAP 0x00000040
+
+/** Multiboot information structure drive list is valid */
+#define MBI_FLAG_DRIVES 0x00000080
+
+/** Multiboot information structure ROM configuration field is valid */
+#define MBI_FLAG_CFGTBL 0x00000100
+
+/** Multiboot information structure boot loader name field is valid */
+#define MBI_FLAG_LOADER 0x00000200
+
+/** Multiboot information structure APM table is valid */
+#define MBI_FLAG_APM 0x00000400
+
+/** Multiboot information structure video information is valid */
+#define MBI_FLAG_VBE 0x00000800
+
+/** A multiboot header */
+struct multiboot_header {
+ uint32_t magic;
+ uint32_t flags;
+ uint32_t checksum;
+ uint32_t header_addr;
+ uint32_t load_addr;
+ uint32_t load_end_addr;
+ uint32_t bss_end_addr;
+ uint32_t entry_addr;
+} __attribute__ (( packed, may_alias ));
+
+/** A multiboot a.out symbol table */
+struct multiboot_aout_symbol_table {
+ uint32_t tabsize;
+ uint32_t strsize;
+ uint32_t addr;
+ uint32_t reserved;
+} __attribute__ (( packed, may_alias ));
+
+/** A multiboot ELF section header table */
+struct multiboot_elf_section_header_table {
+ uint32_t num;
+ uint32_t size;
+ uint32_t addr;
+ uint32_t shndx;
+} __attribute__ (( packed, may_alias ));
+
+/** A multiboot information structure 
*/ +struct multiboot_info { + uint32_t flags; + uint32_t mem_lower; + uint32_t mem_upper; + uint32_t boot_device; + uint32_t cmdline; + uint32_t mods_count; + uint32_t mods_addr; + union { + struct multiboot_aout_symbol_table aout_syms; + struct multiboot_elf_section_header_table elf_sections; + } syms; + uint32_t mmap_length; + uint32_t mmap_addr; + uint32_t drives_length; + uint32_t drives_addr; + uint32_t config_table; + uint32_t boot_loader_name; + uint32_t apm_table; + uint32_t vbe_control_info; + uint32_t vbe_mode_info; + uint16_t vbe_mode; + uint16_t vbe_interface_seg; + uint16_t vbe_interface_off; + uint16_t vbe_interface_len; +} __attribute__ (( packed, may_alias )); + +/** A multiboot module structure */ +struct multiboot_module { + uint32_t mod_start; + uint32_t mod_end; + uint32_t string; + uint32_t reserved; +} __attribute__ (( packed, may_alias )); + +/** A multiboot memory map entry */ +struct multiboot_memory_map { + uint32_t size; + uint64_t base_addr; + uint64_t length; + uint32_t type; +} __attribute__ (( packed, may_alias )); + +/** Usable RAM */ +#define MBMEM_RAM 1 + +#endif /* _MULTIBOOT_H */ diff --git a/src/arch/x86/include/pic8259.h b/src/arch/x86/include/pic8259.h new file mode 100644 index 00000000..dbec5fd2 --- /dev/null +++ b/src/arch/x86/include/pic8259.h @@ -0,0 +1,70 @@ +/* + * Basic support for controlling the 8259 Programmable Interrupt Controllers. + * + * Initially written by Michael Brown (mcb30). 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifndef PIC8259_H +#define PIC8259_H + +#include + +#define IRQ_PIC_CUTOFF 8 + +/* 8259 register locations */ +#define PIC1_ICW1 0x20 +#define PIC1_OCW2 0x20 +#define PIC1_OCW3 0x20 +#define PIC1_ICR 0x20 +#define PIC1_IRR 0x20 +#define PIC1_ISR 0x20 +#define PIC1_ICW2 0x21 +#define PIC1_ICW3 0x21 +#define PIC1_ICW4 0x21 +#define PIC1_IMR 0x21 +#define PIC2_ICW1 0xa0 +#define PIC2_OCW2 0xa0 +#define PIC2_OCW3 0xa0 +#define PIC2_ICR 0xa0 +#define PIC2_IRR 0xa0 +#define PIC2_ISR 0xa0 +#define PIC2_ICW2 0xa1 +#define PIC2_ICW3 0xa1 +#define PIC2_ICW4 0xa1 +#define PIC2_IMR 0xa1 + +/* Register command values */ +#define OCW3_ID 0x08 +#define OCW3_READ_IRR 0x02 +#define OCW3_READ_ISR 0x03 +#define ICR_EOI_NON_SPECIFIC 0x20 +#define ICR_EOI_NOP 0x40 +#define ICR_EOI_SPECIFIC 0x60 +#define ICR_EOI_SET_PRIORITY 0xc0 + +/* Macros to enable/disable IRQs */ +#define IMR_REG(x) ( (x) < IRQ_PIC_CUTOFF ? PIC1_IMR : PIC2_IMR ) +#define IMR_BIT(x) ( 1 << ( (x) % IRQ_PIC_CUTOFF ) ) +#define irq_enabled(x) ( ( inb ( IMR_REG(x) ) & IMR_BIT(x) ) == 0 ) +#define enable_irq(x) outb ( inb( IMR_REG(x) ) & ~IMR_BIT(x), IMR_REG(x) ) +#define disable_irq(x) outb ( inb( IMR_REG(x) ) | IMR_BIT(x), IMR_REG(x) ) + +/* Macros for acknowledging IRQs */ +#define ICR_REG( irq ) ( (irq) < IRQ_PIC_CUTOFF ? 
PIC1_ICR : PIC2_ICR ) +#define ICR_VALUE( irq ) ( (irq) % IRQ_PIC_CUTOFF ) +#define CHAINED_IRQ 2 + +/* Utility macros to convert IRQ numbers to INT numbers and INT vectors */ +#define IRQ_INT( irq ) ( ( ( (irq) - IRQ_PIC_CUTOFF ) ^ 0x70 ) & 0x7f ) + +/* Other constants */ +#define IRQ_MAX 15 +#define IRQ_NONE -1U + +/* Function prototypes + */ +void send_eoi ( unsigned int irq ); + +#endif /* PIC8259_H */ diff --git a/src/arch/x86/include/pnpbios.h b/src/arch/x86/include/pnpbios.h new file mode 100644 index 00000000..d1487370 --- /dev/null +++ b/src/arch/x86/include/pnpbios.h @@ -0,0 +1,17 @@ +#ifndef _PNPBIOS_H +#define _PNPBIOS_H + +/** @file + * + * PnP BIOS + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* BIOS segment address */ +#define BIOS_SEG 0xf000 + +extern int find_pnp_bios ( void ); + +#endif /* _PNPBIOS_H */ diff --git a/src/arch/x86/include/pxe.h b/src/arch/x86/include/pxe.h new file mode 100644 index 00000000..54649b50 --- /dev/null +++ b/src/arch/x86/include/pxe.h @@ -0,0 +1,200 @@ +#ifndef PXE_H +#define PXE_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "pxe_types.h" +#include "pxe_error.h" +#include "pxe_api.h" +#include +#include + +/** PXE API invalid function code */ +#define PXENV_UNKNOWN 0xffff + +/** Parameter block for pxenv_unknown() */ +struct s_PXENV_UNKNOWN { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNKNOWN PXENV_UNKNOWN_t; + +/* Union used for PXE API calls; we don't know the type of the + * structure until we interpret the opcode. Also, Status is available + * in the same location for any opcode, and it's convenient to have + * non-specific access to it. 
+ */ +union u_PXENV_ANY { + /* Make it easy to read status for any operation */ + PXENV_STATUS_t Status; + struct s_PXENV_UNKNOWN unknown; + struct s_PXENV_UNLOAD_STACK unload_stack; + struct s_PXENV_GET_CACHED_INFO get_cached_info; + struct s_PXENV_TFTP_READ_FILE restart_tftp; + struct s_PXENV_START_UNDI start_undi; + struct s_PXENV_STOP_UNDI stop_undi; + struct s_PXENV_START_BASE start_base; + struct s_PXENV_STOP_BASE stop_base; + struct s_PXENV_TFTP_OPEN tftp_open; + struct s_PXENV_TFTP_CLOSE tftp_close; + struct s_PXENV_TFTP_READ tftp_read; + struct s_PXENV_TFTP_READ_FILE tftp_read_file; + struct s_PXENV_TFTP_GET_FSIZE tftp_get_fsize; + struct s_PXENV_UDP_OPEN udp_open; + struct s_PXENV_UDP_CLOSE udp_close; + struct s_PXENV_UDP_WRITE udp_write; + struct s_PXENV_UDP_READ udp_read; + struct s_PXENV_UNDI_STARTUP undi_startup; + struct s_PXENV_UNDI_CLEANUP undi_cleanup; + struct s_PXENV_UNDI_INITIALIZE undi_initialize; + struct s_PXENV_UNDI_RESET undi_reset_adapter; + struct s_PXENV_UNDI_SHUTDOWN undi_shutdown; + struct s_PXENV_UNDI_OPEN undi_open; + struct s_PXENV_UNDI_CLOSE undi_close; + struct s_PXENV_UNDI_TRANSMIT undi_transmit; + struct s_PXENV_UNDI_SET_MCAST_ADDRESS undi_set_mcast_address; + struct s_PXENV_UNDI_SET_STATION_ADDRESS undi_set_station_address; + struct s_PXENV_UNDI_SET_PACKET_FILTER undi_set_packet_filter; + struct s_PXENV_UNDI_GET_INFORMATION undi_get_information; + struct s_PXENV_UNDI_GET_STATISTICS undi_get_statistics; + struct s_PXENV_UNDI_CLEAR_STATISTICS undi_clear_statistics; + struct s_PXENV_UNDI_INITIATE_DIAGS undi_initiate_diags; + struct s_PXENV_UNDI_FORCE_INTERRUPT undi_force_interrupt; + struct s_PXENV_UNDI_GET_MCAST_ADDRESS undi_get_mcast_address; + struct s_PXENV_UNDI_GET_NIC_TYPE undi_get_nic_type; + struct s_PXENV_UNDI_GET_IFACE_INFO undi_get_iface_info; + struct s_PXENV_UNDI_GET_STATE undi_get_state; + struct s_PXENV_UNDI_ISR undi_isr; + struct s_PXENV_FILE_OPEN file_open; + struct s_PXENV_FILE_CLOSE file_close; + struct 
s_PXENV_FILE_SELECT file_select; + struct s_PXENV_FILE_READ file_read; + struct s_PXENV_GET_FILE_SIZE get_file_size; + struct s_PXENV_FILE_EXEC file_exec; + struct s_PXENV_FILE_API_CHECK file_api_check; + struct s_PXENV_FILE_EXIT_HOOK file_exit_hook; +}; + +typedef union u_PXENV_ANY PXENV_ANY_t; + +/** A PXE API call */ +struct pxe_api_call { + /** Entry point + * + * @v params PXE API call parameters + * @ret exit PXE API call exit code + */ + PXENV_EXIT_t ( * entry ) ( union u_PXENV_ANY *params ); + /** Length of parameters */ + uint16_t params_len; + /** Opcode */ + uint16_t opcode; +}; + +/** PXE API call table */ +#define PXE_API_CALLS __table ( struct pxe_api_call, "pxe_api_calls" ) + +/** Declare a PXE API call */ +#define __pxe_api_call __table_entry ( PXE_API_CALLS, 01 ) + +/** + * Define a PXE API call + * + * @v _opcode Opcode + * @v _entry Entry point + * @v _params_type Type of parameter structure + * @ret call PXE API call + */ +#define PXE_API_CALL( _opcode, _entry, _params_type ) { \ + .entry = ( ( ( ( PXENV_EXIT_t ( * ) ( _params_type *params ) ) NULL ) \ + == ( ( typeof ( _entry ) * ) NULL ) ) \ + ? 
( ( PXENV_EXIT_t ( * ) \ + ( union u_PXENV_ANY *params ) ) _entry ) \ + : ( ( PXENV_EXIT_t ( * ) \ + ( union u_PXENV_ANY *params ) ) _entry ) ), \ + .params_len = sizeof ( _params_type ), \ + .opcode = _opcode, \ + } + +/** An UNDI expansion ROM header */ +struct undi_rom_header { + /** Signature + * + * Must be equal to @c ROM_SIGNATURE + */ + UINT16_t Signature; + /** ROM length in 512-byte blocks */ + UINT8_t ROMLength; + /** Unused */ + UINT8_t unused[0x13]; + /** Offset of the PXE ROM ID structure */ + UINT16_t PXEROMID; + /** Offset of the PCI ROM structure */ + UINT16_t PCIRHeader; +} __attribute__ (( packed )); + +/** Signature for an expansion ROM */ +#define ROM_SIGNATURE 0xaa55 + +/** An UNDI ROM ID structure */ +struct undi_rom_id { + /** Signature + * + * Must be equal to @c UNDI_ROM_ID_SIGNATURE + */ + UINT32_t Signature; + /** Length of structure */ + UINT8_t StructLength; + /** Checksum */ + UINT8_t StructCksum; + /** Structure revision + * + * Must be zero. + */ + UINT8_t StructRev; + /** UNDI revision + * + * Version 2.1.0 is encoded as the byte sequence 0x00, 0x01, 0x02. 
+ */
+ UINT8_t UNDIRev[3];
+ /** Offset to UNDI loader */
+ UINT16_t UNDILoader;
+ /** Minimum required stack segment size */
+ UINT16_t StackSize;
+ /** Minimum required data segment size */
+ UINT16_t DataSize;
+ /** Minimum required code segment size */
+ UINT16_t CodeSize;
+} __attribute__ (( packed ));
+
+/** Signature for an UNDI ROM ID structure */
+#define UNDI_ROM_ID_SIGNATURE \
+ ( ( 'U' << 0 ) + ( 'N' << 8 ) + ( 'D' << 16 ) + ( 'I' << 24 ) )
+
+/** A PCI expansion header */
+struct pcir_header {
+ /** Signature
+ *
+ * Must be equal to @c PCIR_SIGNATURE
+ */
+ uint32_t signature;
+ /** PCI vendor ID */
+ uint16_t vendor_id;
+ /** PCI device ID */
+ uint16_t device_id;
+} __attribute__ (( packed ));
+
+/** Signature for a PCI expansion header structure */
+#define PCIR_SIGNATURE \
+ ( ( 'P' << 0 ) + ( 'C' << 8 ) + ( 'I' << 16 ) + ( 'R' << 24 ) )
+
+extern struct net_device *pxe_netdev;
+extern const char *pxe_cmdline;
+
+extern void pxe_set_netdev ( struct net_device *netdev );
+extern void pxe_fake_cached_info ( void );
+extern PXENV_EXIT_t pxenv_tftp_read_file ( struct s_PXENV_TFTP_READ_FILE
+ *tftp_read_file );
+extern PXENV_EXIT_t undi_loader ( struct s_UNDI_LOADER *undi_loader );
+
+#endif /* PXE_H */
diff --git a/src/arch/x86/include/pxe_api.h b/src/arch/x86/include/pxe_api.h
new file mode 100644
index 00000000..3110d26d
--- /dev/null
+++ b/src/arch/x86/include/pxe_api.h
@@ -0,0 +1,1823 @@
+#ifndef PXE_API_H
+#define PXE_API_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + * As an alternative, at your option, you may use this file under the + * following terms, known as the "MIT license": + * + * Copyright (c) 2005-2009 Michael Brown + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/** @file + * + * Preboot eXecution Environment (PXE) API + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "pxe_types.h" + +/** @addtogroup pxe Preboot eXecution Environment (PXE) API + * @{ + */ + +/** @defgroup pxe_api_call PXE entry points + * + * PXE entry points and calling conventions + * + * @{ + */ + +/** The PXENV+ structure */ +struct s_PXENV { + /** Signature + * + * Contains the bytes 'P', 'X', 'E', 'N', 'V', '+'. + */ + UINT8_t Signature[6]; + /** PXE API version + * + * MSB is major version number, LSB is minor version number. + * If the API version number is 0x0201 or greater, the !PXE + * structure pointed to by #PXEPtr should be used instead of + * this data structure. + */ + UINT16_t Version; + UINT8_t Length; /**< Length of this structure */ + /** Checksum + * + * The byte checksum of this structure (using the length in + * #Length) must be zero. + */ + UINT8_t Checksum; + SEGOFF16_t RMEntry; /**< Real-mode PXENV+ entry point */ + /** Protected-mode PXENV+ entry point offset + * + * PXE 2.1 deprecates this entry point. For protected-mode + * API calls, use the !PXE structure pointed to by #PXEPtr + * instead. + */ + UINT32_t PMOffset; + /** Protected-mode PXENV+ entry point segment selector + * + * PXE 2.1 deprecates this entry point. For protected-mode + * API calls, use the !PXE structure pointed to by #PXEPtr + * instead. 
+ */ + SEGSEL_t PMSelector; + SEGSEL_t StackSeg; /**< Stack segment selector */ + UINT16_t StackSize; /**< Stack segment size */ + SEGSEL_t BC_CodeSeg; /**< Base-code code segment selector */ + UINT16_t BC_CodeSize; /**< Base-code code segment size */ + SEGSEL_t BC_DataSeg; /**< Base-code data segment selector */ + UINT16_t BC_DataSize; /**< Base-code data segment size */ + SEGSEL_t UNDIDataSeg; /**< UNDI data segment selector */ + UINT16_t UNDIDataSize; /**< UNDI data segment size */ + SEGSEL_t UNDICodeSeg; /**< UNDI code segment selector */ + UINT16_t UNDICodeSize; /**< UNDI code segment size */ + /** Address of the !PXE structure + * + * This field is present only if #Version is 0x0201 or + * greater. If present, it points to a struct s_PXE. + */ + SEGOFF16_t PXEPtr; +} __attribute__ (( packed )); + +typedef struct s_PXENV PXENV_t; + +/** The !PXE structure */ +struct s_PXE { + /** Signature + * + * Contains the bytes '!', 'P', 'X', 'E'. + */ + UINT8_t Signature[4]; + UINT8_t StructLength; /**< Length of this structure */ + /** Checksum + * + * The byte checksum of this structure (using the length in + * #StructLength) must be zero. + */ + UINT8_t StructCksum; + /** Revision of this structure + * + * For PXE version 2.1, this field must be zero. + */ + UINT8_t StructRev; + UINT8_t reserved_1; /**< Must be zero */ + /** Address of the UNDI ROM ID structure + * + * This is a pointer to a struct s_UNDI_ROM_ID. + */ + SEGOFF16_t UNDIROMID; + /** Address of the Base Code ROM ID structure + * + * This is a pointer to a struct s_BC_ROM_ID. + */ + SEGOFF16_t BaseROMID; + /** 16-bit !PXE entry point + * + * This is the entry point for either real mode, or protected + * mode with a 16-bit stack segment. + */ + SEGOFF16_t EntryPointSP; + /** 32-bit !PXE entry point + * + * This is the entry point for protected mode with a 32-bit + * stack segment. 
+ */ + SEGOFF16_t EntryPointESP; + /** Status call-out function + * + * @v 0 (if in a time-out loop) + * @v n Number of a received TFTP packet + * @ret 0 Continue operation + * @ret 1 Cancel operation + * + * This function will be called whenever the PXE stack is in + * protected mode, is waiting for an event (e.g. a DHCP reply) + * and wishes to allow the user to cancel the operation. + * Parameters are passed in register %ax; the return value + * must also be placed in register %ax. All other registers + * and flags @b must be preserved. + * + * In real mode, an internal function (that checks for a + * keypress) will be used. + * + * If this field is set to -1, no status call-out function + * will be used and consequently the user will not be allowed + * to interrupt operations. + * + * @note The PXE specification version 2.1 defines the + * StatusCallout field, mentions it 11 times, but nowhere + * defines what it actually does or how it gets called. + * Fortunately, the WfM specification version 1.1a deigns to + * inform us of such petty details. + */ + SEGOFF16_t StatusCallout; + UINT8_t reserved_2; /**< Must be zero */ + /** Number of segment descriptors + * + * If this number is greater than 7, the remaining descriptors + * follow immediately after #BC_CodeWrite. + */ + UINT8_t SegDescCnt; + /** First protected-mode selector + * + * This is the segment selector value for the first segment + * assigned to PXE. Protected-mode selectors must be + * consecutive, according to the PXE 2.1 specification, though + * no reason is given. Each #SEGDESC_t includes a field for + * the segment selector, so this information is entirely + * redundant. 
+ */ + SEGSEL_t FirstSelector; + /** Stack segment descriptor */ + SEGDESC_t Stack; + /** UNDI data segment descriptor */ + SEGDESC_t UNDIData; + /** UNDI code segment descriptor */ + SEGDESC_t UNDICode; + /** UNDI writable code segment descriptor */ + SEGDESC_t UNDICodeWrite; + /** Base-code data segment descriptor */ + SEGDESC_t BC_Data; + /** Base-code code segment descriptor */ + SEGDESC_t BC_Code; + /** Base-code writable code segment descriptor */ + SEGDESC_t BC_CodeWrite; +} __attribute__ (( packed )); + +typedef struct s_PXE PXE_t; + +/** @} */ /* pxe_api_call */ + +/** @defgroup pxe_preboot_api PXE Preboot API + * + * General high-level functions: #PXENV_UNLOAD_STACK, #PXENV_START_UNDI etc. + * + * @{ + */ + +/** @defgroup pxenv_unload_stack PXENV_UNLOAD_STACK + * + * UNLOAD BASE CODE STACK + * + * @{ + */ + +/** PXE API function code for pxenv_unload_stack() */ +#define PXENV_UNLOAD_STACK 0x0070 + +/** Parameter block for pxenv_unload_stack() */ +struct s_PXENV_UNLOAD_STACK { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT8_t reserved[10]; /**< Must be zero */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNLOAD_STACK PXENV_UNLOAD_STACK_t; + +/** @} */ /* pxenv_unload_stack */ + +/** @defgroup pxenv_get_cached_info PXENV_GET_CACHED_INFO + * + * GET CACHED INFO + * + * @{ + */ + +/** PXE API function code for pxenv_get_cached_info() */ +#define PXENV_GET_CACHED_INFO 0x0071 + +/** The client's DHCPDISCOVER packet */ +#define PXENV_PACKET_TYPE_DHCP_DISCOVER 1 + +/** The DHCP server's DHCPACK packet */ +#define PXENV_PACKET_TYPE_DHCP_ACK 2 + +/** The Boot Server's Discover Reply packet + * + * This packet contains DHCP option 60 set to "PXEClient", a valid + * boot file name, and may or may not contain MTFTP options. + */ +#define PXENV_PACKET_TYPE_CACHED_REPLY 3 + +/** Parameter block for pxenv_get_cached_info() */ +struct s_PXENV_GET_CACHED_INFO { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Packet type. 
+ * + * Valid values are #PXENV_PACKET_TYPE_DHCP_DISCOVER, + * #PXENV_PACKET_TYPE_DHCP_ACK or #PXENV_PACKET_TYPE_CACHED_REPLY + */ + UINT16_t PacketType; + UINT16_t BufferSize; /**< Buffer size */ + SEGOFF16_t Buffer; /**< Buffer address */ + UINT16_t BufferLimit; /**< Maximum buffer size */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_GET_CACHED_INFO PXENV_GET_CACHED_INFO_t; + +#define BOOTP_REQ 1 /**< A BOOTP request packet */ +#define BOOTP_REP 2 /**< A BOOTP reply packet */ + +/** DHCP broadcast flag + * + * Request a broadcast response (DHCPOFFER or DHCPACK) from the DHCP + * server. + */ +#define BOOTP_BCAST 0x8000 + +#define VM_RFC1048 0x63825363L /**< DHCP magic cookie */ + +/** Maximum length of DHCP options */ +#define BOOTP_DHCPVEND 1024 + +/** Format of buffer filled in by pxenv_get_cached_info() + * + * This somewhat convoluted data structure simply describes the layout + * of a DHCP packet. Refer to RFC2131 section 2 for a full + * description. + */ +struct bootph { + /** Message opcode. + * + * Valid values are #BOOTP_REQ and #BOOTP_REP. + */ + UINT8_t opcode; + /** NIC hardware type. + * + * Valid values are as for s_PXENV_UNDI_GET_INFORMATION::HwType. + */ + UINT8_t Hardware; + UINT8_t Hardlen; /**< MAC address length */ + /** Gateway hops + * + * Zero in packets sent by the client. May be non-zero in + * replies from the DHCP server, if the reply comes via a DHCP + * relay agent. + */ + UINT8_t Gatehops; + UINT32_t ident; /**< DHCP transaction id (xid) */ + /** Elapsed time + * + * Number of seconds since the client began the DHCP + * transaction. + */ + UINT16_t seconds; + /** Flags + * + * This is the bitwise-OR of any of the following values: + * #BOOTP_BCAST. + */ + UINT16_t Flags; + /** Client IP address + * + * Set only if the client already has an IP address. + */ + IP4_t cip; + /** Your IP address + * + * This is the IP address that the server assigns to the + * client. 
+ */ + IP4_t yip; + /** Server IP address + * + * This is the IP address of the BOOTP/DHCP server. + */ + IP4_t sip; + /** Gateway IP address + * + * This is the IP address of the BOOTP/DHCP relay agent, if + * any. It is @b not (necessarily) the address of the default + * gateway for routing purposes. + */ + IP4_t gip; + MAC_ADDR_t CAddr; /**< Client MAC address */ + UINT8_t Sname[64]; /**< Server host name */ + UINT8_t bootfile[128]; /**< Boot file name */ + /** DHCP options + * + * Don't ask. Just laugh. Then burn a copy of the PXE + * specification and send Intel an e-mail asking them if + * they've figured out what a "union" does in C yet. + */ + union bootph_vendor { + UINT8_t d[BOOTP_DHCPVEND]; /**< DHCP options */ + /** DHCP options */ + struct bootph_vendor_v { + /** DHCP magic cookie + * + * Should have the value #VM_RFC1048. + */ + UINT8_t magic[4]; + UINT32_t flags; /**< BOOTP flags/opcodes */ + /** "End of BOOTP vendor extensions" + * + * Abandon hope, all ye who consider the + * purpose of this field. + */ + UINT8_t pad[56]; + } v; + } vendor; +} __attribute__ (( packed )); + +typedef struct bootph BOOTPLAYER_t; + +/** @} */ /* pxenv_get_cached_info */ + +/** @defgroup pxenv_restart_tftp PXENV_RESTART_TFTP + * + * RESTART TFTP + * + * @{ + */ + +/** PXE API function code for pxenv_restart_tftp() */ +#define PXENV_RESTART_TFTP 0x0073 + +/** Parameter block for pxenv_restart_tftp() */ +struct s_PXENV_TFTP_READ_FILE; + +typedef struct s_PXENV_RESTART_TFTP PXENV_RESTART_TFTP_t; + +/** @} */ /* pxenv_restart_tftp */ + +/** @defgroup pxenv_start_undi PXENV_START_UNDI + * + * START UNDI + * + * @{ + */ + +/** PXE API function code for pxenv_start_undi() */ +#define PXENV_START_UNDI 0x0000 + +/** Parameter block for pxenv_start_undi() */ +struct s_PXENV_START_UNDI { + PXENV_STATUS_t Status; /**< PXE status code */ + /** %ax register as passed to the Option ROM initialisation routine. 
+ * + * For a PCI device, this should contain the bus:dev:fn value + * that uniquely identifies the PCI device in the system. For + * a non-PCI device, this field is not defined. + */ + UINT16_t AX; + /** %bx register as passed to the Option ROM initialisation routine. + * + * For an ISAPnP device, this should contain the Card Select + * Number assigned to the ISAPnP card. For non-ISAPnP + * devices, this should contain 0xffff. + */ + UINT16_t BX; + /** %dx register as passed to the Option ROM initialisation routine. + * + * For an ISAPnP device, this should contain the ISAPnP Read + * Port address as currently set in all ISAPnP cards. If + * there are no ISAPnP cards, this should contain 0xffff. (If + * this is a non-ISAPnP device, but there are ISAPnP cards in + * the system, this value is not well defined.) + */ + UINT16_t DX; + /** %di register as passed to the Option ROM initialisation routine. + * + * This contains the #OFF16_t portion of a struct #s_SEGOFF16 + * that points to the System BIOS Plug and Play Installation + * Check Structure. (Refer to section 4.4 of the Plug and + * Play BIOS specification for a description of this + * structure.) + * + * @note The PXE specification defines the type of this field + * as #UINT16_t. For x86, #OFF16_t and #UINT16_t are + * equivalent anyway; for other architectures #OFF16_t makes + * more sense. + */ + OFF16_t DI; + /** %es register as passed to the Option ROM initialisation routine. + * + * This contains the #SEGSEL_t portion of a struct #s_SEGOFF16 + * that points to the System BIOS Plug and Play Installation + * Check Structure. (Refer to section 4.4 of the Plug and + * Play BIOS specification for a description of this + * structure.) + * + * @note The PXE specification defines the type of this field + * as #UINT16_t. For x86, #SEGSEL_t and #UINT16_t are + * equivalent anyway; for other architectures #SEGSEL_t makes + * more sense. 
+ */ + SEGSEL_t ES; +} __attribute__ (( packed )); + +typedef struct s_PXENV_START_UNDI PXENV_START_UNDI_t; + +/** @} */ /* pxenv_start_undi */ + +/** @defgroup pxenv_stop_undi PXENV_STOP_UNDI + * + * STOP UNDI + * + * @{ + */ + +/** PXE API function code for pxenv_stop_undi() */ +#define PXENV_STOP_UNDI 0x0015 + +/** Parameter block for pxenv_stop_undi() */ +struct s_PXENV_STOP_UNDI { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_STOP_UNDI PXENV_STOP_UNDI_t; + +/** @} */ /* pxenv_stop_undi */ + +/** @defgroup pxenv_start_base PXENV_START_BASE + * + * START BASE + * + * @{ + */ + +/** PXE API function code for pxenv_start_base() */ +#define PXENV_START_BASE 0x0075 + +/** Parameter block for pxenv_start_base() */ +struct s_PXENV_START_BASE { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_START_BASE PXENV_START_BASE_t; + +/** @} */ /* pxenv_start_base */ + +/** @defgroup pxenv_stop_base PXENV_STOP_BASE + * + * STOP BASE + * + * @{ + */ + +/** PXE API function code for pxenv_stop_base() */ +#define PXENV_STOP_BASE 0x0076 + +/** Parameter block for pxenv_stop_base() */ +struct s_PXENV_STOP_BASE { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_STOP_BASE PXENV_STOP_BASE_t; + +/** @} */ /* pxenv_stop_base */ + +/** @} */ /* pxe_preboot_api */ + +/** @defgroup pxe_tftp_api PXE TFTP API + * + * Download files via TFTP or MTFTP + * + * @{ + */ + +/** @defgroup pxenv_tftp_open PXENV_TFTP_OPEN + * + * TFTP OPEN + * + * @{ + */ + +/** PXE API function code for pxenv_tftp_open() */ +#define PXENV_TFTP_OPEN 0x0020 + +/** Parameter block for pxenv_tftp_open() */ +struct s_PXENV_TFTP_OPEN { + PXENV_STATUS_t Status; /**< PXE status code */ + IP4_t ServerIPAddress; /**< TFTP server IP address */ + IP4_t GatewayIPAddress; /**< Relay agent IP address */ + UINT8_t FileName[128]; /**< File name */ + UDP_PORT_t 
TFTPPort; /**< TFTP server UDP port */ + /** Requested size of TFTP packets + * + * This is the TFTP "blksize" option. This must be at least + * 512, since servers that do not support TFTP options cannot + * negotiate blocksizes smaller than this. + */ + UINT16_t PacketSize; +} __attribute__ (( packed )); + +typedef struct s_PXENV_TFTP_OPEN PXENV_TFTP_OPEN_t; + +/** @} */ /* pxenv_tftp_open */ + +/** @defgroup pxenv_tftp_close PXENV_TFTP_CLOSE + * + * TFTP CLOSE + * + * @{ + */ + +/** PXE API function code for pxenv_tftp_close() */ +#define PXENV_TFTP_CLOSE 0x0021 + +/** Parameter block for pxenv_tftp_close() */ +struct s_PXENV_TFTP_CLOSE { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_TFTP_CLOSE PXENV_TFTP_CLOSE_t; + +/** @} */ /* pxenv_tftp_close */ + +/** @defgroup pxenv_tftp_read PXENV_TFTP_READ + * + * TFTP READ + * + * @{ + */ + +/** PXE API function code for pxenv_tftp_read() */ +#define PXENV_TFTP_READ 0x0022 + +/** Parameter block for pxenv_tftp_read() */ +struct s_PXENV_TFTP_READ { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t PacketNumber; /**< TFTP packet number */ + UINT16_t BufferSize; /**< Size of data buffer */ + SEGOFF16_t Buffer; /**< Address of data buffer */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_TFTP_READ PXENV_TFTP_READ_t; + +/** @} */ /* pxenv_tftp_read */ + +/** @defgroup pxenv_tftp_read_file PXENV_TFTP_READ_FILE + * + * TFTP/MTFTP READ FILE + * + * @{ + */ + +/** PXE API function code for pxenv_tftp_read_file() */ +#define PXENV_TFTP_READ_FILE 0x0023 + +/** Parameter block for pxenv_tftp_read_file() */ +struct s_PXENV_TFTP_READ_FILE { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT8_t FileName[128]; /**< File name */ + UINT32_t BufferSize; /**< Size of data buffer */ + ADDR32_t Buffer; /**< Address of data buffer */ + IP4_t ServerIPAddress; /**< TFTP server IP address */ + IP4_t GatewayIPAddress; /**< Relay agent IP address */ + /** File 
multicast IP address */ + IP4_t McastIPAddress; + /** Client multicast listening port */ + UDP_PORT_t TFTPClntPort; + /** Server multicast listening port */ + UDP_PORT_t TFTPSrvPort; + /** TFTP open timeout. + * + * This is the timeout for receiving the first DATA or ACK + * packets during the MTFTP Listen phase. + */ + UINT16_t TFTPOpenTimeOut; + /** TFTP reopen timeout. + * + * This is the timeout for receiving an ACK packet while in + * the MTFTP Listen phase (when at least one ACK packet has + * already been seen). + */ + UINT16_t TFTPReopenDelay; +} __attribute__ (( packed )); + +typedef struct s_PXENV_TFTP_READ_FILE PXENV_TFTP_READ_FILE_t; + +/** @} */ /* pxenv_tftp_read_file */ + +/** @defgroup pxenv_tftp_get_fsize PXENV_TFTP_GET_FSIZE + * + * TFTP GET FILE SIZE + * + * @{ + */ + +/** PXE API function code for pxenv_tftp_get_fsize() */ +#define PXENV_TFTP_GET_FSIZE 0x0025 + +/** Parameter block for pxenv_tftp_get_fsize() */ +struct s_PXENV_TFTP_GET_FSIZE { + PXENV_STATUS_t Status; /**< PXE status code */ + IP4_t ServerIPAddress; /**< TFTP server IP address */ + IP4_t GatewayIPAddress; /**< Relay agent IP address */ + UINT8_t FileName[128]; /**< File name */ + UINT32_t FileSize; /**< Size of the file */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_TFTP_GET_FSIZE PXENV_TFTP_GET_FSIZE_t; + +/** @} */ /* pxenv_tftp_get_fsize */ + +/** @} */ /* pxe_tftp_api */ + +/** @defgroup pxe_udp_api PXE UDP API + * + * Transmit and receive UDP packets + * + * @{ + */ + +/** @defgroup pxenv_udp_open PXENV_UDP_OPEN + * + * UDP OPEN + * + * @{ + */ + +/** PXE API function code for pxenv_udp_open() */ +#define PXENV_UDP_OPEN 0x0030 + +/** Parameter block for pxenv_udp_open() */ +struct s_PXENV_UDP_OPEN { + PXENV_STATUS_t Status; /**< PXE status code */ + IP4_t src_ip; /**< IP address of this station */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UDP_OPEN PXENV_UDP_OPEN_t; + +/** @} */ /* pxenv_udp_open */ + +/** @defgroup pxenv_udp_close PXENV_UDP_CLOSE 
+ * + * UDP CLOSE + * + * @{ + */ + +/** PXE API function code for pxenv_udp_close() */ +#define PXENV_UDP_CLOSE 0x0031 + +/** Parameter block for pxenv_udp_close() */ +struct s_PXENV_UDP_CLOSE { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UDP_CLOSE PXENV_UDP_CLOSE_t; + +/** @} */ /* pxenv_udp_close */ + +/** @defgroup pxenv_udp_write PXENV_UDP_WRITE + * + * UDP WRITE + * + * @{ + */ + +/** PXE API function code for pxenv_udp_write() */ +#define PXENV_UDP_WRITE 0x0033 + +/** Parameter block for pxenv_udp_write() */ +struct s_PXENV_UDP_WRITE { + PXENV_STATUS_t Status; /**< PXE status code */ + IP4_t ip; /**< Destination IP address */ + IP4_t gw; /**< Relay agent IP address */ + UDP_PORT_t src_port; /**< Source UDP port */ + UDP_PORT_t dst_port; /**< Destination UDP port */ + UINT16_t buffer_size; /**< UDP payload buffer size */ + SEGOFF16_t buffer; /**< UDP payload buffer address */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UDP_WRITE PXENV_UDP_WRITE_t; + +/** @} */ /* pxenv_udp_write */ + +/** @defgroup pxenv_udp_read PXENV_UDP_READ + * + * UDP READ + * + * @{ + */ + +/** PXE API function code for pxenv_udp_read() */ +#define PXENV_UDP_READ 0x0032 + +/** Parameter block for pxenv_udp_read() */ +struct s_PXENV_UDP_READ { + PXENV_STATUS_t Status; /**< PXE status code */ + IP4_t src_ip; /**< Source IP address */ + IP4_t dest_ip; /**< Destination IP address */ + UDP_PORT_t s_port; /**< Source UDP port */ + UDP_PORT_t d_port; /**< Destination UDP port */ + UINT16_t buffer_size; /**< UDP payload buffer size */ + SEGOFF16_t buffer; /**< UDP payload buffer address */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UDP_READ PXENV_UDP_READ_t; + +/** @} */ /* pxenv_udp_read */ + +/** @} */ /* pxe_udp_api */ + +/** @defgroup pxe_undi_api PXE UNDI API + * + * Direct control of the network interface card + * + * @{ + */ + +/** @defgroup pxenv_undi_startup PXENV_UNDI_STARTUP + * + * UNDI STARTUP 
+ * + * @{ + */ + +/** PXE API function code for pxenv_undi_startup() */ +#define PXENV_UNDI_STARTUP 0x0001 + +#define PXENV_BUS_ISA 0 /**< ISA bus type */ +#define PXENV_BUS_EISA 1 /**< EISA bus type */ +#define PXENV_BUS_MCA 2 /**< MCA bus type */ +#define PXENV_BUS_PCI 3 /**< PCI bus type */ +#define PXENV_BUS_VESA 4 /**< VESA bus type */ +#define PXENV_BUS_PCMCIA 5 /**< PCMCIA bus type */ + +/** Parameter block for pxenv_undi_startup() */ +struct s_PXENV_UNDI_STARTUP { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_STARTUP PXENV_UNDI_STARTUP_t; + +/** @} */ /* pxenv_undi_startup */ + +/** @defgroup pxenv_undi_cleanup PXENV_UNDI_CLEANUP + * + * UNDI CLEANUP + * + * @{ + */ + +/** PXE API function code for pxenv_undi_cleanup() */ +#define PXENV_UNDI_CLEANUP 0x0002 + +/** Parameter block for pxenv_undi_cleanup() */ +struct s_PXENV_UNDI_CLEANUP { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_CLEANUP PXENV_UNDI_CLEANUP_t; + +/** @} */ /* pxenv_undi_cleanup */ + +/** @defgroup pxenv_undi_initialize PXENV_UNDI_INITIALIZE + * + * UNDI INITIALIZE + * + * @{ + */ + +/** PXE API function code for pxenv_undi_initialize() */ +#define PXENV_UNDI_INITIALIZE 0x0003 + +/** Parameter block for pxenv_undi_initialize() */ +struct s_PXENV_UNDI_INITIALIZE { + PXENV_STATUS_t Status; /**< PXE status code */ + /** NDIS 2.0 configuration information, or NULL + * + * This is a pointer to the data structure returned by the + * NDIS 2.0 GetProtocolManagerInfo() API call. The data + * structure is documented, in a rather haphazard way, in + * section 4-17 of the NDIS 2.0 specification. 
+ */ + ADDR32_t ProtocolIni; + UINT8_t reserved[8]; /**< Must be zero */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_INITIALIZE PXENV_UNDI_INITIALIZE_t; + +/** @} */ /* pxenv_undi_initialize */ + +/** @defgroup pxenv_undi_reset_adapter PXENV_UNDI_RESET_ADAPTER + * + * UNDI RESET ADAPTER + * + * @{ + */ + +/** PXE API function code for pxenv_undi_reset_adapter() */ +#define PXENV_UNDI_RESET_ADAPTER 0x0004 + +/** Maximum number of multicast MAC addresses */ +#define MAXNUM_MCADDR 8 + +/** List of multicast MAC addresses */ +struct s_PXENV_UNDI_MCAST_ADDRESS { + /** Number of multicast MAC addresses */ + UINT16_t MCastAddrCount; + /** List of up to #MAXNUM_MCADDR multicast MAC addresses */ + MAC_ADDR_t McastAddr[MAXNUM_MCADDR]; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_MCAST_ADDRESS PXENV_UNDI_MCAST_ADDRESS_t; + +/** Parameter block for pxenv_undi_reset_adapter() */ +struct s_PXENV_UNDI_RESET { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Multicast MAC addresses */ + struct s_PXENV_UNDI_MCAST_ADDRESS R_Mcast_Buf; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_RESET PXENV_UNDI_RESET_t; + +/** @} */ /* pxenv_undi_reset_adapter */ + +/** @defgroup pxenv_undi_shutdown PXENV_UNDI_SHUTDOWN + * + * UNDI SHUTDOWN + * + * @{ + */ + +/** PXE API function code for pxenv_undi_shutdown() */ +#define PXENV_UNDI_SHUTDOWN 0x0005 + +/** Parameter block for pxenv_undi_shutdown() */ +struct s_PXENV_UNDI_SHUTDOWN { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_SHUTDOWN PXENV_UNDI_SHUTDOWN_t; + +/** @} */ /* pxenv_undi_shutdown */ + +/** @defgroup pxenv_undi_open PXENV_UNDI_OPEN + * + * UNDI OPEN + * + * @{ + */ + +/** PXE API function code for pxenv_undi_open() */ +#define PXENV_UNDI_OPEN 0x0006 + +/** Accept "directed" packets + * + * These are packets addressed to either this adapter's MAC address or + * to any of the configured multicast MAC addresses (see + 
* #s_PXENV_UNDI_MCAST_ADDRESS). + */ +#define FLTR_DIRECTED 0x0001 +/** Accept broadcast packets */ +#define FLTR_BRDCST 0x0002 +/** Accept all packets; listen in promiscuous mode */ +#define FLTR_PRMSCS 0x0004 +/** Accept source-routed packets */ +#define FLTR_SRC_RTG 0x0008 + +/** Parameter block for pxenv_undi_open() */ +struct s_PXENV_UNDI_OPEN { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Open flags as defined in NDIS 2.0 + * + * This is the OpenOptions field as passed to the NDIS 2.0 + * OpenAdapter() API call. It is defined to be "adapter + * specific", though 0 is guaranteed to be a valid value. + */ + UINT16_t OpenFlag; + /** Receive packet filter + * + * This is the bitwise-OR of any of the following flags: + * #FLTR_DIRECTED, #FLTR_BRDCST, #FLTR_PRMSCS and + * #FLTR_SRC_RTG. + */ + UINT16_t PktFilter; + /** Multicast MAC addresses */ + struct s_PXENV_UNDI_MCAST_ADDRESS R_Mcast_Buf; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_OPEN PXENV_UNDI_OPEN_t; + +/** @} */ /* pxenv_undi_open */ + +/** @defgroup pxenv_undi_close PXENV_UNDI_CLOSE + * + * UNDI CLOSE + * + * @{ + */ + +/** PXE API function code for pxenv_undi_close() */ +#define PXENV_UNDI_CLOSE 0x0007 + +/** Parameter block for pxenv_undi_close() */ +struct s_PXENV_UNDI_CLOSE { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_CLOSE PXENV_UNDI_CLOSE_t; + +/** @} */ /* pxenv_undi_close */ + +/** @defgroup pxenv_undi_transmit PXENV_UNDI_TRANSMIT + * + * UNDI TRANSMIT PACKET + * + * @{ + */ + +/** PXE API function code for pxenv_undi_transmit() */ +#define PXENV_UNDI_TRANSMIT 0x0008 + +#define P_UNKNOWN 0 /**< Media header already filled in */ +#define P_IP 1 /**< IP protocol */ +#define P_ARP 2 /**< ARP protocol */ +#define P_RARP 3 /**< RARP protocol */ +#define P_OTHER 4 /**< Other protocol */ + +#define XMT_DESTADDR 0x0000 /**< Unicast packet */ +#define XMT_BROADCAST 0x0001 /**< Broadcast packet */ + +/** 
Maximum number of data blocks in a transmit buffer descriptor */ +#define MAX_DATA_BLKS 8 + +/** A transmit buffer descriptor, as pointed to by s_PXENV_UNDI_TRANSMIT::TBD + */ +struct s_PXENV_UNDI_TBD { + UINT16_t ImmedLength; /**< Length of the transmit buffer */ + SEGOFF16_t Xmit; /**< Address of the transmit buffer */ + UINT16_t DataBlkCount; + /** Array of up to #MAX_DATA_BLKS additional transmit buffers */ + struct DataBlk { + /** Always 1 + * + * A value of 0 would indicate that #TDDataPtr were an + * #ADDR32_t rather than a #SEGOFF16_t. The PXE + * specification version 2.1 explicitly states that + * this is not supported; #TDDataPtr will always be a + * #SEGOFF16_t. + */ + UINT8_t TDPtrType; + UINT8_t TDRsvdByte; /**< Must be zero */ + UINT16_t TDDataLen; /**< Length of this transmit buffer */ + SEGOFF16_t TDDataPtr; /**< Address of this transmit buffer */ + } DataBlock[MAX_DATA_BLKS]; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_TBD PXENV_UNDI_TBD_t; + +/** Parameter block for pxenv_undi_transmit() */ +struct s_PXENV_UNDI_TRANSMIT { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Protocol + * + * Valid values are #P_UNKNOWN, #P_IP, #P_ARP or #P_RARP. If + * the caller has already filled in the media header, this + * field must be set to #P_UNKNOWN. + */ + UINT8_t Protocol; + /** Unicast/broadcast flag + * + * Valid values are #XMT_DESTADDR or #XMT_BROADCAST. + */ + UINT8_t XmitFlag; + SEGOFF16_t DestAddr; /**< Destination MAC address */ + /** Address of the Transmit Buffer Descriptor + * + * This is a pointer to a struct s_PXENV_UNDI_TBD. 
+ */ + SEGOFF16_t TBD; + UINT32_t Reserved[2]; /**< Must be zero */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_TRANSMIT PXENV_UNDI_TRANSMIT_t; + +/** @} */ /* pxenv_undi_transmit */ + +/** @defgroup pxenv_undi_set_mcast_address PXENV_UNDI_SET_MCAST_ADDRESS + * + * UNDI SET MULTICAST ADDRESS + * + * @{ + */ + +/** PXE API function code for pxenv_undi_set_mcast_address() */ +#define PXENV_UNDI_SET_MCAST_ADDRESS 0x0009 + +/** Parameter block for pxenv_undi_set_mcast_address() */ +struct s_PXENV_UNDI_SET_MCAST_ADDRESS { + PXENV_STATUS_t Status; /**< PXE status code */ + /** List of multicast addresses */ + struct s_PXENV_UNDI_MCAST_ADDRESS R_Mcast_Buf; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_SET_MCAST_ADDRESS PXENV_UNDI_SET_MCAST_ADDRESS_t; + +/** @} */ /* pxenv_undi_set_mcast_address */ + +/** @defgroup pxenv_undi_set_station_address PXENV_UNDI_SET_STATION_ADDRESS + * + * UNDI SET STATION ADDRESS + * + * @{ + */ + +/** PXE API function code for pxenv_undi_set_station_address() */ +#define PXENV_UNDI_SET_STATION_ADDRESS 0x000a + +/** Parameter block for pxenv_undi_set_station_address() */ +struct s_PXENV_UNDI_SET_STATION_ADDRESS { + PXENV_STATUS_t Status; /**< PXE status code */ + MAC_ADDR_t StationAddress; /**< Station MAC address */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_SET_STATION_ADDRESS PXENV_UNDI_SET_STATION_ADDRESS_t; + +/** @} */ /* pxenv_undi_set_station_address */ + +/** @defgroup pxenv_undi_set_packet_filter PXENV_UNDI_SET_PACKET_FILTER + * + * UNDI SET PACKET FILTER + * + * @{ + */ + +/** PXE API function code for pxenv_undi_set_packet_filter() */ +#define PXENV_UNDI_SET_PACKET_FILTER 0x000b + +/** Parameter block for pxenv_undi_set_packet_filter() */ +struct s_PXENV_UNDI_SET_PACKET_FILTER { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Receive packet filter + * + * This field takes the same values as + * s_PXENV_UNDI_OPEN::PktFilter. 
+ * + * @note Yes, this field is a different size to + * s_PXENV_UNDI_OPEN::PktFilter. Blame "the managers at Intel + * who apparently let a consultant come up with the spec + * without any kind of adult supervision" (quote from hpa). + */ + UINT8_t filter; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_SET_PACKET_FILTER PXENV_UNDI_SET_PACKET_FILTER_t; + +/** @} */ /* pxenv_undi_set_packet_filter */ + +/** @defgroup pxenv_undi_get_information PXENV_UNDI_GET_INFORMATION + * + * UNDI GET INFORMATION + * + * @{ + */ + +/** PXE API function code for pxenv_undi_get_information() */ +#define PXENV_UNDI_GET_INFORMATION 0x000c + +#define ETHER_TYPE 1 /**< Ethernet (10Mb) */ +#define EXP_ETHER_TYPE 2 /**< Experimental Ethernet (3Mb) */ +#define AX25_TYPE 3 /**< Amateur Radio AX.25 */ +#define TOKEN_RING_TYPE 4 /**< Proteon ProNET Token Ring */ +#define CHAOS_TYPE 5 /**< Chaos */ +#define IEEE_TYPE 6 /**< IEEE 802 Networks */ +#define ARCNET_TYPE 7 /**< ARCNET */ + +/** Parameter block for pxenv_undi_get_information() */ +struct s_PXENV_UNDI_GET_INFORMATION { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t BaseIo; /**< I/O base address */ + UINT16_t IntNumber; /**< IRQ number */ + UINT16_t MaxTranUnit; /**< Adapter MTU */ + /** Hardware type + * + * Valid values are defined in RFC1010 ("Assigned numbers"), + * and are #ETHER_TYPE, #EXP_ETHER_TYPE, #AX25_TYPE, + * #TOKEN_RING_TYPE, #CHAOS_TYPE, #IEEE_TYPE or #ARCNET_TYPE. 
+ */ + UINT16_t HwType; + UINT16_t HwAddrLen; /**< MAC address length */ + MAC_ADDR_t CurrentNodeAddress; /**< Current MAC address */ + MAC_ADDR_t PermNodeAddress; /**< Permanent (EEPROM) MAC address */ + SEGSEL_t ROMAddress; /**< Real-mode ROM segment address */ + UINT16_t RxBufCt; /**< Receive queue length */ + UINT16_t TxBufCt; /**< Transmit queue length */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_GET_INFORMATION PXENV_UNDI_GET_INFORMATION_t; + +/** @} */ /* pxenv_undi_get_information */ + +/** @defgroup pxenv_undi_get_statistics PXENV_UNDI_GET_STATISTICS + * + * UNDI GET STATISTICS + * + * @{ + */ + +/** PXE API function code for pxenv_undi_get_statistics() */ +#define PXENV_UNDI_GET_STATISTICS 0x000d + +/** Parameter block for pxenv_undi_get_statistics() */ +struct s_PXENV_UNDI_GET_STATISTICS { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT32_t XmtGoodFrames; /**< Successful transmission count */ + UINT32_t RcvGoodFrames; /**< Successful reception count */ + UINT32_t RcvCRCErrors; /**< Receive CRC error count */ + UINT32_t RcvResourceErrors; /**< Receive queue overflow count */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_GET_STATISTICS PXENV_UNDI_GET_STATISTICS_t; + +/** @} */ /* pxenv_undi_get_statistics */ + +/** @defgroup pxenv_undi_clear_statistics PXENV_UNDI_CLEAR_STATISTICS + * + * UNDI CLEAR STATISTICS + * + * @{ + */ + +/** PXE API function code for pxenv_undi_clear_statistics() */ +#define PXENV_UNDI_CLEAR_STATISTICS 0x000e + +/** Parameter block for pxenv_undi_clear_statistics() */ +struct s_PXENV_UNDI_CLEAR_STATISTICS { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_CLEAR_STATISTICS PXENV_UNDI_CLEAR_STATISTICS_t; + +/** @} */ /* pxenv_undi_clear_statistics */ + +/** @defgroup pxenv_undi_initiate_diags PXENV_UNDI_INITIATE_DIAGS + * + * UNDI INITIATE DIAGS + * + * @{ + */ + +/** PXE API function code for pxenv_undi_initiate_diags() */ 
+#define PXENV_UNDI_INITIATE_DIAGS 0x000f + +/** Parameter block for pxenv_undi_initiate_diags() */ +struct s_PXENV_UNDI_INITIATE_DIAGS { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_INITIATE_DIAGS PXENV_UNDI_INITIATE_DIAGS_t; + +/** @} */ /* pxenv_undi_initiate_diags */ + +/** @defgroup pxenv_undi_force_interrupt PXENV_UNDI_FORCE_INTERRUPT + * + * UNDI FORCE INTERRUPT + * + * @{ + */ + +/** PXE API function code for pxenv_undi_force_interrupt() */ +#define PXENV_UNDI_FORCE_INTERRUPT 0x0010 + +/** Parameter block for pxenv_undi_force_interrupt() */ +struct s_PXENV_UNDI_FORCE_INTERRUPT { + PXENV_STATUS_t Status; /**< PXE status code */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_FORCE_INTERRUPT PXENV_UNDI_FORCE_INTERRUPT_t; + +/** @} */ /* pxenv_undi_force_interrupt */ + +/** @defgroup pxenv_undi_get_mcast_address PXENV_UNDI_GET_MCAST_ADDRESS + * + * UNDI GET MULTICAST ADDRESS + * + * @{ + */ + +/** PXE API function code for pxenv_undi_get_mcast_address() */ +#define PXENV_UNDI_GET_MCAST_ADDRESS 0x0011 + +/** Parameter block for pxenv_undi_get_mcast_address() */ +struct s_PXENV_UNDI_GET_MCAST_ADDRESS { + PXENV_STATUS_t Status; /**< PXE status code */ + IP4_t InetAddr; /**< Multicast IP address */ + MAC_ADDR_t MediaAddr; /**< Multicast MAC address */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_GET_MCAST_ADDRESS PXENV_UNDI_GET_MCAST_ADDRESS_t; + +/** @} */ /* pxenv_undi_get_mcast_address */ + +/** @defgroup pxenv_undi_get_nic_type PXENV_UNDI_GET_NIC_TYPE + * + * UNDI GET NIC TYPE + * + * @{ + */ + +/** PXE API function code for pxenv_undi_get_nic_type() */ +#define PXENV_UNDI_GET_NIC_TYPE 0x0012 + +#define PCI_NIC 2 /**< PCI network card */ +#define PnP_NIC 3 /**< ISAPnP network card */ +#define CardBus_NIC 4 /**< CardBus network card */ + +/** Information for a PCI or equivalent NIC */ +struct pci_nic_info { + UINT16_t Vendor_ID; /**< PCI vendor ID */ + UINT16_t 
Dev_ID; /**< PCI device ID */ + UINT8_t Base_Class; /**< PCI base class */ + UINT8_t Sub_Class; /**< PCI sub class */ + UINT8_t Prog_Intf; /**< PCI programming interface */ + UINT8_t Rev; /**< PCI revision */ + UINT16_t BusDevFunc; /**< PCI bus:dev:fn address */ + UINT16_t SubVendor_ID; /**< PCI subvendor ID */ + UINT16_t SubDevice_ID; /**< PCI subdevice ID */ +} __attribute__ (( packed )); + +/** Information for an ISAPnP or equivalent NIC */ +struct pnp_nic_info { + UINT32_t EISA_Dev_ID; /**< EISA device ID */ + UINT8_t Base_Class; /**< Base class */ + UINT8_t Sub_Class; /**< Sub class */ + UINT8_t Prog_Intf; /**< Programming interface */ + /** Card Select Number assigned to card */ + UINT16_t CardSelNum; +} __attribute__ (( packed )); + +/** Parameter block for pxenv_undi_get_nic_type() */ +struct s_PXENV_UNDI_GET_NIC_TYPE { + PXENV_STATUS_t Status; /**< PXE status code */ + /** NIC type + * + * Valid values are #PCI_NIC, #PnP_NIC or #CardBus_NIC. + */ + UINT8_t NicType; + /** NIC information */ + union nic_type_info { + /** NIC information (if #NicType==#PCI_NIC) */ + struct pci_nic_info pci; + /** NIC information (if #NicType==#CardBus_NIC) */ + struct pci_nic_info cardbus; + /** NIC information (if #NicType==#PnP_NIC) */ + struct pnp_nic_info pnp; + } info; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_GET_NIC_TYPE PXENV_UNDI_GET_NIC_TYPE_t; + +/** @} */ /* pxenv_undi_get_nic_type */ + +/** @defgroup pxenv_undi_get_iface_info PXENV_UNDI_GET_IFACE_INFO + * + * UNDI GET IFACE INFO + * + * @{ + */ + +/** PXE API function code for pxenv_undi_get_iface_info() */ +#define PXENV_UNDI_GET_IFACE_INFO 0x0013 + +/** Broadcast supported */ +#define SUPPORTED_BROADCAST 0x0001 +/** Multicast supported */ +#define SUPPORTED_MULTICAST 0x0002 +/** Functional/group addressing supported */ +#define SUPPORTED_GROUP 0x0004 +/** Promiscuous mode supported */ +#define SUPPORTED_PROMISCUOUS 0x0008 +/** Software settable station address */ +#define 
SUPPORTED_SET_STATION_ADDRESS 0x0010 +/** InitiateDiagnostics supported */ +#define SUPPORTED_DIAGNOSTICS 0x0040 +/** Reset MAC supported */ +#define SUPPORTED_RESET 0x0400 +/** Open / Close Adapter supported */ +#define SUPPORTED_OPEN_CLOSE 0x0800 +/** Interrupt Request supported */ +#define SUPPORTED_IRQ 0x1000 + +/** Parameter block for pxenv_undi_get_iface_info() */ +struct s_PXENV_UNDI_GET_IFACE_INFO { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Interface type + * + * This is defined in the NDIS 2.0 specification to be one of + * the strings "802.3", "802.4", "802.5", "802.6", "DIX", + * "DIX+802.3", "APPLETALK", "ARCNET", "FDDI", "SDLC", "BSC", + * "HDLC", or "ISDN". + * + * "Normal" Ethernet, for various historical reasons, is + * "DIX+802.3". + */ + UINT8_t IfaceType[16]; + UINT32_t LinkSpeed; /**< Link speed, in bits per second */ + /** Service flags + * + * These are the "service flags" defined in the "MAC + * Service-Specific Characteristics" table in the NDIS 2.0 + * specification. Almost all of them are irrelevant to PXE. 
+ */ + UINT32_t ServiceFlags; + UINT32_t Reserved[4]; /**< Must be zero */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_GET_IFACE_INFO PXENV_UNDI_GET_IFACE_INFO_t; + +/** @} */ /* pxenv_undi_get_iface_info */ + +/** @defgroup pxenv_undi_get_state PXENV_UNDI_GET_STATE + * + * UNDI GET STATE + * + * @{ + */ + +/** PXE API function code for pxenv_undi_get_state() */ +#define PXENV_UNDI_GET_STATE 0x0015 + +/** pxenv_start_undi() has been called */ +#define PXE_UNDI_GET_STATE_STARTED 1 +/** pxenv_undi_initialize() has been called */ +#define PXE_UNDI_GET_STATE_INITIALIZED 2 +/** pxenv_undi_open() has been called */ +#define PXE_UNDI_GET_STATE_OPENED 3 + +/** Parameter block for pxenv_undi_get_state() */ +struct s_PXENV_UNDI_GET_STATE { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Current state of the UNDI driver + * + * Valid values are #PXE_UNDI_GET_STATE_STARTED, + * #PXE_UNDI_GET_STATE_INITIALIZED or + * #PXE_UNDI_GET_STATE_OPENED. + */ + UINT8_t UNDIstate; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_GET_STATE PXENV_UNDI_GET_STATE_t; + +/** @} */ /* pxenv_undi_get_state */ + +/** @defgroup pxenv_undi_isr PXENV_UNDI_ISR + * + * UNDI ISR + * + * @{ + */ + +/** PXE API function code for pxenv_undi_isr() */ +#define PXENV_UNDI_ISR 0x0014 + +/** Determine whether or not this is our interrupt */ +#define PXENV_UNDI_ISR_IN_START 1 +/** Start processing interrupt */ +#define PXENV_UNDI_ISR_IN_PROCESS 2 +/** Continue processing interrupt */ +#define PXENV_UNDI_ISR_IN_GET_NEXT 3 +/** This interrupt was ours */ +#define PXENV_UNDI_ISR_OUT_OURS 0 +/** This interrupt was not ours */ +#define PXENV_UNDI_ISR_OUT_NOT_OURS 1 +/** Finished processing interrupt */ +#define PXENV_UNDI_ISR_OUT_DONE 0 +/** A packet transmission has completed */ +#define PXENV_UNDI_ISR_OUT_TRANSMIT 2 +/** A packet has been received */ +#define PXENV_UNDI_ISR_OUT_RECEIVE 3 +/** We are already in the middle of processing an interrupt */ +#define 
PXENV_UNDI_ISR_OUT_BUSY 4 + +/** Unicast packet (or packet captured in promiscuous mode) */ +#define P_DIRECTED 0 +/** Broadcast packet */ +#define P_BROADCAST 1 +/** Multicast packet */ +#define P_MULTICAST 2 + +/** Parameter block for pxenv_undi_isr() */ +struct s_PXENV_UNDI_ISR { + PXENV_STATUS_t Status; /**< PXE status code */ + /** Function flag + * + * Valid values are #PXENV_UNDI_ISR_IN_START, + * #PXENV_UNDI_ISR_IN_PROCESS, #PXENV_UNDI_ISR_IN_GET_NEXT, + * #PXENV_UNDI_ISR_OUT_OURS, #PXENV_UNDI_ISR_OUT_NOT_OURS, + * #PXENV_UNDI_ISR_OUT_DONE, #PXENV_UNDI_ISR_OUT_TRANSMIT, + * #PXENV_UNDI_ISR_OUT_RECEIVE or #PXENV_UNDI_ISR_OUT_BUSY. + */ + UINT16_t FuncFlag; + UINT16_t BufferLength; /**< Data buffer length */ + UINT16_t FrameLength; /**< Total frame length */ + UINT16_t FrameHeaderLength; /**< Frame header length */ + SEGOFF16_t Frame; /**< Data buffer address */ + /** Protocol type + * + * Valid values are #P_IP, #P_ARP, #P_RARP or #P_OTHER. + */ + UINT8_t ProtType; + /** Packet type + * + * Valid values are #P_DIRECTED, #P_BROADCAST or #P_MULTICAST. 
+ */ + UINT8_t PktType; +} __attribute__ (( packed )); + +typedef struct s_PXENV_UNDI_ISR PXENV_UNDI_ISR_t; + +/** @} */ /* pxenv_undi_isr */ + +/** @} */ /* pxe_undi_api */ + +/** @defgroup pxe_file_api PXE FILE API + * + * POSIX-like file operations + * + * @{ + */ + +/** Minimum possible opcode used within PXE FILE API */ +#define PXENV_FILE_MIN 0x00e0 + +/** Maximum possible opcode used within PXE FILE API */ +#define PXENV_FILE_MAX 0x00ef + +/** @defgroup pxenv_file_open PXENV_FILE_OPEN + * + * FILE OPEN + * + * @{ + */ + +/** PXE API function code for pxenv_file_open() */ +#define PXENV_FILE_OPEN 0x00e0 + +/** Parameter block for pxenv_file_open() */ +struct s_PXENV_FILE_OPEN { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t FileHandle; /**< File handle */ + SEGOFF16_t FileName; /**< File URL */ + UINT32_t Reserved; /**< Reserved */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_OPEN PXENV_FILE_OPEN_t; + +/** @} */ /* pxenv_file_open */ + +/** @defgroup pxenv_file_close PXENV_FILE_CLOSE + * + * FILE CLOSE + * + * @{ + */ + +/** PXE API function code for pxenv_file_close() */ +#define PXENV_FILE_CLOSE 0x00e1 + +/** Parameter block for pxenv_file_close() */ +struct s_PXENV_FILE_CLOSE { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t FileHandle; /**< File handle */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_CLOSE PXENV_FILE_CLOSE_t; + +/** @} */ /* pxenv_file_close */ + +/** @defgroup pxenv_file_select PXENV_FILE_SELECT + * + * FILE SELECT + * + * @{ + */ + +/** PXE API function code for pxenv_file_select() */ +#define PXENV_FILE_SELECT 0x00e2 + +/** File is ready for reading */ +#define RDY_READ 0x0001 + +/** Parameter block for pxenv_file_select() */ +struct s_PXENV_FILE_SELECT { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t FileHandle; /**< File handle */ + UINT16_t Ready; /**< Indication of readiness */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_SELECT 
PXENV_FILE_SELECT_t; + +/** @} */ /* pxenv_file_select */ + +/** @defgroup pxenv_file_read PXENV_FILE_READ + * + * FILE READ + * + * @{ + */ + +/** PXE API function code for pxenv_file_read() */ +#define PXENV_FILE_READ 0x00e3 + +/** Parameter block for pxenv_file_read() */ +struct s_PXENV_FILE_READ { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t FileHandle; /**< File handle */ + UINT16_t BufferSize; /**< Data buffer size */ + SEGOFF16_t Buffer; /**< Data buffer */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_READ PXENV_FILE_READ_t; + +/** @} */ /* pxenv_file_read */ + +/** @defgroup pxenv_get_file_size PXENV_GET_FILE_SIZE + * + * GET FILE SIZE + * + * @{ + */ + +/** PXE API function code for pxenv_get_file_size() */ +#define PXENV_GET_FILE_SIZE 0x00e4 + +/** Parameter block for pxenv_get_file_size() */ +struct s_PXENV_GET_FILE_SIZE { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t FileHandle; /**< File handle */ + UINT32_t FileSize; /**< File size */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_GET_FILE_SIZE PXENV_GET_FILE_SIZE_t; + +/** @} */ /* pxenv_get_file_size */ + +/** @defgroup pxenv_file_exec PXENV_FILE_EXEC + * + * FILE EXEC + * + * @{ + */ + +/** PXE API function code for pxenv_file_exec() */ +#define PXENV_FILE_EXEC 0x00e5 + +/** Parameter block for pxenv_file_exec() */ +struct s_PXENV_FILE_EXEC { + PXENV_STATUS_t Status; /**< PXE status code */ + SEGOFF16_t Command; /**< Command to execute */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_EXEC PXENV_FILE_EXEC_t; + +/** @} */ /* pxenv_file_exec */ + +/** @defgroup pxenv_file_api_check PXENV_FILE_API_CHECK + * + * FILE API CHECK + * + * @{ + */ + +/** PXE API function code for pxenv_file_api_check() */ +#define PXENV_FILE_API_CHECK 0x00e6 + +/** Parameter block for pxenv_file_api_check() */ +struct s_PXENV_FILE_API_CHECK { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t Size; /**< Size of structure */ + UINT32_t Magic; 
/**< Magic number */ + UINT32_t Provider; /**< Implementation identifier */ + UINT32_t APIMask; /**< Supported API functions */ + UINT32_t Flags; /**< Reserved for the future */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_API_CHECK PXENV_FILE_API_CHECK_t; + +/** @} */ /* pxenv_file_api_check */ + +/** @defgroup pxenv_file_exit_hook PXENV_FILE_EXIT_HOOK + * + * FILE EXIT HOOK + * + * @{ + */ + +/** PXE API function code for pxenv_file_exit_hook() */ +#define PXENV_FILE_EXIT_HOOK 0x00e7 + +/** Parameter block for pxenv_file_exit_hook() */ +struct s_PXENV_FILE_EXIT_HOOK { + PXENV_STATUS_t Status; /**< PXE status code */ + SEGOFF16_t Hook; /**< SEG16:OFF16 to jump to */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_EXIT_HOOK PXENV_FILE_EXIT_HOOK_t; + +/** @} */ /* pxenv_file_exit_hook */ + +/** @defgroup pxenv_file_cmdline PXENV_FILE_CMDLINE + * + * FILE CMDLINE + * + * @{ + */ + +/** PXE API function code for pxenv_file_cmdline() */ +#define PXENV_FILE_CMDLINE 0x00e8 + +/** Parameter block for pxenv_file_cmdline() */ +struct s_PXENV_FILE_CMDLINE { + PXENV_STATUS_t Status; /**< PXE status code */ + UINT16_t BufferSize; /**< Data buffer size */ + SEGOFF16_t Buffer; /**< Data buffer */ +} __attribute__ (( packed )); + +typedef struct s_PXENV_FILE_CMDLINE PXENV_FILE_CMDLINE_t; + +/** @} */ /* pxe_file_cmdline */ + +/** @} */ /* pxe_file_api */ + +/** @defgroup pxe_loader_api PXE Loader API + * + * The UNDI ROM loader API + * + * @{ + */ + +/** Parameter block for undi_loader() */ +struct s_UNDI_LOADER { + /** PXE status code */ + PXENV_STATUS_t Status; + /** %ax register as for PXENV_START_UNDI */ + UINT16_t AX; + /** %bx register as for PXENV_START_UNDI */ + UINT16_t BX; + /** %dx register as for PXENV_START_UNDI */ + UINT16_t DX; + /** %di register as for PXENV_START_UNDI */ + OFF16_t DI; + /** %es register as for PXENV_START_UNDI */ + SEGSEL_t ES; + /** UNDI data segment + * + * @note The PXE specification defines the type of this 
field + * as #UINT16_t. For x86, #SEGSEL_t and #UINT16_t are + * equivalent anyway; for other architectures #SEGSEL_t makes + * more sense. + */ + SEGSEL_t UNDI_DS; + /** UNDI code segment + * + * @note The PXE specification defines the type of this field + * as #UINT16_t. For x86, #SEGSEL_t and #UINT16_t are + * equivalent anyway; for other architectures #SEGSEL_t makes + * more sense. + */ + SEGSEL_t UNDI_CS; + /** Address of the !PXE structure (a struct s_PXE) */ + SEGOFF16_t PXEptr; + /** Address of the PXENV+ structure (a struct s_PXENV) */ + SEGOFF16_t PXENVptr; +} __attribute__ (( packed )); + +typedef struct s_UNDI_LOADER UNDI_LOADER_t; + +/** @} */ /* pxe_loader_api */ + +/** @} */ /* pxe */ + +/** @page pxe_notes Etherboot PXE implementation notes + +@section pxe_routing IP routing + +Several PXE API calls (e.g. pxenv_tftp_open() and pxenv_udp_write()) +allow for the caller to specify a "relay agent IP address", often in a +field called "gateway" or similar. The PXE specification states that +"The IP layer should provide space for a minimum of four routing +entries obtained from the default router and static route DHCP option +tags in the DHCPACK message, plus any non-zero giaddr field from the +DHCPOFFER message(s) accepted by the client". + +The DHCP static route option ("option static-routes" in dhcpd.conf) +works only for classed IP routing (i.e. it provides no way to specify +a subnet mask). Since virtually everything now uses classless IP +routing, the DHCP static route option is almost totally useless, and +is (according to the dhcp-options man page) not implemented by any of +the popular DHCP clients. + +This leaves the caller-specified "relay agent IP address", the giaddr +field from the DHCPOFFER message(s) and the default gateway(s) +provided via the routers option ("option routers" in dhcpd.conf) in +the DHCPACK message. Each of these is a default gateway address. 
+It's a fair bet that the routers option should take priority over the +giaddr field, since the routers option has to be explicitly specified +by the DHCP server operator. Similarly, it's fair to assume that the +caller-specified "relay agent IP address", if present, should take +priority over any other routing table entries. + +@bug Etherboot currently ignores all potential sources of routing +information other than the first router provided to it by a DHCP +routers option. + +@section pxe_x86_modes x86 processor mode restrictions + +On the x86 platform, different PXE API calls have different +restrictions on the processor modes (real or protected) that can be +used. See the individual API call descriptions for the restrictions +that apply to any particular call. + +@subsection pxe_x86_pmode16 Real mode, or protected-mode with 16-bit stack + +The PXE specification states that the API function can be called in +protected mode only if the s_PXE::StatusCallout field is set to a +non-zero value, and that the API function cannot be called with a +32-bit stack segment. + +Etherboot does not enforce either of these restrictions; they seem (as +with so much of the PXE specification) to be artifacts of the Intel +implementation. 
+ +*/ + +#endif /* PXE_API_H */ diff --git a/src/arch/x86/include/pxe_call.h b/src/arch/x86/include/pxe_call.h new file mode 100644 index 00000000..2ad0a950 --- /dev/null +++ b/src/arch/x86/include/pxe_call.h @@ -0,0 +1,43 @@ +#ifndef _PXE_CALL_H +#define _PXE_CALL_H + +/** @file + * + * PXE API entry point + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +struct net_device; + +/** PXE load address segment */ +#define PXE_LOAD_SEGMENT 0 + +/** PXE load address offset */ +#define PXE_LOAD_OFFSET 0x7c00 + +/** PXE physical load address */ +#define PXE_LOAD_PHYS ( ( PXE_LOAD_SEGMENT << 4 ) + PXE_LOAD_OFFSET ) + +/** !PXE structure */ +extern struct s_PXE __text16 ( ppxe ); +#define ppxe __use_text16 ( ppxe ) + +/** PXENV+ structure */ +extern struct s_PXENV __text16 ( pxenv ); +#define pxenv __use_text16 ( pxenv ) + +/** PXENV_RESTART_TFTP jump buffer */ +extern rmjmp_buf pxe_restart_nbp; + +extern void pxe_activate ( struct net_device *netdev ); +extern int pxe_deactivate ( void ); +extern int pxe_start_nbp ( void ); +extern __asmcall void pxe_api_call ( struct i386_all_regs *ix86 ); +extern int pxe_api_call_weak ( struct i386_all_regs *ix86 ); + +#endif /* _PXE_CALL_H */ diff --git a/src/arch/x86/include/pxe_error.h b/src/arch/x86/include/pxe_error.h new file mode 100644 index 00000000..51298e66 --- /dev/null +++ b/src/arch/x86/include/pxe_error.h @@ -0,0 +1,123 @@ +#ifndef PXE_ERROR_H +#define PXE_ERROR_H + +/** @file + * + * Preboot eXecution Environment (PXE) error definitions + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @defgroup pxeerrors PXE error codes + * + * @{ + */ + +/* Generic errors */ +#define PXENV_STATUS_SUCCESS 0x0000 +#define PXENV_STATUS_FAILURE 0x0001 +#define PXENV_STATUS_BAD_FUNC 0x0002 +#define PXENV_STATUS_UNSUPPORTED 0x0003 +#define PXENV_STATUS_KEEP_UNDI 0x0004 +#define PXENV_STATUS_KEEP_ALL 0x0005 +#define PXENV_STATUS_OUT_OF_RESOURCES 0x0006 + +/* ARP errors (0x0010 to 0x001f) */ +#define 
PXENV_STATUS_ARP_TIMEOUT 0x0011 + +/* Base-Code state errors */ +#define PXENV_STATUS_UDP_CLOSED 0x0018 +#define PXENV_STATUS_UDP_OPEN 0x0019 +#define PXENV_STATUS_TFTP_CLOSED 0x001a +#define PXENV_STATUS_TFTP_OPEN 0x001b + +/* BIOS/system errors (0x0020 to 0x002f) */ +#define PXENV_STATUS_MCOPY_PROBLEM 0x0020 +#define PXENV_STATUS_BIS_INTEGRITY_FAILURE 0x0021 +#define PXENV_STATUS_BIS_VALIDATE_FAILURE 0x0022 +#define PXENV_STATUS_BIS_INIT_FAILURE 0x0023 +#define PXENV_STATUS_BIS_SHUTDOWN_FAILURE 0x0024 +#define PXENV_STATUS_BIS_GBOA_FAILURE 0x0025 +#define PXENV_STATUS_BIS_FREE_FAILURE 0x0026 +#define PXENV_STATUS_BIS_GSI_FAILURE 0x0027 +#define PXENV_STATUS_BIS_BAD_CKSUM 0x0028 + +/* TFTP/MTFTP errors (0x0030 to 0x003f) */ +#define PXENV_STATUS_TFTP_CANNOT_ARP_ADDRESS 0x0030 +#define PXENV_STATUS_TFTP_OPEN_TIMEOUT 0x0032 +#define PXENV_STATUS_TFTP_UNKNOWN_OPCODE 0x0033 +#define PXENV_STATUS_TFTP_READ_TIMEOUT 0x0035 +#define PXENV_STATUS_TFTP_ERROR_OPCODE 0x0036 +#define PXENV_STATUS_TFTP_CANNOT_OPEN_CONNECTION 0x0038 +#define PXENV_STATUS_TFTP_CANNOT_READ_FROM_CONNECTION 0x0039 +#define PXENV_STATUS_TFTP_TOO_MANY_PACKAGES 0x003a +#define PXENV_STATUS_TFTP_FILE_NOT_FOUND 0x003b +#define PXENV_STATUS_TFTP_ACCESS_VIOLATION 0x003c +#define PXENV_STATUS_TFTP_NO_MCAST_ADDRESS 0x003d +#define PXENV_STATUS_TFTP_NO_FILESIZE 0x003e +#define PXENV_STATUS_TFTP_INVALID_PACKET_SIZE 0x003f + +/* Reserved errors 0x0040 to 0x004f) */ + +/* DHCP/BOOTP errors (0x0050 to 0x005f) */ +#define PXENV_STATUS_DHCP_TIMEOUT 0x0051 +#define PXENV_STATUS_DHCP_NO_IP_ADDRESS 0x0052 +#define PXENV_STATUS_DHCP_NO_BOOTFILE_NAME 0x0053 +#define PXENV_STATUS_DHCP_BAD_IP_ADDRESS 0x0054 + +/* Driver errors (0x0060 to 0x006f) */ +#define PXENV_STATUS_UNDI_INVALID_FUNCTION 0x0060 +#define PXENV_STATUS_UNDI_MEDIATEST_FAILED 0x0061 +#define PXENV_STATUS_UNDI_CANNOT_INIT_NIC_FOR_MCAST 0x0062 +#define PXENV_STATUS_UNDI_CANNOT_INITIALIZE_NIC 0x0063 +#define PXENV_STATUS_UNDI_CANNOT_INITIALIZE_PHY 0x0064 
+#define PXENV_STATUS_UNDI_CANNOT_READ_CONFIG_DATA 0x0065 +#define PXENV_STATUS_UNDI_CANNOT_READ_INIT_DATA 0x0066 +#define PXENV_STATUS_UNDI_BAD_MAC_ADDRESS 0x0067 +#define PXENV_STATUS_UNDI_BAD_EEPROM_CHECKSUM 0x0068 +#define PXENV_STATUS_UNDI_ERROR_SETTING_ISR 0x0069 +#define PXENV_STATUS_UNDI_INVALID_STATE 0x006a +#define PXENV_STATUS_UNDI_TRANSMIT_ERROR 0x006b +#define PXENV_STATUS_UNDI_INVALID_PARAMETER 0x006c + +/* ROM and NBP bootstrap errors (0x0070 to 0x007f) */ +#define PXENV_STATUS_BSTRAP_PROMPT_MENU 0x0074 +#define PXENV_STATUS_BSTRAP_MCAST_ADDR 0x0076 +#define PXENV_STATUS_BSTRAP_MISSING_LIST 0x0077 +#define PXENV_STATUS_BSTRAP_NO_RESPONSE 0x0078 +#define PXENV_STATUS_BSTRAP_FILE_TOO_BIG 0x0079 + +/* Environment NBP errors (0x0080 to 0x008f) */ + +/* Reserved errors (0x0090 to 0x009f) */ + +/* Miscellaneous errors (0x00a0 to 0x00af) */ +#define PXENV_STATUS_BINL_CANCELED_BY_KEYSTROKE 0x00a0 +#define PXENV_STATUS_BINL_NO_PXE_SERVER 0x00a1 +#define PXENV_STATUS_NOT_AVAILABLE_IN_PMODE 0x00a2 +#define PXENV_STATUS_NOT_AVAILABLE_IN_RMODE 0x00a3 + +/* BUSD errors (0x00b0 to 0x00bf) */ +#define PXENV_STATUS_BUSD_DEVICE_NOT_SUPPORTED 0x00b0 + +/* Loader errors (0x00c0 to 0x00cf) */ +#define PXENV_STATUS_LOADER_NO_FREE_BASE_MEMORY 0x00c0 +#define PXENV_STATUS_LOADER_NO_BC_ROMID 0x00c1 +#define PXENV_STATUS_LOADER_BAD_BC_ROMID 0x00c2 +#define PXENV_STATUS_LOADER_BAD_BC_RUNTIME_IMAGE 0x00c3 +#define PXENV_STATUS_LOADER_NO_UNDI_ROMID 0x00c4 +#define PXENV_STATUS_LOADER_BAD_UNDI_ROMID 0x00c5 +#define PXENV_STATUS_LOADER_BAD_UNDI_DRIVER_IMAGE 0x00c6 +#define PXENV_STATUS_LOADER_NO_PXE_STRUCT 0x00c8 +#define PXENV_STATUS_LOADER_NO_PXENV_STRUCT 0x00c9 +#define PXENV_STATUS_LOADER_UNDI_START 0x00ca +#define PXENV_STATUS_LOADER_BC_START 0x00cb + +/** @} */ + +/** Derive PXENV_STATUS code from iPXE error number */ +#define PXENV_STATUS( rc ) ( (-(rc)) & 0x00ff ) + +#endif /* PXE_ERROR_H */ diff --git a/src/arch/x86/include/pxe_types.h b/src/arch/x86/include/pxe_types.h 
new file mode 100644 index 00000000..483666e3 --- /dev/null +++ b/src/arch/x86/include/pxe_types.h @@ -0,0 +1,127 @@ +#ifndef PXE_TYPES_H +#define PXE_TYPES_H + +/** @file + * + * PXE data types + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include /* PXE status codes */ + +/** @addtogroup pxe Preboot eXecution Environment (PXE) API + * @{ + */ + +/** @defgroup pxe_types PXE data types + * + * Basic PXE data types such as #UINT16_t, #ADDR32_t, #SEGSEL_t etc. + * + * These definitions are based on Table 1-1 ("Data Type Definitions") + * in the Intel PXE specification version 2.1. They have been + * generalised to non-x86 architectures where possible. + * + * @{ + */ + +/** An 8-bit unsigned integer */ +typedef uint8_t UINT8_t; + +/** A 16-bit unsigned integer */ +typedef uint16_t UINT16_t; + +/** A 32-bit unsigned integer */ +typedef uint32_t UINT32_t; + +/** A PXE exit code. + * + * Permitted values are #PXENV_EXIT_SUCCESS and #PXENV_EXIT_FAILURE. + * + */ +typedef UINT16_t PXENV_EXIT_t; +#define PXENV_EXIT_SUCCESS 0x0000 /**< No error occurred */ +#define PXENV_EXIT_FAILURE 0x0001 /**< An error occurred */ + +/** A PXE status code. + * + * Status codes are defined in errno.h. + * + */ +typedef UINT16_t PXENV_STATUS_t; + +/** An IPv4 address. + * + * @note This data type is in network (big-endian) byte order. + * + */ +typedef UINT32_t IP4_t; + +/** A UDP port. + * + * @note This data type is in network (big-endian) byte order. + * + */ +typedef UINT16_t UDP_PORT_t; + +/** Maximum length of a MAC address */ +#define MAC_ADDR_LEN 16 + +/** A MAC address */ +typedef UINT8_t MAC_ADDR_t[MAC_ADDR_LEN]; + +#ifndef HAVE_ARCH_ADDR32 +/** A physical address. + * + * For x86, this is a 32-bit physical address, and is therefore + * limited to the low 4GB. + * + */ +typedef UINT32_t ADDR32_t; +#endif + +#ifndef HAVE_ARCH_SEGSEL +/** A segment selector. 
+ * + * For x86, this is a real mode segment (0x0000-0xffff), or a + * protected-mode segment selector, such as could be loaded into a + * segment register. + * + */ +typedef UINT16_t SEGSEL_t; +#endif + +#ifndef HAVE_ARCH_OFF16 +/** An offset within a segment identified by #SEGSEL + * + * For x86, this is a 16-bit offset. + * + */ +typedef UINT16_t OFF16_t; +#endif + +/** A segment:offset address + * + * For x86, this is a 16-bit real-mode or protected-mode + * segment:offset address. + * + */ +typedef struct s_SEGOFF16 { + OFF16_t offset; /**< Offset within the segment */ + SEGSEL_t segment; /**< Segment selector */ +} __attribute__ (( packed )) SEGOFF16_t; + +/** A segment descriptor */ +typedef struct s_SEGDESC { + SEGSEL_t segment_address; /**< Segment selector */ + ADDR32_t Physical_address; /**< Segment base address */ + OFF16_t Seg_size; /**< Size of the segment */ +} __attribute__ (( packed )) SEGDESC_t; + +/** @} */ /* pxe_types */ + +/** @} */ /* pxe */ + +#endif /* PXE_TYPES_H */ diff --git a/src/arch/x86/include/realmode.h b/src/arch/x86/include/realmode.h new file mode 100644 index 00000000..4defd3b9 --- /dev/null +++ b/src/arch/x86/include/realmode.h @@ -0,0 +1,139 @@ +#ifndef REALMODE_H +#define REALMODE_H + +#include +#include +#include + +/* + * Data structures and type definitions + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* + * Declaration of variables in .data16 + * + * To place a variable in the .data16 segment, declare it using the + * pattern: + * + * int __data16 ( foo ); + * #define foo __use_data16 ( foo ); + * + * extern uint32_t __data16 ( bar ); + * #define bar __use_data16 ( bar ); + * + * static long __data16 ( baz ) = 0xff000000UL; + * #define baz __use_data16 ( baz ); + * + * i.e. 
take a normal declaration, add __data16() around the variable + name, and add a line saying "#define <name> __use_data16 ( <name> )" + * + * You can then access them just like any other variable, for example + * + * int x = foo + bar; + * + * This magic is achieved at a cost of only around 7 extra bytes per + * group of accesses to .data16 variables. When using KEEP_IT_REAL, + * there is no extra cost. + * + * You should place variables in .data16 when they need to be accessed + * by real-mode code. Real-mode assembly (e.g. as created by + * REAL_CODE()) can access these variables via the usual data segment. + * You can therefore write something like + * + * static uint16_t __data16 ( foo ); + * #define foo __use_data16 ( foo ) + * + * int bar ( void ) { + * __asm__ __volatile__ ( REAL_CODE ( "int $0xff\n\t" + * "movw %ax, foo" ) + * : : ); + * return foo; + * } + * + * Variables may also be placed in .text16 using __text16 and + * __use_text16. Some variables (e.g. chained interrupt vectors) fit + * most naturally in .text16; most should be in .data16. + * + * If you have only a pointer to a magic symbol within .data16 or + * .text16, rather than the symbol itself, you can attempt to extract + * the underlying symbol name using __from_data16() or + * __from_text16(). This is not for the faint-hearted; check the + * assembler output to make sure that it's doing the right thing. 
+ */ + +/** + * Convert segment:offset address to user buffer + * + * @v segment Real-mode segment + * @v offset Real-mode offset + * @ret buffer User buffer + */ +static inline __always_inline userptr_t +real_to_user ( unsigned int segment, unsigned int offset ) { + return ( phys_to_user ( ( segment << 4 ) + offset ) ); +} + +/** + * Copy data to base memory + * + * @v dest_seg Destination segment + * @v dest_off Destination offset + * @v src Source + * @v len Length + */ +static inline __always_inline void +copy_to_real ( unsigned int dest_seg, unsigned int dest_off, + void *src, size_t n ) { + copy_to_user ( real_to_user ( dest_seg, dest_off ), 0, src, n ); +} + +/** + * Copy data to base memory + * + * @v dest Destination + * @v src_seg Source segment + * @v src_off Source offset + * @v len Length + */ +static inline __always_inline void +copy_from_real ( void *dest, unsigned int src_seg, + unsigned int src_off, size_t n ) { + copy_from_user ( dest, real_to_user ( src_seg, src_off ), 0, n ); +} + +/** + * Write a single variable to base memory + * + * @v var Variable to write + * @v dest_seg Destination segment + * @v dest_off Destination offset + */ +#define put_real( var, dest_seg, dest_off ) \ + copy_to_real ( (dest_seg), (dest_off), &(var), sizeof (var) ) + +/** + * Read a single variable from base memory + * + * @v var Variable to read + * @v src_seg Source segment + * @v src_off Source offset + */ +#define get_real( var, src_seg, src_off ) \ + copy_from_real ( &(var), (src_seg), (src_off), sizeof (var) ) + +/* + * REAL_CODE ( asm_code_str ) + * + * This can be used in inline assembly to create a fragment of code + * that will execute in real mode. 
For example: to read a keystroke + * from the BIOS keyboard using INT 16, you would do something like: + * + * __asm__ __volatile__ ( REAL_CODE ( "int $0x16" ) + * : "=a" ( character ) : "a" ( 0x0000 ) ); + * + */ + +#endif /* REALMODE_H */ diff --git a/src/arch/x86/include/registers.h b/src/arch/x86/include/registers.h new file mode 100644 index 00000000..dd3b59fd --- /dev/null +++ b/src/arch/x86/include/registers.h @@ -0,0 +1,198 @@ +#ifndef REGISTERS_H +#define REGISTERS_H + +/** @file + * + * i386 registers. + * + * This file defines data structures that allow easy access to i386 + * register dumps. + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** + * A 16-bit general register. + * + * This type encapsulates a 16-bit register such as %ax, %bx, %cx, + * %dx, %si, %di, %bp or %sp. + * + */ +typedef union { + struct { + union { + uint8_t l; + uint8_t byte; + }; + uint8_t h; + } __attribute__ (( packed )); + uint16_t word; +} __attribute__ (( packed )) reg16_t; + +/** + * A 32-bit general register. + * + * This type encapsulates a 32-bit register such as %eax, %ebx, %ecx, + * %edx, %esi, %edi, %ebp or %esp. + * + */ +typedef union { + struct { + union { + uint8_t l; + uint8_t byte; + }; + uint8_t h; + } __attribute__ (( packed )); + uint16_t word; + uint32_t dword; +} __attribute__ (( packed )) reg32_t; + +/** + * A 32-bit general register dump. + * + * This is the data structure that is created on the stack by the @c + * pushal instruction, and can be read back using the @c popal + * instruction. 
+ * + */ +struct i386_regs { + union { + uint16_t di; + uint32_t edi; + }; + union { + uint16_t si; + uint32_t esi; + }; + union { + uint16_t bp; + uint32_t ebp; + }; + union { + uint16_t sp; + uint32_t esp; + }; + union { + struct { + uint8_t bl; + uint8_t bh; + } __attribute__ (( packed )); + uint16_t bx; + uint32_t ebx; + }; + union { + struct { + uint8_t dl; + uint8_t dh; + } __attribute__ (( packed )); + uint16_t dx; + uint32_t edx; + }; + union { + struct { + uint8_t cl; + uint8_t ch; + } __attribute__ (( packed )); + uint16_t cx; + uint32_t ecx; + }; + union { + struct { + uint8_t al; + uint8_t ah; + } __attribute__ (( packed )); + uint16_t ax; + uint32_t eax; + }; +} __attribute__ (( packed )); + +/** + * A segment register dump. + * + * The i386 has no equivalent of the @c pushal or @c popal + * instructions for the segment registers. We adopt the convention of + * always using the sequences + * + * @code + * + * pushw %gs ; pushw %fs ; pushw %es ; pushw %ds ; pushw %ss ; pushw %cs + * + * @endcode + * + * and + * + * @code + * + * addw $4, %sp ; popw %ds ; popw %es ; popw %fs ; popw %gs + * + * @endcode + * + * This is the data structure that is created and read back by these + * instruction sequences. + * + */ +struct i386_seg_regs { + uint16_t cs; + uint16_t ss; + uint16_t ds; + uint16_t es; + uint16_t fs; + uint16_t gs; +} __attribute__ (( packed )); + +/** + * A full register dump. + * + * This data structure is created by the instructions + * + * @code + * + * pushfl + * pushal + * pushw %gs ; pushw %fs ; pushw %es ; pushw %ds ; pushw %ss ; pushw %cs + * + * @endcode + * + * and can be read back using the instructions + * + * @code + * + * addw $4, %sp ; popw %ds ; popw %es ; popw %fs ; popw %gs + * popal + * popfl + * + * @endcode + * + * virt_call() and kir_call() create this data structure on the stack + * and pass in a pointer to this structure. 
+ * + */ +struct i386_all_regs { + struct i386_seg_regs segs; + struct i386_regs regs; + uint32_t flags; +} __attribute__ (( packed )); + +/* Flags */ +#define CF ( 1 << 0 ) +#define PF ( 1 << 2 ) +#define AF ( 1 << 4 ) +#define ZF ( 1 << 6 ) +#define SF ( 1 << 7 ) +#define OF ( 1 << 11 ) + +/* Segment:offset structure. Note that the order within the structure + * is offset:segment. + */ +struct segoff { + uint16_t offset; + uint16_t segment; +} __attribute__ (( packed )); + +typedef struct segoff segoff_t; + +#endif /* REGISTERS_H */ diff --git a/src/arch/x86/include/rmsetjmp.h b/src/arch/x86/include/rmsetjmp.h new file mode 100644 index 00000000..3470be47 --- /dev/null +++ b/src/arch/x86/include/rmsetjmp.h @@ -0,0 +1,28 @@ +#ifndef _RMSETJMP_H +#define _RMSETJMP_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** A real-mode-extended jump buffer */ +typedef struct { + /** Jump buffer */ + jmp_buf env; + /** Real-mode stack pointer */ + segoff_t rm_stack; +} rmjmp_buf[1]; + +#define rmsetjmp( _env ) ( { \ + (_env)->rm_stack.segment = rm_ss; \ + (_env)->rm_stack.offset = rm_sp; \ + setjmp ( (_env)->env ); } ) \ + +#define rmlongjmp( _env, _val ) do { \ + rm_ss = (_env)->rm_stack.segment; \ + rm_sp = (_env)->rm_stack.offset; \ + longjmp ( (_env)->env, (_val) ); \ + } while ( 0 ) + +#endif /* _RMSETJMP_H */ diff --git a/src/arch/x86/include/rtc.h b/src/arch/x86/include/rtc.h new file mode 100644 index 00000000..6294b63e --- /dev/null +++ b/src/arch/x86/include/rtc.h @@ -0,0 +1,83 @@ +#ifndef _RTC_H +#define _RTC_H + +/** @file + * + * CMOS Real-Time Clock (RTC) + * + * The CMOS/RTC registers are documented (with varying degrees of + * accuracy and consistency) at + * + * http://www.nondot.org/sabre/os/files/MiscHW/RealtimeClockFAQ.txt + * http://wiki.osdev.org/RTC + * http://wiki.osdev.org/CMOS + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** RTC IRQ */ +#define RTC_IRQ 8 + +/** RTC interrupt vector */ +#define RTC_INT IRQ_INT 
( RTC_IRQ ) + +/** CMOS/RTC address (and NMI) register */ +#define CMOS_ADDRESS 0x70 + +/** NMI disable bit */ +#define CMOS_DISABLE_NMI 0x80 + +/** CMOS/RTC data register */ +#define CMOS_DATA 0x71 + +/** RTC seconds */ +#define RTC_SEC 0x00 + +/** RTC minutes */ +#define RTC_MIN 0x02 + +/** RTC hours */ +#define RTC_HOUR 0x04 + +/** RTC weekday */ +#define RTC_WDAY 0x06 + +/** RTC day of month */ +#define RTC_MDAY 0x07 + +/** RTC month */ +#define RTC_MON 0x08 + +/** RTC year */ +#define RTC_YEAR 0x09 + +/** RTC status register A */ +#define RTC_STATUS_A 0x0a + +/** RTC update in progress bit */ +#define RTC_STATUS_A_UPDATE_IN_PROGRESS 0x80 + +/** RTC status register B */ +#define RTC_STATUS_B 0x0b + +/** RTC 24 hour format bit */ +#define RTC_STATUS_B_24_HOUR 0x02 + +/** RTC binary mode bit */ +#define RTC_STATUS_B_BINARY 0x04 + +/** RTC Periodic Interrupt Enabled bit */ +#define RTC_STATUS_B_PIE 0x40 + +/** RTC status register C */ +#define RTC_STATUS_C 0x0c + +/** RTC status register D */ +#define RTC_STATUS_D 0x0d + +/** CMOS default address */ +#define CMOS_DEFAULT_ADDRESS RTC_STATUS_D + +#endif /* _RTC_H */ diff --git a/src/arch/x86/include/sdi.h b/src/arch/x86/include/sdi.h new file mode 100644 index 00000000..806c3f19 --- /dev/null +++ b/src/arch/x86/include/sdi.h @@ -0,0 +1,39 @@ +#ifndef _SDI_H +#define _SDI_H + +/** @file + * + * System Deployment Image (SDI) + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** SDI image header */ +struct sdi_header { + /** Signature */ + uint32_t magic; + /** Version (as an ASCII string) */ + uint32_t version; + /** Reserved */ + uint8_t reserved[8]; + /** Boot code offset */ + uint64_t boot_offset; + /** Boot code size */ + uint64_t boot_size; +} __attribute__ (( packed )); + +/** SDI image signature */ +#define SDI_MAGIC \ + ( ( '$' << 0 ) | ( 'S' << 8 ) | ( 'D' << 16 ) | ( 'I' << 24 ) ) + +/** SDI boot segment */ +#define SDI_BOOT_SEG 0x0000 + +/** SDI boot offset */ +#define SDI_BOOT_OFF 0x7c00 + +/** 
Constant to binary-OR with physical address of SDI image */ +#define SDI_WTF 0x41 + +#endif /* _SDI_H */ diff --git a/src/arch/x86/include/undi.h b/src/arch/x86/include/undi.h new file mode 100644 index 00000000..adf0c01e --- /dev/null +++ b/src/arch/x86/include/undi.h @@ -0,0 +1,104 @@ +#ifndef _UNDI_H +#define _UNDI_H + +/** @file + * + * UNDI driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifndef ASSEMBLY + +#include +#include + +/** An UNDI device + * + * This structure is used by assembly code as well as C; do not alter + * this structure without editing pxeprefix.S to match. + */ +struct undi_device { + /** PXENV+ structure address */ + SEGOFF16_t pxenv; + /** !PXE structure address */ + SEGOFF16_t ppxe; + /** Entry point */ + SEGOFF16_t entry; + /** Free base memory after load */ + UINT16_t fbms; + /** Free base memory prior to load */ + UINT16_t restore_fbms; + /** PCI bus:dev.fn, or @c UNDI_NO_PCI_BUSDEVFN */ + UINT16_t pci_busdevfn; + /** ISAPnP card select number, or @c UNDI_NO_ISAPNP_CSN */ + UINT16_t isapnp_csn; + /** ISAPnP read port, or @c UNDI_NO_ISAPNP_READ_PORT */ + UINT16_t isapnp_read_port; + /** PCI vendor ID + * + * Filled in only for the preloaded UNDI device by pxeprefix.S + */ + UINT16_t pci_vendor; + /** PCI device ID + * + * Filled in only for the preloaded UNDI device by pxeprefix.S + */ + UINT16_t pci_device; + /** Flags + * + * This is the bitwise OR of zero or more UNDI_FL_XXX + * constants. + */ + UINT16_t flags; + + /** Driver-private data + * + * Use undi_set_drvdata() and undi_get_drvdata() to access this + * field. 
+ */ + void *priv; +} __attribute__ (( packed )); + +/** + * Set UNDI driver-private data + * + * @v undi UNDI device + * @v priv Private data + */ +static inline void undi_set_drvdata ( struct undi_device *undi, void *priv ) { + undi->priv = priv; +} + +/** + * Get UNDI driver-private data + * + * @v undi UNDI device + * @ret priv Private data + */ +static inline void * undi_get_drvdata ( struct undi_device *undi ) { + return undi->priv; +} + +#endif /* ASSEMBLY */ + +/** PCI bus:dev.fn field is invalid */ +#define UNDI_NO_PCI_BUSDEVFN 0xffff + +/** ISAPnP card select number field is invalid */ +#define UNDI_NO_ISAPNP_CSN 0xffff + +/** ISAPnP read port field is invalid */ +#define UNDI_NO_ISAPNP_READ_PORT 0xffff + +/** UNDI flag: START_UNDI has been called */ +#define UNDI_FL_STARTED 0x0001 + +/** UNDI flag: UNDI_STARTUP and UNDI_INITIALIZE have been called */ +#define UNDI_FL_INITIALIZED 0x0002 + +/** UNDI flag: keep stack resident */ +#define UNDI_FL_KEEP_ALL 0x0004 + +#endif /* _UNDI_H */ diff --git a/src/arch/x86/include/undiload.h b/src/arch/x86/include/undiload.h new file mode 100644 index 00000000..235e7a79 --- /dev/null +++ b/src/arch/x86/include/undiload.h @@ -0,0 +1,35 @@ +#ifndef _UNDILOAD_H +#define _UNDILOAD_H + +/** @file + * + * UNDI load/unload + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +struct undi_device; +struct undi_rom; + +extern int undi_load ( struct undi_device *undi, struct undi_rom *undirom ); +extern int undi_unload ( struct undi_device *undi ); + +/** + * Call UNDI loader to create a pixie + * + * @v undi UNDI device + * @v undirom UNDI ROM + * @v pci_busdevfn PCI bus:dev.fn + * @ret rc Return status code + */ +static inline int undi_load_pci ( struct undi_device *undi, + struct undi_rom *undirom, + unsigned int pci_busdevfn ) { + undi->pci_busdevfn = pci_busdevfn; + undi->isapnp_csn = UNDI_NO_ISAPNP_CSN; + undi->isapnp_read_port = UNDI_NO_ISAPNP_READ_PORT; + return undi_load ( undi, undirom ); +} + +#endif /* _UNDILOAD_H */ 
diff --git a/src/arch/x86/include/undinet.h b/src/arch/x86/include/undinet.h new file mode 100644 index 00000000..04fdd600 --- /dev/null +++ b/src/arch/x86/include/undinet.h @@ -0,0 +1,18 @@ +#ifndef _UNDINET_H +#define _UNDINET_H + +/** @file + * + * UNDI network device driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +struct undi_device; +struct device; + +extern int undinet_probe ( struct undi_device *undi, struct device *dev ); +extern void undinet_remove ( struct undi_device *undi ); + +#endif /* _UNDINET_H */ diff --git a/src/arch/x86/include/undipreload.h b/src/arch/x86/include/undipreload.h new file mode 100644 index 00000000..57f493ce --- /dev/null +++ b/src/arch/x86/include/undipreload.h @@ -0,0 +1,18 @@ +#ifndef _UNDIPRELOAD_H +#define _UNDIPRELOAD_H + +/** @file + * + * Preloaded UNDI stack + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +extern struct undi_device __data16 ( preloaded_undi ); +#define preloaded_undi __use_data16 ( preloaded_undi ) + +#endif /* _UNDIPRELOAD_H */ diff --git a/src/arch/x86/include/undirom.h b/src/arch/x86/include/undirom.h new file mode 100644 index 00000000..1c530118 --- /dev/null +++ b/src/arch/x86/include/undirom.h @@ -0,0 +1,53 @@ +#ifndef _UNDIROM_H +#define _UNDIROM_H + +/** @file + * + * UNDI expansion ROMs + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** An UNDI PCI device ID */ +struct undi_pci_device_id { + /** PCI vendor ID */ + unsigned int vendor_id; + /** PCI device ID */ + unsigned int device_id; +}; + +/** An UNDI device ID */ +union undi_device_id { + /** PCI device ID */ + struct undi_pci_device_id pci; +}; + +/** An UNDI ROM */ +struct undi_rom { + /** List of UNDI ROMs */ + struct list_head list; + /** ROM segment address */ + unsigned int rom_segment; + /** UNDI loader entry point */ + SEGOFF16_t loader_entry; + /** Code segment size */ + size_t code_size; + /** Data segment size */ + size_t data_size; + /** Bus type + * + * Values are as 
used by @c PXENV_UNDI_GET_NIC_TYPE + */ + unsigned int bus_type; + /** Device ID */ + union undi_device_id bus_id; +}; + +extern struct undi_rom * undirom_find_pci ( unsigned int vendor_id, + unsigned int device_id, + unsigned int rombase ); + +#endif /* _UNDIROM_H */ diff --git a/src/arch/x86/include/vga.h b/src/arch/x86/include/vga.h new file mode 100644 index 00000000..01fc39d8 --- /dev/null +++ b/src/arch/x86/include/vga.h @@ -0,0 +1,228 @@ +/* + * + * modified + * by Steve M. Gehlbach + * + * Originally from linux/drivers/video/vga16.c by + * Ben Pfaff and Petr Vandrovec + * Copyright 1999 Ben Pfaff and Petr Vandrovec + * Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm + * Based on VESA framebuffer (c) 1998 Gerd Knorr + * + */ + +#ifndef VGA_H_INCL +#define VGA_H_INCL 1 + +//#include + +#define u8 unsigned char +#define u16 unsigned short +#define u32 unsigned int +#define __u32 u32 + +#define VERROR -1 +#define CHAR_HEIGHT 16 +#define LINES 25 +#define COLS 80 + +// macros for writing to vga regs +#define write_crtc(data,addr) outb(addr,CRT_IC); outb(data,CRT_DC) +#define write_att(data,addr) inb(IS1_RC); inb(0x80); outb(addr,ATT_IW); inb(0x80); outb(data,ATT_IW); inb(0x80) +#define write_seq(data,addr) outb(addr,SEQ_I); outb(data,SEQ_D) +#define write_gra(data,addr) outb(addr,GRA_I); outb(data,GRA_D) +u8 read_seq_b(u16 addr); +u8 read_gra_b(u16 addr); +u8 read_crtc_b(u16 addr); +u8 read_att_b(u16 addr); + + +#ifdef VGA_HARDWARE_FIXUP +void vga_hardware_fixup(void); +#else +#define vga_hardware_fixup() do{} while(0) +#endif + +#define SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ +#define SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ +#define SYNC_EXT 4 /* external sync */ +#define SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ +#define SYNC_BROADCAST 16 /* broadcast video timings */ + /* vtotal = 144d/288n/576i => PAL */ + /* vtotal = 121d/242n/484i => NTSC */ + +#define SYNC_ON_GREEN 32 /* sync on green */ + 
+#define VMODE_NONINTERLACED 0 /* non interlaced */ +#define VMODE_INTERLACED 1 /* interlaced */ +#define VMODE_DOUBLE 2 /* double scan */ +#define VMODE_MASK 255 + +#define VMODE_YWRAP 256 /* ywrap instead of panning */ +#define VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ +#define VMODE_CONUPDATE 512 /* don't update x/yoffset */ + +/* VGA data register ports */ +#define CRT_DC 0x3D5 /* CRT Controller Data Register - color emulation */ +#define CRT_DM 0x3B5 /* CRT Controller Data Register - mono emulation */ +#define ATT_R 0x3C1 /* Attribute Controller Data Read Register */ +#define GRA_D 0x3CF /* Graphics Controller Data Register */ +#define SEQ_D 0x3C5 /* Sequencer Data Register */ + +#define MIS_R 0x3CC // Misc Output Read Register +#define MIS_W 0x3C2 // Misc Output Write Register + +#define IS1_RC 0x3DA /* Input Status Register 1 - color emulation */ +#define IS1_RM 0x3BA /* Input Status Register 1 - mono emulation */ +#define PEL_D 0x3C9 /* PEL Data Register */ +#define PEL_MSK 0x3C6 /* PEL mask register */ + +/* EGA-specific registers */ +#define GRA_E0 0x3CC /* Graphics enable processor 0 */ +#define GRA_E1 0x3CA /* Graphics enable processor 1 */ + + +/* VGA index register ports */ +#define CRT_IC 0x3D4 /* CRT Controller Index - color emulation */ +#define CRT_IM 0x3B4 /* CRT Controller Index - mono emulation */ +#define ATT_IW 0x3C0 /* Attribute Controller Index & Data Write Register */ +#define GRA_I 0x3CE /* Graphics Controller Index */ +#define SEQ_I 0x3C4 /* Sequencer Index */ +#define PEL_IW 0x3C8 /* PEL Write Index */ +#define PEL_IR 0x3C7 /* PEL Read Index */ + +/* standard VGA indexes max counts */ +#define CRTC_C 25 /* 25 CRT Controller Registers sequentially set*/ + // the remainder are not in the par array +#define ATT_C 21 /* 21 Attribute Controller Registers */ +#define GRA_C 9 /* 9 Graphics Controller Registers */ +#define SEQ_C 5 /* 5 Sequencer Registers */ +#define MIS_C 1 /* 1 Misc Output Register */ + +#define 
CRTC_H_TOTAL 0 +#define CRTC_H_DISP 1 +#define CRTC_H_BLANK_START 2 +#define CRTC_H_BLANK_END 3 +#define CRTC_H_SYNC_START 4 +#define CRTC_H_SYNC_END 5 +#define CRTC_V_TOTAL 6 +#define CRTC_OVERFLOW 7 +#define CRTC_PRESET_ROW 8 +#define CRTC_MAX_SCAN 9 +#define CRTC_CURSOR_START 0x0A +#define CRTC_CURSOR_END 0x0B +#define CRTC_START_HI 0x0C +#define CRTC_START_LO 0x0D +#define CRTC_CURSOR_HI 0x0E +#define CRTC_CURSOR_LO 0x0F +#define CRTC_V_SYNC_START 0x10 +#define CRTC_V_SYNC_END 0x11 +#define CRTC_V_DISP_END 0x12 +#define CRTC_OFFSET 0x13 +#define CRTC_UNDERLINE 0x14 +#define CRTC_V_BLANK_START 0x15 +#define CRTC_V_BLANK_END 0x16 +#define CRTC_MODE 0x17 +#define CRTC_LINE_COMPARE 0x18 + +#define ATC_MODE 0x10 +#define ATC_OVERSCAN 0x11 +#define ATC_PLANE_ENABLE 0x12 +#define ATC_PEL 0x13 +#define ATC_COLOR_PAGE 0x14 + +#define SEQ_CLOCK_MODE 0x01 +#define SEQ_PLANE_WRITE 0x02 +#define SEQ_CHARACTER_MAP 0x03 +#define SEQ_MEMORY_MODE 0x04 + +#define GDC_SR_VALUE 0x00 +#define GDC_SR_ENABLE 0x01 +#define GDC_COMPARE_VALUE 0x02 +#define GDC_DATA_ROTATE 0x03 +#define GDC_PLANE_READ 0x04 +#define GDC_MODE 0x05 +#define GDC_MISC 0x06 +#define GDC_COMPARE_MASK 0x07 +#define GDC_BIT_MASK 0x08 + +// text attributes +#define VGA_ATTR_CLR_RED 0x4 +#define VGA_ATTR_CLR_GRN 0x2 +#define VGA_ATTR_CLR_BLU 0x1 +#define VGA_ATTR_CLR_YEL (VGA_ATTR_CLR_RED | VGA_ATTR_CLR_GRN) +#define VGA_ATTR_CLR_CYN (VGA_ATTR_CLR_GRN | VGA_ATTR_CLR_BLU) +#define VGA_ATTR_CLR_MAG (VGA_ATTR_CLR_BLU | VGA_ATTR_CLR_RED) +#define VGA_ATTR_CLR_BLK 0 +#define VGA_ATTR_CLR_WHT (VGA_ATTR_CLR_RED | VGA_ATTR_CLR_GRN | VGA_ATTR_CLR_BLU) +#define VGA_ATTR_BNK 0x80 +#define VGA_ATTR_ITN 0x08 + +/* + * vga register parameters + * these are copied to the + * registers. 
+ * + */ +struct vga_par { + u8 crtc[CRTC_C]; + u8 atc[ATT_C]; + u8 gdc[GRA_C]; + u8 seq[SEQ_C]; + u8 misc; // the misc register, MIS_W + u8 vss; +}; + + +/* Interpretation of offset for color fields: All offsets are from the right, + * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you + * can use the offset as right argument to <<). A pixel afterwards is a bit + * stream and is written to video memory as that unmodified. This implies + * big-endian byte order if bits_per_pixel is greater than 8. + */ +struct fb_bitfield { + __u32 offset; /* beginning of bitfield */ + __u32 length; /* length of bitfield */ + __u32 msb_right; /* != 0 : Most significant bit is */ + /* right */ +}; + +struct screeninfo { + __u32 xres; /* visible resolution */ + __u32 yres; + __u32 xres_virtual; /* virtual resolution */ + __u32 yres_virtual; + __u32 xoffset; /* offset from virtual to visible */ + __u32 yoffset; /* resolution */ + + __u32 bits_per_pixel; /* guess what */ + __u32 grayscale; /* != 0 Graylevels instead of colors */ + + struct fb_bitfield red; /* bitfield in fb mem if true color, */ + struct fb_bitfield green; /* else only length is significant */ + struct fb_bitfield blue; + struct fb_bitfield transp; /* transparency */ + + __u32 nonstd; /* != 0 Non standard pixel format */ + + __u32 activate; /* see FB_ACTIVATE_* */ + + __u32 height; /* height of picture in mm */ + __u32 width; /* width of picture in mm */ + + __u32 accel_flags; /* acceleration flags (hints) */ + + /* Timing: All values in pixclocks, except pixclock (of course) */ + __u32 pixclock; /* pixel clock in ps (pico seconds) */ + __u32 left_margin; /* time from sync to picture */ + __u32 right_margin; /* time from picture to sync */ + __u32 upper_margin; /* time from sync to picture */ + __u32 lower_margin; + __u32 hsync_len; /* length of horizontal sync */ + __u32 vsync_len; /* length of vertical sync */ + __u32 sync; /* sync polarity */ + __u32 vmode; /* interlaced etc */ + __u32 
reserved[6]; /* Reserved for future compatibility */ +}; + +#endif diff --git a/src/arch/x86/interface/pcbios/acpi_timer.c b/src/arch/x86/interface/pcbios/acpi_timer.c new file mode 100644 index 00000000..82e85a03 --- /dev/null +++ b/src/arch/x86/interface/pcbios/acpi_timer.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2018 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * ACPI power management timer + * + */ + +/** ACPI timer frequency (fixed 3.579545MHz) */ +#define ACPI_TIMER_HZ 3579545 + +/** ACPI timer mask + * + * Timers may be implemented as either 24-bit or 32-bit counters. We + * simplify the code by pessimistically assuming that the timer has + * only 24 bits. 
+ */ +#define ACPI_TIMER_MASK 0x00ffffffUL + +/** Power management timer register address */ +static unsigned int pm_tmr; + +struct timer acpi_timer __timer ( TIMER_PREFERRED ); + +/** + * Get current system time in ticks + * + * @ret ticks Current time, in ticks + */ +static unsigned long acpi_currticks ( void ) { + static unsigned long offset; + static uint32_t prev; + uint32_t now; + + /* Read timer and account for wraparound */ + now = ( inl ( pm_tmr ) & ACPI_TIMER_MASK ); + if ( now < prev ) { + offset += ( ( ACPI_TIMER_MASK + 1 ) / + ( ACPI_TIMER_HZ / TICKS_PER_SEC ) ); + } + prev = now; + + /* Convert to timer ticks */ + return ( offset + ( now / ( ACPI_TIMER_HZ / TICKS_PER_SEC ) ) ); +} + +/** + * Delay for a fixed number of microseconds + * + * @v usecs Number of microseconds for which to delay + */ +static void acpi_udelay ( unsigned long usecs ) { + uint32_t start; + uint32_t elapsed; + uint32_t threshold; + + /* Delay until a suitable number of ticks have elapsed. We do + * not need to allow for multiple wraparound, since the + * wraparound period for a 24-bit timer at 3.579545MHz is + * around 4700000us. + */ + start = inl ( pm_tmr ); + threshold = ( ( usecs * ACPI_TIMER_HZ ) / 1000000 ); + do { + elapsed = ( ( inl ( pm_tmr ) - start ) & ACPI_TIMER_MASK ); + } while ( elapsed < threshold ); +} + +/** + * Probe ACPI power management timer + * + * @ret rc Return status code + */ +static int acpi_timer_probe ( void ) { + struct acpi_fadt fadtab; + userptr_t fadt; + unsigned int pm_tmr_blk; + + /* Locate FADT */ + fadt = acpi_find ( FADT_SIGNATURE, 0 ); + if ( ! fadt ) { + DBGC ( &acpi_timer, "ACPI could not find FADT\n" ); + return -ENOENT; + } + + /* Read FADT */ + copy_from_user ( &fadtab, fadt, 0, sizeof ( fadtab ) ); + pm_tmr_blk = le32_to_cpu ( fadtab.pm_tmr_blk ); + if ( ! 
pm_tmr_blk ) { + DBGC ( &acpi_timer, "ACPI has no timer\n" ); + return -ENOENT; + } + + /* Record power management timer register address */ + pm_tmr = ( pm_tmr_blk + ACPI_PM_TMR ); + + return 0; +} + +/** ACPI timer */ +struct timer acpi_timer __timer ( TIMER_PREFERRED ) = { + .name = "acpi", + .probe = acpi_timer_probe, + .currticks = acpi_currticks, + .udelay = acpi_udelay, +}; diff --git a/src/arch/x86/interface/pcbios/acpipwr.c b/src/arch/x86/interface/pcbios/acpipwr.c new file mode 100644 index 00000000..dc164c7d --- /dev/null +++ b/src/arch/x86/interface/pcbios/acpipwr.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * ACPI power off + * + */ + +/** Colour for debug messages */ +#define colour FADT_SIGNATURE + +/** _S5_ signature */ +#define S5_SIGNATURE ACPI_SIGNATURE ( '_', 'S', '5', '_' ) + +/** + * Power off the computer using ACPI + * + * @ret rc Return status code + */ +int acpi_poweroff ( void ) { + struct acpi_fadt fadtab; + userptr_t fadt; + unsigned int pm1a_cnt_blk; + unsigned int pm1b_cnt_blk; + unsigned int pm1a_cnt; + unsigned int pm1b_cnt; + unsigned int slp_typa; + unsigned int slp_typb; + int s5; + int rc; + + /* Locate FADT */ + fadt = acpi_find ( FADT_SIGNATURE, 0 ); + if ( ! fadt ) { + DBGC ( colour, "ACPI could not find FADT\n" ); + return -ENOENT; + } + + /* Read FADT */ + copy_from_user ( &fadtab, fadt, 0, sizeof ( fadtab ) ); + pm1a_cnt_blk = le32_to_cpu ( fadtab.pm1a_cnt_blk ); + pm1b_cnt_blk = le32_to_cpu ( fadtab.pm1b_cnt_blk ); + pm1a_cnt = ( pm1a_cnt_blk + ACPI_PM1_CNT ); + pm1b_cnt = ( pm1b_cnt_blk + ACPI_PM1_CNT ); + + /* Extract \_S5 from DSDT or any SSDT */ + s5 = acpi_sx ( S5_SIGNATURE ); + if ( s5 < 0 ) { + rc = s5; + DBGC ( colour, "ACPI could not extract \\_S5: %s\n", + strerror ( rc ) ); + return rc; + } + + /* Power off system */ + if ( pm1a_cnt_blk ) { + slp_typa = ( ( s5 >> 0 ) & 0xff ); + DBGC ( colour, "ACPI PM1a sleep type %#x => %04x\n", + slp_typa, pm1a_cnt ); + outw ( ( ACPI_PM1_CNT_SLP_TYP ( slp_typa ) | + ACPI_PM1_CNT_SLP_EN ), pm1a_cnt ); + } + if ( pm1b_cnt_blk ) { + slp_typb = ( ( s5 >> 8 ) & 0xff ); + DBGC ( colour, "ACPI PM1b sleep type %#x => %04x\n", + slp_typb, pm1b_cnt ); + outw ( ( ACPI_PM1_CNT_SLP_TYP ( slp_typb ) | + ACPI_PM1_CNT_SLP_EN ), pm1b_cnt ); + } + + /* On some systems, execution will continue briefly. Delay to + * avoid potentially confusing log messages. 
+ */ + mdelay ( 1000 ); + + DBGC ( colour, "ACPI power off failed\n" ); + return -EPROTO; +} diff --git a/src/arch/x86/interface/pcbios/apm.c b/src/arch/x86/interface/pcbios/apm.c new file mode 100644 index 00000000..680dbb16 --- /dev/null +++ b/src/arch/x86/interface/pcbios/apm.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2013 Marin Hannache . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Advanced Power Management + * + */ + +#include +#include +#include + +/** + * Power off the computer using APM + * + * @ret rc Return status code + */ +int apm_poweroff ( void ) { + uint16_t apm_version; + uint16_t apm_signature; + uint16_t apm_flags; + uint16_t carry; + + /* APM check */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x15\n\t" + "adc %%edx,0\n\t" ) + : "=a" ( apm_version ), "=b" ( apm_signature ), + "=c" ( apm_flags ), "=d" ( carry ) + : "a" ( 0x5300 ), "b" ( 0x0000 ), + "d" ( 0x0000 ) ); + if ( carry ) { + DBG ( "APM not present\n" ); + return -ENOTSUP; + } + if ( apm_signature != 0x504d ) { /* signature 'PM' */ + DBG ( "APM not present\n" ); + return -ENOTSUP; + } + if ( apm_version < 0x0101 ) { /* Need version 1.1+ */ + DBG ( "APM 1.1+ not supported\n" ); + return -ENOTSUP; + } + if ( ( apm_flags & 0x8 ) == 0x8 ) { + DBG ( "APM power management disabled\n" ); + return -EPERM; + } + DBG2 ( "APM check completed\n" ); + + /* APM initialisation */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x15\n\t" + "adc %%edx,0\n\t" ) + : "=d" ( carry ) + : "a" ( 0x5301 ), "b" ( 0x0000 ), + "d" ( 0x0000 ) ); + if ( carry ) { + DBG ( "APM initialisation failed\n" ); + return -EIO; + } + DBG2 ( "APM initialisation completed\n" ); + + /* Set APM driver version */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x15\n\t" + "adc %%edx,0\n\t" ) + : "=d" ( carry ) + : "a" ( 0x530e ), "b" ( 0x0000 ), + "c" ( 0x0101 ), "d" ( 0x0000 ) ); + if ( carry ) { + DBG ( "APM setting driver version failed\n" ); + return -EIO; + } + DBG2 ( "APM driver version set\n" ); + + /* Setting power state to off */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x15\n\t" + "adc %%edx,0\n\t" ) + : "=d" ( carry ) + : "a" ( 0x5307 ), "b" ( 0x0001 ), + "c" ( 0x0003 ), "d" ( 0x0000) ); + if ( carry ) { + DBG ( "APM setting power state failed\n" ); + return -ENOTTY; + } + + /* Should never happen */ + return -ECANCELED; +} diff --git 
a/src/arch/x86/interface/pcbios/basemem.c b/src/arch/x86/interface/pcbios/basemem.c new file mode 100644 index 00000000..6a46081a --- /dev/null +++ b/src/arch/x86/interface/pcbios/basemem.c @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** @file + * + * Base memory allocation + * + */ + +/** + * Set the BIOS free base memory counter + * + * @v new_fbms New free base memory counter (in kB) + */ +void set_fbms ( unsigned int new_fbms ) { + uint16_t fbms = new_fbms; + + /* Update the BIOS memory counter */ + put_real ( fbms, BDA_SEG, BDA_FBMS ); + + /* Update our hidden memory region map */ + hide_basemem(); +} diff --git a/src/arch/x86/interface/pcbios/bios_console.c b/src/arch/x86/interface/pcbios/bios_console.c new file mode 100644 index 00000000..52a02fba --- /dev/null +++ b/src/arch/x86/interface/pcbios/bios_console.c @@ -0,0 +1,553 @@ +/* + * Copyright (C) 2006 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ATTR_BOLD 0x08 + +#define ATTR_FCOL_MASK 0x07 +#define ATTR_FCOL_BLACK 0x00 +#define ATTR_FCOL_BLUE 0x01 +#define ATTR_FCOL_GREEN 0x02 +#define ATTR_FCOL_CYAN 0x03 +#define ATTR_FCOL_RED 0x04 +#define ATTR_FCOL_MAGENTA 0x05 +#define ATTR_FCOL_YELLOW 0x06 +#define ATTR_FCOL_WHITE 0x07 + +#define ATTR_BLINK 0x80 + +#define ATTR_BCOL_MASK 0x70 +#define ATTR_BCOL_BLACK 0x00 +#define ATTR_BCOL_BLUE 0x10 +#define ATTR_BCOL_GREEN 0x20 +#define ATTR_BCOL_CYAN 0x30 +#define ATTR_BCOL_RED 0x40 +#define ATTR_BCOL_MAGENTA 0x50 +#define ATTR_BCOL_YELLOW 0x60 +#define ATTR_BCOL_WHITE 0x70 + +#define ATTR_DEFAULT ATTR_FCOL_WHITE + +/* Set default console usage if applicable */ +#if ! 
( defined ( CONSOLE_PCBIOS ) && CONSOLE_EXPLICIT ( CONSOLE_PCBIOS ) ) +#undef CONSOLE_PCBIOS +#define CONSOLE_PCBIOS ( CONSOLE_USAGE_ALL & ~CONSOLE_USAGE_LOG ) +#endif + +/** Current character attribute */ +static unsigned int bios_attr = ATTR_DEFAULT; + +/** Keypress injection lock */ +static uint8_t __text16 ( bios_inject_lock ); +#define bios_inject_lock __use_text16 ( bios_inject_lock ) + +/** Vector for chaining to other INT 16 handlers */ +static struct segoff __text16 ( int16_vector ); +#define int16_vector __use_text16 ( int16_vector ) + +/** Assembly wrapper */ +extern void int16_wrapper ( void ); + +/** + * Handle ANSI CUP (cursor position) + * + * @v ctx ANSI escape sequence context + * @v count Parameter count + * @v params[0] Row (1 is top) + * @v params[1] Column (1 is left) + */ +static void bios_handle_cup ( struct ansiesc_context *ctx __unused, + unsigned int count __unused, int params[] ) { + int cx = ( params[1] - 1 ); + int cy = ( params[0] - 1 ); + + if ( cx < 0 ) + cx = 0; + if ( cy < 0 ) + cy = 0; + + __asm__ __volatile__ ( REAL_CODE ( "int $0x10\n\t" ) + : : "a" ( 0x0200 ), "b" ( 1 ), + "d" ( ( cy << 8 ) | cx ) ); +} + +/** + * Handle ANSI ED (erase in page) + * + * @v ctx ANSI escape sequence context + * @v count Parameter count + * @v params[0] Region to erase + */ +static void bios_handle_ed ( struct ansiesc_context *ctx __unused, + unsigned int count __unused, + int params[] __unused ) { + /* We assume that we always clear the whole screen */ + assert ( params[0] == ANSIESC_ED_ALL ); + + __asm__ __volatile__ ( REAL_CODE ( "int $0x10\n\t" ) + : : "a" ( 0x0600 ), "b" ( bios_attr << 8 ), + "c" ( 0 ), + "d" ( ( ( console_height - 1 ) << 8 ) | + ( console_width - 1 ) ) ); +} + +/** + * Handle ANSI SGR (set graphics rendition) + * + * @v ctx ANSI escape sequence context + * @v count Parameter count + * @v params List of graphic rendition aspects + */ +static void bios_handle_sgr ( struct ansiesc_context *ctx __unused, + unsigned int count, int 
params[] ) { + static const uint8_t bios_attr_fcols[10] = { + ATTR_FCOL_BLACK, ATTR_FCOL_RED, ATTR_FCOL_GREEN, + ATTR_FCOL_YELLOW, ATTR_FCOL_BLUE, ATTR_FCOL_MAGENTA, + ATTR_FCOL_CYAN, ATTR_FCOL_WHITE, + ATTR_FCOL_WHITE, ATTR_FCOL_WHITE /* defaults */ + }; + static const uint8_t bios_attr_bcols[10] = { + ATTR_BCOL_BLACK, ATTR_BCOL_RED, ATTR_BCOL_GREEN, + ATTR_BCOL_YELLOW, ATTR_BCOL_BLUE, ATTR_BCOL_MAGENTA, + ATTR_BCOL_CYAN, ATTR_BCOL_WHITE, + ATTR_BCOL_BLACK, ATTR_BCOL_BLACK /* defaults */ + }; + unsigned int i; + int aspect; + + for ( i = 0 ; i < count ; i++ ) { + aspect = params[i]; + if ( aspect == 0 ) { + bios_attr = ATTR_DEFAULT; + } else if ( aspect == 1 ) { + bios_attr |= ATTR_BOLD; + } else if ( aspect == 5 ) { + bios_attr |= ATTR_BLINK; + } else if ( aspect == 22 ) { + bios_attr &= ~ATTR_BOLD; + } else if ( aspect == 25 ) { + bios_attr &= ~ATTR_BLINK; + } else if ( ( aspect >= 30 ) && ( aspect <= 39 ) ) { + bios_attr &= ~ATTR_FCOL_MASK; + bios_attr |= bios_attr_fcols[ aspect - 30 ]; + } else if ( ( aspect >= 40 ) && ( aspect <= 49 ) ) { + bios_attr &= ~ATTR_BCOL_MASK; + bios_attr |= bios_attr_bcols[ aspect - 40 ]; + } + } +} + +/** + * Handle ANSI DECTCEM set (show cursor) + * + * @v ctx ANSI escape sequence context + * @v count Parameter count + * @v params List of graphic rendition aspects + */ +static void bios_handle_dectcem_set ( struct ansiesc_context *ctx __unused, + unsigned int count __unused, + int params[] __unused ) { + uint8_t height; + + /* Get character height */ + get_real ( height, BDA_SEG, BDA_CHAR_HEIGHT ); + + __asm__ __volatile__ ( REAL_CODE ( "int $0x10\n\t" ) + : : "a" ( 0x0100 ), + "c" ( ( ( height - 2 ) << 8 ) | + ( height - 1 ) ) ); +} + +/** + * Handle ANSI DECTCEM reset (hide cursor) + * + * @v ctx ANSI escape sequence context + * @v count Parameter count + * @v params List of graphic rendition aspects + */ +static void bios_handle_dectcem_reset ( struct ansiesc_context *ctx __unused, + unsigned int count __unused, + int params[] 
__unused ) { + + __asm__ __volatile__ ( REAL_CODE ( "int $0x10\n\t" ) + : : "a" ( 0x0100 ), "c" ( 0x2000 ) ); +} + +/** BIOS console ANSI escape sequence handlers */ +static struct ansiesc_handler bios_ansiesc_handlers[] = { + { ANSIESC_CUP, bios_handle_cup }, + { ANSIESC_ED, bios_handle_ed }, + { ANSIESC_SGR, bios_handle_sgr }, + { ANSIESC_DECTCEM_SET, bios_handle_dectcem_set }, + { ANSIESC_DECTCEM_RESET, bios_handle_dectcem_reset }, + { 0, NULL } +}; + +/** BIOS console ANSI escape sequence context */ +static struct ansiesc_context bios_ansiesc_ctx = { + .handlers = bios_ansiesc_handlers, +}; + +/** + * Print a character to BIOS console + * + * @v character Character to be printed + */ +static void bios_putchar ( int character ) { + int discard_a, discard_b, discard_c; + + /* Intercept ANSI escape sequences */ + character = ansiesc_process ( &bios_ansiesc_ctx, character ); + if ( character < 0 ) + return; + + /* Print character with attribute */ + __asm__ __volatile__ ( REAL_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + /* Skip non-printable characters */ + "cmpb $0x20, %%al\n\t" + "jb 1f\n\t" + /* Read attribute */ + "movb %%al, %%cl\n\t" + "movb $0x08, %%ah\n\t" + "int $0x10\n\t" + "xchgb %%al, %%cl\n\t" + /* Skip if attribute matches */ + "cmpb %%ah, %%bl\n\t" + "je 1f\n\t" + /* Set attribute */ + "movw $0x0001, %%cx\n\t" + "movb $0x09, %%ah\n\t" + "int $0x10\n\t" + "\n1:\n\t" + /* Print character */ + "xorw %%bx, %%bx\n\t" + "movb $0x0e, %%ah\n\t" + "int $0x10\n\t" + "popl %%ebp\n\t" /* gcc bug */ ) + : "=a" ( discard_a ), "=b" ( discard_b ), + "=c" ( discard_c ) + : "a" ( character ), "b" ( bios_attr ) ); +} + +/** + * Pointer to current ANSI output sequence + * + * While we are in the middle of returning an ANSI sequence for a + * special key, this will point to the next character to return. When + * not in the middle of such a sequence, this will point to a NUL + * (note: not "will be NULL"). 
+ */ +static const char *bios_ansi_input = ""; + +/** A BIOS key */ +struct bios_key { + /** Scancode */ + uint8_t scancode; + /** Key code */ + uint16_t key; +} __attribute__ (( packed )); + +/** Mapping from BIOS scan codes to iPXE key codes */ +static const struct bios_key bios_keys[] = { + { 0x53, KEY_DC }, + { 0x48, KEY_UP }, + { 0x50, KEY_DOWN }, + { 0x4b, KEY_LEFT }, + { 0x4d, KEY_RIGHT }, + { 0x47, KEY_HOME }, + { 0x4f, KEY_END }, + { 0x49, KEY_PPAGE }, + { 0x51, KEY_NPAGE }, + { 0x3f, KEY_F5 }, + { 0x40, KEY_F6 }, + { 0x41, KEY_F7 }, + { 0x42, KEY_F8 }, + { 0x43, KEY_F9 }, + { 0x44, KEY_F10 }, + { 0x85, KEY_F11 }, + { 0x86, KEY_F12 }, +}; + +/** + * Get ANSI escape sequence corresponding to BIOS scancode + * + * @v scancode BIOS scancode + * @ret ansi_seq ANSI escape sequence, if any, otherwise NULL + */ +static const char * bios_ansi_seq ( unsigned int scancode ) { + static char buf[ 5 /* "[" + two digits + terminator + NUL */ ]; + unsigned int key; + unsigned int terminator; + unsigned int n; + unsigned int i; + char *tmp = buf; + + /* Construct ANSI escape sequence for scancode, if known */ + for ( i = 0 ; i < ( sizeof ( bios_keys ) / + sizeof ( bios_keys[0] ) ) ; i++ ) { + + /* Look for matching scancode */ + if ( bios_keys[i].scancode != scancode ) + continue; + + /* Construct escape sequence */ + key = bios_keys[i].key; + n = KEY_ANSI_N ( key ); + terminator = KEY_ANSI_TERMINATOR ( key ); + *(tmp++) = '['; + if ( n ) + tmp += sprintf ( tmp, "%d", n ); + *(tmp++) = terminator; + *(tmp++) = '\0'; + assert ( tmp <= &buf[ sizeof ( buf ) ] ); + return buf; + } + + DBG ( "Unrecognised BIOS scancode %02x\n", scancode ); + return NULL; +} + +/** + * Map a key + * + * @v character Character read from console + * @ret character Mapped character + */ +static int bios_keymap ( unsigned int character ) { + struct key_mapping *mapping; + + for_each_table_entry ( mapping, KEYMAP ) { + if ( mapping->from == character ) + return mapping->to; + } + return character; 
+} + +/** + * Get character from BIOS console + * + * @ret character Character read from console + */ +static int bios_getchar ( void ) { + uint16_t keypress; + unsigned int character; + const char *ansi_seq; + + /* If we are mid-sequence, pass out the next byte */ + if ( ( character = *bios_ansi_input ) ) { + bios_ansi_input++; + return character; + } + + /* Do nothing if injection is in progress */ + if ( bios_inject_lock ) + return 0; + + /* Read character from real BIOS console */ + bios_inject_lock++; + __asm__ __volatile__ ( REAL_CODE ( "sti\n\t" + "int $0x16\n\t" + "cli\n\t" ) + : "=a" ( keypress ) + : "a" ( 0x1000 ), "m" ( bios_inject_lock ) ); + bios_inject_lock--; + character = ( keypress & 0xff ); + + /* If it's a normal character, just map and return it */ + if ( character && ( character < 0x80 ) ) + return bios_keymap ( character ); + + /* Otherwise, check for a special key that we know about */ + if ( ( ansi_seq = bios_ansi_seq ( keypress >> 8 ) ) ) { + /* Start of escape sequence: return ESC (0x1b) */ + bios_ansi_input = ansi_seq; + return 0x1b; + } + + return 0; +} + +/** + * Check for character ready to read from BIOS console + * + * @ret True Character available to read + * @ret False No character available to read + */ +static int bios_iskey ( void ) { + unsigned int discard_a; + unsigned int flags; + + /* If we are mid-sequence, we are always ready */ + if ( *bios_ansi_input ) + return 1; + + /* Do nothing if injection is in progress */ + if ( bios_inject_lock ) + return 0; + + /* Otherwise check the real BIOS console */ + bios_inject_lock++; + __asm__ __volatile__ ( REAL_CODE ( "sti\n\t" + "int $0x16\n\t" + "pushfw\n\t" + "popw %w0\n\t" + "cli\n\t" ) + : "=R" ( flags ), "=a" ( discard_a ) + : "a" ( 0x1100 ), "m" ( bios_inject_lock ) ); + bios_inject_lock--; + return ( ! 
( flags & ZF ) ); +} + +/** BIOS console */ +struct console_driver bios_console __console_driver = { + .putchar = bios_putchar, + .getchar = bios_getchar, + .iskey = bios_iskey, + .usage = CONSOLE_PCBIOS, +}; + +/** + * Inject keypresses + * + * @v ix86 Registers as passed to INT 16 + */ +static __asmcall void bios_inject ( struct i386_all_regs *ix86 ) { + unsigned int discard_a; + unsigned int scancode; + unsigned int i; + uint16_t keypress; + int key; + + /* If this is a blocking call, then loop until the + * non-blocking variant of the call indicates that a keypress + * is available. Do this without acquiring the injection + * lock, so that injection may take place. + */ + if ( ( ix86->regs.ah & ~0x10 ) == 0x00 ) { + __asm__ __volatile__ ( REAL_CODE ( "sti\n\t" + "\n1:\n\t" + "pushw %%ax\n\t" + "int $0x16\n\t" + "popw %%ax\n\t" + "jc 2f\n\t" + "jz 1b\n\t" + "\n2:\n\t" + "cli\n\t" ) + : "=a" ( discard_a ) + : "a" ( ix86->regs.eax | 0x0100 ), + "m" ( bios_inject_lock ) ); + } + + /* Acquire injection lock */ + bios_inject_lock++; + + /* Check for keypresses */ + if ( iskey() ) { + + /* Get key */ + key = getkey ( 0 ); + + /* Reverse internal CR->LF mapping */ + if ( key == '\n' ) + key = '\r'; + + /* Convert to keypress */ + keypress = ( ( key << 8 ) | key ); + + /* Handle special keys */ + if ( key >= KEY_MIN ) { + for ( i = 0 ; i < ( sizeof ( bios_keys ) / + sizeof ( bios_keys[0] ) ) ; i++ ) { + if ( bios_keys[i].key == key ) { + scancode = bios_keys[i].scancode; + keypress = ( scancode << 8 ); + break; + } + } + } + + /* Inject keypress */ + DBGC ( &bios_console, "BIOS injecting keypress %04x\n", + keypress ); + __asm__ __volatile__ ( REAL_CODE ( "int $0x16\n\t" ) + : "=a" ( discard_a ) + : "a" ( 0x0500 ), "c" ( keypress ), + "m" ( bios_inject_lock ) ); + } + + /* Release injection lock */ + bios_inject_lock--; +} + +/** + * Start up keypress injection + * + */ +static void bios_inject_startup ( void ) { + + /* Assembly wrapper to call bios_inject() */ + 
__asm__ __volatile__ ( + TEXT16_CODE ( "\nint16_wrapper:\n\t" + "pushfw\n\t" + "cmpb $0, %%cs:bios_inject_lock\n\t" + "jnz 1f\n\t" + VIRT_CALL ( bios_inject ) + "\n1:\n\t" + "popfw\n\t" + "ljmp *%%cs:int16_vector\n\t" ) : ); + + /* Hook INT 16 */ + hook_bios_interrupt ( 0x16, ( ( intptr_t ) int16_wrapper ), + &int16_vector ); +} + +/** + * Shut down keypress injection + * + * @v booting System is shutting down for OS boot + */ +static void bios_inject_shutdown ( int booting __unused ) { + + /* Unhook INT 16 */ + unhook_bios_interrupt ( 0x16, ( ( intptr_t ) int16_wrapper ), + &int16_vector ); +} + +/** Keypress injection startup function */ +struct startup_fn bios_inject_startup_fn __startup_fn ( STARTUP_NORMAL ) = { + .name = "bios_inject", + .startup = bios_inject_startup, + .shutdown = bios_inject_shutdown, +}; diff --git a/src/arch/x86/interface/pcbios/bios_nap.c b/src/arch/x86/interface/pcbios/bios_nap.c new file mode 100644 index 00000000..f1ba8297 --- /dev/null +++ b/src/arch/x86/interface/pcbios/bios_nap.c @@ -0,0 +1,16 @@ +#include +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Save power by halting the CPU until the next interrupt + * + */ +static void bios_cpu_nap ( void ) { + __asm__ __volatile__ ( "sti\n\t" + "hlt\n\t" + "cli\n\t" ); +} + +PROVIDE_NAP ( pcbios, cpu_nap, bios_cpu_nap ); diff --git a/src/arch/x86/interface/pcbios/bios_reboot.c b/src/arch/x86/interface/pcbios/bios_reboot.c new file mode 100644 index 00000000..071173f1 --- /dev/null +++ b/src/arch/x86/interface/pcbios/bios_reboot.c @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2010 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Standard PC-BIOS reboot mechanism + * + */ + +#include +#include +#include +#include +#include + +/** + * Reboot system + * + * @v warm Perform a warm reboot + */ +static void bios_reboot ( int warm ) { + uint16_t flag; + + /* Configure BIOS for cold/warm reboot */ + flag = ( warm ? BDA_REBOOT_WARM : 0 ); + put_real ( flag, BDA_SEG, BDA_REBOOT ); + + /* Jump to system reset vector */ + __asm__ __volatile__ ( REAL_CODE ( "ljmp $0xf000, $0xfff0" ) : ); +} + +/** + * Power off system + * + * @ret rc Return status code + */ +static int bios_poweroff ( void ) { + int rc; + + /* Try APM */ + if ( ( rc = apm_poweroff() ) != 0 ) + DBG ( "APM power off failed: %s\n", strerror ( rc ) ); + + /* Try ACPI */ + if ( ( rc = acpi_poweroff() ) != 0 ) + DBG ( "ACPI power off failed: %s\n", strerror ( rc ) ); + + return rc; +} + +PROVIDE_REBOOT ( pcbios, reboot, bios_reboot ); +PROVIDE_REBOOT ( pcbios, poweroff, bios_poweroff ); diff --git a/src/arch/x86/interface/pcbios/bios_smbios.c b/src/arch/x86/interface/pcbios/bios_smbios.c new file mode 100644 index 00000000..a8c0fc32 --- /dev/null +++ b/src/arch/x86/interface/pcbios/bios_smbios.c @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2007 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * System Management BIOS + * + */ + +/** + * Find SMBIOS + * + * @v smbios SMBIOS entry point descriptor structure to fill in + * @ret rc Return status code + */ +static int bios_find_smbios ( struct smbios *smbios ) { + struct smbios_entry entry; + int rc; + + /* Scan through BIOS segment to find SMBIOS entry point */ + if ( ( rc = find_smbios_entry ( real_to_user ( BIOS_SEG, 0 ), 0x10000, + &entry ) ) != 0 ) + return rc; + + /* Fill in entry point descriptor structure */ + smbios->address = phys_to_user ( entry.smbios_address ); + smbios->len = entry.smbios_len; + smbios->count = entry.smbios_count; + smbios->version = SMBIOS_VERSION ( entry.major, entry.minor ); + + return 0; +} + +PROVIDE_SMBIOS ( pcbios, find_smbios, bios_find_smbios ); diff --git a/src/arch/x86/interface/pcbios/bios_timer.c b/src/arch/x86/interface/pcbios/bios_timer.c new file mode 100644 index 00000000..49e1d226 --- /dev/null +++ 
b/src/arch/x86/interface/pcbios/bios_timer.c @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2008 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * BIOS timer + * + */ + +#include +#include +#include +#include + +/** Number of ticks per day + * + * This seems to be the normative value, as used by e.g. SeaBIOS to + * decide when to set the midnight rollover flag. + */ +#define BIOS_TICKS_PER_DAY 0x1800b0 + +/** Number of ticks per BIOS tick */ +#define TICKS_PER_BIOS_TICK \ + ( ( TICKS_PER_SEC * 60 * 60 * 24 ) / BIOS_TICKS_PER_DAY ) + +/** + * Get current system time in ticks + * + * @ret ticks Current time, in ticks + * + * Use direct memory access to BIOS variables, longword 0040:006C + * (ticks today) and byte 0040:0070 (midnight crossover flag) instead + * of calling timeofday BIOS interrupt. 
+ */ +static unsigned long bios_currticks ( void ) { + static uint32_t offset; + uint32_t ticks; + uint8_t midnight; + + /* Re-enable interrupts so that the timer interrupt can occur */ + __asm__ __volatile__ ( "sti\n\t" + "nop\n\t" + "nop\n\t" + "cli\n\t" ); + + /* Read current BIOS time of day */ + get_real ( ticks, BDA_SEG, BDA_TICKS ); + get_real ( midnight, BDA_SEG, BDA_MIDNIGHT ); + + /* Handle midnight rollover */ + if ( midnight ) { + midnight = 0; + put_real ( midnight, BDA_SEG, BDA_MIDNIGHT ); + offset += BIOS_TICKS_PER_DAY; + } + ticks += offset; + + /* Convert to timer ticks */ + return ( ticks * TICKS_PER_BIOS_TICK ); +} + +/** BIOS timer */ +struct timer bios_timer __timer ( TIMER_NORMAL ) = { + .name = "bios", + .currticks = bios_currticks, + .udelay = pit8254_udelay, +}; diff --git a/src/arch/x86/interface/pcbios/biosint.c b/src/arch/x86/interface/pcbios/biosint.c new file mode 100644 index 00000000..667e9ed8 --- /dev/null +++ b/src/arch/x86/interface/pcbios/biosint.c @@ -0,0 +1,119 @@ +#include +#include +#include + +/** + * @file BIOS interrupts + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Hook INT vector + * + * @v interrupt INT number + * @v handler Offset within .text16 to interrupt handler + * @v chain_vector Vector for chaining to previous handler + * + * Hooks in an i386 INT handler. The handler itself must reside + * within the .text16 segment. @c chain_vector will be filled in with + * the address of the previously-installed handler for this interrupt; + * the handler should probably exit by ljmping via this vector. 
+ */ +void hook_bios_interrupt ( unsigned int interrupt, unsigned int handler, + struct segoff *chain_vector ) { + struct segoff vector = { + .segment = rm_cs, + .offset = handler, + }; + + DBG ( "Hooking INT %#02x to %04x:%04x\n", + interrupt, rm_cs, handler ); + + if ( ( chain_vector->segment != 0 ) || + ( chain_vector->offset != 0 ) ) { + /* Already hooked; do nothing */ + DBG ( "...already hooked\n" ); + return; + } + + copy_from_real ( chain_vector, 0, ( interrupt * 4 ), + sizeof ( *chain_vector ) ); + DBG ( "...chaining to %04x:%04x\n", + chain_vector->segment, chain_vector->offset ); + if ( DBG_LOG ) { + char code[64]; + copy_from_real ( code, chain_vector->segment, + chain_vector->offset, sizeof ( code ) ); + DBG_HDA ( *chain_vector, code, sizeof ( code ) ); + } + + copy_to_real ( 0, ( interrupt * 4 ), &vector, sizeof ( vector ) ); + hooked_bios_interrupts++; +} + +/** + * Unhook INT vector + * + * @v interrupt INT number + * @v handler Offset within .text16 to interrupt handler + * @v chain_vector Vector containing address of previous handler + * + * Unhooks an i386 interrupt handler hooked by hook_i386_vector(). + * Note that this operation may fail, if some external code has hooked + * the vector since we hooked in our handler. If it fails, it means + * that it is not possible to unhook our handler, and we must leave it + * (and its chaining vector) resident in memory. 
+ */ +int unhook_bios_interrupt ( unsigned int interrupt, unsigned int handler, + struct segoff *chain_vector ) { + struct segoff vector; + + DBG ( "Unhooking INT %#02x from %04x:%04x\n", + interrupt, rm_cs, handler ); + + copy_from_real ( &vector, 0, ( interrupt * 4 ), sizeof ( vector ) ); + if ( ( vector.segment != rm_cs ) || ( vector.offset != handler ) ) { + DBG ( "...cannot unhook; vector points to %04x:%04x\n", + vector.segment, vector.offset ); + return -EBUSY; + } + + DBG ( "...restoring to %04x:%04x\n", + chain_vector->segment, chain_vector->offset ); + copy_to_real ( 0, ( interrupt * 4 ), chain_vector, + sizeof ( *chain_vector ) ); + + chain_vector->segment = 0; + chain_vector->offset = 0; + hooked_bios_interrupts--; + return 0; +} + +/** + * Dump changes to interrupt vector table (for debugging) + * + */ +void check_bios_interrupts ( void ) { + static struct segoff vectors[256]; + static uint8_t initialised; + struct segoff vector; + unsigned int i; + + /* Print any changed interrupt vectors */ + for ( i = 0; i < ( sizeof ( vectors ) / sizeof ( vectors[0] ) ); i++ ) { + copy_from_real ( &vector, 0, ( i * sizeof ( vector ) ), + sizeof ( vector ) ); + if ( memcmp ( &vector, &vectors[i], sizeof ( vector ) ) == 0 ) + continue; + if ( initialised ) { + dbg_printf ( "INT %02x changed %04x:%04x => " + "%04x:%04x\n", i, vectors[i].segment, + vectors[i].offset, vector.segment, + vector.offset ); + } + memcpy ( &vectors[i], &vector, sizeof ( vectors[i] ) ); + } + initialised = 1; +} diff --git a/src/arch/x86/interface/pcbios/e820mangler.S b/src/arch/x86/interface/pcbios/e820mangler.S new file mode 100644 index 00000000..d5d97b48 --- /dev/null +++ b/src/arch/x86/interface/pcbios/e820mangler.S @@ -0,0 +1,589 @@ +/* + * Copyright (C) 2006 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .arch i386 + .code16 + +#define SMAP 0x534d4150 + +/* Most documentation refers to the E820 buffer as being 20 bytes, and + * the API makes it perfectly legitimate to pass only a 20-byte buffer + * and expect to get valid data. However, some morons at ACPI decided + * to extend the data structure by adding an extra "extended + * attributes" field and by including critical information within this + * field, such as whether or not the region is enabled. A caller who + * passes in only a 20-byte buffer therefore risks getting very, very + * misleading information. + * + * I have personally witnessed an HP BIOS that returns a value of + * 0x0009 in the extended attributes field. If we don't pass this + * value through to the caller, 32-bit WinPE will die, usually with a + * PAGE_FAULT_IN_NONPAGED_AREA blue screen of death. + * + * Allow a ridiculously large maximum value (64 bytes) for the E820 + * buffer as a guard against insufficiently creative idiots in the + * future. 
+ */ +#define E820MAXSIZE 64 + +/**************************************************************************** + * + * Allowed memory windows + * + * There are two ways to view this list. The first is as a list of + * (non-overlapping) allowed memory regions, sorted by increasing + * address. The second is as a list of (non-overlapping) hidden + * memory regions, again sorted by increasing address. The second + * view is offset by half an entry from the first: think about this + * for a moment and it should make sense. + * + * xxx_memory_window is used to indicate an "allowed region" + * structure, hidden_xxx_memory is used to indicate a "hidden region" + * structure. Each structure is 16 bytes in length. + * + **************************************************************************** + */ + .section ".data16", "aw", @progbits + .align 16 + .globl hidemem_base + .globl hidemem_umalloc + .globl hidemem_textdata +memory_windows: +base_memory_window: .long 0x00000000, 0x00000000 /* Start of memory */ + +hidemem_base: .long 0x000a0000, 0x00000000 /* Changes at runtime */ +ext_memory_window: .long 0x000a0000, 0x00000000 /* 640kB mark */ + +hidemem_umalloc: .long 0xffffffff, 0xffffffff /* Changes at runtime */ + .long 0xffffffff, 0xffffffff /* Changes at runtime */ + +hidemem_textdata: .long 0xffffffff, 0xffffffff /* Changes at runtime */ + .long 0xffffffff, 0xffffffff /* Changes at runtime */ + + .long 0xffffffff, 0xffffffff /* End of memory */ +memory_windows_end: + +/**************************************************************************** + * Truncate region to memory window + * + * Parameters: + * %edx:%eax Start of region + * %ecx:%ebx Length of region + * %si Memory window + * Returns: + * %edx:%eax Start of windowed region + * %ecx:%ebx Length of windowed region + **************************************************************************** + */ + .section ".text16", "ax", @progbits +window_region: + /* Convert (start,len) to (start, end) */ + addl %eax, %ebx 
+ adcl %edx, %ecx + /* Truncate to window start */ + cmpl 4(%si), %edx + jne 1f + cmpl 0(%si), %eax +1: jae 2f + movl 4(%si), %edx + movl 0(%si), %eax +2: /* Truncate to window end */ + cmpl 12(%si), %ecx + jne 1f + cmpl 8(%si), %ebx +1: jbe 2f + movl 12(%si), %ecx + movl 8(%si), %ebx +2: /* Convert (start, end) back to (start, len) */ + subl %eax, %ebx + sbbl %edx, %ecx + /* If length is <0, set length to 0 */ + jae 1f + xorl %ebx, %ebx + xorl %ecx, %ecx + ret + .size window_region, . - window_region + +/**************************************************************************** + * Patch "memory above 1MB" figure + * + * Parameters: + * %ax Memory above 1MB, in 1kB blocks + * Returns: + * %ax Modified memory above 1M in 1kB blocks + **************************************************************************** + */ + .section ".text16", "ax", @progbits +patch_1m: + pushal + /* Convert to (start,len) format and call truncate */ + xorl %ecx, %ecx + movzwl %ax, %ebx + shll $10, %ebx + xorl %edx, %edx + movl $0x100000, %eax + movw $ext_memory_window, %si + call window_region + /* Convert back to "memory above 1MB" format and return via %ax */ + pushfw + shrl $10, %ebx + popfw + movw %sp, %bp + movw %bx, 28(%bp) + popal + ret + .size patch_1m, . 
- patch_1m + +/**************************************************************************** + * Patch "memory above 16MB" figure + * + * Parameters: + * %bx Memory above 16MB, in 64kB blocks + * Returns: + * %bx Modified memory above 16M in 64kB blocks + **************************************************************************** + */ + .section ".text16", "ax", @progbits +patch_16m: + pushal + /* Convert to (start,len) format and call truncate */ + xorl %ecx, %ecx + shll $16, %ebx + xorl %edx, %edx + movl $0x1000000, %eax + movw $ext_memory_window, %si + call window_region + /* Convert back to "memory above 16MB" format and return via %bx */ + pushfw + shrl $16, %ebx + popfw + movw %sp, %bp + movw %bx, 16(%bp) + popal + ret + .size patch_16m, . - patch_16m + +/**************************************************************************** + * Patch "memory between 1MB and 16MB" and "memory above 16MB" figures + * + * Parameters: + * %ax Memory between 1MB and 16MB, in 1kB blocks + * %bx Memory above 16MB, in 64kB blocks + * Returns: + * %ax Modified memory between 1MB and 16MB, in 1kB blocks + * %bx Modified memory above 16MB, in 64kB blocks + **************************************************************************** + */ + .section ".text16", "ax", @progbits +patch_1m_16m: + call patch_1m + call patch_16m + /* If 1M region is no longer full-length, kill off the 16M region */ + cmpw $( 15 * 1024 ), %ax + je 1f + xorw %bx, %bx +1: ret + .size patch_1m_16m, . - patch_1m_16m + +/**************************************************************************** + * Get underlying e820 memory region to underlying_e820 buffer + * + * Parameters: + * As for INT 15,e820 + * Returns: + * As for INT 15,e820 + * + * Wraps the underlying INT 15,e820 call so that the continuation + * value (%ebx) is a 16-bit simple sequence counter (with the high 16 + * bits ignored), and termination is always via CF=1 rather than + * %ebx=0. 
+ * + **************************************************************************** + */ + .section ".text16", "ax", @progbits +get_underlying_e820: + + /* If the requested region is in the cache, return it */ + cmpw %bx, underlying_e820_index + jne 2f + pushw %di + pushw %si + movw $underlying_e820_cache, %si + cmpl underlying_e820_cache_size, %ecx + jbe 1f + movl underlying_e820_cache_size, %ecx +1: pushl %ecx + rep movsb + popl %ecx + popw %si + popw %di + incw %bx + movl %edx, %eax + clc + ret +2: + /* If the requested region is earlier than the cached region, + * invalidate the cache. + */ + cmpw %bx, underlying_e820_index + jbe 1f + movw $0xffff, underlying_e820_index +1: + /* If the cache is invalid, reset the underlying %ebx */ + cmpw $0xffff, underlying_e820_index + jne 1f + andl $0, underlying_e820_ebx +1: + /* If the cache is valid but the continuation value is zero, + * this means that the previous underlying call returned with + * %ebx=0. Return with CF=1 in this case. + */ + cmpw $0xffff, underlying_e820_index + je 1f + cmpl $0, underlying_e820_ebx + jne 1f + stc + ret +1: + /* Get the next region into the cache */ + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + pushl %esi /* Some implementations corrupt %esi, so we */ + pushl %edi /* preserve %esi, %edi and %ebp to be paranoid */ + pushl %ebp + pushw %es + pushw %ds + popw %es + movw $underlying_e820_cache, %di + cmpl $E820MAXSIZE, %ecx + jbe 1f + movl $E820MAXSIZE, %ecx +1: movl underlying_e820_ebx, %ebx + stc + pushfw + lcall *%cs:int15_vector + popw %es + popl %ebp + popl %edi + popl %esi + /* Check for error return from underlying e820 call */ + jc 2f /* CF set: error */ + cmpl $SMAP, %eax + je 3f /* 'SMAP' missing: error */ +2: /* An error occurred: return values returned by underlying e820 call */ + stc /* Force CF set if SMAP was missing */ + addr32 leal 16(%esp), %esp /* avoid changing other flags */ + ret +3: /* No error occurred */ + movl %ebx, underlying_e820_ebx + movl %ecx, 
underlying_e820_cache_size + popl %edx + popl %ecx + popl %ebx + popl %eax + /* Mark cache as containing this result */ + incw underlying_e820_index + + /* Loop until found */ + jmp get_underlying_e820 + .size get_underlying_e820, . - get_underlying_e820 + + .section ".data16", "aw", @progbits +underlying_e820_index: + .word 0xffff /* Initialise to an invalid value */ + .size underlying_e820_index, . - underlying_e820_index + + .section ".bss16", "aw", @nobits +underlying_e820_ebx: + .long 0 + .size underlying_e820_ebx, . - underlying_e820_ebx + + .section ".bss16", "aw", @nobits +underlying_e820_cache: + .space E820MAXSIZE + .size underlying_e820_cache, . - underlying_e820_cache + + .section ".bss16", "aw", @nobits +underlying_e820_cache_size: + .long 0 + .size underlying_e820_cache_size, . - underlying_e820_cache_size + +/**************************************************************************** + * Get windowed e820 region, without empty region stripping + * + * Parameters: + * As for INT 15,e820 + * Returns: + * As for INT 15,e820 + * + * Wraps the underlying INT 15,e820 call so that each underlying + * region is returned N times, windowed to fit within N visible-memory + * windows. Termination is always via CF=1. 
+ * + **************************************************************************** + */ + .section ".text16", "ax", @progbits +get_windowed_e820: + + /* Preserve registers */ + pushl %esi + pushw %bp + + /* Split %ebx into %si:%bx, store original %bx in %bp */ + pushl %ebx + popw %bp + popw %si + + /* %si == 0 => start of memory_windows list */ + testw %si, %si + jne 1f + movw $memory_windows, %si +1: + /* Get (cached) underlying e820 region to buffer */ + call get_underlying_e820 + jc 99f /* Abort on error */ + + /* Preserve registers */ + pushal + /* start => %edx:%eax, len => %ecx:%ebx */ + movl %es:0(%di), %eax + movl %es:4(%di), %edx + movl %es:8(%di), %ebx + movl %es:12(%di), %ecx + /* Truncate region to current window */ + call window_region +1: /* Store modified values in e820 map entry */ + movl %eax, %es:0(%di) + movl %edx, %es:4(%di) + movl %ebx, %es:8(%di) + movl %ecx, %es:12(%di) + /* Restore registers */ + popal + + /* Derive continuation value for next call */ + addw $16, %si + cmpw $memory_windows_end, %si + jne 1f + /* End of memory windows: reset %si and allow %bx to continue */ + xorw %si, %si + jmp 2f +1: /* More memory windows to go: restore original %bx */ + movw %bp, %bx +2: /* Construct %ebx from %si:%bx */ + pushw %si + pushw %bx + popl %ebx + +98: /* Clear CF */ + clc +99: /* Restore registers and return */ + popw %bp + popl %esi + ret + .size get_windowed_e820, . - get_windowed_e820 + +/**************************************************************************** + * Get windowed e820 region, with empty region stripping + * + * Parameters: + * As for INT 15,e820 + * Returns: + * As for INT 15,e820 + * + * Wraps the underlying INT 15,e820 call so that each underlying + * region is returned up to N times, windowed to fit within N + * visible-memory windows. Empty windows are never returned. + * Termination is always via CF=1. 
+ * + **************************************************************************** + */ + .section ".text16", "ax", @progbits +get_nonempty_e820: + + /* Record entry parameters */ + pushl %eax + pushl %ecx + pushl %edx + + /* Get next windowed region */ + call get_windowed_e820 + jc 99f /* abort on error */ + + /* If region is non-empty, finish here */ + cmpl $0, %es:8(%di) + jne 98f + cmpl $0, %es:12(%di) + jne 98f + + /* Region was empty: restore entry parameters and go to next region */ + popl %edx + popl %ecx + popl %eax + jmp get_nonempty_e820 + +98: /* Clear CF */ + clc +99: /* Return values from underlying call */ + addr32 leal 12(%esp), %esp /* avoid changing flags */ + ret + .size get_nonempty_e820, . - get_nonempty_e820 + +/**************************************************************************** + * Get mangled e820 region, with empty region stripping + * + * Parameters: + * As for INT 15,e820 + * Returns: + * As for INT 15,e820 + * + * Wraps the underlying INT 15,e820 call so that underlying regions + * are windowed to the allowed memory regions. Empty regions are + * stripped from the map. Termination is always via %ebx=0. + * + **************************************************************************** + */ + .section ".text16", "ax", @progbits +get_mangled_e820: + + /* Get a nonempty region */ + call get_nonempty_e820 + jc 99f /* Abort on error */ + + /* Peek ahead to see if there are any further nonempty regions */ + pushal + pushw %es + movw %sp, %bp + subw %cx, %sp + movl $0xe820, %eax + movl $SMAP, %edx + pushw %ss + popw %es + movw %sp, %di + call get_nonempty_e820 + movw %bp, %sp + popw %es + popal + jnc 99f /* There are further nonempty regions */ + + /* No futher nonempty regions: zero %ebx and clear CF */ + xorl %ebx, %ebx + +99: /* Return */ + ret + .size get_mangled_e820, . 
- get_mangled_e820 + +/**************************************************************************** + * INT 15,e820 handler + **************************************************************************** + */ + .section ".text16", "ax", @progbits +int15_e820: + pushw %ds + pushw %cs:rm_ds + popw %ds + call get_mangled_e820 + popw %ds + call patch_cf + iret + .size int15_e820, . - int15_e820 + +/**************************************************************************** + * INT 15,e801 handler + **************************************************************************** + */ + .section ".text16", "ax", @progbits +int15_e801: + /* Call previous handler */ + pushfw + lcall *%cs:int15_vector + call patch_cf + /* Edit result */ + pushw %ds + pushw %cs:rm_ds + popw %ds + call patch_1m_16m + xchgw %ax, %cx + xchgw %bx, %dx + call patch_1m_16m + xchgw %ax, %cx + xchgw %bx, %dx + popw %ds + iret + .size int15_e801, . - int15_e801 + +/**************************************************************************** + * INT 15,88 handler + **************************************************************************** + */ + .section ".text16", "ax", @progbits +int15_88: + /* Call previous handler */ + pushfw + lcall *%cs:int15_vector + call patch_cf + /* Edit result */ + pushw %ds + pushw %cs:rm_ds + popw %ds + call patch_1m + popw %ds + iret + .size int15_88, . - int15_88 + +/**************************************************************************** + * INT 15 handler + **************************************************************************** + */ + .section ".text16", "ax", @progbits + .globl int15 +int15: + /* See if we want to intercept this call */ + pushfw + cmpw $0xe820, %ax + jne 1f + cmpl $SMAP, %edx + jne 1f + popfw + jmp int15_e820 +1: cmpw $0xe801, %ax + jne 2f + popfw + jmp int15_e801 +2: cmpb $0x88, %ah + jne 3f + popfw + jmp int15_88 +3: popfw + ljmp *%cs:int15_vector + .size int15, . 
- int15 + + .section ".text16.data", "aw", @progbits + .globl int15_vector +int15_vector: + .long 0 + .size int15_vector, . - int15_vector diff --git a/src/arch/x86/interface/pcbios/fakee820.c b/src/arch/x86/interface/pcbios/fakee820.c new file mode 100644 index 00000000..8b083c4f --- /dev/null +++ b/src/arch/x86/interface/pcbios/fakee820.c @@ -0,0 +1,98 @@ +/* Copyright (C) 2008 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** Assembly routine in inline asm */ +extern void int15_fakee820(); + +/** Original INT 15 handler */ +static struct segoff __text16 ( real_int15_vector ); +#define real_int15_vector __use_text16 ( real_int15_vector ) + +/** An INT 15,e820 memory map entry */ +struct e820_entry { + /** Start of region */ + uint64_t start; + /** Length of region */ + uint64_t len; + /** Type of region */ + uint32_t type; +} __attribute__ (( packed )); + +#define E820_TYPE_RAM 1 /**< Normal memory */ +#define E820_TYPE_RSVD 2 /**< Reserved and unavailable */ +#define E820_TYPE_ACPI 3 /**< ACPI reclaim memory */ +#define E820_TYPE_NVS 4 /**< ACPI NVS memory */ + +/** Fake e820 map */ +static struct e820_entry __text16_array ( e820map, [] ) __used = { + { 0x00000000ULL, ( 0x000a0000ULL - 0x00000000ULL ), E820_TYPE_RAM }, + { 0x00100000ULL, ( 0xcfb50000ULL - 0x00100000ULL ), E820_TYPE_RAM }, + { 0xcfb50000ULL, ( 0xcfb64000ULL - 0xcfb50000ULL ), E820_TYPE_RSVD }, + { 0xcfb64000ULL, ( 0xcfb66000ULL - 0xcfb64000ULL ), E820_TYPE_RSVD }, + { 0xcfb66000ULL, ( 0xcfb85c00ULL - 0xcfb66000ULL ), E820_TYPE_ACPI }, + { 0xcfb85c00ULL, ( 0xd0000000ULL - 0xcfb85c00ULL ), E820_TYPE_RSVD }, + { 0xe0000000ULL, ( 0xf0000000ULL - 0xe0000000ULL ), E820_TYPE_RSVD }, + { 0xfe000000ULL, (0x100000000ULL - 0xfe000000ULL ), E820_TYPE_RSVD }, + {0x100000000ULL, (0x230000000ULL -0x100000000ULL ), E820_TYPE_RAM }, +}; +#define e820map __use_text16 ( e820map ) + +void fake_e820 ( void ) { + __asm__ __volatile__ ( + TEXT16_CODE ( "\nint15_fakee820:\n\t" + "pushfw\n\t" + "cmpl $0xe820, %%eax\n\t" + "jne 99f\n\t" + "cmpl $0x534d4150, %%edx\n\t" + "jne 99f\n\t" + "pushaw\n\t" + "movw %%sp, %%bp\n\t" + "andb $~0x01, 22(%%bp)\n\t" /* Clear return CF */ + "leaw e820map(%%bx), %%si\n\t" + "cs rep movsb\n\t" + "popaw\n\t" + "movl %%edx, %%eax\n\t" + "addl $20, %%ebx\n\t" + "cmpl %0, %%ebx\n\t" + "jne 1f\n\t" + "xorl %%ebx,%%ebx\n\t" + "\n1:\n\t" + 
"popfw\n\t" + "iret\n\t" + "\n99:\n\t" + "popfw\n\t" + "ljmp *%%cs:real_int15_vector\n\t" ) + : : "i" ( sizeof ( e820map ) ) ); + + hook_bios_interrupt ( 0x15, ( intptr_t ) int15_fakee820, + &real_int15_vector ); +} + +void unfake_e820 ( void ) { + unhook_bios_interrupt ( 0x15, ( intptr_t ) int15_fakee820, + &real_int15_vector ); +} diff --git a/src/arch/x86/interface/pcbios/hidemem.c b/src/arch/x86/interface/pcbios/hidemem.c new file mode 100644 index 00000000..1a3022c5 --- /dev/null +++ b/src/arch/x86/interface/pcbios/hidemem.c @@ -0,0 +1,235 @@ +/* Copyright (C) 2006 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** Set to true if you want to test a fake E820 map */ +#define FAKE_E820 0 + +/** Alignment for hidden memory regions */ +#define ALIGN_HIDDEN 4096 /* 4kB page alignment should be enough */ + +/** + * A hidden region of iPXE + * + * This represents a region that will be edited out of the system's + * memory map. 
+ * + * This structure is accessed by assembly code, so must not be + * changed. + */ +struct hidden_region { + /** Physical start address */ + uint64_t start; + /** Physical end address */ + uint64_t end; +}; + +/** Hidden base memory */ +extern struct hidden_region __data16 ( hidemem_base ); +#define hidemem_base __use_data16 ( hidemem_base ) + +/** Hidden umalloc memory */ +extern struct hidden_region __data16 ( hidemem_umalloc ); +#define hidemem_umalloc __use_data16 ( hidemem_umalloc ) + +/** Hidden text memory */ +extern struct hidden_region __data16 ( hidemem_textdata ); +#define hidemem_textdata __use_data16 ( hidemem_textdata ) + +/** Assembly routine in e820mangler.S */ +extern void int15(); + +/** Vector for storing original INT 15 handler */ +extern struct segoff __text16 ( int15_vector ); +#define int15_vector __use_text16 ( int15_vector ) + +/* The linker defines these symbols for us */ +extern char _textdata[]; +extern char _etextdata[]; +extern char _text16_memsz[]; +#define _text16_memsz ( ( size_t ) _text16_memsz ) +extern char _data16_memsz[]; +#define _data16_memsz ( ( size_t ) _data16_memsz ) + +/** + * Hide region of memory from system memory map + * + * @v region Hidden memory region + * @v start Start of region + * @v end End of region + */ +static void hide_region ( struct hidden_region *region, + physaddr_t start, physaddr_t end ) { + + /* Some operating systems get a nasty shock if a region of the + * E820 map seems to start on a non-page boundary. Make life + * safer by rounding out our edited region. + */ + region->start = ( start & ~( ALIGN_HIDDEN - 1 ) ); + region->end = ( ( end + ALIGN_HIDDEN - 1 ) & ~( ALIGN_HIDDEN - 1 ) ); + + DBG ( "Hiding region [%llx,%llx)\n", region->start, region->end ); +} + +/** + * Hide used base memory + * + */ +void hide_basemem ( void ) { + /* Hide from the top of free base memory to 640kB. Don't use + * hide_region(), because we don't want this rounded to the + * nearest page boundary. 
+ */ + hidemem_base.start = ( get_fbms() * 1024 ); +} + +/** + * Hide umalloc() region + * + */ +void hide_umalloc ( physaddr_t start, physaddr_t end ) { + assert ( end <= virt_to_phys ( _textdata ) ); + hide_region ( &hidemem_umalloc, start, end ); +} + +/** + * Hide .text and .data + * + */ +void hide_textdata ( void ) { + hide_region ( &hidemem_textdata, virt_to_phys ( _textdata ), + virt_to_phys ( _etextdata ) ); +} + +/** + * Hide Etherboot + * + * Installs an INT 15 handler to edit Etherboot out of the memory map + * returned by the BIOS. + */ +static void hide_etherboot ( void ) { + struct memory_map memmap; + unsigned int rm_ds_top; + unsigned int rm_cs_top; + unsigned int fbms; + + /* Dump memory map before mangling */ + DBG ( "Hiding iPXE from system memory map\n" ); + get_memmap ( &memmap ); + + /* Hook in fake E820 map, if we're testing one */ + if ( FAKE_E820 ) { + DBG ( "Hooking in fake E820 map\n" ); + fake_e820(); + get_memmap ( &memmap ); + } + + /* Initialise the hidden regions */ + hide_basemem(); + hide_umalloc ( virt_to_phys ( _textdata ), virt_to_phys ( _textdata ) ); + hide_textdata(); + + /* Some really moronic BIOSes bring up the PXE stack via the + * UNDI loader entry point and then don't bother to unload it + * before overwriting the code and data segments. If this + * happens, we really don't want to leave INT 15 hooked, + * because that will cause any loaded OS to die horribly as + * soon as it attempts to fetch the system memory map. + * + * We use a heuristic to guess whether or not we are being + * loaded sensibly. 
+ */ + rm_cs_top = ( ( ( rm_cs << 4 ) + _text16_memsz + 1024 - 1 ) >> 10 ); + rm_ds_top = ( ( ( rm_ds << 4 ) + _data16_memsz + 1024 - 1 ) >> 10 ); + fbms = get_fbms(); + if ( ( rm_cs_top < fbms ) && ( rm_ds_top < fbms ) ) { + DBG ( "Detected potentially unsafe UNDI load at CS=%04x " + "DS=%04x FBMS=%dkB\n", rm_cs, rm_ds, fbms ); + DBG ( "Disabling INT 15 memory hiding\n" ); + return; + } + + /* Hook INT 15 */ + hook_bios_interrupt ( 0x15, ( intptr_t ) int15, &int15_vector ); + + /* Dump memory map after mangling */ + DBG ( "Hidden iPXE from system memory map\n" ); + get_memmap ( &memmap ); +} + +/** + * Unhide Etherboot + * + * Uninstalls the INT 15 handler installed by hide_etherboot(), if + * possible. + */ +static void unhide_etherboot ( int flags __unused ) { + struct memory_map memmap; + int rc; + + /* If we have more than one hooked interrupt at this point, it + * means that some other vector is still hooked, in which case + * we can't safely unhook INT 15 because we need to keep our + * memory protected. (We expect there to be at least one + * hooked interrupt, because INT 15 itself is still hooked). + */ + if ( hooked_bios_interrupts > 1 ) { + DBG ( "Cannot unhide: %d interrupt vectors still hooked\n", + hooked_bios_interrupts ); + return; + } + + /* Try to unhook INT 15 */ + if ( ( rc = unhook_bios_interrupt ( 0x15, ( intptr_t ) int15, + &int15_vector ) ) != 0 ) { + DBG ( "Cannot unhook INT15: %s\n", strerror ( rc ) ); + /* Leave it hooked; there's nothing else we can do, + * and it should be intrinsically safe (though + * wasteful of RAM). 
+ */ + } + + /* Unhook fake E820 map, if used */ + if ( FAKE_E820 ) + unfake_e820(); + + /* Dump memory map after unhiding */ + DBG ( "Unhidden iPXE from system memory map\n" ); + get_memmap ( &memmap ); +} + +/** Hide Etherboot startup function */ +struct startup_fn hide_etherboot_startup_fn __startup_fn ( STARTUP_EARLY ) = { + .name = "hidemem", + .startup = hide_etherboot, + .shutdown = unhide_etherboot, +}; diff --git a/src/arch/x86/interface/pcbios/int13.c b/src/arch/x86/interface/pcbios/int13.c new file mode 100644 index 00000000..ca789a0d --- /dev/null +++ b/src/arch/x86/interface/pcbios/int13.c @@ -0,0 +1,1627 @@ +/* + * Copyright (C) 2006 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * INT 13 emulation + * + * This module provides a mechanism for exporting block devices via + * the BIOS INT 13 disk interrupt interface. 
+ * + */ + +/** INT 13 SAN device private data */ +struct int13_data { + /** BIOS natural drive number (0x00-0xff) + * + * This is the drive number that would have been assigned by + * 'naturally' appending the drive to the end of the BIOS + * drive list. + * + * If the emulated drive replaces a preexisting drive, this is + * the drive number that the preexisting drive gets remapped + * to. + */ + unsigned int natural_drive; + + /** Number of cylinders + * + * The cylinder number field in an INT 13 call is ten bits + * wide, giving a maximum of 1024 cylinders. Conventionally, + * when the 7.8GB limit of a CHS address is exceeded, it is + * the number of cylinders that is increased beyond the + * addressable limit. + */ + unsigned int cylinders; + /** Number of heads + * + * The head number field in an INT 13 call is eight bits wide, + * giving a maximum of 256 heads. However, apparently all + * versions of MS-DOS up to and including Win95 fail with 256 + * heads, so the maximum encountered in practice is 255. + */ + unsigned int heads; + /** Number of sectors per track + * + * The sector number field in an INT 13 call is six bits wide, + * giving a maximum of 63 sectors, since sector numbering + * (unlike head and cylinder numbering) starts at 1, not 0. 
+ */ + unsigned int sectors_per_track; + + /** Address of El Torito boot catalog (if any) */ + unsigned int boot_catalog; + /** Status of last operation */ + int last_status; +}; + +/** Vector for chaining to other INT 13 handlers */ +static struct segoff __text16 ( int13_vector ); +#define int13_vector __use_text16 ( int13_vector ) + +/** Assembly wrapper */ +extern void int13_wrapper ( void ); + +/** Dummy floppy disk parameter table */ +static struct int13_fdd_parameters __data16 ( int13_fdd_params ) = { + /* 512 bytes per sector */ + .bytes_per_sector = 0x02, + /* Highest sectors per track that we ever return */ + .sectors_per_track = 48, +}; +#define int13_fdd_params __use_data16 ( int13_fdd_params ) + +/** + * Equipment word + * + * This is a cached copy of the BIOS Data Area equipment word at + * 40:10. + */ +static uint16_t equipment_word; + +/** + * Number of BIOS floppy disk drives + * + * This is derived from the equipment word. It is held in .text16 to + * allow for easy access by the INT 13,08 wrapper. + */ +static uint8_t __text16 ( num_fdds ); +#define num_fdds __use_text16 ( num_fdds ) + +/** + * Number of BIOS hard disk drives + * + * This is a cached copy of the BIOS Data Area number of hard disk + * drives at 40:75. It is held in .text16 to allow for easy access by + * the INT 13,08 wrapper. + */ +static uint8_t __text16 ( num_drives ); +#define num_drives __use_text16 ( num_drives ) + +/** + * Calculate SAN device capacity (limited to 32 bits) + * + * @v sandev SAN device + * @ret blocks Number of blocks + */ +static inline uint32_t int13_capacity32 ( struct san_device *sandev ) { + uint64_t capacity = sandev_capacity ( sandev ); + return ( ( capacity <= 0xffffffffUL ) ? capacity : 0xffffffff ); +} + +/** + * Test if SAN device is a floppy disk drive + * + * @v sandev SAN device + * @ret is_fdd SAN device is a floppy disk drive + */ +static inline int int13_is_fdd ( struct san_device *sandev ) { + return ( ! 
( sandev->drive & 0x80 ) ); +} + +/** + * Parse El Torito parameters + * + * @v sandev SAN device + * @v scratch Scratch area for single-sector reads + * @ret rc Return status code + * + * Reads and parses El Torito parameters, if present. + */ +static int int13_parse_eltorito ( struct san_device *sandev, void *scratch ) { + struct int13_data *int13 = sandev->priv; + static const struct eltorito_descriptor_fixed boot_check = { + .type = ISO9660_TYPE_BOOT, + .id = ISO9660_ID, + .version = 1, + .system_id = "EL TORITO SPECIFICATION", + }; + struct eltorito_descriptor *boot = scratch; + int rc; + + /* Read boot record volume descriptor */ + if ( ( rc = sandev_read ( sandev, ELTORITO_LBA, 1, + virt_to_user ( boot ) ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x could not read El Torito boot " + "record volume descriptor: %s\n", + sandev->drive, strerror ( rc ) ); + return rc; + } + + /* Check for an El Torito boot catalog */ + if ( memcmp ( boot, &boot_check, sizeof ( boot_check ) ) == 0 ) { + int13->boot_catalog = boot->sector; + DBGC ( sandev, "INT13 drive %02x has an El Torito boot catalog " + "at LBA %08x\n", sandev->drive, int13->boot_catalog ); + } else { + DBGC ( sandev, "INT13 drive %02x has no El Torito boot " + "catalog\n", sandev->drive ); + } + + return 0; +} + +/** + * Guess INT 13 hard disk drive geometry + * + * @v sandev SAN device + * @v scratch Scratch area for single-sector reads + * @ret heads Guessed number of heads + * @ret sectors Guessed number of sectors per track + * @ret rc Return status code + * + * Guesses the drive geometry by inspecting the partition table. 
+ */ +static int int13_guess_geometry_hdd ( struct san_device *sandev, void *scratch, + unsigned int *heads, + unsigned int *sectors ) { + struct master_boot_record *mbr = scratch; + struct partition_table_entry *partition; + unsigned int i; + unsigned int start_cylinder; + unsigned int start_head; + unsigned int start_sector; + unsigned int end_head; + unsigned int end_sector; + int rc; + + /* Read partition table */ + if ( ( rc = sandev_read ( sandev, 0, 1, virt_to_user ( mbr ) ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x could not read " + "partition table to guess geometry: %s\n", + sandev->drive, strerror ( rc ) ); + return rc; + } + DBGC2 ( sandev, "INT13 drive %02x has MBR:\n", sandev->drive ); + DBGC2_HDA ( sandev, 0, mbr, sizeof ( *mbr ) ); + DBGC ( sandev, "INT13 drive %02x has signature %08x\n", + sandev->drive, mbr->signature ); + + /* Scan through partition table and modify guesses for + * heads and sectors_per_track if we find any used + * partitions. + */ + *heads = 0; + *sectors = 0; + for ( i = 0 ; i < 4 ; i++ ) { + + /* Skip empty partitions */ + partition = &mbr->partitions[i]; + if ( ! partition->type ) + continue; + + /* If partition starts on cylinder 0 then we can + * unambiguously determine the number of sectors. + */ + start_cylinder = PART_CYLINDER ( partition->chs_start ); + start_head = PART_HEAD ( partition->chs_start ); + start_sector = PART_SECTOR ( partition->chs_start ); + if ( ( start_cylinder == 0 ) && ( start_head != 0 ) ) { + *sectors = ( ( partition->start + 1 - start_sector ) / + start_head ); + DBGC ( sandev, "INT13 drive %02x guessing C/H/S " + "xx/xx/%d based on partition %d\n", + sandev->drive, *sectors, ( i + 1 ) ); + } + + /* If partition ends on a higher head or sector number + * than our current guess, then increase the guess. 
+ */ + end_head = PART_HEAD ( partition->chs_end ); + end_sector = PART_SECTOR ( partition->chs_end ); + if ( ( end_head + 1 ) > *heads ) { + *heads = ( end_head + 1 ); + DBGC ( sandev, "INT13 drive %02x guessing C/H/S " + "xx/%d/xx based on partition %d\n", + sandev->drive, *heads, ( i + 1 ) ); + } + if ( end_sector > *sectors ) { + *sectors = end_sector; + DBGC ( sandev, "INT13 drive %02x guessing C/H/S " + "xx/xx/%d based on partition %d\n", + sandev->drive, *sectors, ( i + 1 ) ); + } + } + + /* Default guess is xx/255/63 */ + if ( ! *heads ) + *heads = 255; + if ( ! *sectors ) + *sectors = 63; + + return 0; +} + +/** Recognised floppy disk geometries */ +static const struct int13_fdd_geometry int13_fdd_geometries[] = { + INT13_FDD_GEOMETRY ( 40, 1, 8 ), + INT13_FDD_GEOMETRY ( 40, 1, 9 ), + INT13_FDD_GEOMETRY ( 40, 2, 8 ), + INT13_FDD_GEOMETRY ( 40, 1, 9 ), + INT13_FDD_GEOMETRY ( 80, 2, 8 ), + INT13_FDD_GEOMETRY ( 80, 2, 9 ), + INT13_FDD_GEOMETRY ( 80, 2, 15 ), + INT13_FDD_GEOMETRY ( 80, 2, 18 ), + INT13_FDD_GEOMETRY ( 80, 2, 20 ), + INT13_FDD_GEOMETRY ( 80, 2, 21 ), + INT13_FDD_GEOMETRY ( 82, 2, 21 ), + INT13_FDD_GEOMETRY ( 83, 2, 21 ), + INT13_FDD_GEOMETRY ( 80, 2, 22 ), + INT13_FDD_GEOMETRY ( 80, 2, 23 ), + INT13_FDD_GEOMETRY ( 80, 2, 24 ), + INT13_FDD_GEOMETRY ( 80, 2, 36 ), + INT13_FDD_GEOMETRY ( 80, 2, 39 ), + INT13_FDD_GEOMETRY ( 80, 2, 40 ), + INT13_FDD_GEOMETRY ( 80, 2, 44 ), + INT13_FDD_GEOMETRY ( 80, 2, 48 ), +}; + +/** + * Guess INT 13 floppy disk drive geometry + * + * @v sandev SAN device + * @ret heads Guessed number of heads + * @ret sectors Guessed number of sectors per track + * @ret rc Return status code + * + * Guesses the drive geometry by inspecting the disk size. 
+ */ +static int int13_guess_geometry_fdd ( struct san_device *sandev, + unsigned int *heads, + unsigned int *sectors ) { + unsigned int blocks = sandev_capacity ( sandev ); + const struct int13_fdd_geometry *geometry; + unsigned int cylinders; + unsigned int i; + + /* Look for a match against a known geometry */ + for ( i = 0 ; i < ( sizeof ( int13_fdd_geometries ) / + sizeof ( int13_fdd_geometries[0] ) ) ; i++ ) { + geometry = &int13_fdd_geometries[i]; + cylinders = INT13_FDD_CYLINDERS ( geometry ); + *heads = INT13_FDD_HEADS ( geometry ); + *sectors = INT13_FDD_SECTORS ( geometry ); + if ( ( cylinders * (*heads) * (*sectors) ) == blocks ) { + DBGC ( sandev, "INT13 drive %02x guessing C/H/S " + "%d/%d/%d based on size %dK\n", sandev->drive, + cylinders, *heads, *sectors, ( blocks / 2 ) ); + return 0; + } + } + + /* Otherwise, assume a partial disk image in the most common + * format (1440K, 80/2/18). + */ + *heads = 2; + *sectors = 18; + DBGC ( sandev, "INT13 drive %02x guessing C/H/S xx/%d/%d based on size " + "%dK\n", sandev->drive, *heads, *sectors, ( blocks / 2 ) ); + return 0; +} + +/** + * Guess INT 13 drive geometry + * + * @v sandev SAN device + * @v scratch Scratch area for single-sector reads + * @ret rc Return status code + */ +static int int13_guess_geometry ( struct san_device *sandev, void *scratch ) { + struct int13_data *int13 = sandev->priv; + unsigned int guessed_heads; + unsigned int guessed_sectors; + unsigned int blocks; + unsigned int blocks_per_cyl; + int rc; + + /* Guess geometry according to drive type */ + if ( int13_is_fdd ( sandev ) ) { + if ( ( rc = int13_guess_geometry_fdd ( sandev, &guessed_heads, + &guessed_sectors )) != 0) + return rc; + } else { + if ( ( rc = int13_guess_geometry_hdd ( sandev, scratch, + &guessed_heads, + &guessed_sectors )) != 0) + return rc; + } + + /* Apply guesses if no geometry already specified */ + if ( ! int13->heads ) + int13->heads = guessed_heads; + if ( ! 
int13->sectors_per_track ) + int13->sectors_per_track = guessed_sectors; + if ( ! int13->cylinders ) { + /* Avoid attempting a 64-bit divide on a 32-bit system */ + blocks = int13_capacity32 ( sandev ); + blocks_per_cyl = ( int13->heads * int13->sectors_per_track ); + assert ( blocks_per_cyl != 0 ); + int13->cylinders = ( blocks / blocks_per_cyl ); + if ( int13->cylinders > 1024 ) + int13->cylinders = 1024; + } + + return 0; +} + +/** + * Update BIOS drive count + */ +static void int13_sync_num_drives ( void ) { + struct san_device *sandev; + struct int13_data *int13; + uint8_t *counter; + uint8_t max_drive; + uint8_t required; + + /* Get current drive counts */ + get_real ( equipment_word, BDA_SEG, BDA_EQUIPMENT_WORD ); + get_real ( num_drives, BDA_SEG, BDA_NUM_DRIVES ); + num_fdds = ( ( equipment_word & 0x0001 ) ? + ( ( ( equipment_word >> 6 ) & 0x3 ) + 1 ) : 0 ); + + /* Ensure count is large enough to cover all of our SAN devices */ + for_each_sandev ( sandev ) { + int13 = sandev->priv; + counter = ( int13_is_fdd ( sandev ) ? 
&num_fdds : &num_drives ); + max_drive = sandev->drive; + if ( max_drive < int13->natural_drive ) + max_drive = int13->natural_drive; + required = ( ( max_drive & 0x7f ) + 1 ); + if ( *counter < required ) { + *counter = required; + DBGC ( sandev, "INT13 drive %02x added to drive count: " + "%d HDDs, %d FDDs\n", + sandev->drive, num_drives, num_fdds ); + } + } + + /* Update current drive count */ + equipment_word &= ~( ( 0x3 << 6 ) | 0x0001 ); + if ( num_fdds ) { + equipment_word |= ( 0x0001 | + ( ( ( num_fdds - 1 ) & 0x3 ) << 6 ) ); + } + put_real ( equipment_word, BDA_SEG, BDA_EQUIPMENT_WORD ); + put_real ( num_drives, BDA_SEG, BDA_NUM_DRIVES ); +} + +/** + * Check number of drives + */ +static void int13_check_num_drives ( void ) { + uint16_t check_equipment_word; + uint8_t check_num_drives; + + get_real ( check_equipment_word, BDA_SEG, BDA_EQUIPMENT_WORD ); + get_real ( check_num_drives, BDA_SEG, BDA_NUM_DRIVES ); + if ( ( check_equipment_word != equipment_word ) || + ( check_num_drives != num_drives ) ) { + int13_sync_num_drives(); + } +} + +/** + * INT 13, 00 - Reset disk system + * + * @v sandev SAN device + * @ret status Status code + */ +static int int13_reset ( struct san_device *sandev, + struct i386_all_regs *ix86 __unused ) { + int rc; + + DBGC2 ( sandev, "Reset drive\n" ); + + /* Reset SAN device */ + if ( ( rc = sandev_reset ( sandev ) ) != 0 ) + return -INT13_STATUS_RESET_FAILED; + + return 0; +} + +/** + * INT 13, 01 - Get status of last operation + * + * @v sandev SAN device + * @ret status Status code + */ +static int int13_get_last_status ( struct san_device *sandev, + struct i386_all_regs *ix86 __unused ) { + struct int13_data *int13 = sandev->priv; + + DBGC2 ( sandev, "Get status of last operation\n" ); + return int13->last_status; +} + +/** + * Read / write sectors + * + * @v sandev SAN device + * @v al Number of sectors to read or write (must be nonzero) + * @v ch Low bits of cylinder number + * @v cl (bits 7:6) High bits of cylinder number 
+ * @v cl (bits 5:0) Sector number + * @v dh Head number + * @v es:bx Data buffer + * @v sandev_rw SAN device read/write method + * @ret status Status code + * @ret al Number of sectors read or written + */ +static int int13_rw_sectors ( struct san_device *sandev, + struct i386_all_regs *ix86, + int ( * sandev_rw ) ( struct san_device *sandev, + uint64_t lba, + unsigned int count, + userptr_t buffer ) ) { + struct int13_data *int13 = sandev->priv; + unsigned int cylinder, head, sector; + unsigned long lba; + unsigned int count; + userptr_t buffer; + int rc; + + /* Validate blocksize */ + if ( sandev_blksize ( sandev ) != INT13_BLKSIZE ) { + DBGC ( sandev, "\nINT 13 drive %02x invalid blocksize (%zd) " + "for non-extended read/write\n", + sandev->drive, sandev_blksize ( sandev ) ); + return -INT13_STATUS_INVALID; + } + + /* Calculate parameters */ + cylinder = ( ( ( ix86->regs.cl & 0xc0 ) << 2 ) | ix86->regs.ch ); + head = ix86->regs.dh; + sector = ( ix86->regs.cl & 0x3f ); + if ( ( cylinder >= int13->cylinders ) || + ( head >= int13->heads ) || + ( sector < 1 ) || ( sector > int13->sectors_per_track ) ) { + DBGC ( sandev, "C/H/S %d/%d/%d out of range for geometry " + "%d/%d/%d\n", cylinder, head, sector, int13->cylinders, + int13->heads, int13->sectors_per_track ); + return -INT13_STATUS_INVALID; + } + lba = ( ( ( ( cylinder * int13->heads ) + head ) + * int13->sectors_per_track ) + sector - 1 ); + count = ix86->regs.al; + buffer = real_to_user ( ix86->segs.es, ix86->regs.bx ); + + DBGC2 ( sandev, "C/H/S %d/%d/%d = LBA %08lx <-> %04x:%04x (count %d)\n", + cylinder, head, sector, lba, ix86->segs.es, ix86->regs.bx, + count ); + + /* Read from / write to block device */ + if ( ( rc = sandev_rw ( sandev, lba, count, buffer ) ) != 0 ){ + DBGC ( sandev, "INT13 drive %02x I/O failed: %s\n", + sandev->drive, strerror ( rc ) ); + return -INT13_STATUS_READ_ERROR; + } + + return 0; +} + +/** + * INT 13, 02 - Read sectors + * + * @v sandev SAN device + * @v al Number of 
sectors to read (must be nonzero) + * @v ch Low bits of cylinder number + * @v cl (bits 7:6) High bits of cylinder number + * @v cl (bits 5:0) Sector number + * @v dh Head number + * @v es:bx Data buffer + * @ret status Status code + * @ret al Number of sectors read + */ +static int int13_read_sectors ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + + DBGC2 ( sandev, "Read: " ); + return int13_rw_sectors ( sandev, ix86, sandev_read ); +} + +/** + * INT 13, 03 - Write sectors + * + * @v sandev SAN device + * @v al Number of sectors to write (must be nonzero) + * @v ch Low bits of cylinder number + * @v cl (bits 7:6) High bits of cylinder number + * @v cl (bits 5:0) Sector number + * @v dh Head number + * @v es:bx Data buffer + * @ret status Status code + * @ret al Number of sectors written + */ +static int int13_write_sectors ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + + DBGC2 ( sandev, "Write: " ); + return int13_rw_sectors ( sandev, ix86, sandev_write ); +} + +/** + * INT 13, 08 - Get drive parameters + * + * @v sandev SAN device + * @ret status Status code + * @ret ch Low bits of maximum cylinder number + * @ret cl (bits 7:6) High bits of maximum cylinder number + * @ret cl (bits 5:0) Maximum sector number + * @ret dh Maximum head number + * @ret dl Number of drives + */ +static int int13_get_parameters ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + struct int13_data *int13 = sandev->priv; + unsigned int max_cylinder = int13->cylinders - 1; + unsigned int max_head = int13->heads - 1; + unsigned int max_sector = int13->sectors_per_track; /* sic */ + + DBGC2 ( sandev, "Get drive parameters\n" ); + + /* Validate blocksize */ + if ( sandev_blksize ( sandev ) != INT13_BLKSIZE ) { + DBGC ( sandev, "\nINT 13 drive %02x invalid blocksize (%zd) " + "for non-extended parameters\n", + sandev->drive, sandev_blksize ( sandev ) ); + return -INT13_STATUS_INVALID; + } + + /* Common parameters */ + ix86->regs.ch = ( max_cylinder 
& 0xff ); + ix86->regs.cl = ( ( ( max_cylinder >> 8 ) << 6 ) | max_sector ); + ix86->regs.dh = max_head; + ix86->regs.dl = ( int13_is_fdd ( sandev ) ? num_fdds : num_drives ); + + /* Floppy-specific parameters */ + if ( int13_is_fdd ( sandev ) ) { + ix86->regs.bl = INT13_FDD_TYPE_1M44; + ix86->segs.es = rm_ds; + ix86->regs.di = __from_data16 ( &int13_fdd_params ); + } + + return 0; +} + +/** + * INT 13, 15 - Get disk type + * + * @v sandev SAN device + * @ret ah Type code + * @ret cx:dx Sector count + * @ret status Status code / disk type + */ +static int int13_get_disk_type ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + uint32_t blocks; + + DBGC2 ( sandev, "Get disk type\n" ); + + if ( int13_is_fdd ( sandev ) ) { + return INT13_DISK_TYPE_FDD; + } else { + blocks = int13_capacity32 ( sandev ); + ix86->regs.cx = ( blocks >> 16 ); + ix86->regs.dx = ( blocks & 0xffff ); + return INT13_DISK_TYPE_HDD; + } +} + +/** + * INT 13, 41 - Extensions installation check + * + * @v sandev SAN device + * @v bx 0x55aa + * @ret bx 0xaa55 + * @ret cx Extensions API support bitmap + * @ret status Status code / API version + */ +static int int13_extension_check ( struct san_device *sandev __unused, + struct i386_all_regs *ix86 ) { + + if ( ix86->regs.bx == 0x55aa ) { + DBGC2 ( sandev, "INT13 extensions installation check\n" ); + ix86->regs.bx = 0xaa55; + ix86->regs.cx = ( INT13_EXTENSION_LINEAR | + INT13_EXTENSION_EDD | + INT13_EXTENSION_64BIT ); + return INT13_EXTENSION_VER_3_0; + } else { + return -INT13_STATUS_INVALID; + } +} + +/** + * Extended read / write + * + * @v sandev SAN device + * @v ds:si Disk address packet + * @v sandev_rw SAN device read/write method + * @ret status Status code + */ +static int int13_extended_rw ( struct san_device *sandev, + struct i386_all_regs *ix86, + int ( * sandev_rw ) ( struct san_device *sandev, + uint64_t lba, + unsigned int count, + userptr_t buffer ) ) { + struct int13_disk_address addr; + uint8_t bufsize; + uint64_t lba; + 
unsigned long count; + userptr_t buffer; + int rc; + + /* Extended reads are not allowed on floppy drives. + * ELTORITO.SYS seems to assume that we are really a CD-ROM if + * we support extended reads for a floppy drive. + */ + if ( int13_is_fdd ( sandev ) ) + return -INT13_STATUS_INVALID; + + /* Get buffer size */ + get_real ( bufsize, ix86->segs.ds, + ( ix86->regs.si + offsetof ( typeof ( addr ), bufsize ) ) ); + if ( bufsize < offsetof ( typeof ( addr ), buffer_phys ) ) { + DBGC2 ( sandev, "\n", bufsize ); + return -INT13_STATUS_INVALID; + } + + /* Read parameters from disk address structure */ + memset ( &addr, 0, sizeof ( addr ) ); + copy_from_real ( &addr, ix86->segs.ds, ix86->regs.si, bufsize ); + lba = addr.lba; + DBGC2 ( sandev, "LBA %08llx <-> ", ( ( unsigned long long ) lba ) ); + if ( ( addr.count == 0xff ) || + ( ( addr.buffer.segment == 0xffff ) && + ( addr.buffer.offset == 0xffff ) ) ) { + buffer = phys_to_user ( addr.buffer_phys ); + DBGC2 ( sandev, "%08llx", + ( ( unsigned long long ) addr.buffer_phys ) ); + } else { + buffer = real_to_user ( addr.buffer.segment, + addr.buffer.offset ); + DBGC2 ( sandev, "%04x:%04x", addr.buffer.segment, + addr.buffer.offset ); + } + if ( addr.count <= 0x7f ) { + count = addr.count; + } else if ( addr.count == 0xff ) { + count = addr.long_count; + } else { + DBGC2 ( sandev, " \n", addr.count ); + return -INT13_STATUS_INVALID; + } + DBGC2 ( sandev, " (count %ld)\n", count ); + + /* Read from / write to block device */ + if ( ( rc = sandev_rw ( sandev, lba, count, buffer ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x extended I/O failed: %s\n", + sandev->drive, strerror ( rc ) ); + /* Record that no blocks were transferred successfully */ + addr.count = 0; + put_real ( addr.count, ix86->segs.ds, + ( ix86->regs.si + + offsetof ( typeof ( addr ), count ) ) ); + return -INT13_STATUS_READ_ERROR; + } + + return 0; +} + +/** + * INT 13, 42 - Extended read + * + * @v sandev SAN device + * @v ds:si Disk address packet + * 
@ret status Status code + */ +static int int13_extended_read ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + + DBGC2 ( sandev, "Extended read: " ); + return int13_extended_rw ( sandev, ix86, sandev_read ); +} + +/** + * INT 13, 43 - Extended write + * + * @v sandev SAN device + * @v ds:si Disk address packet + * @ret status Status code + */ +static int int13_extended_write ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + + DBGC2 ( sandev, "Extended write: " ); + return int13_extended_rw ( sandev, ix86, sandev_write ); +} + +/** + * INT 13, 44 - Verify sectors + * + * @v sandev SAN device + * @v ds:si Disk address packet + * @ret status Status code + */ +static int int13_extended_verify ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + struct int13_disk_address addr; + uint64_t lba; + unsigned long count; + + /* Read parameters from disk address structure */ + if ( DBG_EXTRA ) { + copy_from_real ( &addr, ix86->segs.ds, ix86->regs.si, + sizeof ( addr )); + lba = addr.lba; + count = addr.count; + DBGC2 ( sandev, "Verify: LBA %08llx (count %ld)\n", + ( ( unsigned long long ) lba ), count ); + } + + /* We have no mechanism for verifying sectors */ + return -INT13_STATUS_INVALID; +} + +/** + * INT 13, 44 - Extended seek + * + * @v sandev SAN device + * @v ds:si Disk address packet + * @ret status Status code + */ +static int int13_extended_seek ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + struct int13_disk_address addr; + uint64_t lba; + unsigned long count; + + /* Read parameters from disk address structure */ + if ( DBG_EXTRA ) { + copy_from_real ( &addr, ix86->segs.ds, ix86->regs.si, + sizeof ( addr )); + lba = addr.lba; + count = addr.count; + DBGC2 ( sandev, "Seek: LBA %08llx (count %ld)\n", + ( ( unsigned long long ) lba ), count ); + } + + /* Ignore and return success */ + return 0; +} + +/** + * Build device path information + * + * @v sandev SAN device + * @v dpi Device path information + * @ret rc 
Return status code + */ +static int int13_device_path_info ( struct san_device *sandev, + struct edd_device_path_information *dpi ) { + struct san_path *sanpath; + struct device *device; + struct device_description *desc; + unsigned int i; + uint8_t sum = 0; + int rc; + + /* Reopen block device if necessary */ + if ( sandev_needs_reopen ( sandev ) && + ( ( rc = sandev_reopen ( sandev ) ) != 0 ) ) + return rc; + sanpath = sandev->active; + assert ( sanpath != NULL ); + + /* Get underlying hardware device */ + device = identify_device ( &sanpath->block ); + if ( ! device ) { + DBGC ( sandev, "INT13 drive %02x cannot identify hardware " + "device\n", sandev->drive ); + return -ENODEV; + } + + /* Fill in bus type and interface path */ + desc = &device->desc; + switch ( desc->bus_type ) { + case BUS_TYPE_PCI: + dpi->host_bus_type.type = EDD_BUS_TYPE_PCI; + dpi->interface_path.pci.bus = PCI_BUS ( desc->location ); + dpi->interface_path.pci.slot = PCI_SLOT ( desc->location ); + dpi->interface_path.pci.function = PCI_FUNC ( desc->location ); + dpi->interface_path.pci.channel = 0xff; /* unused */ + break; + default: + DBGC ( sandev, "INT13 drive %02x unrecognised bus type %d\n", + sandev->drive, desc->bus_type ); + return -ENOTSUP; + } + + /* Get EDD block device description */ + if ( ( rc = edd_describe ( &sanpath->block, &dpi->interface_type, + &dpi->device_path ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x cannot identify block device: " + "%s\n", sandev->drive, strerror ( rc ) ); + return rc; + } + + /* Fill in common fields and fix checksum */ + dpi->key = EDD_DEVICE_PATH_INFO_KEY; + dpi->len = sizeof ( *dpi ); + for ( i = 0 ; i < sizeof ( *dpi ) ; i++ ) + sum += *( ( ( uint8_t * ) dpi ) + i ); + dpi->checksum -= sum; + + return 0; +} + +/** + * INT 13, 48 - Get extended parameters + * + * @v sandev SAN device + * @v ds:si Drive parameter table + * @ret status Status code + */ +static int int13_get_extended_parameters ( struct san_device *sandev, + struct 
i386_all_regs *ix86 ) { + struct int13_data *int13 = sandev->priv; + struct int13_disk_parameters params; + struct segoff address; + size_t len = sizeof ( params ); + uint16_t bufsize; + int rc; + + /* Get buffer size */ + get_real ( bufsize, ix86->segs.ds, + ( ix86->regs.si + offsetof ( typeof ( params ), bufsize ))); + + DBGC2 ( sandev, "Get extended drive parameters to %04x:%04x+%02x\n", + ix86->segs.ds, ix86->regs.si, bufsize ); + + /* Build drive parameters */ + memset ( ¶ms, 0, sizeof ( params ) ); + params.flags = INT13_FL_DMA_TRANSPARENT; + if ( ( int13->cylinders < 1024 ) && + ( sandev_capacity ( sandev ) <= INT13_MAX_CHS_SECTORS ) ) { + params.flags |= INT13_FL_CHS_VALID; + } + params.cylinders = int13->cylinders; + params.heads = int13->heads; + params.sectors_per_track = int13->sectors_per_track; + params.sectors = sandev_capacity ( sandev ); + params.sector_size = sandev_blksize ( sandev ); + memset ( ¶ms.dpte, 0xff, sizeof ( params.dpte ) ); + if ( ( rc = int13_device_path_info ( sandev, ¶ms.dpi ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x could not provide device " + "path information: %s\n", + sandev->drive, strerror ( rc ) ); + len = offsetof ( typeof ( params ), dpi ); + } + + /* Calculate returned "buffer size" (which will be less than + * the length actually copied if device path information is + * present). 
+ */ + if ( bufsize < offsetof ( typeof ( params ), dpte ) ) + return -INT13_STATUS_INVALID; + if ( bufsize < offsetof ( typeof ( params ), dpi ) ) { + params.bufsize = offsetof ( typeof ( params ), dpte ); + } else { + params.bufsize = offsetof ( typeof ( params ), dpi ); + } + + DBGC ( sandev, "INT 13 drive %02x described using extended " + "parameters:\n", sandev->drive ); + address.segment = ix86->segs.ds; + address.offset = ix86->regs.si; + DBGC_HDA ( sandev, address, ¶ms, len ); + + /* Return drive parameters */ + if ( len > bufsize ) + len = bufsize; + copy_to_real ( ix86->segs.ds, ix86->regs.si, ¶ms, len ); + + return 0; +} + +/** + * INT 13, 4b - Get status or terminate CD-ROM emulation + * + * @v sandev SAN device + * @v ds:si Specification packet + * @ret status Status code + */ +static int int13_cdrom_status_terminate ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + struct int13_cdrom_specification specification; + + DBGC2 ( sandev, "Get CD-ROM emulation status to %04x:%04x%s\n", + ix86->segs.ds, ix86->regs.si, + ( ix86->regs.al ? "" : " and terminate" ) ); + + /* Fail if we are not a CD-ROM */ + if ( ! 
sandev->is_cdrom ) { + DBGC ( sandev, "INT13 drive %02x is not a CD-ROM\n", + sandev->drive ); + return -INT13_STATUS_INVALID; + } + + /* Build specification packet */ + memset ( &specification, 0, sizeof ( specification ) ); + specification.size = sizeof ( specification ); + specification.drive = sandev->drive; + + /* Return specification packet */ + copy_to_real ( ix86->segs.ds, ix86->regs.si, &specification, + sizeof ( specification ) ); + + return 0; +} + + +/** + * INT 13, 4d - Read CD-ROM boot catalog + * + * @v sandev SAN device + * @v ds:si Command packet + * @ret status Status code + */ +static int int13_cdrom_read_boot_catalog ( struct san_device *sandev, + struct i386_all_regs *ix86 ) { + struct int13_data *int13 = sandev->priv; + struct int13_cdrom_boot_catalog_command command; + unsigned int start; + int rc; + + /* Read parameters from command packet */ + copy_from_real ( &command, ix86->segs.ds, ix86->regs.si, + sizeof ( command ) ); + DBGC2 ( sandev, "Read CD-ROM boot catalog to %08x\n", command.buffer ); + + /* Fail if we have no boot catalog */ + if ( ! 
int13->boot_catalog ) { + DBGC ( sandev, "INT13 drive %02x has no boot catalog\n", + sandev->drive ); + return -INT13_STATUS_INVALID; + } + start = ( int13->boot_catalog + command.start ); + + /* Read from boot catalog */ + if ( ( rc = sandev_read ( sandev, start, command.count, + phys_to_user ( command.buffer ) ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x could not read boot catalog: " + "%s\n", sandev->drive, strerror ( rc ) ); + return -INT13_STATUS_READ_ERROR; + } + + return 0; +} + +/** + * INT 13 handler + * + */ +static __asmcall void int13 ( struct i386_all_regs *ix86 ) { + int command = ix86->regs.ah; + unsigned int bios_drive = ix86->regs.dl; + struct san_device *sandev; + struct int13_data *int13; + int status; + + /* Check BIOS hasn't killed off our drive */ + int13_check_num_drives(); + + for_each_sandev ( sandev ) { + + int13 = sandev->priv; + if ( bios_drive != sandev->drive ) { + /* Remap any accesses to this drive's natural number */ + if ( bios_drive == int13->natural_drive ) { + DBGC2 ( sandev, "INT13,%02x (%02x) remapped to " + "(%02x)\n", ix86->regs.ah, + bios_drive, sandev->drive ); + ix86->regs.dl = sandev->drive; + return; + } else if ( ( ( bios_drive & 0x7f ) == 0x7f ) && + ( command == INT13_CDROM_STATUS_TERMINATE ) + && sandev->is_cdrom ) { + /* Catch non-drive-specific CD-ROM calls */ + } else { + continue; + } + } + + DBGC2 ( sandev, "INT13,%02x (%02x): ", + ix86->regs.ah, bios_drive ); + + switch ( command ) { + case INT13_RESET: + status = int13_reset ( sandev, ix86 ); + break; + case INT13_GET_LAST_STATUS: + status = int13_get_last_status ( sandev, ix86 ); + break; + case INT13_READ_SECTORS: + status = int13_read_sectors ( sandev, ix86 ); + break; + case INT13_WRITE_SECTORS: + status = int13_write_sectors ( sandev, ix86 ); + break; + case INT13_GET_PARAMETERS: + status = int13_get_parameters ( sandev, ix86 ); + break; + case INT13_GET_DISK_TYPE: + status = int13_get_disk_type ( sandev, ix86 ); + break; + case 
INT13_EXTENSION_CHECK: + status = int13_extension_check ( sandev, ix86 ); + break; + case INT13_EXTENDED_READ: + status = int13_extended_read ( sandev, ix86 ); + break; + case INT13_EXTENDED_WRITE: + status = int13_extended_write ( sandev, ix86 ); + break; + case INT13_EXTENDED_VERIFY: + status = int13_extended_verify ( sandev, ix86 ); + break; + case INT13_EXTENDED_SEEK: + status = int13_extended_seek ( sandev, ix86 ); + break; + case INT13_GET_EXTENDED_PARAMETERS: + status = int13_get_extended_parameters ( sandev, ix86 ); + break; + case INT13_CDROM_STATUS_TERMINATE: + status = int13_cdrom_status_terminate ( sandev, ix86 ); + break; + case INT13_CDROM_READ_BOOT_CATALOG: + status = int13_cdrom_read_boot_catalog ( sandev, ix86 ); + break; + default: + DBGC2 ( sandev, "*** Unrecognised INT13 ***\n" ); + status = -INT13_STATUS_INVALID; + break; + } + + /* Store status for INT 13,01 */ + int13->last_status = status; + + /* Negative status indicates an error */ + if ( status < 0 ) { + status = -status; + DBGC ( sandev, "INT13,%02x (%02x) failed with status " + "%02x\n", ix86->regs.ah, sandev->drive, status ); + } else { + ix86->flags &= ~CF; + } + ix86->regs.ah = status; + + /* Set OF to indicate to wrapper not to chain this call */ + ix86->flags |= OF; + + return; + } +} + +/** + * Hook INT 13 handler + * + */ +static void int13_hook_vector ( void ) { + /* Assembly wrapper to call int13(). int13() sets OF if we + * should not chain to the previous handler. (The wrapper + * clears CF and OF before calling int13()). 
+ */ + __asm__ __volatile__ ( + TEXT16_CODE ( "\nint13_wrapper:\n\t" + /* Preserve %ax and %dx for future reference */ + "pushw %%bp\n\t" + "movw %%sp, %%bp\n\t" + "pushw %%ax\n\t" + "pushw %%dx\n\t" + /* Clear OF, set CF, call int13() */ + "orb $0, %%al\n\t" + "stc\n\t" + VIRT_CALL ( int13 ) + /* Chain if OF not set */ + "jo 1f\n\t" + "pushfw\n\t" + "lcall *%%cs:int13_vector\n\t" + "\n1:\n\t" + /* Overwrite flags for iret */ + "pushfw\n\t" + "popw 6(%%bp)\n\t" + /* Fix up %dl: + * + * INT 13,15 : do nothing if hard disk + * INT 13,08 : load with number of drives + * all others: restore original value + */ + "cmpb $0x15, -1(%%bp)\n\t" + "jne 2f\n\t" + "testb $0x80, -4(%%bp)\n\t" + "jnz 3f\n\t" + "\n2:\n\t" + "movb -4(%%bp), %%dl\n\t" + "cmpb $0x08, -1(%%bp)\n\t" + "jne 3f\n\t" + "testb $0x80, %%dl\n\t" + "movb %%cs:num_drives, %%dl\n\t" + "jnz 3f\n\t" + "movb %%cs:num_fdds, %%dl\n\t" + /* Return */ + "\n3:\n\t" + "movw %%bp, %%sp\n\t" + "popw %%bp\n\t" + "iret\n\t" ) : : ); + + hook_bios_interrupt ( 0x13, ( intptr_t ) int13_wrapper, &int13_vector ); +} + +/** + * Unhook INT 13 handler + */ +static void int13_unhook_vector ( void ) { + unhook_bios_interrupt ( 0x13, ( intptr_t ) int13_wrapper, + &int13_vector ); +} + +/** + * Hook INT 13 SAN device + * + * @v drive Drive number + * @v uris List of URIs + * @v count Number of URIs + * @v flags Flags + * @ret drive Drive number, or negative error + * + * Registers the drive with the INT 13 emulation subsystem, and hooks + * the INT 13 interrupt vector (if not already hooked). + */ +static int int13_hook ( unsigned int drive, struct uri **uris, + unsigned int count, unsigned int flags ) { + struct san_device *sandev; + struct int13_data *int13; + unsigned int natural_drive; + void *scratch; + int need_hook = ( ! have_sandevs() ); + int rc; + + /* Calculate natural drive number */ + int13_sync_num_drives(); + natural_drive = ( ( drive & 0x80 ) ? 
( num_drives | 0x80 ) : num_fdds ); + + /* Use natural drive number if directed to do so */ + if ( ( drive & 0x7f ) == 0x7f ) + drive = natural_drive; + + /* Allocate SAN device */ + sandev = alloc_sandev ( uris, count, sizeof ( *int13 ) ); + if ( ! sandev ) { + rc = -ENOMEM; + goto err_alloc; + } + int13 = sandev->priv; + int13->natural_drive = natural_drive; + + /* Register SAN device */ + if ( ( rc = register_sandev ( sandev, drive, flags ) ) != 0 ) { + DBGC ( sandev, "INT13 drive %02x could not register: %s\n", + drive, strerror ( rc ) ); + goto err_register; + } + + /* Allocate scratch area */ + scratch = malloc ( sandev_blksize ( sandev ) ); + if ( ! scratch ) + goto err_alloc_scratch; + + /* Parse parameters, if present */ + if ( sandev->is_cdrom && + ( ( rc = int13_parse_eltorito ( sandev, scratch ) ) != 0 ) ) + goto err_parse_eltorito; + + /* Give drive a default geometry, if applicable */ + if ( ( sandev_blksize ( sandev ) == INT13_BLKSIZE ) && + ( ( rc = int13_guess_geometry ( sandev, scratch ) ) != 0 ) ) + goto err_guess_geometry; + + DBGC ( sandev, "INT13 drive %02x (naturally %02x) registered with " + "C/H/S geometry %d/%d/%d\n", + sandev->drive, int13->natural_drive, int13->cylinders, + int13->heads, int13->sectors_per_track ); + + /* Hook INT 13 vector if not already hooked */ + if ( need_hook ) { + int13_hook_vector(); + devices_get(); + } + + /* Update BIOS drive count */ + int13_sync_num_drives(); + + free ( scratch ); + return drive; + + err_guess_geometry: + err_parse_eltorito: + free ( scratch ); + err_alloc_scratch: + unregister_sandev ( sandev ); + err_register: + sandev_put ( sandev ); + err_alloc: + return rc; +} + +/** + * Unhook INT 13 SAN device + * + * @v drive Drive number + * + * Unregisters the drive from the INT 13 emulation subsystem. If this + * is the last SAN device, the INT 13 vector is unhooked (if + * possible). 
+ */ +static void int13_unhook ( unsigned int drive ) { + struct san_device *sandev; + + /* Find drive */ + sandev = sandev_find ( drive ); + if ( ! sandev ) { + DBG ( "INT13 cannot find drive %02x\n", drive ); + return; + } + + /* Unregister SAN device */ + unregister_sandev ( sandev ); + + /* Should adjust BIOS drive count, but it's difficult + * to do so reliably. + */ + + DBGC ( sandev, "INT13 drive %02x unregistered\n", sandev->drive ); + + /* Unhook INT 13 vector if no more drives */ + if ( ! have_sandevs() ) { + devices_put(); + int13_unhook_vector(); + } + + /* Drop reference to drive */ + sandev_put ( sandev ); +} + +/** + * Load and verify master boot record from INT 13 drive + * + * @v drive Drive number + * @v address Boot code address to fill in + * @ret rc Return status code + */ +static int int13_load_mbr ( unsigned int drive, struct segoff *address ) { + uint16_t status; + int discard_b, discard_c, discard_d; + uint16_t magic; + + /* Use INT 13, 02 to read the MBR */ + address->segment = 0; + address->offset = 0x7c00; + __asm__ __volatile__ ( REAL_CODE ( "pushw %%es\n\t" + "pushl %%ebx\n\t" + "popw %%bx\n\t" + "popw %%es\n\t" + "stc\n\t" + "sti\n\t" + "int $0x13\n\t" + "sti\n\t" /* BIOS bugs */ + "jc 1f\n\t" + "xorw %%ax, %%ax\n\t" + "\n1:\n\t" + "popw %%es\n\t" ) + : "=a" ( status ), "=b" ( discard_b ), + "=c" ( discard_c ), "=d" ( discard_d ) + : "a" ( 0x0201 ), "b" ( *address ), + "c" ( 1 ), "d" ( drive ) ); + if ( status ) { + DBG ( "INT13 drive %02x could not read MBR (status %04x)\n", + drive, status ); + return -EIO; + } + + /* Check magic signature */ + get_real ( magic, address->segment, + ( address->offset + + offsetof ( struct master_boot_record, magic ) ) ); + if ( magic != INT13_MBR_MAGIC ) { + DBG ( "INT13 drive %02x does not contain a valid MBR\n", + drive ); + return -ENOEXEC; + } + + return 0; +} + +/** El Torito boot catalog command packet */ +static struct int13_cdrom_boot_catalog_command __data16 ( eltorito_cmd ) = { + .size = 
sizeof ( struct int13_cdrom_boot_catalog_command ), + .count = 1, + .buffer = 0x7c00, + .start = 0, +}; +#define eltorito_cmd __use_data16 ( eltorito_cmd ) + +/** El Torito disk address packet */ +static struct int13_disk_address __bss16 ( eltorito_address ); +#define eltorito_address __use_data16 ( eltorito_address ) + +/** + * Load and verify El Torito boot record from INT 13 drive + * + * @v drive Drive number + * @v address Boot code address to fill in + * @ret rc Return status code + */ +static int int13_load_eltorito ( unsigned int drive, struct segoff *address ) { + struct { + struct eltorito_validation_entry valid; + struct eltorito_boot_entry boot; + } __attribute__ (( packed )) catalog; + uint16_t status; + + /* Use INT 13, 4d to read the boot catalog */ + __asm__ __volatile__ ( REAL_CODE ( "stc\n\t" + "sti\n\t" + "int $0x13\n\t" + "sti\n\t" /* BIOS bugs */ + "jc 1f\n\t" + "xorw %%ax, %%ax\n\t" + "\n1:\n\t" ) + : "=a" ( status ) + : "a" ( 0x4d00 ), "d" ( drive ), + "S" ( __from_data16 ( &eltorito_cmd ) ) ); + if ( status ) { + DBG ( "INT13 drive %02x could not read El Torito boot catalog " + "(status %04x)\n", drive, status ); + return -EIO; + } + copy_from_user ( &catalog, phys_to_user ( eltorito_cmd.buffer ), 0, + sizeof ( catalog ) ); + + /* Sanity checks */ + if ( catalog.valid.platform_id != ELTORITO_PLATFORM_X86 ) { + DBG ( "INT13 drive %02x El Torito specifies unknown platform " + "%02x\n", drive, catalog.valid.platform_id ); + return -ENOEXEC; + } + if ( catalog.boot.indicator != ELTORITO_BOOTABLE ) { + DBG ( "INT13 drive %02x El Torito is not bootable\n", drive ); + return -ENOEXEC; + } + if ( catalog.boot.media_type != ELTORITO_NO_EMULATION ) { + DBG ( "INT13 drive %02x El Torito requires emulation " + "type %02x\n", drive, catalog.boot.media_type ); + return -ENOTSUP; + } + DBG ( "INT13 drive %02x El Torito boot image at LBA %08x (count %d)\n", + drive, catalog.boot.start, catalog.boot.length ); + address->segment = ( catalog.boot.load_segment 
? + catalog.boot.load_segment : 0x7c0 ); + address->offset = 0; + DBG ( "INT13 drive %02x El Torito boot image loads at %04x:%04x\n", + drive, address->segment, address->offset ); + + /* Use INT 13, 42 to read the boot image */ + eltorito_address.bufsize = + offsetof ( typeof ( eltorito_address ), buffer_phys ); + eltorito_address.count = catalog.boot.length; + eltorito_address.buffer = *address; + eltorito_address.lba = catalog.boot.start; + __asm__ __volatile__ ( REAL_CODE ( "stc\n\t" + "sti\n\t" + "int $0x13\n\t" + "sti\n\t" /* BIOS bugs */ + "jc 1f\n\t" + "xorw %%ax, %%ax\n\t" + "\n1:\n\t" ) + : "=a" ( status ) + : "a" ( 0x4200 ), "d" ( drive ), + "S" ( __from_data16 ( &eltorito_address ) ) ); + if ( status ) { + DBG ( "INT13 drive %02x could not read El Torito boot image " + "(status %04x)\n", drive, status ); + return -EIO; + } + + return 0; +} + +/** + * Attempt to boot from an INT 13 drive + * + * @v drive Drive number + * @v filename Filename (or NULL to use default) + * @ret rc Return status code + * + * This boots from the specified INT 13 drive by loading the Master + * Boot Record to 0000:7c00 and jumping to it. INT 18 is hooked to + * capture an attempt by the MBR to boot the next device. (This is + * the closest thing to a return path from an MBR). + * + * Note that this function can never return success, by definition. + */ +static int int13_boot ( unsigned int drive, const char *filename __unused ) { + struct memory_map memmap; + struct segoff address; + int rc; + + /* Look for a usable boot sector */ + if ( ( ( rc = int13_load_mbr ( drive, &address ) ) != 0 ) && + ( ( rc = int13_load_eltorito ( drive, &address ) ) != 0 ) ) + return rc; + + /* Dump out memory map prior to boot, if memmap debugging is + * enabled. Not required for program flow, but we have so + * many problems that turn out to be memory-map related that + * it's worth doing. 
+ */ + get_memmap ( &memmap ); + + /* Jump to boot sector */ + if ( ( rc = call_bootsector ( address.segment, address.offset, + drive ) ) != 0 ) { + DBG ( "INT13 drive %02x boot returned: %s\n", + drive, strerror ( rc ) ); + return rc; + } + + return -ECANCELED; /* -EIMPOSSIBLE */ +} + +/** Maximum size of boot firmware table(s) */ +#define XBFTAB_SIZE 768 + +/** Alignment of boot firmware table entries */ +#define XBFTAB_ALIGN 16 + +/** The boot firmware table(s) generated by iPXE */ +static uint8_t __bss16_array ( xbftab, [XBFTAB_SIZE] ) + __attribute__ (( aligned ( XBFTAB_ALIGN ) )); +#define xbftab __use_data16 ( xbftab ) + +/** Total used length of boot firmware tables */ +static size_t xbftab_used; + +/** + * Install ACPI table + * + * @v acpi ACPI description header + * @ret rc Return status code + */ +static int int13_install ( struct acpi_header *acpi ) { + struct segoff xbft_address; + struct acpi_header *installed; + size_t len; + + /* Check length */ + len = acpi->length; + if ( len > ( sizeof ( xbftab ) - xbftab_used ) ) { + DBGC ( acpi, "INT13 out of space for %s table\n", + acpi_name ( acpi->signature ) ); + return -ENOSPC; + } + + /* Install table */ + installed = ( ( ( void * ) xbftab ) + xbftab_used ); + memcpy ( installed, acpi, len ); + xbft_address.segment = rm_ds; + xbft_address.offset = __from_data16 ( installed ); + + /* Fill in common parameters */ + strncpy ( installed->oem_id, "FENSYS", + sizeof ( installed->oem_id ) ); + strncpy ( installed->oem_table_id, "iPXE", + sizeof ( installed->oem_table_id ) ); + + /* Fix checksum */ + acpi_fix_checksum ( installed ); + + /* Update used length */ + xbftab_used = ( ( xbftab_used + len + XBFTAB_ALIGN - 1 ) & + ~( XBFTAB_ALIGN - 1 ) ); + + DBGC ( acpi, "INT13 installed %s:\n", + acpi_name ( installed->signature ) ); + DBGC_HDA ( acpi, xbft_address, installed, len ); + return 0; +} + +/** + * Describe SAN devices for SAN-booted operating system + * + * @ret rc Return status code + */ +static int 
int13_describe ( void ) { + int rc; + + /* Clear tables */ + memset ( &xbftab, 0, sizeof ( xbftab ) ); + xbftab_used = 0; + + /* Install ACPI tables */ + if ( ( rc = acpi_install ( int13_install ) ) != 0 ) { + DBG ( "INT13 could not install ACPI tables: %s\n", + strerror ( rc ) ); + return rc; + } + + return 0; +} + +PROVIDE_SANBOOT ( pcbios, san_hook, int13_hook ); +PROVIDE_SANBOOT ( pcbios, san_unhook, int13_unhook ); +PROVIDE_SANBOOT ( pcbios, san_boot, int13_boot ); +PROVIDE_SANBOOT ( pcbios, san_describe, int13_describe ); diff --git a/src/arch/x86/interface/pcbios/int13con.c b/src/arch/x86/interface/pcbios/int13con.c new file mode 100644 index 00000000..8106cd15 --- /dev/null +++ b/src/arch/x86/interface/pcbios/int13con.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * INT13 disk log console + * + */ + +/* Set default console usage if applicable */ +#if ! 
( defined ( CONSOLE_INT13 ) && CONSOLE_EXPLICIT ( CONSOLE_INT13 ) ) +#undef CONSOLE_INT13 +#define CONSOLE_INT13 ( CONSOLE_USAGE_ALL & ~CONSOLE_USAGE_LOG ) +#endif + +/** Disk drive number */ +#define INT13CON_DRIVE 0x80 + +/** Log partition type */ +#define INT13CON_PARTITION_TYPE 0xe0 + +/** Maximum number of outstanding unwritten characters */ +#define INT13CON_MAX_UNWRITTEN 64 + +/** Log partition header */ +struct int13con_header { + /** Magic signature */ + char magic[10]; +} __attribute__ (( packed )); + +/** Log partition magic signature */ +#define INT13CON_MAGIC "iPXE LOG\n\n" + +/** Original INT13 vector */ +static struct segoff __bss16 ( int13con_vector ); +#define int13con_vector __use_data16 ( int13con_vector ) + +/** Sector buffer */ +static uint8_t __bss16_array ( int13con_buffer, [INT13_BLKSIZE] ); +#define int13con_buffer __use_data16 ( int13con_buffer ) + +/** Disk address packet */ +static struct int13_disk_address __bss16 ( int13con_address ); +#define int13con_address __use_data16 ( int13con_address ) + +/** Current LBA */ +static uint64_t int13con_lba; + +/** Maximum LBA */ +static uint64_t int13con_max_lba; + +/** Current offset within sector */ +static size_t int13con_offset; + +/** Number of unwritten characters */ +static size_t int13con_unwritten; + +struct console_driver int13con __console_driver; + +/** + * Read/write disk sector + * + * @v op Operation + * @v lba Logical block address + * @ret rc Return status code + */ +static int int13con_rw ( unsigned int op, uint64_t lba ) { + uint8_t error; + + /* Construct disk address packet */ + int13con_address.bufsize = sizeof ( int13con_address ); + int13con_address.count = 1; + int13con_address.buffer.segment = rm_ds; + int13con_address.buffer.offset = __from_data16 ( int13con_buffer ); + int13con_address.lba = lba; + + /* Emulate INT13 via original vector. We do this since iPXE + * (or another subsequent bootloader) may hook INT13 and remap + * drive numbers. 
+ */ + __asm__ ( REAL_CODE ( "pushfw\n\t" + "cli\n\t" + "lcall *int13con_vector\n\t" ) + : "=a" ( error ) + : "0" ( op << 8 ), "d" ( INT13CON_DRIVE ), + "S" ( __from_data16 ( &int13con_address ) ) ); + if ( error ) { + DBG ( "INT13CON operation %04x failed: %02x\n", + op, error ); + return -EIO; + } + + return 0; +} + +/** + * Write character to console + * + * @v character Character + */ +static void int13con_putchar ( int character ) { + static int busy; + int rc; + + /* Ignore if we are already mid-logging */ + if ( busy ) + return; + busy = 1; + + /* Write character to buffer */ + int13con_buffer[int13con_offset++] = character; + int13con_unwritten++; + + /* Write sector to disk, if applicable */ + if ( ( int13con_offset == INT13_BLKSIZE ) || + ( int13con_unwritten == INT13CON_MAX_UNWRITTEN ) || + ( character == '\n' ) ) { + + /* Write sector to disk */ + if ( ( rc = int13con_rw ( INT13_EXTENDED_WRITE, + int13con_lba ) ) != 0 ) { + DBG ( "INT13CON could not write log\n" ); + /* Ignore and continue; there's nothing we can do */ + } + + /* Reset count of unwritten characters */ + int13con_unwritten = 0; + } + + /* Move to next sector, if applicable */ + if ( int13con_offset == INT13_BLKSIZE ) { + + /* Disable console if we have run out of space */ + if ( int13con_lba >= int13con_max_lba ) + int13con.disabled = 1; + + /* Clear log buffer */ + memset ( int13con_buffer, 0, sizeof ( int13con_buffer ) ); + int13con_offset = 0; + + /* Move to next sector */ + int13con_lba++; + } + + /* Clear busy flag */ + busy = 0; +} + +/** + * Find log partition + * + * @ret rc Return status code + */ +static int int13con_find ( void ) { + struct master_boot_record *mbr = + ( ( struct master_boot_record * ) int13con_buffer ); + struct int13con_header *hdr = + ( ( struct int13con_header * ) int13con_buffer ); + struct partition_table_entry part[4]; + unsigned int i; + int rc; + + /* Read MBR */ + if ( ( rc = int13con_rw ( INT13_EXTENDED_READ, 0 ) ) != 0 ) { + DBG ( "INT13CON could 
not read MBR: %s\n", strerror ( rc ) ); + return rc; + } + + /* Check MBR magic */ + if ( mbr->magic != INT13_MBR_MAGIC ) { + DBG ( "INT13CON incorrect MBR magic\n" ); + DBG2_HDA ( 0, mbr, sizeof ( *mbr ) ); + return -EINVAL; + } + + /* Look for magic partition */ + memcpy ( part, mbr->partitions, sizeof ( part ) ); + for ( i = 0 ; i < ( sizeof ( part ) / sizeof ( part[0] ) ) ; i++ ) { + + /* Skip partitions of the wrong type */ + if ( part[i].type != INT13CON_PARTITION_TYPE ) + continue; + + /* Read partition header */ + if ( ( rc = int13con_rw ( INT13_EXTENDED_READ, + part[i].start ) ) != 0 ) { + DBG ( "INT13CON partition %d could not read header: " + "%s\n", ( i + 1 ), strerror ( rc ) ); + continue; + } + + /* Check partition header */ + if ( memcmp ( hdr->magic, INT13CON_MAGIC, + sizeof ( hdr->magic ) ) != 0 ) { + DBG ( "INT13CON partition %d bad magic\n", ( i + 1 ) ); + DBG2_HDA ( 0, hdr, sizeof ( *hdr ) ); + continue; + } + + /* Found log partition */ + DBG ( "INT13CON partition %d at [%08x,%08x)\n", ( i + 1 ), + part[i].start, ( part[i].start + part[i].length ) ); + int13con_lba = part[i].start; + int13con_max_lba = ( part[i].start + part[i].length - 1 ); + + /* Initialise log buffer */ + memset ( &int13con_buffer[ sizeof ( *hdr ) ], 0, + ( sizeof ( int13con_buffer ) - sizeof ( *hdr ) ) ); + int13con_offset = sizeof ( hdr->magic ); + + return 0; + } + + DBG ( "INT13CON found no log partition\n" ); + return -ENOENT; +} + +/** + * Initialise INT13 console + * + */ +static void int13con_init ( void ) { + uint8_t error; + uint16_t check; + unsigned int discard_c; + unsigned int discard_d; + int rc; + + /* Check for INT13 extensions */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x13\n\t" + "setc %%al\n\t" ) + : "=a" ( error ), "=b" ( check ), + "=c" ( discard_c ), "=d" ( discard_d ) + : "0" ( INT13_EXTENSION_CHECK << 8 ), + "1" ( 0x55aa ), "3" ( INT13CON_DRIVE ) ); + if ( error || ( check != 0xaa55 ) ) { + DBG ( "INT13CON missing extensions (%02x,%04x)\n", + 
error, check ); + return; + } + + /* Store original INT13 vector */ + copy_from_real ( &int13con_vector, 0, ( 0x13 * 4 ), + sizeof ( int13con_vector ) ); + DBG ( "INT13CON using original INT13 vector %04x:%04x\n", + int13con_vector.segment, int13con_vector.offset ); + + /* Locate log partition */ + if ( ( rc = int13con_find() ) != 0) + return; + + /* Enable console */ + int13con.disabled = 0; +} + +/** + * INT13 console initialisation function + */ +struct init_fn int13con_init_fn __init_fn ( INIT_CONSOLE ) = { + .initialise = int13con_init, +}; + +/** INT13 console driver */ +struct console_driver int13con __console_driver = { + .putchar = int13con_putchar, + .disabled = CONSOLE_DISABLED, + .usage = CONSOLE_INT13, +}; diff --git a/src/arch/x86/interface/pcbios/memmap.c b/src/arch/x86/interface/pcbios/memmap.c new file mode 100644 index 00000000..daae382b --- /dev/null +++ b/src/arch/x86/interface/pcbios/memmap.c @@ -0,0 +1,343 @@ +/* + * Copyright (C) 2006 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** + * @file + * + * Memory mapping + * + */ + +/** Magic value for INT 15,e820 calls */ +#define SMAP ( 0x534d4150 ) + +/** An INT 15,e820 memory map entry */ +struct e820_entry { + /** Start of region */ + uint64_t start; + /** Length of region */ + uint64_t len; + /** Type of region */ + uint32_t type; + /** Extended attributes (optional) */ + uint32_t attrs; +} __attribute__ (( packed )); + +#define E820_TYPE_RAM 1 /**< Normal memory */ +#define E820_TYPE_RESERVED 2 /**< Reserved and unavailable */ +#define E820_TYPE_ACPI 3 /**< ACPI reclaim memory */ +#define E820_TYPE_NVS 4 /**< ACPI NVS memory */ + +#define E820_ATTR_ENABLED 0x00000001UL +#define E820_ATTR_NONVOLATILE 0x00000002UL +#define E820_ATTR_UNKNOWN 0xfffffffcUL + +#define E820_MIN_SIZE 20 + +/** Buffer for INT 15,e820 calls */ +static struct e820_entry __bss16 ( e820buf ); +#define e820buf __use_data16 ( e820buf ) + +/** We are running during POST; inhibit INT 15,e820 and INT 15,e801 */ +uint8_t __bss16 ( memmap_post ); +#define memmap_post __use_data16 ( memmap_post ) + +/** + * Get size of extended memory via INT 15,e801 + * + * @ret extmem Extended memory size, in kB, or 0 + */ +static unsigned int extmemsize_e801 ( void ) { + uint16_t extmem_1m_to_16m_k, extmem_16m_plus_64k; + uint16_t confmem_1m_to_16m_k, confmem_16m_plus_64k; + unsigned int flags; + unsigned int extmem; + + /* Inhibit INT 15,e801 during POST */ + if ( memmap_post ) { + DBG ( "INT 15,e801 not available during POST\n" ); + return 0; + } + + __asm__ __volatile__ ( REAL_CODE ( "stc\n\t" + "int $0x15\n\t" + "pushfw\n\t" + "popw %w0\n\t" ) + : "=R" ( flags ), + "=a" ( extmem_1m_to_16m_k ), + "=b" ( extmem_16m_plus_64k ), + "=c" ( confmem_1m_to_16m_k ), + "=d" ( confmem_16m_plus_64k ) + : "a" ( 0xe801 ) ); + + if ( flags & CF ) { + DBG ( "INT 15,e801 failed with CF set\n" ); + return 0; + } + + if ( ! 
( extmem_1m_to_16m_k | extmem_16m_plus_64k ) ) { + DBG ( "INT 15,e801 extmem=0, using confmem\n" ); + extmem_1m_to_16m_k = confmem_1m_to_16m_k; + extmem_16m_plus_64k = confmem_16m_plus_64k; + } + + extmem = ( extmem_1m_to_16m_k + ( extmem_16m_plus_64k * 64 ) ); + DBG ( "INT 15,e801 extended memory size %d+64*%d=%d kB " + "[100000,%llx)\n", extmem_1m_to_16m_k, extmem_16m_plus_64k, + extmem, ( 0x100000 + ( ( ( uint64_t ) extmem ) * 1024 ) ) ); + + /* Sanity check. Some BIOSes report the entire 4GB address + * space as available, which cannot be correct (since that + * would leave no address space available for 32-bit PCI + * BARs). + */ + if ( extmem == ( 0x400000 - 0x400 ) ) { + DBG ( "INT 15,e801 reported whole 4GB; assuming insane\n" ); + return 0; + } + + return extmem; +} + +/** + * Get size of extended memory via INT 15,88 + * + * @ret extmem Extended memory size, in kB + */ +static unsigned int extmemsize_88 ( void ) { + uint16_t extmem; + + /* Ignore CF; it is not reliable for this call */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x15" ) + : "=a" ( extmem ) : "a" ( 0x8800 ) ); + + DBG ( "INT 15,88 extended memory size %d kB [100000, %x)\n", + extmem, ( 0x100000 + ( extmem * 1024 ) ) ); + return extmem; +} + +/** + * Get size of extended memory + * + * @ret extmem Extended memory size, in kB + * + * Note that this is only an approximation; for an accurate picture, + * use the E820 memory map obtained via get_memmap(); + */ +unsigned int extmemsize ( void ) { + unsigned int extmem_e801; + unsigned int extmem_88; + + /* Try INT 15,e801 first, then fall back to INT 15,88 */ + extmem_88 = extmemsize_88(); + extmem_e801 = extmemsize_e801(); + return ( extmem_e801 ? 
extmem_e801 : extmem_88 ); +} + +/** + * Get e820 memory map + * + * @v memmap Memory map to fill in + * @ret rc Return status code + */ +static int meme820 ( struct memory_map *memmap ) { + struct memory_region *region = memmap->regions; + struct memory_region *prev_region = NULL; + uint32_t next = 0; + uint32_t smap; + uint32_t size; + unsigned int flags; + unsigned int discard_D; + + /* Inhibit INT 15,e820 during POST */ + if ( memmap_post ) { + DBG ( "INT 15,e820 not available during POST\n" ); + return -ENOTTY; + } + + /* Clear the E820 buffer. Do this once before starting, + * rather than on each call; some BIOSes rely on the contents + * being preserved between calls. + */ + memset ( &e820buf, 0, sizeof ( e820buf ) ); + + do { + /* Some BIOSes corrupt %esi for fun. Guard against + * this by telling gcc that all non-output registers + * may be corrupted. + */ + __asm__ __volatile__ ( REAL_CODE ( "pushl %%ebp\n\t" + "stc\n\t" + "int $0x15\n\t" + "pushfw\n\t" + "popw %%dx\n\t" + "popl %%ebp\n\t" ) + : "=a" ( smap ), "=b" ( next ), + "=c" ( size ), "=d" ( flags ), + "=D" ( discard_D ) + : "a" ( 0xe820 ), "b" ( next ), + "D" ( __from_data16 ( &e820buf ) ), + "c" ( sizeof ( e820buf ) ), + "d" ( SMAP ) + : "esi", "memory" ); + + if ( smap != SMAP ) { + DBG ( "INT 15,e820 failed SMAP signature check\n" ); + return -ENOTSUP; + } + + if ( size < E820_MIN_SIZE ) { + DBG ( "INT 15,e820 returned only %d bytes\n", size ); + return -EINVAL; + } + + if ( flags & CF ) { + DBG ( "INT 15,e820 terminated on CF set\n" ); + break; + } + + /* If first region is not RAM, assume map is invalid */ + if ( ( memmap->count == 0 ) && + ( e820buf.type != E820_TYPE_RAM ) ) { + DBG ( "INT 15,e820 failed, first entry not RAM\n" ); + return -EINVAL; + } + + DBG ( "INT 15,e820 region [%llx,%llx) type %d", + e820buf.start, ( e820buf.start + e820buf.len ), + ( int ) e820buf.type ); + if ( size > offsetof ( typeof ( e820buf ), attrs ) ) { + DBG ( " (%s", ( ( e820buf.attrs & E820_ATTR_ENABLED ) + 
? "enabled" : "disabled" ) ); + if ( e820buf.attrs & E820_ATTR_NONVOLATILE ) + DBG ( ", non-volatile" ); + if ( e820buf.attrs & E820_ATTR_UNKNOWN ) + DBG ( ", other [%08x]", e820buf.attrs ); + DBG ( ")" ); + } + DBG ( "\n" ); + + /* Discard non-RAM regions */ + if ( e820buf.type != E820_TYPE_RAM ) + continue; + + /* Check extended attributes, if present */ + if ( size > offsetof ( typeof ( e820buf ), attrs ) ) { + if ( ! ( e820buf.attrs & E820_ATTR_ENABLED ) ) + continue; + if ( e820buf.attrs & E820_ATTR_NONVOLATILE ) + continue; + } + + region->start = e820buf.start; + region->end = e820buf.start + e820buf.len; + + /* Check for adjacent regions and merge them */ + if ( prev_region && ( region->start == prev_region->end ) ) { + prev_region->end = region->end; + } else { + prev_region = region; + region++; + memmap->count++; + } + + if ( memmap->count >= ( sizeof ( memmap->regions ) / + sizeof ( memmap->regions[0] ) ) ) { + DBG ( "INT 15,e820 too many regions returned\n" ); + /* Not a fatal error; what we've got so far at + * least represents valid regions of memory, + * even if we couldn't get them all. + */ + break; + } + } while ( next != 0 ); + + /* Sanity checks. Some BIOSes report complete garbage via INT + * 15,e820 (especially at POST time), despite passing the + * signature checks. We currently check for a base memory + * region (starting at 0) and at least one high memory region + * (starting at 0x100000). 
+ */ + if ( memmap->count < 2 ) { + DBG ( "INT 15,e820 returned only %d regions; assuming " + "insane\n", memmap->count ); + return -EINVAL; + } + if ( memmap->regions[0].start != 0 ) { + DBG ( "INT 15,e820 region 0 starts at %llx (expected 0); " + "assuming insane\n", memmap->regions[0].start ); + return -EINVAL; + } + if ( memmap->regions[1].start != 0x100000 ) { + DBG ( "INT 15,e820 region 1 starts at %llx (expected 100000); " + "assuming insane\n", memmap->regions[0].start ); + return -EINVAL; + } + + return 0; +} + +/** + * Get memory map + * + * @v memmap Memory map to fill in + */ +void x86_get_memmap ( struct memory_map *memmap ) { + unsigned int basemem, extmem; + int rc; + + DBG ( "Fetching system memory map\n" ); + + /* Clear memory map */ + memset ( memmap, 0, sizeof ( *memmap ) ); + + /* Get base and extended memory sizes */ + basemem = basememsize(); + DBG ( "FBMS base memory size %d kB [0,%x)\n", + basemem, ( basemem * 1024 ) ); + extmem = extmemsize(); + + /* Try INT 15,e820 first */ + if ( ( rc = meme820 ( memmap ) ) == 0 ) { + DBG ( "Obtained system memory map via INT 15,e820\n" ); + return; + } + + /* Fall back to constructing a map from basemem and extmem sizes */ + DBG ( "INT 15,e820 failed; constructing map\n" ); + memmap->regions[0].end = ( basemem * 1024 ); + memmap->regions[1].start = 0x100000; + memmap->regions[1].end = 0x100000 + ( extmem * 1024 ); + memmap->count = 2; +} + +PROVIDE_IOAPI ( x86, get_memmap, x86_get_memmap ); diff --git a/src/arch/x86/interface/pcbios/memtop_umalloc.c b/src/arch/x86/interface/pcbios/memtop_umalloc.c new file mode 100644 index 00000000..1d3f40a1 --- /dev/null +++ b/src/arch/x86/interface/pcbios/memtop_umalloc.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2007 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * External memory allocation + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/** Maximum usable address for external allocated memory */ +#define EM_MAX_ADDRESS 0xffffffffUL + +/** Alignment of external allocated memory */ +#define EM_ALIGN ( 4 * 1024 ) + +/** Equivalent of NOWHERE for user pointers */ +#define UNOWHERE ( ~UNULL ) + +/** An external memory block */ +struct external_memory { + /** Size of this memory block (excluding this header) */ + size_t size; + /** Block is currently in use */ + int used; +}; + +/** Top of heap */ +static userptr_t top = UNULL; + +/** Bottom of heap (current lowest allocated block) */ +static userptr_t bottom = UNULL; + +/** Remaining space on heap */ +static size_t heap_size; + +/** + * Find largest usable memory region + * + * @ret start Start of region + * @ret len Length of region + */ +size_t largest_memblock ( userptr_t *start ) { + struct memory_map memmap; + struct memory_region *region; + 
physaddr_t max = EM_MAX_ADDRESS; + physaddr_t region_start; + physaddr_t region_end; + size_t region_len; + unsigned int i; + size_t len = 0; + + /* Avoid returning uninitialised data on error */ + *start = UNULL; + + /* Scan through all memory regions */ + get_memmap ( &memmap ); + for ( i = 0 ; i < memmap.count ; i++ ) { + region = &memmap.regions[i]; + DBG ( "Considering [%llx,%llx)\n", region->start, region->end ); + + /* Truncate block to maximum physical address */ + if ( region->start > max ) { + DBG ( "...starts after maximum address %lx\n", max ); + continue; + } + region_start = region->start; + if ( region->end > max ) { + DBG ( "...end truncated to maximum address %lx\n", max); + region_end = 0; /* =max, given the wraparound */ + } else { + region_end = region->end; + } + region_len = ( region_end - region_start ); + + /* Use largest block */ + if ( region_len > len ) { + DBG ( "...new best block found\n" ); + *start = phys_to_user ( region_start ); + len = region_len; + } + } + + return len; +} + +/** + * Initialise external heap + * + */ +static void init_eheap ( void ) { + userptr_t base; + + heap_size = largest_memblock ( &base ); + bottom = top = userptr_add ( base, heap_size ); + DBG ( "External heap grows downwards from %lx (size %zx)\n", + user_to_phys ( top, 0 ), heap_size ); +} + +/** + * Collect free blocks + * + */ +static void ecollect_free ( void ) { + struct external_memory extmem; + size_t len; + + /* Walk the free list and collect empty blocks */ + while ( bottom != top ) { + copy_from_user ( &extmem, bottom, -sizeof ( extmem ), + sizeof ( extmem ) ); + if ( extmem.used ) + break; + DBG ( "EXTMEM freeing [%lx,%lx)\n", user_to_phys ( bottom, 0 ), + user_to_phys ( bottom, extmem.size ) ); + len = ( extmem.size + sizeof ( extmem ) ); + bottom = userptr_add ( bottom, len ); + heap_size += len; + } +} + +/** + * Reallocate external memory + * + * @v old_ptr Memory previously allocated by umalloc(), or UNULL + * @v new_size Requested size + * 
@ret new_ptr Allocated memory, or UNULL + * + * Calling realloc() with a new size of zero is a valid way to free a + * memory block. + */ +static userptr_t memtop_urealloc ( userptr_t ptr, size_t new_size ) { + struct external_memory extmem; + userptr_t new = ptr; + size_t align; + + /* (Re)initialise external memory allocator if necessary */ + if ( bottom == top ) + init_eheap(); + + /* Get block properties into extmem */ + if ( ptr && ( ptr != UNOWHERE ) ) { + /* Determine old size */ + copy_from_user ( &extmem, ptr, -sizeof ( extmem ), + sizeof ( extmem ) ); + } else { + /* Create a zero-length block */ + if ( heap_size < sizeof ( extmem ) ) { + DBG ( "EXTMEM out of space\n" ); + return UNULL; + } + ptr = bottom = userptr_add ( bottom, -sizeof ( extmem ) ); + heap_size -= sizeof ( extmem ); + DBG ( "EXTMEM allocating [%lx,%lx)\n", + user_to_phys ( ptr, 0 ), user_to_phys ( ptr, 0 ) ); + extmem.size = 0; + } + extmem.used = ( new_size > 0 ); + + /* Expand/shrink block if possible */ + if ( ptr == bottom ) { + /* Update block */ + new = userptr_add ( ptr, - ( new_size - extmem.size ) ); + align = ( user_to_phys ( new, 0 ) & ( EM_ALIGN - 1 ) ); + new_size += align; + new = userptr_add ( new, -align ); + if ( new_size > ( heap_size + extmem.size ) ) { + DBG ( "EXTMEM out of space\n" ); + return UNULL; + } + DBG ( "EXTMEM expanding [%lx,%lx) to [%lx,%lx)\n", + user_to_phys ( ptr, 0 ), + user_to_phys ( ptr, extmem.size ), + user_to_phys ( new, 0 ), + user_to_phys ( new, new_size )); + memmove_user ( new, 0, ptr, 0, ( ( extmem.size < new_size ) ? 
+ extmem.size : new_size ) ); + bottom = new; + heap_size -= ( new_size - extmem.size ); + extmem.size = new_size; + } else { + /* Cannot expand; can only pretend to shrink */ + if ( new_size > extmem.size ) { + /* Refuse to expand */ + DBG ( "EXTMEM cannot expand [%lx,%lx)\n", + user_to_phys ( ptr, 0 ), + user_to_phys ( ptr, extmem.size ) ); + return UNULL; + } + } + + /* Write back block properties */ + copy_to_user ( new, -sizeof ( extmem ), &extmem, + sizeof ( extmem ) ); + + /* Collect any free blocks and update hidden memory region */ + ecollect_free(); + hide_umalloc ( user_to_phys ( bottom, ( ( bottom == top ) ? + 0 : -sizeof ( extmem ) ) ), + user_to_phys ( top, 0 ) ); + + return ( new_size ? new : UNOWHERE ); +} + +PROVIDE_UMALLOC ( memtop, urealloc, memtop_urealloc ); diff --git a/src/arch/x86/interface/pcbios/pcibios.c b/src/arch/x86/interface/pcbios/pcibios.c new file mode 100644 index 00000000..bf812f77 --- /dev/null +++ b/src/arch/x86/interface/pcbios/pcibios.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2006 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * PCI configuration space access via PCI BIOS + * + */ + +/** + * Determine number of PCI buses within system + * + * @ret num_bus Number of buses + */ +static int pcibios_num_bus ( void ) { + int discard_a, discard_D; + uint8_t max_bus; + + /* We issue this call using flat real mode, to work around a + * bug in some HP BIOSes. + */ + __asm__ __volatile__ ( REAL_CODE ( "call flatten_real_mode\n\t" + "stc\n\t" + "int $0x1a\n\t" + "jnc 1f\n\t" + "xorw %%cx, %%cx\n\t" + "\n1:\n\t" ) + : "=c" ( max_bus ), "=a" ( discard_a ), + "=D" ( discard_D ) + : "a" ( PCIBIOS_INSTALLATION_CHECK >> 16 ), + "D" ( 0 ) + : "ebx", "edx" ); + + return ( max_bus + 1 ); +} + +/** + * Read configuration space via PCI BIOS + * + * @v pci PCI device + * @v command PCI BIOS command + * @v value Value read + * @ret rc Return status code + */ +int pcibios_read ( struct pci_device *pci, uint32_t command, uint32_t *value ){ + int discard_b, discard_D; + uint16_t status; + + __asm__ __volatile__ ( REAL_CODE ( "stc\n\t" + "int $0x1a\n\t" + "jnc 1f\n\t" + "xorl %%eax, %%eax\n\t" + "decl %%eax\n\t" + "movl %%eax, %%ecx\n\t" + "\n1:\n\t" ) + : "=a" ( status ), "=b" ( discard_b ), + "=c" ( *value ), "=D" ( discard_D ) + : "a" ( command >> 16 ), "D" ( command ), + "b" ( pci->busdevfn ) + : "edx" ); + + return ( status >> 8 ); +} + +/** + * Write configuration space via PCI BIOS + * + * @v pci PCI device + * @v command PCI BIOS command + * @v value Value to be written + * @ret rc Return status code + */ +int pcibios_write ( struct pci_device *pci, uint32_t command, uint32_t value ){ + int discard_b, discard_c, discard_D; + uint16_t status; + + __asm__ __volatile__ ( REAL_CODE ( "stc\n\t" + "int $0x1a\n\t" + "jnc 1f\n\t" + "movb $0xff, %%ah\n\t" + "\n1:\n\t" ) + : "=a" ( status ), "=b" ( discard_b ), + "=c" ( discard_c ), "=D" ( discard_D ) + : "a" ( command >> 16 ), "D" ( command ), + "b" ( pci->busdevfn ), 
"c" ( value ) + : "edx" ); + + return ( status >> 8 ); +} + +PROVIDE_PCIAPI ( pcbios, pci_num_bus, pcibios_num_bus ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_read_config_byte ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_read_config_word ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_read_config_dword ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_write_config_byte ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_write_config_word ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_write_config_dword ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_ioremap ); diff --git a/src/arch/x86/interface/pcbios/pnpbios.c b/src/arch/x86/interface/pcbios/pnpbios.c new file mode 100644 index 00000000..20ec35d7 --- /dev/null +++ b/src/arch/x86/interface/pcbios/pnpbios.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** @file + * + * PnP BIOS + * + */ + +/** PnP BIOS structure */ +struct pnp_bios { + /** Signature + * + * Must be equal to @c PNP_BIOS_SIGNATURE + */ + uint32_t signature; + /** Version as BCD (e.g. 1.0 is 0x10) */ + uint8_t version; + /** Length of this structure */ + uint8_t length; + /** System capabilities */ + uint16_t control; + /** Checksum */ + uint8_t checksum; +} __attribute__ (( packed )); + +/** Signature for a PnP BIOS structure */ +#define PNP_BIOS_SIGNATURE \ + ( ( '$' << 0 ) + ( 'P' << 8 ) + ( 'n' << 16 ) + ( 'P' << 24 ) ) + +/** + * Test address for PnP BIOS structure + * + * @v offset Offset within BIOS segment to test + * @ret rc Return status code + */ +static int is_pnp_bios ( unsigned int offset ) { + union { + struct pnp_bios pnp_bios; + uint8_t bytes[256]; /* 256 is maximum length possible */ + } u; + size_t len; + unsigned int i; + uint8_t sum = 0; + + /* Read start of header and verify signature */ + copy_from_real ( &u.pnp_bios, BIOS_SEG, offset, sizeof ( u.pnp_bios )); + if ( u.pnp_bios.signature != PNP_BIOS_SIGNATURE ) + return -EINVAL; + + /* Read whole header and verify checksum */ + len = u.pnp_bios.length; + copy_from_real ( &u.bytes, BIOS_SEG, offset, len ); + for ( i = 0 ; i < len ; i++ ) { + sum += u.bytes[i]; + } + if ( sum != 0 ) + return -EINVAL; + + DBG ( "Found PnP BIOS at %04x:%04x\n", BIOS_SEG, offset ); + + return 0; +} + +/** + * Locate Plug-and-Play BIOS + * + * @ret pnp_offset Offset of PnP BIOS structure within BIOS segment + * + * The PnP BIOS structure will be at BIOS_SEG:pnp_offset. If no PnP + * BIOS is found, -1 is returned. 
+ */ +int find_pnp_bios ( void ) { + static int pnp_offset = 0; + + if ( pnp_offset ) + return pnp_offset; + + for ( pnp_offset = 0 ; pnp_offset < 0x10000 ; pnp_offset += 0x10 ) { + if ( is_pnp_bios ( pnp_offset ) == 0 ) + return pnp_offset; + } + + pnp_offset = -1; + return pnp_offset; +} diff --git a/src/arch/x86/interface/pcbios/rsdp.c b/src/arch/x86/interface/pcbios/rsdp.c new file mode 100644 index 00000000..8da0b558 --- /dev/null +++ b/src/arch/x86/interface/pcbios/rsdp.c @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * ACPI Root System Description Pointer + * + */ + +#include +#include +#include +#include +#include + +/** EBDA RSDP maximum segment */ +#define RSDP_EBDA_END_SEG 0xa000 + +/** Fixed BIOS area RSDP start address */ +#define RSDP_BIOS_START 0xe0000 + +/** Fixed BIOS area RSDP length */ +#define RSDP_BIOS_LEN 0x20000 + +/** Stride at which to search for RSDP */ +#define RSDP_STRIDE 16 + +/** + * Locate ACPI root system description table within a memory range + * + * @v start Start address to search + * @v len Length to search + * @ret rsdt ACPI root system description table, or UNULL + */ +static userptr_t rsdp_find_rsdt_range ( userptr_t start, size_t len ) { + static const char signature[8] = RSDP_SIGNATURE; + struct acpi_rsdp rsdp; + userptr_t rsdt; + size_t offset; + uint8_t sum; + unsigned int i; + + /* Search for RSDP */ + for ( offset = 0 ; ( ( offset + sizeof ( rsdp ) ) < len ) ; + offset += RSDP_STRIDE ) { + + /* Check signature and checksum */ + copy_from_user ( &rsdp, start, offset, sizeof ( rsdp ) ); + if ( memcmp ( rsdp.signature, signature, + sizeof ( signature ) ) != 0 ) + continue; + for ( sum = 0, i = 0 ; i < sizeof ( rsdp ) ; i++ ) + sum += *( ( ( uint8_t * ) &rsdp ) + i ); + if ( sum != 0 ) + continue; + + /* Extract RSDT */ + rsdt = phys_to_user ( le32_to_cpu ( rsdp.rsdt ) ); + DBGC ( rsdt, "RSDT %#08lx found via RSDP %#08lx\n", + user_to_phys ( rsdt, 0 ), + user_to_phys ( start, offset ) ); + return rsdt; + } + + return UNULL; +} + +/** + * Locate ACPI root system description table + * + * @ret rsdt ACPI root system description table, or UNULL + */ +static userptr_t rsdp_find_rsdt ( void ) { + static userptr_t rsdt; + uint16_t ebda_seg; + userptr_t ebda; + size_t ebda_len; + + /* Return existing RSDT if already found */ + if ( rsdt ) + return rsdt; + + /* Search EBDA */ + get_real ( ebda_seg, BDA_SEG, BDA_EBDA ); + if ( ebda_seg < RSDP_EBDA_END_SEG ) { + ebda = real_to_user ( 
ebda_seg, 0 ); + ebda_len = ( ( RSDP_EBDA_END_SEG - ebda_seg ) * 16 ); + rsdt = rsdp_find_rsdt_range ( ebda, ebda_len ); + if ( rsdt ) + return rsdt; + } + + /* Search fixed BIOS area */ + rsdt = rsdp_find_rsdt_range ( phys_to_user ( RSDP_BIOS_START ), + RSDP_BIOS_LEN ); + if ( rsdt ) + return rsdt; + + return UNULL; +} + +PROVIDE_ACPI ( rsdp, acpi_find_rsdt, rsdp_find_rsdt ); diff --git a/src/arch/x86/interface/pcbios/rtc_entropy.c b/src/arch/x86/interface/pcbios/rtc_entropy.c new file mode 100644 index 00000000..e9e6baa5 --- /dev/null +++ b/src/arch/x86/interface/pcbios/rtc_entropy.c @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * RTC-based entropy source + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** Maximum time to wait for an RTC interrupt, in milliseconds */ +#define RTC_MAX_WAIT_MS 100 + +/** RTC interrupt handler */ +extern void rtc_isr ( void ); + +/** Previous RTC interrupt handler */ +static struct segoff rtc_old_handler; + +/** Flag set by RTC interrupt handler */ +extern volatile uint8_t __text16 ( rtc_flag ); +#define rtc_flag __use_text16 ( rtc_flag ) + +/** + * Hook RTC interrupt handler + * + */ +static void rtc_hook_isr ( void ) { + + /* RTC interrupt handler */ + __asm__ __volatile__ ( + TEXT16_CODE ( "\nrtc_isr:\n\t" + /* Preserve registers */ + "pushw %%ax\n\t" + /* Set "interrupt triggered" flag */ + "movb $0x01, %%cs:rtc_flag\n\t" + /* Read RTC status register C to + * acknowledge interrupt + */ + "movb %2, %%al\n\t" + "outb %%al, %0\n\t" + "inb %1\n\t" + /* Send EOI */ + "movb $0x20, %%al\n\t" + "outb %%al, $0xa0\n\t" + "outb %%al, $0x20\n\t" + /* Restore registers and return */ + "popw %%ax\n\t" + "iret\n\t" + "\nrtc_flag:\n\t" + ".byte 0\n\t" ) + : + : "i" ( CMOS_ADDRESS ), "i" ( CMOS_DATA ), + "i" ( RTC_STATUS_C ) ); + + hook_bios_interrupt ( RTC_INT, ( intptr_t ) rtc_isr, &rtc_old_handler ); +} + +/** + * Unhook RTC interrupt handler + * + */ +static void rtc_unhook_isr ( void ) { + int rc; + + rc = unhook_bios_interrupt ( RTC_INT, ( intptr_t ) rtc_isr, + &rtc_old_handler ); + assert ( rc == 0 ); /* Should always be able to unhook */ +} + +/** + * Enable RTC interrupts + * + */ +static void rtc_enable_int ( void ) { + uint8_t status_b; + + /* Clear any stale pending interrupts via status register C */ + outb ( ( RTC_STATUS_C | CMOS_DISABLE_NMI ), CMOS_ADDRESS ); + inb ( CMOS_DATA ); + + /* Set Periodic Interrupt Enable bit in status register B */ + outb ( ( RTC_STATUS_B | CMOS_DISABLE_NMI ), CMOS_ADDRESS ); + status_b = inb ( CMOS_DATA ); + outb ( 
( RTC_STATUS_B | CMOS_DISABLE_NMI ), CMOS_ADDRESS ); + outb ( ( status_b | RTC_STATUS_B_PIE ), CMOS_DATA ); + + /* Re-enable NMI and reset to default address */ + outb ( CMOS_DEFAULT_ADDRESS, CMOS_ADDRESS ); + inb ( CMOS_DATA ); /* Discard; may be needed on some platforms */ +} + +/** + * Disable RTC interrupts + * + */ +static void rtc_disable_int ( void ) { + uint8_t status_b; + + /* Clear Periodic Interrupt Enable bit in status register B */ + outb ( ( RTC_STATUS_B | CMOS_DISABLE_NMI ), CMOS_ADDRESS ); + status_b = inb ( CMOS_DATA ); + outb ( ( RTC_STATUS_B | CMOS_DISABLE_NMI ), CMOS_ADDRESS ); + outb ( ( status_b & ~RTC_STATUS_B_PIE ), CMOS_DATA ); + + /* Re-enable NMI and reset to default address */ + outb ( CMOS_DEFAULT_ADDRESS, CMOS_ADDRESS ); + inb ( CMOS_DATA ); /* Discard; may be needed on some platforms */ +} + +/** + * Check that entropy gathering is functional + * + * @ret rc Return status code + */ +static int rtc_entropy_check ( void ) { + unsigned int i; + + /* Check that RTC interrupts are working */ + rtc_flag = 0; + for ( i = 0 ; i < RTC_MAX_WAIT_MS ; i++ ) { + + /* Allow interrupts to occur */ + __asm__ __volatile__ ( "sti\n\t" + "nop\n\t" + "nop\n\t" + "cli\n\t" ); + + /* Check for RTC interrupt flag */ + if ( rtc_flag ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( &rtc_flag, "RTC timed out waiting for interrupt\n" ); + return -ETIMEDOUT; +} + +/** + * Enable entropy gathering + * + * @ret rc Return status code + */ +static int rtc_entropy_enable ( void ) { + int rc; + + /* Hook ISR and enable RTC interrupts */ + rtc_hook_isr(); + enable_irq ( RTC_IRQ ); + rtc_enable_int(); + + /* Check that RTC interrupts are working */ + if ( ( rc = rtc_entropy_check() ) != 0 ) + goto err_check; + + return 0; + + err_check: + rtc_disable_int(); + disable_irq ( RTC_IRQ ); + rtc_unhook_isr(); + return rc; +} + +/** + * Disable entropy gathering + * + */ +static void rtc_entropy_disable ( void ) { + + /* Disable RTC interrupts and unhook ISR */ + 
rtc_disable_int(); + disable_irq ( RTC_IRQ ); + rtc_unhook_isr(); +} + +/** + * Measure a single RTC tick + * + * @ret delta Length of RTC tick (in TSC units) + */ +uint8_t rtc_sample ( void ) { + uint32_t before; + uint32_t after; + uint32_t temp; + + __asm__ __volatile__ ( + REAL_CODE ( /* Enable interrupts */ + "sti\n\t" + /* Wait for RTC interrupt */ + "movb %b2, %%cs:rtc_flag\n\t" + "\n1:\n\t" + "xchgb %b2, %%cs:rtc_flag\n\t" /* Serialize */ + "testb %b2, %b2\n\t" + "jz 1b\n\t" + /* Read "before" TSC */ + "rdtsc\n\t" + /* Store "before" TSC on stack */ + "pushl %0\n\t" + /* Wait for another RTC interrupt */ + "xorb %b2, %b2\n\t" + "movb %b2, %%cs:rtc_flag\n\t" + "\n1:\n\t" + "xchgb %b2, %%cs:rtc_flag\n\t" /* Serialize */ + "testb %b2, %b2\n\t" + "jz 1b\n\t" + /* Read "after" TSC */ + "rdtsc\n\t" + /* Retrieve "before" TSC on stack */ + "popl %1\n\t" + /* Disable interrupts */ + "cli\n\t" + ) + : "=a" ( after ), "=d" ( before ), "=Q" ( temp ) + : "2" ( 0 ) ); + + return ( after - before ); +} + +PROVIDE_ENTROPY_INLINE ( rtc, min_entropy_per_sample ); +PROVIDE_ENTROPY ( rtc, entropy_enable, rtc_entropy_enable ); +PROVIDE_ENTROPY ( rtc, entropy_disable, rtc_entropy_disable ); +PROVIDE_ENTROPY_INLINE ( rtc, get_noise ); diff --git a/src/arch/x86/interface/pcbios/rtc_time.c b/src/arch/x86/interface/pcbios/rtc_time.c new file mode 100644 index 00000000..cdbeac8d --- /dev/null +++ b/src/arch/x86/interface/pcbios/rtc_time.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * RTC-based time source + * + */ + +#include +#include +#include +#include + +/** + * Read RTC register + * + * @v address Register address + * @ret data Data + */ +static unsigned int rtc_readb ( int address ) { + outb ( address, CMOS_ADDRESS ); + return inb ( CMOS_DATA ); +} + +/** + * Check if RTC update is in progress + * + * @ret is_busy RTC update is in progress + */ +static int rtc_is_busy ( void ) { + return ( rtc_readb ( RTC_STATUS_A ) & RTC_STATUS_A_UPDATE_IN_PROGRESS ); +} + +/** + * Read RTC BCD register + * + * @v address Register address + * @ret value Value + */ +static unsigned int rtc_readb_bcd ( int address ) { + unsigned int bcd; + + bcd = rtc_readb ( address ); + return ( bcd - ( 6 * ( bcd >> 4 ) ) ); +} + +/** + * Read RTC time + * + * @ret time Time, in seconds + */ +static time_t rtc_read_time ( void ) { + unsigned int status_b; + int is_binary; + int is_24hour; + unsigned int ( * read_component ) ( int address ); + struct tm tm; + int is_pm; + unsigned int hour; + time_t time; + + /* Wait for any in-progress update to complete */ + while ( rtc_is_busy() ) {} + + /* Determine RTC mode */ + status_b = rtc_readb ( RTC_STATUS_B ); + is_binary = ( status_b & RTC_STATUS_B_BINARY ); + is_24hour = ( status_b & RTC_STATUS_B_24_HOUR ); + read_component = ( is_binary ? 
rtc_readb : rtc_readb_bcd ); + + /* Read time values */ + tm.tm_sec = read_component ( RTC_SEC ); + tm.tm_min = read_component ( RTC_MIN ); + hour = read_component ( RTC_HOUR ); + if ( ! is_24hour ) { + is_pm = ( hour >= 80 ); + hour = ( ( ( ( hour & 0x7f ) % 80 ) % 12 ) + + ( is_pm ? 12 : 0 ) ); + } + tm.tm_hour = hour; + tm.tm_mday = read_component ( RTC_MDAY ); + tm.tm_mon = ( read_component ( RTC_MON ) - 1 ); + tm.tm_year = ( read_component ( RTC_YEAR ) + + 100 /* Assume we are in the 21st century, since + * this code was written in 2012 */ ); + + DBGC ( RTC_STATUS_A, "RTCTIME is %04d-%02d-%02d %02d:%02d:%02d " + "(%s,%d-hour)\n", ( tm.tm_year + 1900 ), ( tm.tm_mon + 1 ), + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, + ( is_binary ? "binary" : "BCD" ), ( is_24hour ? 24 : 12 ) ); + + /* Convert to seconds since the Epoch */ + time = mktime ( &tm ); + + return time; +} + +/** + * Get current time in seconds + * + * @ret time Time, in seconds + */ +static time_t rtc_now ( void ) { + time_t time = 0; + time_t last_time; + + /* Read time until we get two matching values in a row, in + * case we end up reading a corrupted value in the middle of + * an update. + */ + do { + last_time = time; + time = rtc_read_time(); + } while ( time != last_time ); + + return time; +} + +PROVIDE_TIME ( rtc, time_now, rtc_now ); diff --git a/src/arch/x86/interface/pcbios/vesafb.c b/src/arch/x86/interface/pcbios/vesafb.c new file mode 100644 index 00000000..50e48585 --- /dev/null +++ b/src/arch/x86/interface/pcbios/vesafb.c @@ -0,0 +1,540 @@ +/* + * Copyright (C) 2013 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * VESA frame buffer console + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Avoid dragging in BIOS console if not otherwise used */ +extern struct console_driver bios_console; +struct console_driver bios_console __attribute__ (( weak )); + +/* Disambiguate the various error causes */ +#define EIO_FAILED __einfo_error ( EINFO_EIO_FAILED ) +#define EINFO_EIO_FAILED \ + __einfo_uniqify ( EINFO_EIO, 0x01, \ + "Function call failed" ) +#define EIO_HARDWARE __einfo_error ( EINFO_EIO_HARDWARE ) +#define EINFO_EIO_HARDWARE \ + __einfo_uniqify ( EINFO_EIO, 0x02, \ + "Not supported in current configuration" ) +#define EIO_MODE __einfo_error ( EINFO_EIO_MODE ) +#define EINFO_EIO_MODE \ + __einfo_uniqify ( EINFO_EIO, 0x03, \ + "Invalid in current video mode" ) +#define EIO_VBE( code ) \ + EUNIQ ( EINFO_EIO, (code), EIO_FAILED, EIO_HARDWARE, EIO_MODE ) + +/* Set default console usage if applicable + * + * We accept either CONSOLE_FRAMEBUFFER or CONSOLE_VESAFB. + */ +#if ( defined ( CONSOLE_FRAMEBUFFER ) && ! defined ( CONSOLE_VESAFB ) ) +#define CONSOLE_VESAFB CONSOLE_FRAMEBUFFER +#endif +#if ! 
( defined ( CONSOLE_VESAFB ) && CONSOLE_EXPLICIT ( CONSOLE_VESAFB ) ) +#undef CONSOLE_VESAFB +#define CONSOLE_VESAFB ( CONSOLE_USAGE_ALL & ~CONSOLE_USAGE_LOG ) +#endif + +/** Character height */ +#define VESAFB_CHAR_HEIGHT 16 + +/** Font corresponding to selected character width and height */ +#define VESAFB_FONT VBE_FONT_8x16 + +/* Forward declaration */ +struct console_driver vesafb_console __console_driver; + +/** A VESA frame buffer */ +struct vesafb { + /** Frame buffer console */ + struct fbcon fbcon; + /** Physical start address */ + physaddr_t start; + /** Pixel geometry */ + struct fbcon_geometry pixel; + /** Colour mapping */ + struct fbcon_colour_map map; + /** Font definition */ + struct fbcon_font font; + /** Character glyphs */ + struct segoff glyphs; + /** Saved VGA mode */ + uint8_t saved_mode; +}; + +/** The VESA frame buffer */ +static struct vesafb vesafb; + +/** Base memory buffer used for VBE calls */ +union vbe_buffer { + /** VBE controller information block */ + struct vbe_controller_info controller; + /** VBE mode information block */ + struct vbe_mode_info mode; +}; +static union vbe_buffer __bss16 ( vbe_buf ); +#define vbe_buf __use_data16 ( vbe_buf ) + +/** + * Convert VBE status code to iPXE status code + * + * @v status VBE status code + * @ret rc Return status code + */ +static int vesafb_rc ( unsigned int status ) { + unsigned int code; + + if ( ( status & 0xff ) != 0x4f ) + return -ENOTSUP; + code = ( ( status >> 8 ) & 0xff ); + return ( code ? 
-EIO_VBE ( code ) : 0 ); +} + +/** + * Get character glyph + * + * @v character Character + * @v glyph Character glyph to fill in + */ +static void vesafb_glyph ( unsigned int character, uint8_t *glyph ) { + size_t offset = ( character * VESAFB_CHAR_HEIGHT ); + + copy_from_real ( glyph, vesafb.glyphs.segment, + ( vesafb.glyphs.offset + offset ), VESAFB_CHAR_HEIGHT); +} + +/** + * Get font definition + * + */ +static void vesafb_font ( void ) { + + /* Get font information + * + * Working around gcc bugs is icky here. The value we want is + * returned in %ebp, but there's no way to specify %ebp in an + * output constraint. We can't put %ebp in the clobber list, + * because this tends to cause random build failures on some + * gcc versions. We can't manually push/pop %ebp and return + * the value via a generic register output constraint, because + * gcc might choose to use %ebp to satisfy that constraint + * (and we have no way to prevent it from so doing). + * + * Work around this hideous mess by using %ecx and %edx as the + * output registers, since they get clobbered anyway. + */ + __asm__ __volatile__ ( REAL_CODE ( "pushw %%bp\n\t" /* gcc bug */ + "int $0x10\n\t" + "movw %%es, %%cx\n\t" + "movw %%bp, %%dx\n\t" + "popw %%bp\n\t" /* gcc bug */ ) + : "=c" ( vesafb.glyphs.segment ), + "=d" ( vesafb.glyphs.offset ) + : "a" ( VBE_GET_FONT ), + "b" ( VESAFB_FONT ) ); + DBGC ( &vbe_buf, "VESAFB has font %04x at %04x:%04x\n", + VESAFB_FONT, vesafb.glyphs.segment, vesafb.glyphs.offset ); + vesafb.font.height = VESAFB_CHAR_HEIGHT; + vesafb.font.glyph = vesafb_glyph; +} + +/** + * Get VBE mode list + * + * @ret mode_numbers Mode number list (terminated with VBE_MODE_END) + * @ret rc Return status code + * + * The caller is responsible for eventually freeing the mode list. 
+ */ +static int vesafb_mode_list ( uint16_t **mode_numbers ) { + struct vbe_controller_info *controller = &vbe_buf.controller; + userptr_t video_mode_ptr; + uint16_t mode_number; + uint16_t status; + size_t len; + int rc; + + /* Avoid returning uninitialised data on error */ + *mode_numbers = NULL; + + /* Get controller information block */ + controller->vbe_signature = 0; + __asm__ __volatile__ ( REAL_CODE ( "int $0x10" ) + : "=a" ( status ) + : "a" ( VBE_CONTROLLER_INFO ), + "D" ( __from_data16 ( controller ) ) + : "memory", "ebx", "edx" ); + if ( ( rc = vesafb_rc ( status ) ) != 0 ) { + DBGC ( &vbe_buf, "VESAFB could not get controller information: " + "[%04x] %s\n", status, strerror ( rc ) ); + return rc; + } + if ( controller->vbe_signature != VBE_CONTROLLER_SIGNATURE ) { + DBGC ( &vbe_buf, "VESAFB invalid controller signature " + "\"%c%c%c%c\"\n", ( controller->vbe_signature >> 0 ), + ( controller->vbe_signature >> 8 ), + ( controller->vbe_signature >> 16 ), + ( controller->vbe_signature >> 24 ) ); + DBGC_HDA ( &vbe_buf, 0, controller, sizeof ( *controller ) ); + return -EINVAL; + } + DBGC ( &vbe_buf, "VESAFB found VBE version %d.%d with mode list at " + "%04x:%04x\n", controller->vbe_major_version, + controller->vbe_minor_version, + controller->video_mode_ptr.segment, + controller->video_mode_ptr.offset ); + + /* Calculate length of mode list */ + video_mode_ptr = real_to_user ( controller->video_mode_ptr.segment, + controller->video_mode_ptr.offset ); + len = 0; + do { + copy_from_user ( &mode_number, video_mode_ptr, len, + sizeof ( mode_number ) ); + len += sizeof ( mode_number ); + } while ( mode_number != VBE_MODE_END ); + + /* Allocate and fill mode list */ + *mode_numbers = malloc ( len ); + if ( ! 
*mode_numbers ) + return -ENOMEM; + copy_from_user ( *mode_numbers, video_mode_ptr, 0, len ); + + return 0; +} + +/** + * Get video mode information + * + * @v mode_number Mode number + * @ret rc Return status code + */ +static int vesafb_mode_info ( unsigned int mode_number ) { + struct vbe_mode_info *mode = &vbe_buf.mode; + uint16_t status; + int rc; + + /* Get mode information */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x10" ) + : "=a" ( status ) + : "a" ( VBE_MODE_INFO ), + "c" ( mode_number ), + "D" ( __from_data16 ( mode ) ) + : "memory" ); + if ( ( rc = vesafb_rc ( status ) ) != 0 ) { + DBGC ( &vbe_buf, "VESAFB could not get mode %04x information: " + "[%04x] %s\n", mode_number, status, strerror ( rc ) ); + return rc; + } + DBGC ( &vbe_buf, "VESAFB mode %04x %dx%d %dbpp(%d:%d:%d:%d) model " + "%02x [x%d]%s%s%s%s%s\n", mode_number, mode->x_resolution, + mode->y_resolution, mode->bits_per_pixel, mode->rsvd_mask_size, + mode->red_mask_size, mode->green_mask_size, mode->blue_mask_size, + mode->memory_model, ( mode->number_of_image_pages + 1 ), + ( ( mode->mode_attributes & VBE_MODE_ATTR_SUPPORTED ) ? + "" : " [unsupported]" ), + ( ( mode->mode_attributes & VBE_MODE_ATTR_TTY ) ? + " [tty]" : "" ), + ( ( mode->mode_attributes & VBE_MODE_ATTR_GRAPHICS ) ? + "" : " [text]" ), + ( ( mode->mode_attributes & VBE_MODE_ATTR_LINEAR ) ? + "" : " [nonlinear]" ), + ( ( mode->mode_attributes & VBE_MODE_ATTR_TRIPLE_BUF ) ? 
+ " [buf]" : "" ) ); + + return 0; +} + +/** + * Set video mode + * + * @v mode_number Mode number + * @ret rc Return status code + */ +static int vesafb_set_mode ( unsigned int mode_number ) { + struct vbe_mode_info *mode = &vbe_buf.mode; + uint16_t status; + int rc; + + /* Get mode information */ + if ( ( rc = vesafb_mode_info ( mode_number ) ) != 0 ) + return rc; + + /* Record mode parameters */ + vesafb.start = mode->phys_base_ptr; + vesafb.pixel.width = mode->x_resolution; + vesafb.pixel.height = mode->y_resolution; + vesafb.pixel.len = ( ( mode->bits_per_pixel + 7 ) / 8 ); + vesafb.pixel.stride = mode->bytes_per_scan_line; + DBGC ( &vbe_buf, "VESAFB mode %04x has frame buffer at %08x\n", + mode_number, mode->phys_base_ptr ); + + /* Initialise font colours */ + vesafb.map.red_scale = ( 8 - mode->red_mask_size ); + vesafb.map.green_scale = ( 8 - mode->green_mask_size ); + vesafb.map.blue_scale = ( 8 - mode->blue_mask_size ); + vesafb.map.red_lsb = mode->red_field_position; + vesafb.map.green_lsb = mode->green_field_position; + vesafb.map.blue_lsb = mode->blue_field_position; + + /* Select this mode */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x10" ) + : "=a" ( status ) + : "a" ( VBE_SET_MODE ), + "b" ( mode_number ) ); + if ( ( rc = vesafb_rc ( status ) ) != 0 ) { + DBGC ( &vbe_buf, "VESAFB could not set mode %04x: [%04x] %s\n", + mode_number, status, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Select video mode + * + * @v mode_numbers Mode number list (terminated with VBE_MODE_END) + * @v min_width Minimum required width (in pixels) + * @v min_height Minimum required height (in pixels) + * @v min_bpp Minimum required colour depth (in bits per pixel) + * @ret mode_number Mode number, or negative error + */ +static int vesafb_select_mode ( const uint16_t *mode_numbers, + unsigned int min_width, unsigned int min_height, + unsigned int min_bpp ) { + struct vbe_mode_info *mode = &vbe_buf.mode; + int best_mode_number = -ENOENT; + unsigned int 
best_score = INT_MAX; + unsigned int score; + uint16_t mode_number; + int rc; + + /* Find the first suitable mode */ + while ( ( mode_number = *(mode_numbers++) ) != VBE_MODE_END ) { + + /* Force linear mode variant */ + mode_number |= VBE_MODE_LINEAR; + + /* Get mode information */ + if ( ( rc = vesafb_mode_info ( mode_number ) ) != 0 ) + continue; + + /* Skip unusable modes */ + if ( ( mode->mode_attributes & ( VBE_MODE_ATTR_SUPPORTED | + VBE_MODE_ATTR_GRAPHICS | + VBE_MODE_ATTR_LINEAR ) ) != + ( VBE_MODE_ATTR_SUPPORTED | VBE_MODE_ATTR_GRAPHICS | + VBE_MODE_ATTR_LINEAR ) ) { + continue; + } + if ( mode->memory_model != VBE_MODE_MODEL_DIRECT_COLOUR ) + continue; + + /* Skip modes not meeting the requirements */ + if ( ( mode->x_resolution < min_width ) || + ( mode->y_resolution < min_height ) || + ( mode->bits_per_pixel < min_bpp ) ) { + continue; + } + + /* Select this mode if it has the best (i.e. lowest) + * score. We choose the scoring system to favour + * modes close to the specified width and height; + * within modes of the same width and height we prefer + * a higher colour depth. 
+ */ + score = ( ( mode->x_resolution * mode->y_resolution ) - + mode->bits_per_pixel ); + if ( score < best_score ) { + best_mode_number = mode_number; + best_score = score; + } + } + + if ( best_mode_number >= 0 ) { + DBGC ( &vbe_buf, "VESAFB selected mode %04x\n", + best_mode_number ); + } else { + DBGC ( &vbe_buf, "VESAFB found no suitable mode\n" ); + } + + return best_mode_number; +} + +/** + * Restore video mode + * + */ +static void vesafb_restore ( void ) { + uint32_t discard_a; + + /* Restore saved VGA mode */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x10" ) + : "=a" ( discard_a ) + : "a" ( VBE_SET_VGA_MODE | vesafb.saved_mode ) ); + DBGC ( &vbe_buf, "VESAFB restored VGA mode %#02x\n", + vesafb.saved_mode ); +} + +/** + * Initialise VESA frame buffer + * + * @v config Console configuration, or NULL to reset + * @ret rc Return status code + */ +static int vesafb_init ( struct console_configuration *config ) { + uint32_t discard_b; + uint16_t *mode_numbers; + int mode_number; + int rc; + + /* Record current VGA mode */ + __asm__ __volatile__ ( REAL_CODE ( "int $0x10" ) + : "=a" ( vesafb.saved_mode ), "=b" ( discard_b ) + : "a" ( VBE_GET_VGA_MODE ) ); + DBGC ( &vbe_buf, "VESAFB saved VGA mode %#02x\n", vesafb.saved_mode ); + + /* Get VESA mode list */ + if ( ( rc = vesafb_mode_list ( &mode_numbers ) ) != 0 ) + goto err_mode_list; + + /* Select mode */ + if ( ( mode_number = vesafb_select_mode ( mode_numbers, config->width, + config->height, + config->depth ) ) < 0 ) { + rc = mode_number; + goto err_select_mode; + } + + /* Set mode */ + if ( ( rc = vesafb_set_mode ( mode_number ) ) != 0 ) + goto err_set_mode; + + /* Get font data */ + vesafb_font(); + + /* Initialise frame buffer console */ + if ( ( rc = fbcon_init ( &vesafb.fbcon, phys_to_user ( vesafb.start ), + &vesafb.pixel, &vesafb.map, &vesafb.font, + config ) ) != 0 ) + goto err_fbcon_init; + + free ( mode_numbers ); + return 0; + + fbcon_fini ( &vesafb.fbcon ); + err_fbcon_init: + err_set_mode: + 
vesafb_restore(); + err_select_mode: + free ( mode_numbers ); + err_mode_list: + return rc; +} + +/** + * Finalise VESA frame buffer + * + */ +static void vesafb_fini ( void ) { + + /* Finalise frame buffer console */ + fbcon_fini ( &vesafb.fbcon ); + + /* Restore saved VGA mode */ + vesafb_restore(); +} + +/** + * Print a character to current cursor position + * + * @v character Character + */ +static void vesafb_putchar ( int character ) { + + fbcon_putchar ( &vesafb.fbcon, character ); +} + +/** + * Configure console + * + * @v config Console configuration, or NULL to reset + * @ret rc Return status code + */ +static int vesafb_configure ( struct console_configuration *config ) { + int rc; + + /* Reset console, if applicable */ + if ( ! vesafb_console.disabled ) { + vesafb_fini(); + bios_console.disabled &= ~CONSOLE_DISABLED_OUTPUT; + ansicol_reset_magic(); + } + vesafb_console.disabled = CONSOLE_DISABLED; + + /* Do nothing more unless we have a usable configuration */ + if ( ( config == NULL ) || + ( config->width == 0 ) || ( config->height == 0 ) ) { + return 0; + } + + /* Initialise VESA frame buffer */ + if ( ( rc = vesafb_init ( config ) ) != 0 ) + return rc; + + /* Mark console as enabled */ + vesafb_console.disabled = 0; + bios_console.disabled |= CONSOLE_DISABLED_OUTPUT; + + /* Set magic colour to transparent if we have a background picture */ + if ( config->pixbuf ) + ansicol_set_magic_transparent(); + + return 0; +} + +/** VESA frame buffer console driver */ +struct console_driver vesafb_console __console_driver = { + .usage = CONSOLE_VESAFB, + .putchar = vesafb_putchar, + .configure = vesafb_configure, + .disabled = CONSOLE_DISABLED, +}; diff --git a/src/arch/x86/interface/pxe/pxe_call.c b/src/arch/x86/interface/pxe/pxe_call.c new file mode 100644 index 00000000..67118299 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_call.c @@ -0,0 +1,404 @@ +/* + * Copyright (C) 2006 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * PXE API entry point + */ + +/* Disambiguate the various error causes */ +#define EINFO_EPXENBP \ + __einfo_uniqify ( EINFO_EPLATFORM, 0x01, \ + "External PXE NBP error" ) +#define EPXENBP( status ) EPLATFORM ( EINFO_EPXENBP, status ) + +/** Vector for chaining INT 1A */ +extern struct segoff __text16 ( pxe_int_1a_vector ); +#define pxe_int_1a_vector __use_text16 ( pxe_int_1a_vector ) + +/** INT 1A handler */ +extern void pxe_int_1a ( void ); + +/** INT 1A hooked flag */ +static int int_1a_hooked = 0; + +/** Real-mode code segment size */ +extern char _text16_memsz[]; +#define _text16_memsz ( ( size_t ) _text16_memsz ) + +/** Real-mode data segment size */ +extern char _data16_memsz[]; +#define _data16_memsz ( ( size_t ) _data16_memsz ) + +/** PXENV_UNDI_TRANSMIT API call profiler */ +static struct profiler pxe_api_tx_profiler __profiler = + { .name = "pxeapi.tx" }; + +/** PXENV_UNDI_ISR API 
call profiler */ +static struct profiler pxe_api_isr_profiler __profiler = + { .name = "pxeapi.isr" }; + +/** PXE unknown API call profiler + * + * This profiler can be used to measure the overhead of a dummy PXE + * API call. + */ +static struct profiler pxe_api_unknown_profiler __profiler = + { .name = "pxeapi.unknown" }; + +/** Miscellaneous PXE API call profiler */ +static struct profiler pxe_api_misc_profiler __profiler = + { .name = "pxeapi.misc" }; + +/** + * Handle an unknown PXE API call + * + * @v pxenv_unknown Pointer to a struct s_PXENV_UNKNOWN + * @ret #PXENV_EXIT_FAILURE Always + * @err #PXENV_STATUS_UNSUPPORTED Always + */ +static PXENV_EXIT_t pxenv_unknown ( struct s_PXENV_UNKNOWN *pxenv_unknown ) { + pxenv_unknown->Status = PXENV_STATUS_UNSUPPORTED; + return PXENV_EXIT_FAILURE; +} + +/** Unknown PXE API call list */ +struct pxe_api_call pxenv_unknown_api __pxe_api_call = + PXE_API_CALL ( PXENV_UNKNOWN, pxenv_unknown, struct s_PXENV_UNKNOWN ); + +/** + * Locate PXE API call + * + * @v opcode Opcode + * @ret call PXE API call, or NULL + */ +static struct pxe_api_call * find_pxe_api_call ( uint16_t opcode ) { + struct pxe_api_call *call; + + for_each_table_entry ( call, PXE_API_CALLS ) { + if ( call->opcode == opcode ) + return call; + } + return NULL; +} + +/** + * Determine applicable profiler (for debugging) + * + * @v opcode PXE opcode + * @ret profiler Profiler + */ +static struct profiler * pxe_api_profiler ( unsigned int opcode ) { + + /* Determine applicable profiler */ + switch ( opcode ) { + case PXENV_UNDI_TRANSMIT: + return &pxe_api_tx_profiler; + case PXENV_UNDI_ISR: + return &pxe_api_isr_profiler; + case PXENV_UNKNOWN: + return &pxe_api_unknown_profiler; + default: + return &pxe_api_misc_profiler; + } +} + +/** + * Dispatch PXE API call + * + * @v bx PXE opcode + * @v es:di Address of PXE parameter block + * @ret ax PXE exit code + */ +__asmcall void pxe_api_call ( struct i386_all_regs *ix86 ) { + uint16_t opcode = ix86->regs.bx; + 
userptr_t uparams = real_to_user ( ix86->segs.es, ix86->regs.di ); + struct profiler *profiler = pxe_api_profiler ( opcode ); + struct pxe_api_call *call; + union u_PXENV_ANY params; + PXENV_EXIT_t ret; + + /* Start profiling */ + profile_start ( profiler ); + + /* Locate API call */ + call = find_pxe_api_call ( opcode ); + if ( ! call ) { + DBGC ( &pxe_netdev, "PXENV_UNKNOWN_%04x\n", opcode ); + call = &pxenv_unknown_api; + } + + /* Copy parameter block from caller */ + copy_from_user ( ¶ms, uparams, 0, call->params_len ); + + /* Set default status in case child routine fails to do so */ + params.Status = PXENV_STATUS_FAILURE; + + /* Hand off to relevant API routine */ + ret = call->entry ( ¶ms ); + + /* Copy modified parameter block back to caller and return */ + copy_to_user ( uparams, 0, ¶ms, call->params_len ); + ix86->regs.ax = ret; + + /* Stop profiling, if applicable */ + profile_stop ( profiler ); +} + +/** + * Dispatch weak PXE API call with PXE stack available + * + * @v ix86 Registers for PXE call + * @ret present Zero (PXE stack present) + */ +int pxe_api_call_weak ( struct i386_all_regs *ix86 ) { + pxe_api_call ( ix86 ); + return 0; +} + +/** + * Dispatch PXE loader call + * + * @v es:di Address of PXE parameter block + * @ret ax PXE exit code + */ +__asmcall void pxe_loader_call ( struct i386_all_regs *ix86 ) { + userptr_t uparams = real_to_user ( ix86->segs.es, ix86->regs.di ); + struct s_UNDI_LOADER params; + PXENV_EXIT_t ret; + + /* Copy parameter block from caller */ + copy_from_user ( ¶ms, uparams, 0, sizeof ( params ) ); + + /* Fill in ROM segment address */ + ppxe.UNDIROMID.segment = ix86->segs.ds; + + /* Set default status in case child routine fails to do so */ + params.Status = PXENV_STATUS_FAILURE; + + /* Call UNDI loader */ + ret = undi_loader ( ¶ms ); + + /* Copy modified parameter block back to caller and return */ + copy_to_user ( uparams, 0, ¶ms, sizeof ( params ) ); + ix86->regs.ax = ret; +} + +/** + * Calculate byte checksum as used 
by PXE + * + * @v data Data + * @v size Length of data + * @ret sum Checksum + */ +static uint8_t pxe_checksum ( void *data, size_t size ) { + uint8_t *bytes = data; + uint8_t sum = 0; + + while ( size-- ) { + sum += *bytes++; + } + return sum; +} + +/** + * Initialise !PXE and PXENV+ structures + * + */ +static void pxe_init_structures ( void ) { + uint32_t rm_cs_phys = ( rm_cs << 4 ); + uint32_t rm_ds_phys = ( rm_ds << 4 ); + + /* Fill in missing segment fields */ + ppxe.EntryPointSP.segment = rm_cs; + ppxe.EntryPointESP.segment = rm_cs; + ppxe.Stack.segment_address = rm_ds; + ppxe.Stack.Physical_address = rm_ds_phys; + ppxe.UNDIData.segment_address = rm_ds; + ppxe.UNDIData.Physical_address = rm_ds_phys; + ppxe.UNDICode.segment_address = rm_cs; + ppxe.UNDICode.Physical_address = rm_cs_phys; + ppxe.UNDICodeWrite.segment_address = rm_cs; + ppxe.UNDICodeWrite.Physical_address = rm_cs_phys; + pxenv.RMEntry.segment = rm_cs; + pxenv.StackSeg = rm_ds; + pxenv.UNDIDataSeg = rm_ds; + pxenv.UNDICodeSeg = rm_cs; + pxenv.PXEPtr.segment = rm_cs; + + /* Update checksums */ + ppxe.StructCksum -= pxe_checksum ( &ppxe, sizeof ( ppxe ) ); + pxenv.Checksum -= pxe_checksum ( &pxenv, sizeof ( pxenv ) ); +} + +/** PXE structure initialiser */ +struct init_fn pxe_init_fn __init_fn ( INIT_NORMAL ) = { + .initialise = pxe_init_structures, +}; + +/** + * Activate PXE stack + * + * @v netdev Net device to use as PXE net device + */ +void pxe_activate ( struct net_device *netdev ) { + uint32_t discard_a; + uint32_t discard_b; + uint32_t discard_d; + + /* Ensure INT 1A is hooked */ + if ( ! 
int_1a_hooked ) { + hook_bios_interrupt ( 0x1a, ( intptr_t ) pxe_int_1a, + &pxe_int_1a_vector ); + devices_get(); + int_1a_hooked = 1; + } + + /* Set PXE network device */ + pxe_set_netdev ( netdev ); + + /* Notify BIOS of installation */ + __asm__ __volatile__ ( REAL_CODE ( "pushw %%cs\n\t" + "popw %%es\n\t" + "int $0x1a\n\t" ) + : "=a" ( discard_a ), "=b" ( discard_b ), + "=d" ( discard_d ) + : "0" ( 0x564e ), + "1" ( __from_text16 ( &pxenv ) ) ); +} + +/** + * Deactivate PXE stack + * + * @ret rc Return status code + */ +int pxe_deactivate ( void ) { + int rc; + + /* Clear PXE network device */ + pxe_set_netdev ( NULL ); + + /* Ensure INT 1A is unhooked, if possible */ + if ( int_1a_hooked ) { + if ( ( rc = unhook_bios_interrupt ( 0x1a, + ( intptr_t ) pxe_int_1a, + &pxe_int_1a_vector ))!= 0){ + DBGC ( &pxe_netdev, "PXE could not unhook INT 1A: %s\n", + strerror ( rc ) ); + return rc; + } + devices_put(); + int_1a_hooked = 0; + } + + return 0; +} + +/** Jump buffer for PXENV_RESTART_TFTP */ +rmjmp_buf pxe_restart_nbp; + +/** + * Start PXE NBP at 0000:7c00 + * + * @ret rc Return status code + */ +int pxe_start_nbp ( void ) { + int jmp; + int discard_b, discard_c, discard_d, discard_D; + uint16_t status; + + DBGC ( &pxe_netdev, "PXE NBP starting with netdev %s, code %04x:%04zx, " + "data %04x:%04zx\n", ( pxe_netdev ? 
pxe_netdev->name : ""), + rm_cs, _text16_memsz, rm_ds, _data16_memsz ); + + /* Allow restarting NBP via PXENV_RESTART_TFTP */ + jmp = rmsetjmp ( pxe_restart_nbp ); + if ( jmp ) + DBGC ( &pxe_netdev, "PXE NBP restarting (%x)\n", jmp ); + + /* Far call to PXE NBP */ + __asm__ __volatile__ ( REAL_CODE ( "pushl %%ebp\n\t" /* gcc bug */ + "movw %%cx, %%es\n\t" + "pushw %%es\n\t" + "pushw %%di\n\t" + "sti\n\t" + "lcall $0, $0x7c00\n\t" + "popl %%ebp\n\t" /* discard */ + "popl %%ebp\n\t" /* gcc bug */ ) + : "=a" ( status ), "=b" ( discard_b ), + "=c" ( discard_c ), "=d" ( discard_d ), + "=D" ( discard_D ) + : "a" ( 0 ), "b" ( __from_text16 ( &pxenv ) ), + "c" ( rm_cs ), + "d" ( virt_to_phys ( &pxenv ) ), + "D" ( __from_text16 ( &ppxe ) ) + : "esi", "memory" ); + if ( status ) + return -EPXENBP ( status ); + + return 0; +} + +/** + * Notify BIOS of existence of network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int pxe_notify ( struct net_device *netdev ) { + + /* Do nothing if we already have a network device */ + if ( pxe_netdev ) + return 0; + + /* Activate (and deactivate) PXE stack to notify BIOS */ + pxe_activate ( netdev ); + pxe_deactivate(); + + return 0; +} + +/** PXE BIOS notification driver */ +struct net_driver pxe_driver __net_driver = { + .name = "PXE", + .probe = pxe_notify, +}; + +REQUIRING_SYMBOL ( pxe_api_call ); +REQUIRE_OBJECT ( pxe_preboot ); +REQUIRE_OBJECT ( pxe_undi ); +REQUIRE_OBJECT ( pxe_udp ); +REQUIRE_OBJECT ( pxe_tftp ); +REQUIRE_OBJECT ( pxe_file ); diff --git a/src/arch/x86/interface/pxe/pxe_entry.S b/src/arch/x86/interface/pxe/pxe_entry.S new file mode 100644 index 00000000..663aa842 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_entry.S @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2006 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + + .arch i386 + +/**************************************************************************** + * !PXE structure + **************************************************************************** + */ + .section ".text16.data", "aw", @progbits + .globl ppxe + .align 16 +ppxe: + .ascii "!PXE" /* Signature */ + .byte pxe_length /* StructLength */ + .byte 0 /* StructCksum */ + .byte 0 /* StructRev */ + .byte 0 /* reserved_1 */ + .word undiheader, 0 /* UNDIROMID */ + .word 0, 0 /* BaseROMID */ + .word pxe_entry_sp, 0 /* EntryPointSP */ + .word pxe_entry_esp, 0 /* EntryPointESP */ + .word -1, -1 /* StatusCallout */ + .byte 0 /* reserved_2 */ + .byte SegDescCnt /* SegDescCnt */ + .word 0 /* FirstSelector */ +pxe_segments: + .word 0, 0, 0, _data16_memsz /* Stack */ + .word 0, 0, 0, _data16_memsz /* UNDIData */ + .word 0, 0, 0, _text16_memsz /* UNDICode */ + .word 0, 0, 0, _text16_memsz /* UNDICodeWrite */ + .word 0, 0, 0, 0 /* BC_Data */ + .word 0, 0, 0, 0 /* BC_Code */ + .word 0, 0, 0, 0 /* 
BC_CodeWrite */ + .equ SegDescCnt, ( ( . - pxe_segments ) / 8 ) + .equ pxe_length, . - ppxe + .size ppxe, . - ppxe + + /* Define undiheader=0 as a weak symbol for non-ROM builds */ + .section ".weak", "a", @nobits + .weak undiheader +undiheader: + +/**************************************************************************** + * PXENV+ structure + **************************************************************************** + */ + .section ".text16.data", "aw", @progbits + .globl pxenv + .align 16 +pxenv: + .ascii "PXENV+" /* Signature */ + .word 0x0201 /* Version */ + .byte pxenv_length /* Length */ + .byte 0 /* Checksum */ + .word pxenv_entry, 0 /* RMEntry */ + .long 0 /* PMEntry */ + .word 0 /* PMSelector */ + .word 0 /* StackSeg */ + .word _data16_memsz /* StackSize */ + .word 0 /* BC_CodeSeg */ + .word 0 /* BC_CodeSize */ + .word 0 /* BC_DataSeg */ + .word 0 /* BC_DataSize */ + .word 0 /* UNDIDataSeg */ + .word _data16_memsz /* UNDIDataSize */ + .word 0 /* UNDICodeSeg */ + .word _text16_memsz /* UNDICodeSize */ + .word ppxe, 0 /* PXEPtr */ + .equ pxenv_length, . - pxenv + .size pxenv, . - pxenv + +/**************************************************************************** + * pxenv_entry (16-bit far call) + * + * PXE API call PXENV+ entry point + * + * Parameters: + * %es:di : Far pointer to PXE parameter structure + * %bx : PXE API call + * Returns: + * %ax : PXE exit status + * Corrupts: + * none + **************************************************************************** + */ + /* Wyse Streaming Manager server (WLDRM13.BIN) assumes that + * the PXENV+ entry point is at UNDI_CS:0000; apparently, + * somebody at Wyse has difficulty distinguishing between the + * words "may" and "must"... + */ + .section ".text16.null", "ax", @progbits + .code16 +pxenv_null_entry: + jmp pxenv_entry + + .section ".text16", "ax", @progbits + .code16 +pxenv_entry: + virtcall pxe_api_call + lret + .size pxenv_entry, . 
- pxenv_entry + +/**************************************************************************** + * pxe_entry + * + * PXE API call !PXE entry point + * + * Parameters: + * stack : Far pointer to PXE parameter structure + * stack : PXE API call + * Returns: + * %ax : PXE exit status + * Corrupts: + * none + **************************************************************************** + */ + .section ".text16", "ax", @progbits + .code16 +pxe_entry: +pxe_entry_sp: + /* Preserve original %esp */ + pushl %esp + /* Zero high word of %esp to allow use of common code */ + movzwl %sp, %esp + jmp pxe_entry_common +pxe_entry_esp: + /* Preserve %esp to match behaviour of pxe_entry_sp */ + pushl %esp +pxe_entry_common: + /* Save PXENV+ API call registers */ + pushw %es + pushw %di + pushw %bx + /* Load !PXE parameters from stack into PXENV+ registers */ + addr32 movw 18(%esp), %bx + movw %bx, %es + addr32 movw 16(%esp), %di + addr32 movw 14(%esp), %bx + /* Make call as for PXENV+ */ + pushw %cs + call pxenv_entry + /* Restore PXENV+ registers */ + popw %bx + popw %di + popw %es + /* Restore original %esp and return */ + popl %esp + lret + .size pxe_entry, . 
- pxe_entry + +/**************************************************************************** + * pxe_int_1a + * + * PXE INT 1A handler + * + * Parameters: + * %ax : 0x5650 + * Returns: + * %ax : 0x564e + * %es:bx : Far pointer to the PXENV+ structure + * %edx : Physical address of the PXENV+ structure + * CF cleared + * Corrupts: + * none + **************************************************************************** + */ + .section ".text16", "ax", @progbits + .code16 + .globl pxe_int_1a +pxe_int_1a: + pushfw + cmpw $0x5650, %ax + jne 1f + /* INT 1A,5650 - PXE installation check */ + xorl %edx, %edx + movw %cs, %dx + movw %dx, %es + movw $pxenv, %bx + shll $4, %edx + addl $pxenv, %edx + movw $0x564e, %ax + pushw %bp + movw %sp, %bp + andb $~0x01, 8(%bp) /* Clear CF on return */ + popw %bp + popfw + iret +1: /* INT 1A,other - pass through */ + popfw + ljmp *%cs:pxe_int_1a_vector + + .section ".text16.data", "aw", @progbits + .globl pxe_int_1a_vector +pxe_int_1a_vector: .long 0 diff --git a/src/arch/x86/interface/pxe/pxe_exit_hook.c b/src/arch/x86/interface/pxe/pxe_exit_hook.c new file mode 100644 index 00000000..f92dae0d --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_exit_hook.c @@ -0,0 +1,65 @@ +/** @file + * + * PXE exit hook + * + */ + +/* + * Copyright (C) 2010 Shao Miller . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** PXE exit hook */ +extern segoff_t __data16 ( pxe_exit_hook ); +#define pxe_exit_hook __use_data16 ( pxe_exit_hook ) + +/** + * FILE EXIT HOOK + * + * @v file_exit_hook Pointer to a struct + * s_PXENV_FILE_EXIT_HOOK + * @v s_PXENV_FILE_EXIT_HOOK::Hook SEG16:OFF16 to jump to + * @ret #PXENV_EXIT_SUCCESS Successfully set hook + * @ret #PXENV_EXIT_FAILURE We're not an NBP build + * @ret s_PXENV_FILE_EXIT_HOOK::Status PXE status code + * + */ +static PXENV_EXIT_t +pxenv_file_exit_hook ( struct s_PXENV_FILE_EXIT_HOOK *file_exit_hook ) { + DBG ( "PXENV_FILE_EXIT_HOOK" ); + + /* We'll jump to the specified SEG16:OFF16 during exit */ + pxe_exit_hook.segment = file_exit_hook->Hook.segment; + pxe_exit_hook.offset = file_exit_hook->Hook.offset; + file_exit_hook->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** PXE file API */ +struct pxe_api_call pxe_file_api_exit_hook __pxe_api_call = + PXE_API_CALL ( PXENV_FILE_EXIT_HOOK, pxenv_file_exit_hook, + struct s_PXENV_FILE_EXIT_HOOK ); diff --git a/src/arch/x86/interface/pxe/pxe_file.c b/src/arch/x86/interface/pxe/pxe_file.c new file mode 100644 index 00000000..456ffb5f --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_file.c @@ -0,0 +1,346 @@ +/** @file + * + * PXE FILE API + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Copyright (C) 2007 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +FEATURE ( FEATURE_MISC, "PXEXT", DHCP_EB_FEATURE_PXE_EXT, 2 ); + +/** + * FILE OPEN + * + * @v file_open Pointer to a struct s_PXENV_FILE_OPEN + * @v s_PXENV_FILE_OPEN::FileName URL of file to open + * @ret #PXENV_EXIT_SUCCESS File was opened + * @ret #PXENV_EXIT_FAILURE File was not opened + * @ret s_PXENV_FILE_OPEN::Status PXE status code + * @ret s_PXENV_FILE_OPEN::FileHandle Handle of opened file + * + */ +static PXENV_EXIT_t pxenv_file_open ( struct s_PXENV_FILE_OPEN *file_open ) { + userptr_t filename; + size_t filename_len; + int fd; + + DBG ( "PXENV_FILE_OPEN" ); + + /* Copy name from external program, and open it */ + filename = real_to_user ( file_open->FileName.segment, + file_open->FileName.offset ); + filename_len = strlen_user ( filename, 0 ); + { + char uri_string[ filename_len + 1 ]; + + copy_from_user ( uri_string, filename, 0, + sizeof ( uri_string ) ); + DBG ( " %s", uri_string ); + fd = open ( uri_string ); + } + + if ( fd < 0 ) { + file_open->Status = PXENV_STATUS ( fd ); + 
return PXENV_EXIT_FAILURE; + } + + DBG ( " as file %d", fd ); + + file_open->FileHandle = fd; + file_open->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * FILE CLOSE + * + * @v file_close Pointer to a struct s_PXENV_FILE_CLOSE + * @v s_PXENV_FILE_CLOSE::FileHandle File handle + * @ret #PXENV_EXIT_SUCCESS File was closed + * @ret #PXENV_EXIT_FAILURE File was not closed + * @ret s_PXENV_FILE_CLOSE::Status PXE status code + * + */ +static PXENV_EXIT_t pxenv_file_close ( struct s_PXENV_FILE_CLOSE *file_close ) { + + DBG ( "PXENV_FILE_CLOSE %d", file_close->FileHandle ); + + close ( file_close->FileHandle ); + file_close->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * FILE SELECT + * + * @v file_select Pointer to a struct s_PXENV_FILE_SELECT + * @v s_PXENV_FILE_SELECT::FileHandle File handle + * @ret #PXENV_EXIT_SUCCESS File has been checked for readiness + * @ret #PXENV_EXIT_FAILURE File has not been checked for readiness + * @ret s_PXENV_FILE_SELECT::Status PXE status code + * @ret s_PXENV_FILE_SELECT::Ready Indication of readiness + * + */ +static PXENV_EXIT_t +pxenv_file_select ( struct s_PXENV_FILE_SELECT *file_select ) { + fd_set fdset; + int ready; + + DBG ( "PXENV_FILE_SELECT %d", file_select->FileHandle ); + + FD_ZERO ( &fdset ); + FD_SET ( file_select->FileHandle, &fdset ); + if ( ( ready = select ( &fdset, 0 ) ) < 0 ) { + file_select->Status = PXENV_STATUS ( ready ); + return PXENV_EXIT_FAILURE; + } + + file_select->Ready = ( ready ? 
RDY_READ : 0 ); + file_select->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * FILE READ + * + * @v file_read Pointer to a struct s_PXENV_FILE_READ + * @v s_PXENV_FILE_READ::FileHandle File handle + * @v s_PXENV_FILE_READ::BufferSize Size of data buffer + * @v s_PXENV_FILE_READ::Buffer Data buffer + * @ret #PXENV_EXIT_SUCCESS Data has been read from file + * @ret #PXENV_EXIT_FAILURE Data has not been read from file + * @ret s_PXENV_FILE_READ::Status PXE status code + * @ret s_PXENV_FILE_READ::Ready Indication of readiness + * @ret s_PXENV_FILE_READ::BufferSize Length of data read + * + */ +static PXENV_EXIT_t pxenv_file_read ( struct s_PXENV_FILE_READ *file_read ) { + userptr_t buffer; + ssize_t len; + + DBG ( "PXENV_FILE_READ %d to %04x:%04x+%04x", file_read->FileHandle, + file_read->Buffer.segment, file_read->Buffer.offset, + file_read->BufferSize ); + + buffer = real_to_user ( file_read->Buffer.segment, + file_read->Buffer.offset ); + if ( ( len = read_user ( file_read->FileHandle, buffer, 0, + file_read->BufferSize ) ) < 0 ) { + file_read->Status = PXENV_STATUS ( len ); + return PXENV_EXIT_FAILURE; + } + + DBG ( " read %04zx", ( ( size_t ) len ) ); + + file_read->BufferSize = len; + file_read->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * GET FILE SIZE + * + * @v get_file_size Pointer to a struct s_PXENV_GET_FILE_SIZE + * @v s_PXENV_GET_FILE_SIZE::FileHandle File handle + * @ret #PXENV_EXIT_SUCCESS File size has been determined + * @ret #PXENV_EXIT_FAILURE File size has not been determined + * @ret s_PXENV_GET_FILE_SIZE::Status PXE status code + * @ret s_PXENV_GET_FILE_SIZE::FileSize Size of file + */ +static PXENV_EXIT_t +pxenv_get_file_size ( struct s_PXENV_GET_FILE_SIZE *get_file_size ) { + ssize_t filesize; + + DBG ( "PXENV_GET_FILE_SIZE %d", get_file_size->FileHandle ); + + filesize = fsize ( get_file_size->FileHandle ); + if ( filesize < 0 ) { + get_file_size->Status = PXENV_STATUS ( filesize ); + 
return PXENV_EXIT_FAILURE; + } + + DBG ( " is %zd", ( ( size_t ) filesize ) ); + + get_file_size->FileSize = filesize; + get_file_size->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * FILE EXEC + * + * @v file_exec Pointer to a struct s_PXENV_FILE_EXEC + * @v s_PXENV_FILE_EXEC::Command Command to execute + * @ret #PXENV_EXIT_SUCCESS Command was executed successfully + * @ret #PXENV_EXIT_FAILURE Command was not executed successfully + * @ret s_PXENV_FILE_EXEC::Status PXE status code + * + */ +static PXENV_EXIT_t pxenv_file_exec ( struct s_PXENV_FILE_EXEC *file_exec ) { + userptr_t command; + size_t command_len; + int rc; + + DBG ( "PXENV_FILE_EXEC" ); + + /* Copy name from external program, and exec it */ + command = real_to_user ( file_exec->Command.segment, + file_exec->Command.offset ); + command_len = strlen_user ( command, 0 ); + { + char command_string[ command_len + 1 ]; + + copy_from_user ( command_string, command, 0, + sizeof ( command_string ) ); + DBG ( " %s", command_string ); + + if ( ( rc = system ( command_string ) ) != 0 ) { + file_exec->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + } + + file_exec->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * FILE CMDLINE + * + * @v file_cmdline Pointer to a struct s_PXENV_FILE_CMDLINE + * @v s_PXENV_FILE_CMDLINE::Buffer Buffer to contain command line + * @v s_PXENV_FILE_CMDLINE::BufferSize Size of buffer + * @ret #PXENV_EXIT_SUCCESS Command was executed successfully + * @ret #PXENV_EXIT_FAILURE Command was not executed successfully + * @ret s_PXENV_FILE_EXEC::Status PXE status code + * @ret s_PXENV_FILE_EXEC::BufferSize Length of command line (including NUL) + * + */ +static PXENV_EXIT_t +pxenv_file_cmdline ( struct s_PXENV_FILE_CMDLINE *file_cmdline ) { + userptr_t buffer; + size_t max_len; + size_t len; + + DBG ( "PXENV_FILE_CMDLINE to %04x:%04x+%04x \"%s\"\n", + file_cmdline->Buffer.segment, file_cmdline->Buffer.offset, + 
file_cmdline->BufferSize, pxe_cmdline ); + + buffer = real_to_user ( file_cmdline->Buffer.segment, + file_cmdline->Buffer.offset ); + len = file_cmdline->BufferSize; + max_len = ( pxe_cmdline ? + ( strlen ( pxe_cmdline ) + 1 /* NUL */ ) : 0 ); + if ( len > max_len ) + len = max_len; + copy_to_user ( buffer, 0, pxe_cmdline, len ); + file_cmdline->BufferSize = max_len; + + file_cmdline->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * FILE API CHECK + * + * @v file_exec Pointer to a struct s_PXENV_FILE_API_CHECK + * @v s_PXENV_FILE_API_CHECK::Magic Inbound magic number (0x91d447b2) + * @ret #PXENV_EXIT_SUCCESS Command was executed successfully + * @ret #PXENV_EXIT_FAILURE Command was not executed successfully + * @ret s_PXENV_FILE_API_CHECK::Status PXE status code + * @ret s_PXENV_FILE_API_CHECK::Magic Outbound magic number (0xe9c17b20) + * @ret s_PXENV_FILE_API_CHECK::Provider "iPXE" (0x45585067) + * @ret s_PXENV_FILE_API_CHECK::APIMask API function bitmask + * @ret s_PXENV_FILE_API_CHECK::Flags Reserved + * + */ +static PXENV_EXIT_t +pxenv_file_api_check ( struct s_PXENV_FILE_API_CHECK *file_api_check ) { + struct pxe_api_call *call; + unsigned int mask = 0; + unsigned int offset; + + DBG ( "PXENV_FILE_API_CHECK" ); + + /* Check for magic value */ + if ( file_api_check->Magic != 0x91d447b2 ) { + file_api_check->Status = PXENV_STATUS_BAD_FUNC; + return PXENV_EXIT_FAILURE; + } + + /* Check for required parameter size */ + if ( file_api_check->Size < sizeof ( *file_api_check ) ) { + file_api_check->Status = PXENV_STATUS_OUT_OF_RESOURCES; + return PXENV_EXIT_FAILURE; + } + + /* Determine supported calls */ + for_each_table_entry ( call, PXE_API_CALLS ) { + offset = ( call->opcode - PXENV_FILE_MIN ); + if ( offset <= ( PXENV_FILE_MAX - PXENV_FILE_MIN ) ) + mask |= ( 1 << offset ); + } + + /* Fill in parameters */ + file_api_check->Size = sizeof ( *file_api_check ); + file_api_check->Magic = 0xe9c17b20; + file_api_check->Provider = 0x45585067; /* 
"iPXE" */ + file_api_check->APIMask = mask; + file_api_check->Flags = 0; /* None defined */ + + file_api_check->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** PXE file API */ +struct pxe_api_call pxe_file_api[] __pxe_api_call = { + PXE_API_CALL ( PXENV_FILE_OPEN, pxenv_file_open, + struct s_PXENV_FILE_OPEN ), + PXE_API_CALL ( PXENV_FILE_CLOSE, pxenv_file_close, + struct s_PXENV_FILE_CLOSE ), + PXE_API_CALL ( PXENV_FILE_SELECT, pxenv_file_select, + struct s_PXENV_FILE_SELECT ), + PXE_API_CALL ( PXENV_FILE_READ, pxenv_file_read, + struct s_PXENV_FILE_READ ), + PXE_API_CALL ( PXENV_GET_FILE_SIZE, pxenv_get_file_size, + struct s_PXENV_GET_FILE_SIZE ), + PXE_API_CALL ( PXENV_FILE_EXEC, pxenv_file_exec, + struct s_PXENV_FILE_EXEC ), + PXE_API_CALL ( PXENV_FILE_CMDLINE, pxenv_file_cmdline, + struct s_PXENV_FILE_CMDLINE ), + PXE_API_CALL ( PXENV_FILE_API_CHECK, pxenv_file_api_check, + struct s_PXENV_FILE_API_CHECK ), +}; diff --git a/src/arch/x86/interface/pxe/pxe_loader.c b/src/arch/x86/interface/pxe/pxe_loader.c new file mode 100644 index 00000000..e6a2e072 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_loader.c @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2007 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include "pxe.h" +#include "pxe_call.h" + +/** @file + * + * PXE UNDI loader + * + */ + +/* PXENV_UNDI_LOADER + * + */ +PXENV_EXIT_t undi_loader ( struct s_UNDI_LOADER *undi_loader ) { + + /* Perform one-time initialisation (e.g. heap) */ + initialise(); + + DBG ( "[PXENV_UNDI_LOADER to CS %04x DS %04x]", + undi_loader->UNDI_CS, undi_loader->UNDI_DS ); + + /* Fill in UNDI loader structure */ + undi_loader->PXEptr.segment = rm_cs; + undi_loader->PXEptr.offset = __from_text16 ( &ppxe ); + undi_loader->PXENVptr.segment = rm_cs; + undi_loader->PXENVptr.offset = __from_text16 ( &pxenv ); + + undi_loader->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} diff --git a/src/arch/x86/interface/pxe/pxe_preboot.c b/src/arch/x86/interface/pxe/pxe_preboot.c new file mode 100644 index 00000000..09e721b3 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_preboot.c @@ -0,0 +1,397 @@ +/** @file + * + * PXE Preboot API + * + */ + +/* PXE API interface for Etherboot. + * + * Copyright (C) 2004 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pxe.h" +#include "pxe_call.h" + +/* Avoid dragging in isapnp.o unnecessarily */ +uint16_t isapnp_read_port; + +/** Zero-based versions of PXENV_GET_CACHED_INFO::PacketType */ +enum pxe_cached_info_indices { + CACHED_INFO_DHCPDISCOVER = ( PXENV_PACKET_TYPE_DHCP_DISCOVER - 1 ), + CACHED_INFO_DHCPACK = ( PXENV_PACKET_TYPE_DHCP_ACK - 1 ), + CACHED_INFO_BINL = ( PXENV_PACKET_TYPE_CACHED_REPLY - 1 ), + NUM_CACHED_INFOS +}; + +/** A cached DHCP packet */ +union pxe_cached_info { + struct dhcphdr dhcphdr; + /* This buffer must be *exactly* the size of a BOOTPLAYER_t + * structure, otherwise WinPE will die horribly. It takes the + * size of *our* buffer and feeds it in to us as the size of + * one of *its* buffers. If our buffer is larger than it + * expects, we therefore end up overwriting part of its data + * segment, since it tells us to do so. (D'oh!) + * + * Note that a BOOTPLAYER_t is not necessarily large enough to + * hold a DHCP packet; this is a flaw in the PXE spec. 
+ */ + BOOTPLAYER_t packet; +} __attribute__ (( packed )); + +/** A PXE DHCP packet creator */ +struct pxe_dhcp_packet_creator { + /** Create DHCP packet + * + * @v netdev Network device + * @v data Buffer for DHCP packet + * @v max_len Size of DHCP packet buffer + * @ret rc Return status code + */ + int ( * create ) ( struct net_device *netdev, void *data, + size_t max_len ); +}; + +/** PXE DHCP packet creators */ +static struct pxe_dhcp_packet_creator pxe_dhcp_packet_creators[] = { + [CACHED_INFO_DHCPDISCOVER] = { create_fakedhcpdiscover }, + [CACHED_INFO_DHCPACK] = { create_fakedhcpack }, + [CACHED_INFO_BINL] = { create_fakepxebsack }, +}; + +/** + * Name PXENV_GET_CACHED_INFO packet type + * + * @v packet_type Packet type + * @ret name Name of packet type + */ +static inline __attribute__ (( always_inline )) const char * +pxenv_get_cached_info_name ( int packet_type ) { + switch ( packet_type ) { + case PXENV_PACKET_TYPE_DHCP_DISCOVER: + return "DHCPDISCOVER"; + case PXENV_PACKET_TYPE_DHCP_ACK: + return "DHCPACK"; + case PXENV_PACKET_TYPE_CACHED_REPLY: + return "BINL"; + default: + return ""; + } +} + +/* The case in which the caller doesn't supply a buffer is really + * awkward to support given that we have multiple sources of options, + * and that we don't actually store the DHCP packets. (We may not + * even have performed DHCP; we may have obtained all configuration + * from non-volatile stored options or from the command line.) + * + * Some NBPs rely on the buffers we provide being persistent, so we + * can't just use the temporary packet buffer. 4.5kB of base memory + * always wasted just because some clients are too lazy to provide + * their own buffers... 
+ */ +static union pxe_cached_info __bss16_array ( cached_info, [NUM_CACHED_INFOS] ); +#define cached_info __use_data16 ( cached_info ) + +/** + * Construct cached DHCP packets + * + */ +void pxe_fake_cached_info ( void ) { + struct pxe_dhcp_packet_creator *creator; + union pxe_cached_info *info; + unsigned int i; + int rc; + + /* Sanity check */ + assert ( pxe_netdev != NULL ); + + /* Erase any stale packets */ + memset ( cached_info, 0, sizeof ( cached_info ) ); + + /* Construct all DHCP packets */ + for ( i = 0 ; i < ( sizeof ( pxe_dhcp_packet_creators ) / + sizeof ( pxe_dhcp_packet_creators[0] ) ) ; i++ ) { + + /* Construct DHCP packet */ + creator = &pxe_dhcp_packet_creators[i]; + info = &cached_info[i]; + if ( ( rc = creator->create ( pxe_netdev, info, + sizeof ( *info ) ) ) != 0 ) { + DBGC ( &pxe_netdev, " failed to build packet: %s\n", + strerror ( rc ) ); + /* Continue constructing remaining packets */ + } + } +} + +/** + * UNLOAD BASE CODE STACK + * + * @v None - + * @ret ... + * + */ +static PXENV_EXIT_t +pxenv_unload_stack ( struct s_PXENV_UNLOAD_STACK *unload_stack ) { + DBGC ( &pxe_netdev, "PXENV_UNLOAD_STACK\n" ); + + unload_stack->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_GET_CACHED_INFO + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_get_cached_info ( struct s_PXENV_GET_CACHED_INFO *get_cached_info ) { + union pxe_cached_info *info; + unsigned int idx; + size_t len; + userptr_t buffer; + + DBGC ( &pxe_netdev, "PXENV_GET_CACHED_INFO %s to %04x:%04x+%x", + pxenv_get_cached_info_name ( get_cached_info->PacketType ), + get_cached_info->Buffer.segment, + get_cached_info->Buffer.offset, get_cached_info->BufferSize ); + + /* Sanity check */ + idx = ( get_cached_info->PacketType - 1 ); + if ( idx >= NUM_CACHED_INFOS ) { + DBGC ( &pxe_netdev, " bad PacketType %d\n", + get_cached_info->PacketType ); + get_cached_info->Status = PXENV_STATUS_UNSUPPORTED; + return PXENV_EXIT_FAILURE; + } + info = &cached_info[idx]; + + 
/* Copy packet (if applicable) */ + len = get_cached_info->BufferSize; + if ( len == 0 ) { + /* Point client at our cached buffer. + * + * To add to the fun, Intel decided at some point in + * the evolution of the PXE specification to add the + * BufferLimit field, which we are meant to fill in + * with the length of our packet buffer, so that the + * caller can safely modify the boot server reply + * packet stored therein. However, this field was not + * present in earlier versions of the PXE spec, and + * there is at least one PXE NBP (Altiris) which + * allocates only exactly enough space for this + * earlier, shorter version of the structure. If we + * actually fill in the BufferLimit field, we + * therefore risk trashing random areas of the + * caller's memory. If we *don't* fill it in, then + * the caller is at liberty to assume that whatever + * random value happened to be in that location + * represents the length of the buffer we've just + * passed back to it. + * + * Since older PXE stacks won't fill this field in + * anyway, it's probably safe to assume that no + * callers actually rely on it, so we choose to not + * fill it in. 
+ */ + get_cached_info->Buffer.segment = rm_ds; + get_cached_info->Buffer.offset = __from_data16 ( info ); + get_cached_info->BufferSize = sizeof ( *info ); + DBGC ( &pxe_netdev, " using %04x:%04x+%04x['%x']", + get_cached_info->Buffer.segment, + get_cached_info->Buffer.offset, + get_cached_info->BufferSize, + get_cached_info->BufferLimit ); + } else { + /* Copy packet to client buffer */ + if ( len > sizeof ( *info ) ) + len = sizeof ( *info ); + if ( len < sizeof ( *info ) ) + DBGC ( &pxe_netdev, " buffer may be too short" ); + buffer = real_to_user ( get_cached_info->Buffer.segment, + get_cached_info->Buffer.offset ); + copy_to_user ( buffer, 0, info, len ); + get_cached_info->BufferSize = len; + } + + DBGC ( &pxe_netdev, "\n" ); + get_cached_info->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_RESTART_TFTP + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_restart_tftp ( struct s_PXENV_TFTP_READ_FILE *restart_tftp ) { + PXENV_EXIT_t tftp_exit; + + DBGC ( &pxe_netdev, "PXENV_RESTART_TFTP\n" ); + + /* Words cannot describe the complete mismatch between the PXE + * specification and any possible version of reality... + */ + restart_tftp->Buffer = PXE_LOAD_PHYS; /* Fixed by spec, apparently */ + restart_tftp->BufferSize = ( 0xa0000 - PXE_LOAD_PHYS ); /* Near enough */ + tftp_exit = pxenv_tftp_read_file ( restart_tftp ); + if ( tftp_exit != PXENV_EXIT_SUCCESS ) + return tftp_exit; + + /* Restart NBP */ + rmlongjmp ( pxe_restart_nbp, PXENV_RESTART_TFTP ); +} + +/* PXENV_START_UNDI + * + * Status: working + */ +static PXENV_EXIT_t pxenv_start_undi ( struct s_PXENV_START_UNDI *start_undi ) { + unsigned int bus_type; + unsigned int location; + struct net_device *netdev; + + DBGC ( &pxe_netdev, "PXENV_START_UNDI %04x:%04x:%04x\n", + start_undi->AX, start_undi->BX, start_undi->DX ); + + /* Determine bus type and location. 
Use a heuristic to decide + * whether we are PCI or ISAPnP + */ + if ( ( start_undi->DX >= ISAPNP_READ_PORT_MIN ) && + ( start_undi->DX <= ISAPNP_READ_PORT_MAX ) && + ( start_undi->BX >= ISAPNP_CSN_MIN ) && + ( start_undi->BX <= ISAPNP_CSN_MAX ) ) { + bus_type = BUS_TYPE_ISAPNP; + location = start_undi->BX; + /* Record ISAPnP read port for use by isapnp.c */ + isapnp_read_port = start_undi->DX; + } else { + bus_type = BUS_TYPE_PCI; + location = start_undi->AX; + } + + /* Probe for devices, etc. */ + startup(); + + /* Look for a matching net device */ + netdev = find_netdev_by_location ( bus_type, location ); + if ( ! netdev ) { + DBGC ( &pxe_netdev, "PXENV_START_UNDI could not find matching " + "net device\n" ); + start_undi->Status = PXENV_STATUS_UNDI_CANNOT_INITIALIZE_NIC; + return PXENV_EXIT_FAILURE; + } + DBGC ( &pxe_netdev, "PXENV_START_UNDI found net device %s\n", + netdev->name ); + + /* Activate PXE */ + pxe_activate ( netdev ); + + start_undi->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_STOP_UNDI + * + * Status: working + */ +static PXENV_EXIT_t pxenv_stop_undi ( struct s_PXENV_STOP_UNDI *stop_undi ) { + DBGC ( &pxe_netdev, "PXENV_STOP_UNDI\n" ); + + /* Deactivate PXE */ + pxe_deactivate(); + + /* Prepare for unload */ + shutdown_boot(); + + /* Check to see if we still have any hooked interrupts */ + if ( hooked_bios_interrupts != 0 ) { + DBGC ( &pxe_netdev, "PXENV_STOP_UNDI failed: %d interrupts " + "still hooked\n", hooked_bios_interrupts ); + stop_undi->Status = PXENV_STATUS_KEEP_UNDI; + return PXENV_EXIT_FAILURE; + } + + stop_undi->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_START_BASE + * + * Status: won't implement (requires major structural changes) + */ +static PXENV_EXIT_t pxenv_start_base ( struct s_PXENV_START_BASE *start_base ) { + DBGC ( &pxe_netdev, "PXENV_START_BASE\n" ); + + start_base->Status = PXENV_STATUS_UNSUPPORTED; + return PXENV_EXIT_FAILURE; +} + +/* PXENV_STOP_BASE + * + 
* Status: working + */ +static PXENV_EXIT_t pxenv_stop_base ( struct s_PXENV_STOP_BASE *stop_base ) { + DBGC ( &pxe_netdev, "PXENV_STOP_BASE\n" ); + + /* The only time we will be called is when the NBP is trying + * to shut down the PXE stack. There's nothing we need to do + * in this call. + */ + + stop_base->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** PXE preboot API */ +struct pxe_api_call pxe_preboot_api[] __pxe_api_call = { + PXE_API_CALL ( PXENV_UNLOAD_STACK, pxenv_unload_stack, + struct s_PXENV_UNLOAD_STACK ), + PXE_API_CALL ( PXENV_GET_CACHED_INFO, pxenv_get_cached_info, + struct s_PXENV_GET_CACHED_INFO ), + PXE_API_CALL ( PXENV_RESTART_TFTP, pxenv_restart_tftp, + struct s_PXENV_TFTP_READ_FILE ), + PXE_API_CALL ( PXENV_START_UNDI, pxenv_start_undi, + struct s_PXENV_START_UNDI ), + PXE_API_CALL ( PXENV_STOP_UNDI, pxenv_stop_undi, + struct s_PXENV_STOP_UNDI ), + PXE_API_CALL ( PXENV_START_BASE, pxenv_start_base, + struct s_PXENV_START_BASE ), + PXE_API_CALL ( PXENV_STOP_BASE, pxenv_stop_base, + struct s_PXENV_STOP_BASE ), +}; diff --git a/src/arch/x86/interface/pxe/pxe_tftp.c b/src/arch/x86/interface/pxe/pxe_tftp.c new file mode 100644 index 00000000..3b4c6d84 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_tftp.c @@ -0,0 +1,595 @@ +/** @file + * + * PXE TFTP API + * + */ + +/* + * Copyright (C) 2004 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** A PXE TFTP connection */ +struct pxe_tftp_connection { + /** Data transfer interface */ + struct interface xfer; + /** Data buffer */ + userptr_t buffer; + /** Size of data buffer */ + size_t size; + /** Starting offset of data buffer */ + size_t start; + /** File position */ + size_t offset; + /** Maximum file position */ + size_t max_offset; + /** Block size */ + size_t blksize; + /** Block index */ + unsigned int blkidx; + /** Overall return status code */ + int rc; +}; + +/** + * Close PXE TFTP connection + * + * @v pxe_tftp PXE TFTP connection + * @v rc Final status code + */ +static void pxe_tftp_close ( struct pxe_tftp_connection *pxe_tftp, int rc ) { + intf_shutdown ( &pxe_tftp->xfer, rc ); + pxe_tftp->rc = rc; +} + +/** + * Check flow control window + * + * @v pxe_tftp PXE TFTP connection + * @ret len Length of window + */ +static size_t pxe_tftp_xfer_window ( struct pxe_tftp_connection *pxe_tftp ) { + + return pxe_tftp->blksize; +} + +/** + * Receive new data + * + * @v pxe_tftp PXE TFTP connection + * @v iobuf I/O buffer + * @v meta Transfer metadata + * @ret rc Return status code + */ +static int pxe_tftp_xfer_deliver ( struct pxe_tftp_connection *pxe_tftp, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + size_t len = iob_len ( iobuf ); + int rc = 0; + + /* Calculate new buffer position */ + if ( meta->flags & 
XFER_FL_ABS_OFFSET ) + pxe_tftp->offset = 0; + pxe_tftp->offset += meta->offset; + + /* Copy data block to buffer */ + if ( len == 0 ) { + /* No data (pure seek); treat as success */ + } else if ( pxe_tftp->offset < pxe_tftp->start ) { + DBG ( " buffer underrun at %zx (min %zx)", + pxe_tftp->offset, pxe_tftp->start ); + rc = -ENOBUFS; + } else if ( ( pxe_tftp->offset + len ) > + ( pxe_tftp->start + pxe_tftp->size ) ) { + DBG ( " buffer overrun at %zx (max %zx)", + ( pxe_tftp->offset + len ), + ( pxe_tftp->start + pxe_tftp->size ) ); + rc = -ENOBUFS; + } else { + copy_to_user ( pxe_tftp->buffer, + ( pxe_tftp->offset - pxe_tftp->start ), + iobuf->data, len ); + } + + /* Calculate new buffer position */ + pxe_tftp->offset += len; + + /* Record maximum offset as the file size */ + if ( pxe_tftp->max_offset < pxe_tftp->offset ) + pxe_tftp->max_offset = pxe_tftp->offset; + + /* Terminate transfer on error */ + if ( rc != 0 ) + pxe_tftp_close ( pxe_tftp, rc ); + + free_iob ( iobuf ); + return rc; +} + +/** PXE TFTP connection interface operations */ +static struct interface_operation pxe_tftp_xfer_ops[] = { + INTF_OP ( xfer_deliver, struct pxe_tftp_connection *, + pxe_tftp_xfer_deliver ), + INTF_OP ( xfer_window, struct pxe_tftp_connection *, + pxe_tftp_xfer_window ), + INTF_OP ( intf_close, struct pxe_tftp_connection *, pxe_tftp_close ), +}; + +/** PXE TFTP connection interface descriptor */ +static struct interface_descriptor pxe_tftp_xfer_desc = + INTF_DESC ( struct pxe_tftp_connection, xfer, pxe_tftp_xfer_ops ); + +/** The PXE TFTP connection */ +static struct pxe_tftp_connection pxe_tftp = { + .xfer = INTF_INIT ( pxe_tftp_xfer_desc ), +}; + +/** + * Open PXE TFTP connection + * + * @v ipaddress IP address + * @v port TFTP server port (in network byte order) + * @v filename File name + * @v blksize Requested block size + * @ret rc Return status code + */ +static int pxe_tftp_open ( IP4_t ipaddress, UDP_PORT_t port, + UINT8_t *filename, UINT16_t blksize ) { + union { + 
struct sockaddr sa; + struct sockaddr_in sin; + } server; + struct uri *uri; + int rc; + + /* Reset PXE TFTP connection structure */ + memset ( &pxe_tftp, 0, sizeof ( pxe_tftp ) ); + intf_init ( &pxe_tftp.xfer, &pxe_tftp_xfer_desc, NULL ); + if ( blksize < TFTP_DEFAULT_BLKSIZE ) + blksize = TFTP_DEFAULT_BLKSIZE; + pxe_tftp.blksize = blksize; + pxe_tftp.rc = -EINPROGRESS; + + /* Construct URI */ + memset ( &server, 0, sizeof ( server ) ); + server.sin.sin_family = AF_INET; + server.sin.sin_addr.s_addr = ipaddress; + server.sin.sin_port = port; + DBG ( " %s", sock_ntoa ( &server.sa ) ); + if ( port ) + DBG ( ":%d", ntohs ( port ) ); + DBG ( ":%s", filename ); + uri = pxe_uri ( &server.sa, ( ( char * ) filename ) ); + if ( ! uri ) { + DBG ( " could not create URI\n" ); + return -ENOMEM; + } + + /* Open PXE TFTP connection */ + if ( ( rc = xfer_open_uri ( &pxe_tftp.xfer, uri ) ) != 0 ) { + DBG ( " could not open (%s)\n", strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * TFTP OPEN + * + * @v tftp_open Pointer to a struct s_PXENV_TFTP_OPEN + * @v s_PXENV_TFTP_OPEN::ServerIPAddress TFTP server IP address + * @v s_PXENV_TFTP_OPEN::GatewayIPAddress Relay agent IP address, or 0.0.0.0 + * @v s_PXENV_TFTP_OPEN::FileName Name of file to open + * @v s_PXENV_TFTP_OPEN::TFTPPort TFTP server UDP port + * @v s_PXENV_TFTP_OPEN::PacketSize TFTP blksize option to request + * @ret #PXENV_EXIT_SUCCESS File was opened + * @ret #PXENV_EXIT_FAILURE File was not opened + * @ret s_PXENV_TFTP_OPEN::Status PXE status code + * @ret s_PXENV_TFTP_OPEN::PacketSize Negotiated blksize + * @err #PXENV_STATUS_TFTP_INVALID_PACKET_SIZE Requested blksize too small + * + * Opens a TFTP connection for downloading a file a block at a time + * using pxenv_tftp_read(). + * + * If s_PXENV_TFTP_OPEN::GatewayIPAddress is 0.0.0.0, normal IP + * routing will take place. See the relevant + * @ref pxe_routing "implementation note" for more details. 
+ * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) + * + * @note According to the PXE specification version 2.1, this call + * "opens a file for reading/writing", though how writing is to be + * achieved without the existence of an API call %pxenv_tftp_write() + * is not made clear. + * + * @note Despite the existence of the numerous statements within the + * PXE specification of the form "...if a TFTP/MTFTP or UDP connection + * is active...", you cannot use pxenv_tftp_open() and + * pxenv_tftp_read() to read a file via MTFTP; only via plain old + * TFTP. If you want to use MTFTP, use pxenv_tftp_read_file() + * instead. Astute readers will note that, since + * pxenv_tftp_read_file() is an atomic operation from the point of + * view of the PXE API, it is conceptually impossible to issue any + * other PXE API call "if an MTFTP connection is active". 
+ */ +static PXENV_EXIT_t pxenv_tftp_open ( struct s_PXENV_TFTP_OPEN *tftp_open ) { + int rc; + + DBG ( "PXENV_TFTP_OPEN" ); + + /* Guard against callers that fail to close before re-opening */ + pxe_tftp_close ( &pxe_tftp, 0 ); + + /* Open connection */ + if ( ( rc = pxe_tftp_open ( tftp_open->ServerIPAddress, + tftp_open->TFTPPort, + tftp_open->FileName, + tftp_open->PacketSize ) ) != 0 ) { + tftp_open->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + /* Wait for OACK to arrive so that we have the block size */ + while ( ( ( rc = pxe_tftp.rc ) == -EINPROGRESS ) && + ( pxe_tftp.max_offset == 0 ) ) { + step(); + } + pxe_tftp.blksize = xfer_window ( &pxe_tftp.xfer ); + tftp_open->PacketSize = pxe_tftp.blksize; + DBG ( " blksize=%d", tftp_open->PacketSize ); + + /* EINPROGRESS is normal; we don't wait for the whole transfer */ + if ( rc == -EINPROGRESS ) + rc = 0; + + tftp_open->Status = PXENV_STATUS ( rc ); + return ( rc ? PXENV_EXIT_FAILURE : PXENV_EXIT_SUCCESS ); +} + +/** + * TFTP CLOSE + * + * @v tftp_close Pointer to a struct s_PXENV_TFTP_CLOSE + * @ret #PXENV_EXIT_SUCCESS File was closed successfully + * @ret #PXENV_EXIT_FAILURE File was not closed + * @ret s_PXENV_TFTP_CLOSE::Status PXE status code + * @err None - + * + * Close a connection previously opened with pxenv_tftp_open(). You + * must have previously opened a connection with pxenv_tftp_open(). + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) 
+ */ +static PXENV_EXIT_t pxenv_tftp_close ( struct s_PXENV_TFTP_CLOSE *tftp_close ) { + DBG ( "PXENV_TFTP_CLOSE" ); + + pxe_tftp_close ( &pxe_tftp, 0 ); + tftp_close->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * TFTP READ + * + * @v tftp_read Pointer to a struct s_PXENV_TFTP_READ + * @v s_PXENV_TFTP_READ::Buffer Address of data buffer + * @ret #PXENV_EXIT_SUCCESS Data was read successfully + * @ret #PXENV_EXIT_FAILURE Data was not read + * @ret s_PXENV_TFTP_READ::Status PXE status code + * @ret s_PXENV_TFTP_READ::PacketNumber TFTP packet number + * @ret s_PXENV_TFTP_READ::BufferSize Length of data written into buffer + * + * Reads a single packet from a connection previously opened with + * pxenv_tftp_open() into the data buffer pointed to by + * s_PXENV_TFTP_READ::Buffer. You must have previously opened a + * connection with pxenv_tftp_open(). The data written into + * s_PXENV_TFTP_READ::Buffer is just the file data; the various + * network headers have already been removed. + * + * The buffer must be large enough to contain a packet of the size + * negotiated via the s_PXENV_TFTP_OPEN::PacketSize field in the + * pxenv_tftp_open() call. It is worth noting that the PXE + * specification does @b not require the caller to fill in + * s_PXENV_TFTP_READ::BufferSize before calling pxenv_tftp_read(), so + * the PXE stack is free to ignore whatever value the caller might + * place there and just assume that the buffer is large enough. That + * said, it may be worth the caller always filling in + * s_PXENV_TFTP_READ::BufferSize to guard against PXE stacks that + * mistake it for an input parameter. + * + * The length of the TFTP data packet will be returned via + * s_PXENV_TFTP_READ::BufferSize. If this length is less than the + * blksize negotiated via s_PXENV_TFTP_OPEN::PacketSize in the call to + * pxenv_tftp_open(), this indicates that the block is the last block + * in the file. 
Note that zero is a valid length for + * s_PXENV_TFTP_READ::BufferSize, and will occur when the length of + * the file is a multiple of the blksize. + * + * The PXE specification doesn't actually state that calls to + * pxenv_tftp_read() will return the data packets in strict sequential + * order, though most PXE stacks will probably do so. The sequence + * number of the packet will be returned in + * s_PXENV_TFTP_READ::PacketNumber. The first packet in the file has + * a sequence number of one, not zero. + * + * To guard against flawed PXE stacks, the caller should probably set + * s_PXENV_TFTP_READ::PacketNumber to one less than the expected + * returned value (i.e. set it to zero for the first call to + * pxenv_tftp_read() and then re-use the returned s_PXENV_TFTP_READ + * parameter block for subsequent calls without modifying + * s_PXENV_TFTP_READ::PacketNumber between calls). The caller should + * also guard against potential problems caused by flawed + * implementations returning the occasional duplicate packet, by + * checking that the value returned in s_PXENV_TFTP_READ::PacketNumber + * is as expected (i.e. one greater than that returned from the + * previous call to pxenv_tftp_read()). + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) 
+ */ +static PXENV_EXIT_t pxenv_tftp_read ( struct s_PXENV_TFTP_READ *tftp_read ) { + int rc; + + DBG ( "PXENV_TFTP_READ to %04x:%04x", + tftp_read->Buffer.segment, tftp_read->Buffer.offset ); + + /* Read single block into buffer */ + pxe_tftp.buffer = real_to_user ( tftp_read->Buffer.segment, + tftp_read->Buffer.offset ); + pxe_tftp.size = pxe_tftp.blksize; + pxe_tftp.start = pxe_tftp.offset; + while ( ( ( rc = pxe_tftp.rc ) == -EINPROGRESS ) && + ( pxe_tftp.offset == pxe_tftp.start ) ) + step(); + pxe_tftp.buffer = UNULL; + tftp_read->BufferSize = ( pxe_tftp.offset - pxe_tftp.start ); + tftp_read->PacketNumber = ++pxe_tftp.blkidx; + + /* EINPROGRESS is normal if we haven't reached EOF yet */ + if ( rc == -EINPROGRESS ) + rc = 0; + + tftp_read->Status = PXENV_STATUS ( rc ); + return ( rc ? PXENV_EXIT_FAILURE : PXENV_EXIT_SUCCESS ); +} + +/** + * TFTP/MTFTP read file + * + * @v tftp_read_file Pointer to a struct s_PXENV_TFTP_READ_FILE + * @v s_PXENV_TFTP_READ_FILE::FileName File name + * @v s_PXENV_TFTP_READ_FILE::BufferSize Size of the receive buffer + * @v s_PXENV_TFTP_READ_FILE::Buffer Address of the receive buffer + * @v s_PXENV_TFTP_READ_FILE::ServerIPAddress TFTP server IP address + * @v s_PXENV_TFTP_READ_FILE::GatewayIPAddress Relay agent IP address + * @v s_PXENV_TFTP_READ_FILE::McastIPAddress File's multicast IP address + * @v s_PXENV_TFTP_READ_FILE::TFTPClntPort Client multicast UDP port + * @v s_PXENV_TFTP_READ_FILE::TFTPSrvPort Server multicast UDP port + * @v s_PXENV_TFTP_READ_FILE::TFTPOpenTimeOut Time to wait for first packet + * @v s_PXENV_TFTP_READ_FILE::TFTPReopenDelay MTFTP inactivity timeout + * @ret #PXENV_EXIT_SUCCESS File downloaded successfully + * @ret #PXENV_EXIT_FAILURE File not downloaded + * @ret s_PXENV_TFTP_READ_FILE::Status PXE status code + * @ret s_PXENV_TFTP_READ_FILE::BufferSize Length of downloaded file + * + * Downloads an entire file via either TFTP or MTFTP into the buffer + * pointed to by s_PXENV_TFTP_READ_FILE::Buffer. 
+ *
+ * The PXE specification does not make it clear how the caller
+ * requests that MTFTP be used rather than TFTP (or vice versa). One
+ * reasonable guess is that setting
+ * s_PXENV_TFTP_READ_FILE::McastIPAddress to 0.0.0.0 would cause TFTP
+ * to be used instead of MTFTP, though it is conceivable that some PXE
+ * stacks would interpret that as "use the DHCP-provided multicast IP
+ * address" instead. Some PXE stacks will not implement MTFTP at all,
+ * and will always use TFTP.
+ *
+ * It is not specified whether or not
+ * s_PXENV_TFTP_READ_FILE::TFTPSrvPort will be used as the TFTP server
+ * port for TFTP (rather than MTFTP) downloads. Callers should assume
+ * that the only way to access a TFTP server on a non-standard port is
+ * to use pxenv_tftp_open() and pxenv_tftp_read().
+ *
+ * If s_PXENV_TFTP_READ_FILE::GatewayIPAddress is 0.0.0.0, normal IP
+ * routing will take place. See the relevant
+ * @ref pxe_routing "implementation note" for more details.
+ *
+ * It is interesting to note that s_PXENV_TFTP_READ_FILE::Buffer is an
+ * #ADDR32_t type, i.e. nominally a flat physical address. Some PXE
+ * NBPs (e.g. NTLDR) are known to call pxenv_tftp_read_file() in real
+ * mode with s_PXENV_TFTP_READ_FILE::Buffer set to an address above
+ * 1MB. This means that PXE stacks must be prepared to write to areas
+ * outside base memory. Exactly how this is to be achieved is not
+ * specified, though using INT 15,87 is as close to a standard method
+ * as any, and should probably be used. Switching to protected-mode
+ * in order to access high memory will fail if pxenv_tftp_read_file()
+ * is called in V86 mode; it is reasonable to expect that a V86
+ * monitor would intercept the relatively well-defined INT 15,87 if it
+ * wants the PXE stack to be able to write to high memory.
+ * + * Things get even more interesting if pxenv_tftp_read_file() is + * called in protected mode, because there is then absolutely no way + * for the PXE stack to write to an absolute physical address. You + * can't even get around the problem by creating a special "access + * everything" segment in the s_PXE data structure, because the + * #SEGDESC_t descriptors are limited to 64kB in size. + * + * Previous versions of the PXE specification (e.g. WfM 1.1a) provide + * a separate API call, %pxenv_tftp_read_file_pmode(), specifically to + * work around this problem. The s_PXENV_TFTP_READ_FILE_PMODE + * parameter block splits s_PXENV_TFTP_READ_FILE::Buffer into + * s_PXENV_TFTP_READ_FILE_PMODE::BufferSelector and + * s_PXENV_TFTP_READ_FILE_PMODE::BufferOffset, i.e. it provides a + * protected-mode segment:offset address for the data buffer. This + * API call is no longer present in version 2.1 of the PXE + * specification. + * + * Etherboot makes the assumption that s_PXENV_TFTP_READ_FILE::Buffer + * is an offset relative to the caller's data segment, when + * pxenv_tftp_read_file() is called in protected mode. + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) 
+ */ +PXENV_EXIT_t pxenv_tftp_read_file ( struct s_PXENV_TFTP_READ_FILE + *tftp_read_file ) { + int rc; + + DBG ( "PXENV_TFTP_READ_FILE to %08x+%x", tftp_read_file->Buffer, + tftp_read_file->BufferSize ); + + /* Open TFTP file */ + if ( ( rc = pxe_tftp_open ( tftp_read_file->ServerIPAddress, 0, + tftp_read_file->FileName, 0 ) ) != 0 ) { + tftp_read_file->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + /* Read entire file */ + pxe_tftp.buffer = phys_to_user ( tftp_read_file->Buffer ); + pxe_tftp.size = tftp_read_file->BufferSize; + while ( ( rc = pxe_tftp.rc ) == -EINPROGRESS ) + step(); + pxe_tftp.buffer = UNULL; + tftp_read_file->BufferSize = pxe_tftp.max_offset; + + /* Close TFTP file */ + pxe_tftp_close ( &pxe_tftp, rc ); + + tftp_read_file->Status = PXENV_STATUS ( rc ); + return ( rc ? PXENV_EXIT_FAILURE : PXENV_EXIT_SUCCESS ); +} + +/** + * TFTP GET FILE SIZE + * + * @v tftp_get_fsize Pointer to a struct s_PXENV_TFTP_GET_FSIZE + * @v s_PXENV_TFTP_GET_FSIZE::ServerIPAddress TFTP server IP address + * @v s_PXENV_TFTP_GET_FSIZE::GatewayIPAddress Relay agent IP address + * @v s_PXENV_TFTP_GET_FSIZE::FileName File name + * @ret #PXENV_EXIT_SUCCESS File size was determined successfully + * @ret #PXENV_EXIT_FAILURE File size was not determined + * @ret s_PXENV_TFTP_GET_FSIZE::Status PXE status code + * @ret s_PXENV_TFTP_GET_FSIZE::FileSize File size + * + * Determine the size of a file on a TFTP server. This uses the + * "tsize" TFTP option, and so will not work with a TFTP server that + * does not support TFTP options, or that does not support the "tsize" + * option. + * + * The PXE specification states that this API call will @b not open a + * TFTP connection for subsequent use with pxenv_tftp_read(). (This + * is somewhat daft, since the only way to obtain the file size via + * the "tsize" option involves issuing a TFTP open request, but that's + * life.) + * + * You cannot call pxenv_tftp_get_fsize() while a TFTP or UDP + * connection is open. 
+ * + * If s_PXENV_TFTP_GET_FSIZE::GatewayIPAddress is 0.0.0.0, normal IP + * routing will take place. See the relevant + * @ref pxe_routing "implementation note" for more details. + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) + * + * @note There is no way to specify the TFTP server port with this API + * call. Though you can open a file using a non-standard TFTP server + * port (via s_PXENV_TFTP_OPEN::TFTPPort or, potentially, + * s_PXENV_TFTP_READ_FILE::TFTPSrvPort), you can only get the size of + * a file from a TFTP server listening on the standard TFTP port. + * "Consistency" is not a word in Intel's vocabulary. + */ +static PXENV_EXIT_t pxenv_tftp_get_fsize ( struct s_PXENV_TFTP_GET_FSIZE + *tftp_get_fsize ) { + int rc; + + DBG ( "PXENV_TFTP_GET_FSIZE" ); + + /* Open TFTP file */ + if ( ( rc = pxe_tftp_open ( tftp_get_fsize->ServerIPAddress, 0, + tftp_get_fsize->FileName, 0 ) ) != 0 ) { + tftp_get_fsize->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + /* Wait for initial seek to arrive, and record size */ + while ( ( ( rc = pxe_tftp.rc ) == -EINPROGRESS ) && + ( pxe_tftp.max_offset == 0 ) ) { + step(); + } + tftp_get_fsize->FileSize = pxe_tftp.max_offset; + DBG ( " fsize=%d", tftp_get_fsize->FileSize ); + + /* EINPROGRESS is normal; we don't wait for the whole transfer */ + if ( rc == -EINPROGRESS ) + rc = 0; + + /* Close TFTP file */ + pxe_tftp_close ( &pxe_tftp, rc ); + + tftp_get_fsize->Status = PXENV_STATUS ( rc ); + return ( rc ? 
PXENV_EXIT_FAILURE : PXENV_EXIT_SUCCESS ); +} + +/** PXE TFTP API */ +struct pxe_api_call pxe_tftp_api[] __pxe_api_call = { + PXE_API_CALL ( PXENV_TFTP_OPEN, pxenv_tftp_open, + struct s_PXENV_TFTP_OPEN ), + PXE_API_CALL ( PXENV_TFTP_CLOSE, pxenv_tftp_close, + struct s_PXENV_TFTP_CLOSE ), + PXE_API_CALL ( PXENV_TFTP_READ, pxenv_tftp_read, + struct s_PXENV_TFTP_READ ), + PXE_API_CALL ( PXENV_TFTP_READ_FILE, pxenv_tftp_read_file, + struct s_PXENV_TFTP_READ_FILE ), + PXE_API_CALL ( PXENV_TFTP_GET_FSIZE, pxenv_tftp_get_fsize, + struct s_PXENV_TFTP_GET_FSIZE ), +}; diff --git a/src/arch/x86/interface/pxe/pxe_udp.c b/src/arch/x86/interface/pxe/pxe_udp.c new file mode 100644 index 00000000..5a04f086 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_udp.c @@ -0,0 +1,484 @@ +/** @file + * + * PXE UDP API + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Copyright (C) 2004 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** A PXE UDP pseudo-header */ +struct pxe_udp_pseudo_header { + /** Source IP address */ + IP4_t src_ip; + /** Source port */ + UDP_PORT_t s_port; + /** Destination IP address */ + IP4_t dest_ip; + /** Destination port */ + UDP_PORT_t d_port; +} __attribute__ (( packed )); + +/** A PXE UDP connection */ +struct pxe_udp_connection { + /** Data transfer interface to UDP stack */ + struct interface xfer; + /** Local address */ + struct sockaddr_in local; + /** List of received packets */ + struct list_head list; +}; + +/** + * Receive PXE UDP data + * + * @v pxe_udp PXE UDP connection + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + * + * Receives a packet as part of the current pxenv_udp_read() + * operation. + */ +static int pxe_udp_deliver ( struct pxe_udp_connection *pxe_udp, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + struct pxe_udp_pseudo_header *pshdr; + struct sockaddr_in *sin_src; + struct sockaddr_in *sin_dest; + int rc; + + /* Extract metadata */ + assert ( meta ); + sin_src = ( struct sockaddr_in * ) meta->src; + assert ( sin_src ); + assert ( sin_src->sin_family == AF_INET ); + sin_dest = ( struct sockaddr_in * ) meta->dest; + assert ( sin_dest ); + assert ( sin_dest->sin_family == AF_INET ); + + /* Construct pseudo-header */ + if ( ( rc = iob_ensure_headroom ( iobuf, sizeof ( *pshdr ) ) ) != 0 ) { + DBG ( "PXE could not prepend pseudo-header\n" ); + rc = -ENOMEM; + goto drop; + } + pshdr = iob_push ( iobuf, sizeof ( *pshdr ) ); + pshdr->src_ip = sin_src->sin_addr.s_addr; + pshdr->s_port = sin_src->sin_port; + pshdr->dest_ip = sin_dest->sin_addr.s_addr; + pshdr->d_port = sin_dest->sin_port; + + /* Add to queue */ + list_add_tail ( &iobuf->list, &pxe_udp->list ); + + return 0; + + drop: + free_iob ( iobuf ); + return rc; +} + +/** PXE UDP data transfer interface operations */ +static struct interface_operation pxe_udp_xfer_operations[] = { + 
INTF_OP ( xfer_deliver, struct pxe_udp_connection *, pxe_udp_deliver ), +}; + +/** PXE UDP data transfer interface descriptor */ +static struct interface_descriptor pxe_udp_xfer_desc = + INTF_DESC ( struct pxe_udp_connection, xfer, pxe_udp_xfer_operations ); + +/** The PXE UDP connection */ +static struct pxe_udp_connection pxe_udp = { + .xfer = INTF_INIT ( pxe_udp_xfer_desc ), + .local = { + .sin_family = AF_INET, + }, + .list = LIST_HEAD_INIT ( pxe_udp.list ), +}; + +/** + * UDP OPEN + * + * @v pxenv_udp_open Pointer to a struct s_PXENV_UDP_OPEN + * @v s_PXENV_UDP_OPEN::src_ip IP address of this station, or 0.0.0.0 + * @ret #PXENV_EXIT_SUCCESS Always + * @ret s_PXENV_UDP_OPEN::Status PXE status code + * @err #PXENV_STATUS_UDP_OPEN UDP connection already open + * @err #PXENV_STATUS_OUT_OF_RESOURCES Could not open connection + * + * Prepares the PXE stack for communication using pxenv_udp_write() + * and pxenv_udp_read(). + * + * The IP address supplied in s_PXENV_UDP_OPEN::src_ip will be + * recorded and used as the local station's IP address for all further + * communication, including communication by means other than + * pxenv_udp_write() and pxenv_udp_read(). (If + * s_PXENV_UDP_OPEN::src_ip is 0.0.0.0, the local station's IP address + * will remain unchanged.) + * + * You can only have one open UDP connection at a time. This is not a + * meaningful restriction, since pxenv_udp_write() and + * pxenv_udp_read() allow you to specify arbitrary local and remote + * ports and an arbitrary remote address for each packet. According + * to the PXE specifiation, you cannot have a UDP connection open at + * the same time as a TFTP connection; this restriction does not apply + * to Etherboot. + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) 
+ * + * @note The PXE specification does not make it clear whether the IP + * address supplied in s_PXENV_UDP_OPEN::src_ip should be used only + * for this UDP connection, or retained for all future communication. + * The latter seems more consistent with typical PXE stack behaviour. + * + * @note Etherboot currently ignores the s_PXENV_UDP_OPEN::src_ip + * parameter. + * + */ +static PXENV_EXIT_t pxenv_udp_open ( struct s_PXENV_UDP_OPEN *pxenv_udp_open ) { + int rc; + + DBG ( "PXENV_UDP_OPEN" ); + + /* Record source IP address */ + pxe_udp.local.sin_addr.s_addr = pxenv_udp_open->src_ip; + DBG ( " %s\n", inet_ntoa ( pxe_udp.local.sin_addr ) ); + + /* Open network device, if necessary */ + if ( pxe_netdev && ( ! netdev_is_open ( pxe_netdev ) ) && + ( ( rc = netdev_open ( pxe_netdev ) ) != 0 ) ) { + DBG ( "PXENV_UDP_OPEN could not (implicitly) open %s: %s\n", + pxe_netdev->name, strerror ( rc ) ); + pxenv_udp_open->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + /* Open promiscuous UDP connection */ + intf_restart ( &pxe_udp.xfer, 0 ); + if ( ( rc = udp_open_promisc ( &pxe_udp.xfer ) ) != 0 ) { + DBG ( "PXENV_UDP_OPEN could not open promiscuous socket: %s\n", + strerror ( rc ) ); + pxenv_udp_open->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + pxenv_udp_open->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * UDP CLOSE + * + * @v pxenv_udp_close Pointer to a struct s_PXENV_UDP_CLOSE + * @ret #PXENV_EXIT_SUCCESS Always + * @ret s_PXENV_UDP_CLOSE::Status PXE status code + * @err None - + * + * Closes a UDP connection opened with pxenv_udp_open(). + * + * You can only have one open UDP connection at a time. You cannot + * have a UDP connection open at the same time as a TFTP connection. + * You cannot use pxenv_udp_close() to close a TFTP connection; use + * pxenv_tftp_close() instead. 
+ * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) + * + */ +static PXENV_EXIT_t +pxenv_udp_close ( struct s_PXENV_UDP_CLOSE *pxenv_udp_close ) { + struct io_buffer *iobuf; + struct io_buffer *tmp; + + DBG ( "PXENV_UDP_CLOSE\n" ); + + /* Close UDP connection */ + intf_restart ( &pxe_udp.xfer, 0 ); + + /* Discard any received packets */ + list_for_each_entry_safe ( iobuf, tmp, &pxe_udp.list, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } + + pxenv_udp_close->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * UDP WRITE + * + * @v pxenv_udp_write Pointer to a struct s_PXENV_UDP_WRITE + * @v s_PXENV_UDP_WRITE::ip Destination IP address + * @v s_PXENV_UDP_WRITE::gw Relay agent IP address, or 0.0.0.0 + * @v s_PXENV_UDP_WRITE::src_port Source UDP port, or 0 + * @v s_PXENV_UDP_WRITE::dst_port Destination UDP port + * @v s_PXENV_UDP_WRITE::buffer_size Length of the UDP payload + * @v s_PXENV_UDP_WRITE::buffer Address of the UDP payload + * @ret #PXENV_EXIT_SUCCESS Packet was transmitted successfully + * @ret #PXENV_EXIT_FAILURE Packet could not be transmitted + * @ret s_PXENV_UDP_WRITE::Status PXE status code + * @err #PXENV_STATUS_UDP_CLOSED UDP connection is not open + * @err #PXENV_STATUS_UNDI_TRANSMIT_ERROR Could not transmit packet + * + * Transmits a single UDP packet. A valid IP and UDP header will be + * prepended to the payload in s_PXENV_UDP_WRITE::buffer; the buffer + * should not contain precomputed IP and UDP headers, nor should it + * contain space allocated for these headers. The first byte of the + * buffer will be transmitted as the first byte following the UDP + * header. + * + * If s_PXENV_UDP_WRITE::gw is 0.0.0.0, normal IP routing will take + * place. 
See the relevant @ref pxe_routing "implementation note" for + * more details. + * + * If s_PXENV_UDP_WRITE::src_port is 0, port 2069 will be used. + * + * You must have opened a UDP connection with pxenv_udp_open() before + * calling pxenv_udp_write(). + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) + * + * @note Etherboot currently ignores the s_PXENV_UDP_WRITE::gw + * parameter. + * + */ +static PXENV_EXIT_t +pxenv_udp_write ( struct s_PXENV_UDP_WRITE *pxenv_udp_write ) { + struct sockaddr_in dest; + struct xfer_metadata meta = { + .src = ( struct sockaddr * ) &pxe_udp.local, + .dest = ( struct sockaddr * ) &dest, + .netdev = pxe_netdev, + }; + size_t len; + struct io_buffer *iobuf; + userptr_t buffer; + int rc; + + DBG ( "PXENV_UDP_WRITE" ); + + /* Construct destination socket address */ + memset ( &dest, 0, sizeof ( dest ) ); + dest.sin_family = AF_INET; + dest.sin_addr.s_addr = pxenv_udp_write->ip; + dest.sin_port = pxenv_udp_write->dst_port; + + /* Set local (source) port. PXE spec says source port is 2069 + * if not specified. Really, this ought to be set at UDP open + * time but hey, we didn't design this API. + */ + pxe_udp.local.sin_port = pxenv_udp_write->src_port; + if ( ! pxe_udp.local.sin_port ) + pxe_udp.local.sin_port = htons ( 2069 ); + + /* FIXME: we ignore the gateway specified, since we're + * confident of being able to do our own routing. We should + * probably allow for multiple gateways. + */ + + /* Allocate and fill data buffer */ + len = pxenv_udp_write->buffer_size; + iobuf = xfer_alloc_iob ( &pxe_udp.xfer, len ); + if ( ! 
iobuf ) { + DBG ( " out of memory\n" ); + pxenv_udp_write->Status = PXENV_STATUS_OUT_OF_RESOURCES; + return PXENV_EXIT_FAILURE; + } + buffer = real_to_user ( pxenv_udp_write->buffer.segment, + pxenv_udp_write->buffer.offset ); + copy_from_user ( iob_put ( iobuf, len ), buffer, 0, len ); + + DBG ( " %04x:%04x+%x %d->%s:%d\n", pxenv_udp_write->buffer.segment, + pxenv_udp_write->buffer.offset, pxenv_udp_write->buffer_size, + ntohs ( pxenv_udp_write->src_port ), + inet_ntoa ( dest.sin_addr ), + ntohs ( pxenv_udp_write->dst_port ) ); + + /* Transmit packet */ + if ( ( rc = xfer_deliver ( &pxe_udp.xfer, iobuf, &meta ) ) != 0 ) { + DBG ( "PXENV_UDP_WRITE could not transmit: %s\n", + strerror ( rc ) ); + pxenv_udp_write->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + pxenv_udp_write->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** + * UDP READ + * + * @v pxenv_udp_read Pointer to a struct s_PXENV_UDP_READ + * @v s_PXENV_UDP_READ::dest_ip Destination IP address, or 0.0.0.0 + * @v s_PXENV_UDP_READ::d_port Destination UDP port, or 0 + * @v s_PXENV_UDP_READ::buffer_size Size of the UDP payload buffer + * @v s_PXENV_UDP_READ::buffer Address of the UDP payload buffer + * @ret #PXENV_EXIT_SUCCESS A packet has been received + * @ret #PXENV_EXIT_FAILURE No packet has been received + * @ret s_PXENV_UDP_READ::Status PXE status code + * @ret s_PXENV_UDP_READ::src_ip Source IP address + * @ret s_PXENV_UDP_READ::dest_ip Destination IP address + * @ret s_PXENV_UDP_READ::s_port Source UDP port + * @ret s_PXENV_UDP_READ::d_port Destination UDP port + * @ret s_PXENV_UDP_READ::buffer_size Length of UDP payload + * @err #PXENV_STATUS_UDP_CLOSED UDP connection is not open + * @err #PXENV_STATUS_FAILURE No packet was ready to read + * + * Receive a single UDP packet. This is a non-blocking call; if no + * packet is ready to read, the call will return instantly with + * s_PXENV_UDP_READ::Status==PXENV_STATUS_FAILURE. 
+ * + * If s_PXENV_UDP_READ::dest_ip is 0.0.0.0, UDP packets addressed to + * any IP address will be accepted and may be returned to the caller. + * + * If s_PXENV_UDP_READ::d_port is 0, UDP packets addressed to any UDP + * port will be accepted and may be returned to the caller. + * + * You must have opened a UDP connection with pxenv_udp_open() before + * calling pxenv_udp_read(). + * + * On x86, you must set the s_PXE::StatusCallout field to a nonzero + * value before calling this function in protected mode. You cannot + * call this function with a 32-bit stack segment. (See the relevant + * @ref pxe_x86_pmode16 "implementation note" for more details.) + * + * @note The PXE specification (version 2.1) does not state that we + * should fill in s_PXENV_UDP_READ::dest_ip and + * s_PXENV_UDP_READ::d_port, but Microsoft Windows' NTLDR program + * expects us to do so, and will fail if we don't. + * + */ +static PXENV_EXIT_t pxenv_udp_read ( struct s_PXENV_UDP_READ *pxenv_udp_read ) { + struct in_addr dest_ip_wanted = { .s_addr = pxenv_udp_read->dest_ip }; + struct in_addr dest_ip; + struct io_buffer *iobuf; + struct pxe_udp_pseudo_header *pshdr; + uint16_t d_port_wanted = pxenv_udp_read->d_port; + uint16_t d_port; + userptr_t buffer; + size_t len; + + /* Try receiving a packet, if the queue is empty */ + if ( list_empty ( &pxe_udp.list ) ) + step(); + + /* Remove first packet from the queue */ + iobuf = list_first_entry ( &pxe_udp.list, struct io_buffer, list ); + if ( ! 
iobuf ) { + /* No packet received */ + DBG2 ( "PXENV_UDP_READ\n" ); + goto no_packet; + } + list_del ( &iobuf->list ); + + /* Strip pseudo-header */ + assert ( iob_len ( iobuf ) >= sizeof ( *pshdr ) ); + pshdr = iobuf->data; + iob_pull ( iobuf, sizeof ( *pshdr ) ); + dest_ip.s_addr = pshdr->dest_ip; + d_port = pshdr->d_port; + DBG ( "PXENV_UDP_READ" ); + + /* Filter on destination address and/or port */ + if ( dest_ip_wanted.s_addr && + ( dest_ip_wanted.s_addr != dest_ip.s_addr ) ) { + DBG ( " wrong IP %s", inet_ntoa ( dest_ip ) ); + DBG ( " (wanted %s)\n", inet_ntoa ( dest_ip_wanted ) ); + goto drop; + } + if ( d_port_wanted && ( d_port_wanted != d_port ) ) { + DBG ( " wrong port %d", htons ( d_port ) ); + DBG ( " (wanted %d)\n", htons ( d_port_wanted ) ); + goto drop; + } + + /* Copy packet to buffer and record length */ + buffer = real_to_user ( pxenv_udp_read->buffer.segment, + pxenv_udp_read->buffer.offset ); + len = iob_len ( iobuf ); + if ( len > pxenv_udp_read->buffer_size ) + len = pxenv_udp_read->buffer_size; + copy_to_user ( buffer, 0, iobuf->data, len ); + pxenv_udp_read->buffer_size = len; + + /* Fill in source/dest information */ + pxenv_udp_read->src_ip = pshdr->src_ip; + pxenv_udp_read->s_port = pshdr->s_port; + pxenv_udp_read->dest_ip = pshdr->dest_ip; + pxenv_udp_read->d_port = pshdr->d_port; + + DBG ( " %04x:%04x+%x %s:", pxenv_udp_read->buffer.segment, + pxenv_udp_read->buffer.offset, pxenv_udp_read->buffer_size, + inet_ntoa ( *( ( struct in_addr * ) &pxenv_udp_read->src_ip ) )); + DBG ( "%d<-%s:%d\n", ntohs ( pxenv_udp_read->s_port ), + inet_ntoa ( *( ( struct in_addr * ) &pxenv_udp_read->dest_ip ) ), + ntohs ( pxenv_udp_read->d_port ) ); + + /* Free I/O buffer */ + free_iob ( iobuf ); + + pxenv_udp_read->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; + + drop: + free_iob ( iobuf ); + no_packet: + pxenv_udp_read->Status = PXENV_STATUS_FAILURE; + return PXENV_EXIT_FAILURE; +} + +/** PXE UDP API */ +struct pxe_api_call pxe_udp_api[] 
__pxe_api_call = { + PXE_API_CALL ( PXENV_UDP_OPEN, pxenv_udp_open, + struct s_PXENV_UDP_OPEN ), + PXE_API_CALL ( PXENV_UDP_CLOSE, pxenv_udp_close, + struct s_PXENV_UDP_CLOSE ), + PXE_API_CALL ( PXENV_UDP_WRITE, pxenv_udp_write, + struct s_PXENV_UDP_WRITE ), + PXE_API_CALL ( PXENV_UDP_READ, pxenv_udp_read, + struct s_PXENV_UDP_READ ), +}; diff --git a/src/arch/x86/interface/pxe/pxe_undi.c b/src/arch/x86/interface/pxe/pxe_undi.c new file mode 100644 index 00000000..2eb68178 --- /dev/null +++ b/src/arch/x86/interface/pxe/pxe_undi.c @@ -0,0 +1,1084 @@ +/** @file + * + * PXE UNDI API + * + */ + +/* + * Copyright (C) 2004 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pxe.h" + +/** + * Count of outstanding transmitted packets + * + * This is incremented each time PXENV_UNDI_TRANSMIT is called, and + * decremented each time that PXENV_UNDI_ISR is called with the TX + * queue empty, stopping when the count reaches zero. This allows us + * to provide a pessimistic approximation of TX completion events to + * the PXE NBP simply by monitoring the netdev's TX queue. + */ +static int undi_tx_count = 0; + +struct net_device *pxe_netdev = NULL; + +/** Transmit profiler */ +static struct profiler undi_tx_profiler __profiler = { .name = "undi.tx" }; + +/** + * Set network device as current PXE network device + * + * @v netdev Network device, or NULL + */ +void pxe_set_netdev ( struct net_device *netdev ) { + + if ( pxe_netdev ) { + netdev_rx_unfreeze ( pxe_netdev ); + netdev_put ( pxe_netdev ); + } + + pxe_netdev = NULL; + + if ( netdev ) + pxe_netdev = netdev_get ( netdev ); +} + +/** + * Open PXE network device + * + * @ret rc Return status code + */ +static int pxe_netdev_open ( void ) { + int rc; + + assert ( pxe_netdev != NULL ); + + if ( ( rc = netdev_open ( pxe_netdev ) ) != 0 ) + return rc; + + netdev_rx_freeze ( pxe_netdev ); + netdev_irq ( pxe_netdev, 1 ); + + return 0; +} + +/** + * Close PXE network device + * + */ +static void pxe_netdev_close ( void ) { + + assert ( pxe_netdev != NULL ); + netdev_rx_unfreeze ( pxe_netdev ); + netdev_irq ( pxe_netdev, 0 ); + netdev_close ( pxe_netdev ); + undi_tx_count = 0; +} + +/** + * Dump multicast address list + * + * @v mcast PXE multicast address list + */ +static void pxe_dump_mcast_list ( struct s_PXENV_UNDI_MCAST_ADDRESS *mcast ) { + struct ll_protocol *ll_protocol = pxe_netdev->ll_protocol; + unsigned int i; + + for ( i = 0 ; i < mcast->MCastAddrCount ; i++ ) { + DBGC ( &pxe_netdev, " 
%s", + ll_protocol->ntoa ( mcast->McastAddr[i] ) ); + } +} + +/* PXENV_UNDI_STARTUP + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_startup ( struct s_PXENV_UNDI_STARTUP *undi_startup ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_STARTUP\n" ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_STARTUP called with no " + "network device\n" ); + undi_startup->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + undi_startup->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_CLEANUP + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_cleanup ( struct s_PXENV_UNDI_CLEANUP *undi_cleanup ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_CLEANUP\n" ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_CLEANUP called with no " + "network device\n" ); + undi_cleanup->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Close network device */ + pxe_netdev_close(); + + undi_cleanup->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_INITIALIZE + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_initialize ( struct s_PXENV_UNDI_INITIALIZE *undi_initialize ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_INITIALIZE protocolini %08x\n", + undi_initialize->ProtocolIni ); + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_INITIALIZE called with no " + "network device\n" ); + undi_initialize->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + undi_initialize->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_RESET_ADAPTER + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_reset_adapter ( struct s_PXENV_UNDI_RESET *undi_reset_adapter ) { + int rc; + + DBGC ( &pxe_netdev, "PXENV_UNDI_RESET_ADAPTER" ); + pxe_dump_mcast_list ( &undi_reset_adapter->R_Mcast_Buf ); + DBGC ( &pxe_netdev, "\n" ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_RESET_ADAPTER called with no " + "network device\n" ); + undi_reset_adapter->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Close and reopen network device */ + pxe_netdev_close(); + if ( ( rc = pxe_netdev_open() ) != 0 ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_RESET_ADAPTER could not " + "reopen %s: %s\n", pxe_netdev->name, strerror ( rc ) ); + undi_reset_adapter->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + undi_reset_adapter->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_SHUTDOWN + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_shutdown ( struct s_PXENV_UNDI_SHUTDOWN *undi_shutdown ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_SHUTDOWN\n" ); + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_SHUTDOWN called with no " + "network device\n" ); + undi_shutdown->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Close network device */ + pxe_netdev_close(); + + undi_shutdown->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_OPEN + * + * Status: working + */ +static PXENV_EXIT_t pxenv_undi_open ( struct s_PXENV_UNDI_OPEN *undi_open ) { + int rc; + + DBGC ( &pxe_netdev, "PXENV_UNDI_OPEN flag %04x filter %04x", + undi_open->OpenFlag, undi_open->PktFilter ); + pxe_dump_mcast_list ( &undi_open->R_Mcast_Buf ); + DBGC ( &pxe_netdev, "\n" ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_OPEN called with no " + "network device\n" ); + undi_open->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Open network device */ + if ( ( rc = pxe_netdev_open() ) != 0 ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_OPEN could not open %s: %s\n", + pxe_netdev->name, strerror ( rc ) ); + undi_open->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + undi_open->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_CLOSE + * + * Status: working + */ +static PXENV_EXIT_t pxenv_undi_close ( struct s_PXENV_UNDI_CLOSE *undi_close ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_CLOSE\n" ); + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_CLOSE called with no " + "network device\n" ); + undi_close->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Close network device */ + pxe_netdev_close(); + + undi_close->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_TRANSMIT + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_transmit ( struct s_PXENV_UNDI_TRANSMIT *undi_transmit ) { + struct s_PXENV_UNDI_TBD tbd; + struct DataBlk *datablk; + struct io_buffer *iobuf; + struct net_protocol *net_protocol; + struct ll_protocol *ll_protocol; + char destaddr[MAX_LL_ADDR_LEN]; + const void *ll_dest; + size_t len; + unsigned int i; + int rc; + + /* Start profiling */ + profile_start ( &undi_tx_profiler ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_TRANSMIT called with no " + "network device\n" ); + undi_transmit->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC2 ( &pxe_netdev, "PXENV_UNDI_TRANSMIT" ); + + /* Forcibly enable interrupts and freeze receive queue + * processing at this point, to work around callers that never + * call PXENV_UNDI_OPEN before attempting to use the UNDI API. + */ + if ( ! netdev_rx_frozen ( pxe_netdev ) ) { + netdev_rx_freeze ( pxe_netdev ); + netdev_irq ( pxe_netdev, 1 ); + } + + /* Identify network-layer protocol */ + switch ( undi_transmit->Protocol ) { + case P_IP: net_protocol = &ipv4_protocol; break; + case P_ARP: net_protocol = &arp_protocol; break; + case P_RARP: net_protocol = &rarp_protocol; break; + case P_UNKNOWN: + net_protocol = NULL; + break; + default: + DBGC2 ( &pxe_netdev, " %02x invalid protocol\n", + undi_transmit->Protocol ); + undi_transmit->Status = PXENV_STATUS_UNDI_INVALID_PARAMETER; + return PXENV_EXIT_FAILURE; + } + DBGC2 ( &pxe_netdev, " %s", + ( net_protocol ? 
net_protocol->name : "RAW" ) ); + + /* Calculate total packet length */ + copy_from_real ( &tbd, undi_transmit->TBD.segment, + undi_transmit->TBD.offset, sizeof ( tbd ) ); + len = tbd.ImmedLength; + DBGC2 ( &pxe_netdev, " %04x:%04x+%x", tbd.Xmit.segment, tbd.Xmit.offset, + tbd.ImmedLength ); + for ( i = 0 ; i < tbd.DataBlkCount ; i++ ) { + datablk = &tbd.DataBlock[i]; + len += datablk->TDDataLen; + DBGC2 ( &pxe_netdev, " %04x:%04x+%x", + datablk->TDDataPtr.segment, datablk->TDDataPtr.offset, + datablk->TDDataLen ); + } + + /* Allocate and fill I/O buffer */ + iobuf = alloc_iob ( MAX_LL_HEADER_LEN + + ( ( len > IOB_ZLEN ) ? len : IOB_ZLEN ) ); + if ( ! iobuf ) { + DBGC2 ( &pxe_netdev, " could not allocate iobuf\n" ); + undi_transmit->Status = PXENV_STATUS_OUT_OF_RESOURCES; + return PXENV_EXIT_FAILURE; + } + iob_reserve ( iobuf, MAX_LL_HEADER_LEN ); + copy_from_real ( iob_put ( iobuf, tbd.ImmedLength ), tbd.Xmit.segment, + tbd.Xmit.offset, tbd.ImmedLength ); + for ( i = 0 ; i < tbd.DataBlkCount ; i++ ) { + datablk = &tbd.DataBlock[i]; + copy_from_real ( iob_put ( iobuf, datablk->TDDataLen ), + datablk->TDDataPtr.segment, + datablk->TDDataPtr.offset, + datablk->TDDataLen ); + } + + /* Add link-layer header, if required to do so */ + if ( net_protocol != NULL ) { + + /* Calculate destination address */ + ll_protocol = pxe_netdev->ll_protocol; + if ( undi_transmit->XmitFlag == XMT_DESTADDR ) { + copy_from_real ( destaddr, + undi_transmit->DestAddr.segment, + undi_transmit->DestAddr.offset, + ll_protocol->ll_addr_len ); + ll_dest = destaddr; + DBGC2 ( &pxe_netdev, " DEST %s", + ll_protocol->ntoa ( ll_dest ) ); + } else { + ll_dest = pxe_netdev->ll_broadcast; + DBGC2 ( &pxe_netdev, " BCAST" ); + } + + /* Add link-layer header */ + if ( ( rc = ll_protocol->push ( pxe_netdev, iobuf, ll_dest, + pxe_netdev->ll_addr, + net_protocol->net_proto ))!=0){ + DBGC2 ( &pxe_netdev, " could not add link-layer " + "header: %s\n", strerror ( rc ) ); + free_iob ( iobuf ); + 
undi_transmit->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + } + + /* Flag transmission as in-progress. Do this before starting + * to transmit the packet, because the ISR may trigger before + * we return from netdev_tx(). + */ + undi_tx_count++; + + /* Transmit packet */ + DBGC2 ( &pxe_netdev, "\n" ); + if ( ( rc = netdev_tx ( pxe_netdev, iobuf ) ) != 0 ) { + DBGC2 ( &pxe_netdev, "PXENV_UNDI_TRANSMIT could not transmit: " + "%s\n", strerror ( rc ) ); + undi_tx_count--; + undi_transmit->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + + profile_stop ( &undi_tx_profiler ); + undi_transmit->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_SET_MCAST_ADDRESS + * + * Status: working (for NICs that support receive-all-multicast) + */ +static PXENV_EXIT_t +pxenv_undi_set_mcast_address ( struct s_PXENV_UNDI_SET_MCAST_ADDRESS + *undi_set_mcast_address ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_SET_MCAST_ADDRESS" ); + pxe_dump_mcast_list ( &undi_set_mcast_address->R_Mcast_Buf ); + DBGC ( &pxe_netdev, "\n" ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_SET_MCAST_ADDRESS called with " + "no network device\n" ); + undi_set_mcast_address->Status = + PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + undi_set_mcast_address->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_SET_STATION_ADDRESS + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_set_station_address ( struct s_PXENV_UNDI_SET_STATION_ADDRESS + *undi_set_station_address ) { + struct ll_protocol *ll_protocol; + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_SET_STATION_ADDRESS called " + "with no network device\n" ); + undi_set_station_address->Status = + PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + ll_protocol = pxe_netdev->ll_protocol; + DBGC ( &pxe_netdev, "PXENV_UNDI_SET_STATION_ADDRESS %s", + ll_protocol->ntoa ( undi_set_station_address->StationAddress ) ); + + /* If adapter is open, the change will have no effect; return + * an error + */ + if ( netdev_is_open ( pxe_netdev ) ) { + DBGC ( &pxe_netdev, " failed: netdev is open\n" ); + undi_set_station_address->Status = + PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Update MAC address */ + memcpy ( pxe_netdev->ll_addr, + &undi_set_station_address->StationAddress, + ll_protocol->ll_addr_len ); + + DBGC ( &pxe_netdev, "\n" ); + undi_set_station_address->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_SET_PACKET_FILTER + * + * Status: won't implement (would require driver API changes for no + * real benefit) + */ +static PXENV_EXIT_t +pxenv_undi_set_packet_filter ( struct s_PXENV_UNDI_SET_PACKET_FILTER + *undi_set_packet_filter ) { + + DBGC ( &pxe_netdev, "PXENV_UNDI_SET_PACKET_FILTER %02x\n", + undi_set_packet_filter->filter ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_SET_PACKET_FILTER called with " + "no network device\n" ); + undi_set_packet_filter->Status = + PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Pretend that we succeeded, otherwise the 3Com DOS UNDI + * driver refuses to load. (We ignore the filter value in the + * PXENV_UNDI_OPEN call anyway.) 
+ */ + undi_set_packet_filter->Status = PXENV_STATUS_SUCCESS; + + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_GET_INFORMATION + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_get_information ( struct s_PXENV_UNDI_GET_INFORMATION + *undi_get_information ) { + struct device *dev; + struct ll_protocol *ll_protocol; + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_INFORMATION called with no " + "network device\n" ); + undi_get_information->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_INFORMATION" ); + + /* Fill in information */ + dev = pxe_netdev->dev; + ll_protocol = pxe_netdev->ll_protocol; + undi_get_information->BaseIo = dev->desc.ioaddr; + undi_get_information->IntNumber = + ( netdev_irq_supported ( pxe_netdev ) ? dev->desc.irq : 0 ); + /* Cheat: assume all cards can cope with this */ + undi_get_information->MaxTranUnit = ETH_MAX_MTU; + undi_get_information->HwType = ntohs ( ll_protocol->ll_proto ); + undi_get_information->HwAddrLen = ll_protocol->ll_addr_len; + assert ( ll_protocol->ll_addr_len <= + sizeof ( undi_get_information->CurrentNodeAddress ) ); + memcpy ( &undi_get_information->CurrentNodeAddress, + pxe_netdev->ll_addr, + sizeof ( undi_get_information->CurrentNodeAddress ) ); + ll_protocol->init_addr ( pxe_netdev->hw_addr, + &undi_get_information->PermNodeAddress ); + undi_get_information->ROMAddress = 0; + /* nic.rom_info->rom_segment; */ + /* We only provide the ability to receive or transmit a single + * packet at a time. This is a bootloader, not an OS. 
+ */ + undi_get_information->RxBufCt = 1; + undi_get_information->TxBufCt = 1; + + DBGC ( &pxe_netdev, " io %04x irq %d mtu %d %s %s\n", + undi_get_information->BaseIo, undi_get_information->IntNumber, + undi_get_information->MaxTranUnit, ll_protocol->name, + ll_protocol->ntoa ( &undi_get_information->CurrentNodeAddress )); + undi_get_information->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_GET_STATISTICS + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_get_statistics ( struct s_PXENV_UNDI_GET_STATISTICS + *undi_get_statistics ) { + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_STATISTICS called with no " + "network device\n" ); + undi_get_statistics->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_STATISTICS" ); + + /* Report statistics */ + undi_get_statistics->XmtGoodFrames = pxe_netdev->tx_stats.good; + undi_get_statistics->RcvGoodFrames = pxe_netdev->rx_stats.good; + undi_get_statistics->RcvCRCErrors = pxe_netdev->rx_stats.bad; + undi_get_statistics->RcvResourceErrors = pxe_netdev->rx_stats.bad; + DBGC ( &pxe_netdev, " txok %d rxok %d rxcrc %d rxrsrc %d\n", + undi_get_statistics->XmtGoodFrames, + undi_get_statistics->RcvGoodFrames, + undi_get_statistics->RcvCRCErrors, + undi_get_statistics->RcvResourceErrors ); + + undi_get_statistics->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_CLEAR_STATISTICS + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_clear_statistics ( struct s_PXENV_UNDI_CLEAR_STATISTICS + *undi_clear_statistics ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_CLEAR_STATISTICS\n" ); + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_CLEAR_STATISTICS called with " + "no network device\n" ); + undi_clear_statistics->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + /* Clear statistics */ + memset ( &pxe_netdev->tx_stats, 0, sizeof ( pxe_netdev->tx_stats ) ); + memset ( &pxe_netdev->rx_stats, 0, sizeof ( pxe_netdev->rx_stats ) ); + + undi_clear_statistics->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_INITIATE_DIAGS + * + * Status: won't implement (would require driver API changes for no + * real benefit) + */ +static PXENV_EXIT_t +pxenv_undi_initiate_diags ( struct s_PXENV_UNDI_INITIATE_DIAGS + *undi_initiate_diags ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_INITIATE_DIAGS failed: unsupported\n" ); + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_INITIATE_DIAGS called with no " + "network device\n" ); + undi_initiate_diags->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + undi_initiate_diags->Status = PXENV_STATUS_UNSUPPORTED; + return PXENV_EXIT_FAILURE; +} + +/* PXENV_UNDI_FORCE_INTERRUPT + * + * Status: won't implement (would require driver API changes for no + * perceptible benefit) + */ +static PXENV_EXIT_t +pxenv_undi_force_interrupt ( struct s_PXENV_UNDI_FORCE_INTERRUPT + *undi_force_interrupt ) { + DBGC ( &pxe_netdev, + "PXENV_UNDI_FORCE_INTERRUPT failed: unsupported\n" ); + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_FORCE_INTERRUPT called with no " + "network device\n" ); + undi_force_interrupt->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + undi_force_interrupt->Status = PXENV_STATUS_UNSUPPORTED; + return PXENV_EXIT_FAILURE; +} + +/* PXENV_UNDI_GET_MCAST_ADDRESS + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_get_mcast_address ( struct s_PXENV_UNDI_GET_MCAST_ADDRESS + *undi_get_mcast_address ) { + struct ll_protocol *ll_protocol; + struct in_addr ip = { .s_addr = undi_get_mcast_address->InetAddr }; + int rc; + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_MCAST_ADDRESS called with " + "no network device\n" ); + undi_get_mcast_address->Status = + PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_MCAST_ADDRESS %s", + inet_ntoa ( ip ) ); + + /* Hash address using the network device's link-layer protocol */ + ll_protocol = pxe_netdev->ll_protocol; + if ( ( rc = ll_protocol->mc_hash ( AF_INET, &ip, + undi_get_mcast_address->MediaAddr ))!=0){ + DBGC ( &pxe_netdev, " failed: %s\n", strerror ( rc ) ); + undi_get_mcast_address->Status = PXENV_STATUS ( rc ); + return PXENV_EXIT_FAILURE; + } + DBGC ( &pxe_netdev, "=>%s\n", + ll_protocol->ntoa ( undi_get_mcast_address->MediaAddr ) ); + + undi_get_mcast_address->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_GET_NIC_TYPE + * + * Status: working + */ +static PXENV_EXIT_t pxenv_undi_get_nic_type ( struct s_PXENV_UNDI_GET_NIC_TYPE + *undi_get_nic_type ) { + struct device *dev; + + /* Sanity check */ + if ( ! 
pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_NIC_TYPE called with " + "no network device\n" ); + undi_get_nic_type->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_NIC_TYPE" ); + + /* Fill in information */ + memset ( &undi_get_nic_type->info, 0, + sizeof ( undi_get_nic_type->info ) ); + dev = pxe_netdev->dev; + switch ( dev->desc.bus_type ) { + case BUS_TYPE_PCI: { + struct pci_nic_info *info = &undi_get_nic_type->info.pci; + + undi_get_nic_type->NicType = PCI_NIC; + info->Vendor_ID = dev->desc.vendor; + info->Dev_ID = dev->desc.device; + info->Base_Class = PCI_BASE_CLASS ( dev->desc.class ); + info->Sub_Class = PCI_SUB_CLASS ( dev->desc.class ); + info->Prog_Intf = PCI_PROG_INTF ( dev->desc.class ); + info->BusDevFunc = dev->desc.location; + /* Earlier versions of the PXE specification do not + * have the SubVendor_ID and SubDevice_ID fields. It + * is possible that some NBPs will not provide space + * for them, and so we must not fill them in. + */ + DBGC ( &pxe_netdev, " PCI %02x:%02x.%x %04x:%04x " + "('%04x:%04x') %02x%02x%02x rev %02x\n", + PCI_BUS ( info->BusDevFunc ), + PCI_SLOT ( info->BusDevFunc ), + PCI_FUNC ( info->BusDevFunc ), info->Vendor_ID, + info->Dev_ID, info->SubVendor_ID, info->SubDevice_ID, + info->Base_Class, info->Sub_Class, info->Prog_Intf, + info->Rev ); + break; } + case BUS_TYPE_ISAPNP: { + struct pnp_nic_info *info = &undi_get_nic_type->info.pnp; + + undi_get_nic_type->NicType = PnP_NIC; + info->EISA_Dev_ID = ( ( dev->desc.vendor << 16 ) | + dev->desc.device ); + info->CardSelNum = dev->desc.location; + /* Cheat: remaining fields are probably unnecessary, + * and would require adding extra code to isapnp.c. 
+ */ + DBGC ( &pxe_netdev, " ISAPnP CSN %04x %08x %02x%02x%02x\n", + info->CardSelNum, info->EISA_Dev_ID, + info->Base_Class, info->Sub_Class, info->Prog_Intf ); + break; } + default: + DBGC ( &pxe_netdev, " failed: unknown bus type\n" ); + undi_get_nic_type->Status = PXENV_STATUS_FAILURE; + return PXENV_EXIT_FAILURE; + } + + undi_get_nic_type->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_GET_IFACE_INFO + * + * Status: working + */ +static PXENV_EXIT_t +pxenv_undi_get_iface_info ( struct s_PXENV_UNDI_GET_IFACE_INFO + *undi_get_iface_info ) { + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_IFACE_INFO called with " + "no network device\n" ); + undi_get_iface_info->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC ( &pxe_netdev, "PXENV_UNDI_GET_IFACE_INFO" ); + + /* Just hand back some info, doesn't really matter what it is. + * Most PXE stacks seem to take this approach. + */ + snprintf ( ( char * ) undi_get_iface_info->IfaceType, + sizeof ( undi_get_iface_info->IfaceType ), "DIX+802.3" ); + undi_get_iface_info->LinkSpeed = 10000000; /* 10 Mbps */ + undi_get_iface_info->ServiceFlags = + ( SUPPORTED_BROADCAST | SUPPORTED_MULTICAST | + SUPPORTED_SET_STATION_ADDRESS | SUPPORTED_RESET | + SUPPORTED_OPEN_CLOSE ); + if ( netdev_irq_supported ( pxe_netdev ) ) + undi_get_iface_info->ServiceFlags |= SUPPORTED_IRQ; + memset ( undi_get_iface_info->Reserved, 0, + sizeof(undi_get_iface_info->Reserved) ); + + DBGC ( &pxe_netdev, " %s %dbps flags %08x\n", + undi_get_iface_info->IfaceType, undi_get_iface_info->LinkSpeed, + undi_get_iface_info->ServiceFlags ); + undi_get_iface_info->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/* PXENV_UNDI_GET_STATE + * + * Status: impossible due to opcode collision + */ + +/* PXENV_UNDI_ISR + * + * Status: working + */ +static PXENV_EXIT_t pxenv_undi_isr ( struct s_PXENV_UNDI_ISR *undi_isr ) { + struct io_buffer *iobuf; + 
size_t len; + struct ll_protocol *ll_protocol; + const void *ll_dest; + const void *ll_source; + uint16_t net_proto; + unsigned int flags; + size_t ll_hlen; + struct net_protocol *net_protocol; + unsigned int prottype; + int rc; + + /* Use a different debug colour, since UNDI ISR messages are + * likely to be interspersed amongst other UNDI messages. + */ + + /* Sanity check */ + if ( ! pxe_netdev ) { + DBGC ( &pxenv_undi_isr, "PXENV_UNDI_ISR called with " + "no network device\n" ); + undi_isr->Status = PXENV_STATUS_UNDI_INVALID_STATE; + return PXENV_EXIT_FAILURE; + } + + DBGC2 ( &pxenv_undi_isr, "PXENV_UNDI_ISR" ); + + /* Just in case some idiot actually looks at these fields when + * we weren't meant to fill them in... + */ + undi_isr->BufferLength = 0; + undi_isr->FrameLength = 0; + undi_isr->FrameHeaderLength = 0; + undi_isr->ProtType = 0; + undi_isr->PktType = 0; + + switch ( undi_isr->FuncFlag ) { + case PXENV_UNDI_ISR_IN_START : + DBGC2 ( &pxenv_undi_isr, " START" ); + + /* Call poll(). This should acknowledge the device + * interrupt and queue up any received packet. + */ + net_poll(); + + /* A 100% accurate determination of "OURS" vs "NOT + * OURS" is difficult to achieve without invasive and + * unpleasant changes to the driver model. We settle + * for always returning "OURS" if interrupts are + * currently enabled. + * + * Returning "NOT OURS" when interrupts are disabled + * allows us to avoid a potential interrupt storm when + * we are on a shared interrupt line; if we were to + * always return "OURS" then the other device's ISR + * may never be called. 
+ */ + if ( netdev_irq_enabled ( pxe_netdev ) ) { + DBGC2 ( &pxenv_undi_isr, " OURS" ); + undi_isr->FuncFlag = PXENV_UNDI_ISR_OUT_OURS; + } else { + DBGC2 ( &pxenv_undi_isr, " NOT OURS" ); + undi_isr->FuncFlag = PXENV_UNDI_ISR_OUT_NOT_OURS; + } + + /* Disable interrupts */ + netdev_irq ( pxe_netdev, 0 ); + + break; + case PXENV_UNDI_ISR_IN_PROCESS : + case PXENV_UNDI_ISR_IN_GET_NEXT : + DBGC2 ( &pxenv_undi_isr, " %s", + ( ( undi_isr->FuncFlag == PXENV_UNDI_ISR_IN_PROCESS ) ? + "PROCESS" : "GET_NEXT" ) ); + + /* Some dumb NBPs (e.g. emBoot's winBoot/i) never call + * PXENV_UNDI_ISR with FuncFlag=PXENV_UNDI_ISR_START; + * they just sit in a tight polling loop merrily + * violating the PXE spec with repeated calls to + * PXENV_UNDI_ISR_IN_PROCESS. Force extra polls to + * cope with these out-of-spec clients. + */ + net_poll(); + + /* If we have not yet marked a TX as complete, and the + * netdev TX queue is empty, report the TX completion. + */ + if ( undi_tx_count && list_empty ( &pxe_netdev->tx_queue ) ) { + DBGC2 ( &pxenv_undi_isr, " TXC" ); + undi_tx_count--; + undi_isr->FuncFlag = PXENV_UNDI_ISR_OUT_TRANSMIT; + break; + } + + /* Remove first packet from netdev RX queue */ + iobuf = netdev_rx_dequeue ( pxe_netdev ); + if ( ! 
iobuf ) { + DBGC2 ( &pxenv_undi_isr, " DONE" ); + /* No more packets remaining */ + undi_isr->FuncFlag = PXENV_UNDI_ISR_OUT_DONE; + /* Re-enable interrupts */ + netdev_irq ( pxe_netdev, 1 ); + break; + } + + /* Copy packet to base memory buffer */ + len = iob_len ( iobuf ); + DBGC2 ( &pxenv_undi_isr, " RX" ); + if ( len > sizeof ( basemem_packet ) ) { + /* Should never happen */ + DBGC2 ( &pxenv_undi_isr, " overlength (%zx)", len ); + len = sizeof ( basemem_packet ); + } + memcpy ( basemem_packet, iobuf->data, len ); + + /* Strip link-layer header */ + ll_protocol = pxe_netdev->ll_protocol; + if ( ( rc = ll_protocol->pull ( pxe_netdev, iobuf, &ll_dest, + &ll_source, &net_proto, + &flags ) ) != 0 ) { + /* Assume unknown net_proto and no ll_source */ + net_proto = 0; + ll_source = NULL; + } + ll_hlen = ( len - iob_len ( iobuf ) ); + + /* Determine network-layer protocol */ + switch ( net_proto ) { + case htons ( ETH_P_IP ): + net_protocol = &ipv4_protocol; + prottype = P_IP; + break; + case htons ( ETH_P_ARP ): + net_protocol = &arp_protocol; + prottype = P_ARP; + break; + case htons ( ETH_P_RARP ): + net_protocol = &rarp_protocol; + prottype = P_RARP; + break; + default: + net_protocol = NULL; + prottype = P_UNKNOWN; + break; + } + + /* Fill in UNDI_ISR structure */ + undi_isr->FuncFlag = PXENV_UNDI_ISR_OUT_RECEIVE; + undi_isr->BufferLength = len; + undi_isr->FrameLength = len; + undi_isr->FrameHeaderLength = ll_hlen; + undi_isr->Frame.segment = rm_ds; + undi_isr->Frame.offset = __from_data16 ( basemem_packet ); + undi_isr->ProtType = prottype; + if ( flags & LL_BROADCAST ) { + undi_isr->PktType = P_BROADCAST; + } else if ( flags & LL_MULTICAST ) { + undi_isr->PktType = P_MULTICAST; + } else { + undi_isr->PktType = P_DIRECTED; + } + DBGC2 ( &pxenv_undi_isr, " %04x:%04x+%x(%x) %s hlen %d", + undi_isr->Frame.segment, undi_isr->Frame.offset, + undi_isr->BufferLength, undi_isr->FrameLength, + ( net_protocol ? 
net_protocol->name : "RAW" ), + undi_isr->FrameHeaderLength ); + + /* Free packet */ + free_iob ( iobuf ); + break; + default : + DBGC2 ( &pxenv_undi_isr, " INVALID(%04x)\n", + undi_isr->FuncFlag ); + + /* Should never happen */ + undi_isr->FuncFlag = PXENV_UNDI_ISR_OUT_DONE; + undi_isr->Status = PXENV_STATUS_UNDI_INVALID_PARAMETER; + return PXENV_EXIT_FAILURE; + } + + DBGC2 ( &pxenv_undi_isr, "\n" ); + undi_isr->Status = PXENV_STATUS_SUCCESS; + return PXENV_EXIT_SUCCESS; +} + +/** PXE UNDI API */ +struct pxe_api_call pxe_undi_api[] __pxe_api_call = { + PXE_API_CALL ( PXENV_UNDI_STARTUP, pxenv_undi_startup, + struct s_PXENV_UNDI_STARTUP ), + PXE_API_CALL ( PXENV_UNDI_CLEANUP, pxenv_undi_cleanup, + struct s_PXENV_UNDI_CLEANUP ), + PXE_API_CALL ( PXENV_UNDI_INITIALIZE, pxenv_undi_initialize, + struct s_PXENV_UNDI_INITIALIZE ), + PXE_API_CALL ( PXENV_UNDI_RESET_ADAPTER, pxenv_undi_reset_adapter, + struct s_PXENV_UNDI_RESET ), + PXE_API_CALL ( PXENV_UNDI_SHUTDOWN, pxenv_undi_shutdown, + struct s_PXENV_UNDI_SHUTDOWN ), + PXE_API_CALL ( PXENV_UNDI_OPEN, pxenv_undi_open, + struct s_PXENV_UNDI_OPEN ), + PXE_API_CALL ( PXENV_UNDI_CLOSE, pxenv_undi_close, + struct s_PXENV_UNDI_CLOSE ), + PXE_API_CALL ( PXENV_UNDI_TRANSMIT, pxenv_undi_transmit, + struct s_PXENV_UNDI_TRANSMIT ), + PXE_API_CALL ( PXENV_UNDI_SET_MCAST_ADDRESS, + pxenv_undi_set_mcast_address, + struct s_PXENV_UNDI_SET_MCAST_ADDRESS ), + PXE_API_CALL ( PXENV_UNDI_SET_STATION_ADDRESS, + pxenv_undi_set_station_address, + struct s_PXENV_UNDI_SET_STATION_ADDRESS ), + PXE_API_CALL ( PXENV_UNDI_SET_PACKET_FILTER, + pxenv_undi_set_packet_filter, + struct s_PXENV_UNDI_SET_PACKET_FILTER ), + PXE_API_CALL ( PXENV_UNDI_GET_INFORMATION, pxenv_undi_get_information, + struct s_PXENV_UNDI_GET_INFORMATION ), + PXE_API_CALL ( PXENV_UNDI_GET_STATISTICS, pxenv_undi_get_statistics, + struct s_PXENV_UNDI_GET_STATISTICS ), + PXE_API_CALL ( PXENV_UNDI_CLEAR_STATISTICS, pxenv_undi_clear_statistics, + struct s_PXENV_UNDI_CLEAR_STATISTICS 
), + PXE_API_CALL ( PXENV_UNDI_INITIATE_DIAGS, pxenv_undi_initiate_diags, + struct s_PXENV_UNDI_INITIATE_DIAGS ), + PXE_API_CALL ( PXENV_UNDI_FORCE_INTERRUPT, pxenv_undi_force_interrupt, + struct s_PXENV_UNDI_FORCE_INTERRUPT ), + PXE_API_CALL ( PXENV_UNDI_GET_MCAST_ADDRESS, + pxenv_undi_get_mcast_address, + struct s_PXENV_UNDI_GET_MCAST_ADDRESS ), + PXE_API_CALL ( PXENV_UNDI_GET_NIC_TYPE, pxenv_undi_get_nic_type, + struct s_PXENV_UNDI_GET_NIC_TYPE ), + PXE_API_CALL ( PXENV_UNDI_GET_IFACE_INFO, pxenv_undi_get_iface_info, + struct s_PXENV_UNDI_GET_IFACE_INFO ), + PXE_API_CALL ( PXENV_UNDI_ISR, pxenv_undi_isr, + struct s_PXENV_UNDI_ISR ), +}; diff --git a/src/arch/x86/interface/syslinux/com32_call.c b/src/arch/x86/interface/syslinux/com32_call.c new file mode 100644 index 00000000..19fdbaff --- /dev/null +++ b/src/arch/x86/interface/syslinux/com32_call.c @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2008 Daniel Verkamp . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * @file SYSLINUX COM32 helpers + * + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include + +static com32sys_t __bss16 ( com32_regs ); +#define com32_regs __use_data16 ( com32_regs ) + +static uint8_t __bss16 ( com32_int_vector ); +#define com32_int_vector __use_data16 ( com32_int_vector ) + +static uint32_t __bss16 ( com32_farcall_proc ); +#define com32_farcall_proc __use_data16 ( com32_farcall_proc ) + +uint16_t __bss16 ( com32_saved_sp ); + +/** + * Interrupt call helper + */ +void __asmcall com32_intcall ( uint8_t interrupt, physaddr_t inregs_phys, physaddr_t outregs_phys ) { + + DBGC ( &com32_regs, "COM32 INT%x in %#08lx out %#08lx\n", + interrupt, inregs_phys, outregs_phys ); + + memcpy_user ( virt_to_user( &com32_regs ), 0, + phys_to_user ( inregs_phys ), 0, + sizeof(com32sys_t) ); + + com32_int_vector = interrupt; + + __asm__ __volatile__ ( + REAL_CODE ( /* Save all registers */ + "pushal\n\t" + "pushw %%ds\n\t" + "pushw %%es\n\t" + "pushw %%fs\n\t" + "pushw %%gs\n\t" + /* Mask off unsafe flags */ + "movl (com32_regs + 40), %%eax\n\t" + "andl $0x200cd7, %%eax\n\t" + "movl %%eax, (com32_regs + 40)\n\t" + /* Load com32_regs into the actual registers */ + "movw %%sp, %%ss:(com32_saved_sp)\n\t" + "movw $com32_regs, %%sp\n\t" + "popw %%gs\n\t" + "popw %%fs\n\t" + "popw %%es\n\t" + "popw %%ds\n\t" + "popal\n\t" + "popfl\n\t" + "movw %%ss:(com32_saved_sp), %%sp\n\t" + /* patch INT instruction */ + "pushw %%ax\n\t" + "movb %%ss:(com32_int_vector), %%al\n\t" + "movb %%al, %%cs:(com32_intcall_instr + 1)\n\t" + /* perform a jump to avoid problems with cache + * consistency in self-modifying code on some CPUs (486) + */ + "jmp 1f\n" + "1:\n\t" + "popw %%ax\n\t" + "com32_intcall_instr:\n\t" + /* INT instruction to be patched */ + "int $0xFF\n\t" + /* Copy regs back to com32_regs */ + "movw %%sp, %%ss:(com32_saved_sp)\n\t" + "movw $(com32_regs + 44), %%sp\n\t" + "pushfl\n\t" + "pushal\n\t" + "pushw %%ds\n\t" + "pushw 
%%es\n\t" + "pushw %%fs\n\t" + "pushw %%gs\n\t" + "movw %%ss:(com32_saved_sp), %%sp\n\t" + /* Restore registers */ + "popw %%gs\n\t" + "popw %%fs\n\t" + "popw %%es\n\t" + "popw %%ds\n\t" + "popal\n\t") + : : ); + + if ( outregs_phys ) { + memcpy_user ( phys_to_user ( outregs_phys ), 0, + virt_to_user( &com32_regs ), 0, + sizeof(com32sys_t) ); + } +} + +/** + * Farcall helper + */ +void __asmcall com32_farcall ( uint32_t proc, physaddr_t inregs_phys, physaddr_t outregs_phys ) { + + DBGC ( &com32_regs, "COM32 farcall %04x:%04x in %#08lx out %#08lx\n", + ( proc >> 16 ), ( proc & 0xffff ), inregs_phys, outregs_phys ); + + memcpy_user ( virt_to_user( &com32_regs ), 0, + phys_to_user ( inregs_phys ), 0, + sizeof(com32sys_t) ); + + com32_farcall_proc = proc; + + __asm__ __volatile__ ( + REAL_CODE ( /* Save all registers */ + "pushal\n\t" + "pushw %%ds\n\t" + "pushw %%es\n\t" + "pushw %%fs\n\t" + "pushw %%gs\n\t" + /* Mask off unsafe flags */ + "movl (com32_regs + 40), %%eax\n\t" + "andl $0x200cd7, %%eax\n\t" + "movl %%eax, (com32_regs + 40)\n\t" + /* Load com32_regs into the actual registers */ + "movw %%sp, %%ss:(com32_saved_sp)\n\t" + "movw $com32_regs, %%sp\n\t" + "popw %%gs\n\t" + "popw %%fs\n\t" + "popw %%es\n\t" + "popw %%ds\n\t" + "popal\n\t" + "popfl\n\t" + "movw %%ss:(com32_saved_sp), %%sp\n\t" + /* Call procedure */ + "lcall *%%ss:(com32_farcall_proc)\n\t" + /* Copy regs back to com32_regs */ + "movw %%sp, %%ss:(com32_saved_sp)\n\t" + "movw $(com32_regs + 44), %%sp\n\t" + "pushfl\n\t" + "pushal\n\t" + "pushw %%ds\n\t" + "pushw %%es\n\t" + "pushw %%fs\n\t" + "pushw %%gs\n\t" + "movw %%ss:(com32_saved_sp), %%sp\n\t" + /* Restore registers */ + "popw %%gs\n\t" + "popw %%fs\n\t" + "popw %%es\n\t" + "popw %%ds\n\t" + "popal\n\t") + : : ); + + if ( outregs_phys ) { + memcpy_user ( phys_to_user ( outregs_phys ), 0, + virt_to_user( &com32_regs ), 0, + sizeof(com32sys_t) ); + } +} + +/** + * CDECL farcall helper + */ +int __asmcall com32_cfarcall ( uint32_t proc, 
physaddr_t stack, size_t stacksz ) { + int32_t eax; + + DBGC ( &com32_regs, "COM32 cfarcall %04x:%04x params %#08lx+%#zx\n", + ( proc >> 16 ), ( proc & 0xffff ), stack, stacksz ); + + copy_user_to_rm_stack ( phys_to_user ( stack ), stacksz ); + com32_farcall_proc = proc; + + __asm__ __volatile__ ( + REAL_CODE ( "lcall *%%ss:(com32_farcall_proc)\n\t" ) + : "=a" (eax) + : + : "ecx", "edx" ); + + remove_user_from_rm_stack ( 0, stacksz ); + + return eax; +} diff --git a/src/arch/x86/interface/syslinux/com32_wrapper.S b/src/arch/x86/interface/syslinux/com32_wrapper.S new file mode 100644 index 00000000..d59a3392 --- /dev/null +++ b/src/arch/x86/interface/syslinux/com32_wrapper.S @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2008 Daniel Verkamp . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ) + +#include "librm.h" + + .text + + .code32 + .globl com32_farcall_wrapper +com32_farcall_wrapper: + movl $VIRTUAL(com32_farcall), %eax + jmp com32_wrapper + + .code32 + .globl com32_cfarcall_wrapper +com32_cfarcall_wrapper: + movl $VIRTUAL(com32_cfarcall), %eax + jmp com32_wrapper + + .code32 + .globl com32_intcall_wrapper +com32_intcall_wrapper: + movl $VIRTUAL(com32_intcall), %eax + /* fall through */ + + .code32 +com32_wrapper: + + /* Disable interrupts */ + cli + + /* Switch to internal virtual address space */ + call _phys_to_virt + +#ifdef __x86_64__ + + .code64 + + /* Preserve registers which are callee-save for COM32 (i386 API) */ + pushq %rdi + pushq %rsi + pushq %rbp + + /* Extract parameters from stack */ + movl 28(%rsp), %edi + movl 32(%rsp), %esi + movl 36(%rsp), %edx + + /* Align stack pointer */ + movq %rsp, %rbp + andq $~0x07, %rsp + + /* Call helper function */ + movslq %eax, %rax + call *%rax + + /* Restore stack pointer */ + movq %rbp, %rsp + + /* Restore registers */ + popq %rbp + popq %rsi + popq %rdi + +#else /* _x86_64 */ + + /* Call helper function */ + pushl 12(%esp) + pushl 12(%esp) + pushl 12(%esp) + call *%eax + addl $12, %esp + +#endif /* _x86_64 */ + + /* Switch to external flat physical address space */ + call _virt_to_phys + .code32 + + /* Reenable interrupts and return */ + sti + ret diff --git a/src/arch/x86/interface/syslinux/comboot_call.c b/src/arch/x86/interface/syslinux/comboot_call.c new file mode 100644 index 00000000..e70f200e --- /dev/null +++ b/src/arch/x86/interface/syslinux/comboot_call.c @@ -0,0 +1,705 @@ +/* + * Copyright (C) 2008 Daniel Verkamp . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * @file SYSLINUX COMBOOT API + * + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** The "SYSLINUX" version string */ +static char __bss16_array ( syslinux_version, [32] ); +#define syslinux_version __use_data16 ( syslinux_version ) + +/** The "SYSLINUX" copyright string */ +static char __data16_array ( syslinux_copyright, [] ) = " http://ipxe.org"; +#define syslinux_copyright __use_data16 ( syslinux_copyright ) + +static char __data16_array ( syslinux_configuration_file, [] ) = ""; +#define syslinux_configuration_file __use_data16 ( syslinux_configuration_file ) + +/** Feature flags */ +static uint8_t __data16 ( comboot_feature_flags ) = COMBOOT_FEATURE_IDLE_LOOP; +#define comboot_feature_flags __use_data16 ( comboot_feature_flags ) + +typedef union { + syslinux_pm_regs pm; syslinux_rm_regs rm; +} syslinux_regs; + +/** Initial register values for INT 22h AX=1Ah and 1Bh */ +static syslinux_regs __text16 ( comboot_initial_regs ); +#define comboot_initial_regs __use_text16 ( comboot_initial_regs ) + +static struct segoff __text16 ( int20_vector ); +#define int20_vector __use_text16 ( int20_vector ) + +static struct segoff __text16 ( int21_vector ); +#define int21_vector __use_text16 ( int21_vector ) + +static struct segoff __text16 ( int22_vector ); +#define int22_vector __use_text16 ( int22_vector ) + +extern void 
int20_wrapper ( void ); +extern void int21_wrapper ( void ); +extern void int22_wrapper ( void ); + +/* setjmp/longjmp context buffer used to return after loading an image */ +rmjmp_buf comboot_return; + +/* Mode flags set by INT 22h AX=0017h */ +static uint16_t comboot_graphics_mode = 0; + +/** + * Print a string with a particular terminator + */ +static void print_user_string ( unsigned int segment, unsigned int offset, char terminator ) { + int i = 0; + char c; + userptr_t str = real_to_user ( segment, offset ); + for ( ; ; ) { + copy_from_user ( &c, str, i, 1 ); + if ( c == terminator ) break; + putchar ( c ); + i++; + } +} + + +/** + * Perform a series of memory copies from a list in low memory + */ +static void shuffle ( unsigned int list_segment, unsigned int list_offset, unsigned int count ) +{ + comboot_shuffle_descriptor shuf[COMBOOT_MAX_SHUFFLE_DESCRIPTORS]; + unsigned int i; + + /* Copy shuffle descriptor list so it doesn't get overwritten */ + copy_from_user ( shuf, real_to_user ( list_segment, list_offset ), 0, + count * sizeof( comboot_shuffle_descriptor ) ); + + /* Do the copies */ + for ( i = 0; i < count; i++ ) { + userptr_t src_u = phys_to_user ( shuf[ i ].src ); + userptr_t dest_u = phys_to_user ( shuf[ i ].dest ); + + if ( shuf[ i ].src == 0xFFFFFFFF ) { + /* Fill with 0 instead of copying */ + memset_user ( dest_u, 0, 0, shuf[ i ].len ); + } else if ( shuf[ i ].dest == 0xFFFFFFFF ) { + /* Copy new list of descriptors */ + count = shuf[ i ].len / sizeof( comboot_shuffle_descriptor ); + assert ( count <= COMBOOT_MAX_SHUFFLE_DESCRIPTORS ); + copy_from_user ( shuf, src_u, 0, shuf[ i ].len ); + i = -1; + } else { + /* Regular copy */ + memmove_user ( dest_u, 0, src_u, 0, shuf[ i ].len ); + } + } +} + + +/** + * Set default text mode + */ +void comboot_force_text_mode ( void ) { + if ( comboot_graphics_mode & COMBOOT_VIDEO_VESA ) { + /* Set VGA mode 3 via VESA VBE mode set */ + __asm__ __volatile__ ( + REAL_CODE ( + "mov $0x4F02, %%ax\n\t" + "mov 
$0x03, %%bx\n\t" + "int $0x10\n\t" + ) + : : ); + } else if ( comboot_graphics_mode & COMBOOT_VIDEO_GRAPHICS ) { + /* Set VGA mode 3 via standard VGA mode set */ + __asm__ __volatile__ ( + REAL_CODE ( + "mov $0x03, %%ax\n\t" + "int $0x10\n\t" + ) + : : ); + } + + comboot_graphics_mode = 0; +} + + +/** + * Fetch kernel and optional initrd + */ +static int comboot_fetch_kernel ( char *kernel_file, char *cmdline ) { + struct image *kernel; + struct image *initrd; + char *initrd_file; + int rc; + + /* Find initrd= parameter, if any */ + if ( ( initrd_file = strstr ( cmdline, "initrd=" ) ) != NULL ) { + char *initrd_end; + + /* skip "initrd=" */ + initrd_file += 7; + + /* Find terminating space, if any, and replace with NUL */ + initrd_end = strchr ( initrd_file, ' ' ); + if ( initrd_end ) + *initrd_end = '\0'; + + DBG ( "COMBOOT: fetching initrd '%s'\n", initrd_file ); + + /* Fetch initrd */ + if ( ( rc = imgdownload_string ( initrd_file, 0, + &initrd ) ) != 0 ) { + DBG ( "COMBOOT: could not fetch initrd: %s\n", + strerror ( rc ) ); + return rc; + } + + /* Restore space after initrd name, if applicable */ + if ( initrd_end ) + *initrd_end = ' '; + } + + DBG ( "COMBOOT: fetching kernel '%s'\n", kernel_file ); + + /* Fetch kernel */ + if ( ( rc = imgdownload_string ( kernel_file, 0, &kernel ) ) != 0 ) { + DBG ( "COMBOOT: could not fetch kernel: %s\n", + strerror ( rc ) ); + return rc; + } + + /* Replace comboot image with kernel */ + if ( ( rc = image_replace ( kernel ) ) != 0 ) { + DBG ( "COMBOOT: could not replace with kernel: %s\n", + strerror ( rc ) ); + return rc; + } + + return 0; +} + + +/** + * Terminate program interrupt handler + */ +static __asmcall void int20 ( struct i386_all_regs *ix86 __unused ) { + rmlongjmp ( comboot_return, COMBOOT_EXIT ); +} + + +/** + * DOS-compatible API + */ +static __asmcall void int21 ( struct i386_all_regs *ix86 ) { + ix86->flags |= CF; + + switch ( ix86->regs.ah ) { + case 0x00: + case 0x4C: /* Terminate program */ + rmlongjmp ( 
comboot_return, COMBOOT_EXIT ); + break; + + case 0x01: /* Get Key with Echo */ + case 0x08: /* Get Key without Echo */ + /* TODO: handle extended characters? */ + ix86->regs.al = getchar( ); + + /* Enter */ + if ( ix86->regs.al == 0x0A ) + ix86->regs.al = 0x0D; + + if ( ix86->regs.ah == 0x01 ) + putchar ( ix86->regs.al ); + + ix86->flags &= ~CF; + break; + + case 0x02: /* Write Character */ + putchar ( ix86->regs.dl ); + ix86->flags &= ~CF; + break; + + case 0x04: /* Write Character to Serial Port */ + if ( serial_console.base ) { + uart_transmit ( &serial_console, ix86->regs.dl ); + ix86->flags &= ~CF; + } + break; + + case 0x09: /* Write DOS String to Console */ + print_user_string ( ix86->segs.ds, ix86->regs.dx, '$' ); + ix86->flags &= ~CF; + break; + + case 0x0B: /* Check Keyboard */ + if ( iskey() ) + ix86->regs.al = 0xFF; + else + ix86->regs.al = 0x00; + + ix86->flags &= ~CF; + break; + + case 0x30: /* Check DOS Version */ + /* Bottom halves all 0; top halves spell "SYSLINUX" */ + ix86->regs.eax = 0x59530000; + ix86->regs.ebx = 0x4C530000; + ix86->regs.ecx = 0x4E490000; + ix86->regs.edx = 0x58550000; + ix86->flags &= ~CF; + break; + + default: + DBG ( "COMBOOT unknown int21 function %02x\n", ix86->regs.ah ); + break; + } +} + + +/** + * Dispatch PXE API call weakly + * + * @v ix86 Registers for PXE call + * @ret present Zero if the PXE stack is present, nonzero if not + * + * A successful return only indicates that the PXE stack was available + * for dispatching the call; it says nothing about the success of + * whatever the call asked for. 
+ */ +__weak int pxe_api_call_weak ( struct i386_all_regs *ix86 __unused ) { + return -1; +} + +/** + * SYSLINUX API + */ +static __asmcall void int22 ( struct i386_all_regs *ix86 ) { + ix86->flags |= CF; + + switch ( ix86->regs.ax ) { + case 0x0001: /* Get Version */ + + /* Number of INT 22h API functions available */ + ix86->regs.ax = 0x001D; + + /* SYSLINUX version number */ + ix86->regs.ch = 0; /* major */ + ix86->regs.cl = 0; /* minor */ + + /* SYSLINUX derivative ID */ + ix86->regs.dl = BZI_LOADER_TYPE_IPXE; + + /* SYSLINUX version */ + snprintf ( syslinux_version, sizeof ( syslinux_version ), + "\r\niPXE %s", product_version ); + + /* SYSLINUX version and copyright strings */ + ix86->segs.es = rm_ds; + ix86->regs.si = ( ( unsigned ) __from_data16 ( syslinux_version ) ); + ix86->regs.di = ( ( unsigned ) __from_data16 ( syslinux_copyright ) ); + + ix86->flags &= ~CF; + break; + + case 0x0002: /* Write String */ + print_user_string ( ix86->segs.es, ix86->regs.bx, '\0' ); + ix86->flags &= ~CF; + break; + + case 0x0003: /* Run command */ + { + userptr_t cmd_u = real_to_user ( ix86->segs.es, ix86->regs.bx ); + int len = strlen_user ( cmd_u, 0 ); + char cmd[len + 1]; + copy_from_user ( cmd, cmd_u, 0, len + 1 ); + DBG ( "COMBOOT: executing command '%s'\n", cmd ); + system ( cmd ); + DBG ( "COMBOOT: exiting after executing command...\n" ); + rmlongjmp ( comboot_return, COMBOOT_EXIT_COMMAND ); + } + break; + + case 0x0004: /* Run default command */ + /* FIXME: just exit for now */ + rmlongjmp ( comboot_return, COMBOOT_EXIT_COMMAND ); + break; + + case 0x0005: /* Force text mode */ + comboot_force_text_mode ( ); + ix86->flags &= ~CF; + break; + + case 0x0006: /* Open file */ + { + int fd; + userptr_t file_u = real_to_user ( ix86->segs.es, ix86->regs.si ); + int len = strlen_user ( file_u, 0 ); + char file[len + 1]; + + copy_from_user ( file, file_u, 0, len + 1 ); + + if ( file[0] == '\0' ) { + DBG ( "COMBOOT: attempted open with empty file name\n" ); + break; + } + + 
DBG ( "COMBOOT: opening file '%s'\n", file ); + + fd = open ( file ); + + if ( fd < 0 ) { + DBG ( "COMBOOT: error opening file %s\n", file ); + break; + } + + /* This relies on the fact that a iPXE POSIX fd will + * always fit in 16 bits. + */ +#if (POSIX_FD_MAX > 65535) +#error POSIX_FD_MAX too large +#endif + ix86->regs.si = (uint16_t) fd; + + ix86->regs.cx = COMBOOT_FILE_BLOCKSZ; + ix86->regs.eax = fsize ( fd ); + ix86->flags &= ~CF; + } + break; + + case 0x0007: /* Read file */ + { + int fd = ix86->regs.si; + int len = ix86->regs.cx * COMBOOT_FILE_BLOCKSZ; + int rc; + fd_set fds; + userptr_t buf = real_to_user ( ix86->segs.es, ix86->regs.bx ); + + /* Wait for data ready to read */ + FD_ZERO ( &fds ); + FD_SET ( fd, &fds ); + + select ( &fds, 1 ); + + rc = read_user ( fd, buf, 0, len ); + if ( rc < 0 ) { + DBG ( "COMBOOT: read failed\n" ); + ix86->regs.si = 0; + break; + } + + ix86->regs.ecx = rc; + ix86->flags &= ~CF; + } + break; + + case 0x0008: /* Close file */ + { + int fd = ix86->regs.si; + close ( fd ); + ix86->flags &= ~CF; + } + break; + + case 0x0009: /* Call PXE Stack */ + if ( pxe_api_call_weak ( ix86 ) != 0 ) + ix86->flags |= CF; + else + ix86->flags &= ~CF; + break; + + case 0x000A: /* Get Derivative-Specific Information */ + + /* iPXE has its own derivative ID, so there is no defined + * output here; just return AL for now */ + ix86->regs.al = BZI_LOADER_TYPE_IPXE; + ix86->flags &= ~CF; + break; + + case 0x000B: /* Get Serial Console Configuration */ + if ( serial_console.base ) { + ix86->regs.dx = ( ( intptr_t ) serial_console.base ); + ix86->regs.cx = serial_console.divisor; + ix86->regs.bx = 0; + ix86->flags &= ~CF; + } + break; + + case 0x000C: /* Perform final cleanup */ + shutdown_boot(); + break; + + case 0x000E: /* Get configuration file name */ + /* FIXME: stub */ + ix86->segs.es = rm_ds; + ix86->regs.bx = ( ( unsigned ) __from_data16 ( syslinux_configuration_file ) ); + ix86->flags &= ~CF; + break; + + case 0x000F: /* Get IPAPPEND 
strings */ + /* FIXME: stub */ + ix86->regs.cx = 0; + ix86->segs.es = 0; + ix86->regs.bx = 0; + ix86->flags &= ~CF; + break; + + case 0x0010: /* Resolve hostname */ + { + userptr_t hostname_u = real_to_user ( ix86->segs.es, ix86->regs.bx ); + int len = strlen_user ( hostname_u, 0 ); + char hostname[len]; + struct in_addr addr; + + copy_from_user ( hostname, hostname_u, 0, len + 1 ); + + /* TODO: + * "If the hostname does not contain a dot (.), the + * local domain name is automatically appended." + */ + + comboot_resolv ( hostname, &addr ); + + ix86->regs.eax = addr.s_addr; + ix86->flags &= ~CF; + } + break; + + case 0x0011: /* Maximum number of shuffle descriptors */ + ix86->regs.cx = COMBOOT_MAX_SHUFFLE_DESCRIPTORS; + ix86->flags &= ~CF; + break; + + case 0x0012: /* Cleanup, shuffle and boot */ + if ( ix86->regs.cx > COMBOOT_MAX_SHUFFLE_DESCRIPTORS ) + break; + + /* Perform final cleanup */ + shutdown_boot(); + + /* Perform sequence of copies */ + shuffle ( ix86->segs.es, ix86->regs.di, ix86->regs.cx ); + + /* Jump to real-mode entry point */ + __asm__ __volatile__ ( + REAL_CODE ( + "pushw %0\n\t" + "popw %%ds\n\t" + "pushl %1\n\t" + "lret\n\t" + ) + : + : "r" ( ix86->segs.ds ), + "r" ( ix86->regs.ebp ), + "d" ( ix86->regs.ebx ), + "S" ( ix86->regs.esi ) ); + + assert ( 0 ); /* Execution should never reach this point */ + + break; + + case 0x0013: /* Idle loop call */ + step ( ); + ix86->flags &= ~CF; + break; + + case 0x0015: /* Get feature flags */ + ix86->segs.es = rm_ds; + ix86->regs.bx = ( ( unsigned ) __from_data16 ( &comboot_feature_flags ) ); + ix86->regs.cx = 1; /* Number of feature flag bytes */ + ix86->flags &= ~CF; + break; + + case 0x0016: /* Run kernel image */ + { + userptr_t file_u = real_to_user ( ix86->segs.ds, ix86->regs.si ); + userptr_t cmd_u = real_to_user ( ix86->segs.es, ix86->regs.bx ); + int file_len = strlen_user ( file_u, 0 ); + int cmd_len = strlen_user ( cmd_u, 0 ); + char file[file_len + 1]; + char cmd[cmd_len + 1]; + + 
copy_from_user ( file, file_u, 0, file_len + 1 ); + copy_from_user ( cmd, cmd_u, 0, cmd_len + 1 ); + + DBG ( "COMBOOT: run kernel %s %s\n", file, cmd ); + comboot_fetch_kernel ( file, cmd ); + /* Technically, we should return if we + * couldn't load the kernel, but it's not safe + * to do that since we have just overwritten + * part of the COMBOOT program's memory space. + */ + DBG ( "COMBOOT: exiting to run kernel...\n" ); + rmlongjmp ( comboot_return, COMBOOT_EXIT_RUN_KERNEL ); + } + break; + + case 0x0017: /* Report video mode change */ + comboot_graphics_mode = ix86->regs.bx; + ix86->flags &= ~CF; + break; + + case 0x0018: /* Query custom font */ + /* FIXME: stub */ + ix86->regs.al = 0; + ix86->segs.es = 0; + ix86->regs.bx = 0; + ix86->flags &= ~CF; + break; + + case 0x001B: /* Cleanup, shuffle and boot to real mode */ + if ( ix86->regs.cx > COMBOOT_MAX_SHUFFLE_DESCRIPTORS ) + break; + + /* Perform final cleanup */ + shutdown_boot(); + + /* Perform sequence of copies */ + shuffle ( ix86->segs.es, ix86->regs.di, ix86->regs.cx ); + + /* Copy initial register values to .text16 */ + memcpy_user ( real_to_user ( rm_cs, (unsigned) __from_text16 ( &comboot_initial_regs ) ), 0, + real_to_user ( ix86->segs.ds, ix86->regs.si ), 0, + sizeof(syslinux_rm_regs) ); + + /* Load initial register values */ + __asm__ __volatile__ ( + REAL_CODE ( + /* Point SS:SP at the register value structure */ + "pushw %%cs\n\t" + "popw %%ss\n\t" + "movw $comboot_initial_regs, %%sp\n\t" + + /* Segment registers */ + "popw %%es\n\t" + "popw %%ax\n\t" /* Skip CS */ + "popw %%ds\n\t" + "popw %%ax\n\t" /* Skip SS for now */ + "popw %%fs\n\t" + "popw %%gs\n\t" + + /* GP registers */ + "popl %%eax\n\t" + "popl %%ecx\n\t" + "popl %%edx\n\t" + "popl %%ebx\n\t" + "popl %%ebp\n\t" /* Skip ESP for now */ + "popl %%ebp\n\t" + "popl %%esi\n\t" + "popl %%edi\n\t" + + /* Load correct SS:ESP */ + "movw $(comboot_initial_regs + 6), %%sp\n\t" + "popw %%ss\n\t" + "movl %%cs:(comboot_initial_regs + 28), 
%%esp\n\t" + + "ljmp *%%cs:(comboot_initial_regs + 44)\n\t" + ) + : : ); + + break; + + case 0x001C: /* Get pointer to auxilliary data vector */ + /* FIXME: stub */ + ix86->regs.cx = 0; /* Size of the ADV */ + ix86->flags &= ~CF; + break; + + case 0x001D: /* Write auxilliary data vector */ + /* FIXME: stub */ + ix86->flags &= ~CF; + break; + + default: + DBG ( "COMBOOT unknown int22 function %04x\n", ix86->regs.ax ); + break; + } +} + +/** + * Hook BIOS interrupts related to COMBOOT API (INT 20h, 21h, 22h) + */ +void hook_comboot_interrupts ( ) { + + __asm__ __volatile__ ( + TEXT16_CODE ( "\nint20_wrapper:\n\t" + VIRT_CALL ( int20 ) + "clc\n\t" + "call patch_cf\n\t" + "iret\n\t" ) : ); + + hook_bios_interrupt ( 0x20, ( intptr_t ) int20_wrapper, &int20_vector ); + + __asm__ __volatile__ ( + TEXT16_CODE ( "\nint21_wrapper:\n\t" + VIRT_CALL ( int21 ) + "clc\n\t" + "call patch_cf\n\t" + "iret\n\t" ) : ); + + hook_bios_interrupt ( 0x21, ( intptr_t ) int21_wrapper, &int21_vector ); + + __asm__ __volatile__ ( + TEXT16_CODE ( "\nint22_wrapper:\n\t" + VIRT_CALL ( int22 ) + "clc\n\t" + "call patch_cf\n\t" + "iret\n\t" ) : ); + + hook_bios_interrupt ( 0x22, ( intptr_t ) int22_wrapper, &int22_vector ); +} + +/** + * Unhook BIOS interrupts related to COMBOOT API (INT 20h, 21h, 22h) + */ +void unhook_comboot_interrupts ( ) { + + unhook_bios_interrupt ( 0x20, ( intptr_t ) int20_wrapper, + &int20_vector ); + + unhook_bios_interrupt ( 0x21, ( intptr_t ) int21_wrapper, + &int21_vector ); + + unhook_bios_interrupt ( 0x22, ( intptr_t ) int22_wrapper, + &int22_vector ); +} + +/* Avoid dragging in serial console support unconditionally */ +struct uart serial_console __attribute__ (( weak )); diff --git a/src/arch/x86/interface/syslinux/comboot_resolv.c b/src/arch/x86/interface/syslinux/comboot_resolv.c new file mode 100644 index 00000000..03bbfd04 --- /dev/null +++ b/src/arch/x86/interface/syslinux/comboot_resolv.c @@ -0,0 +1,61 @@ +#include +#include +#include +#include +#include 
+#include + +FILE_LICENCE ( GPL2_OR_LATER ); + +struct comboot_resolver { + struct interface intf; + int rc; + struct in_addr addr; +}; + +static void comboot_resolv_close ( struct comboot_resolver *comboot_resolver, + int rc ) { + comboot_resolver->rc = rc; + intf_shutdown ( &comboot_resolver->intf, rc ); +} + +static void comboot_resolv_done ( struct comboot_resolver *comboot_resolver, + struct sockaddr *sa ) { + struct sockaddr_in *sin; + + if ( sa->sa_family == AF_INET ) { + sin = ( ( struct sockaddr_in * ) sa ); + comboot_resolver->addr = sin->sin_addr; + } +} + +static struct interface_operation comboot_resolv_op[] = { + INTF_OP ( intf_close, struct comboot_resolver *, comboot_resolv_close ), + INTF_OP ( resolv_done, struct comboot_resolver *, comboot_resolv_done ), +}; + +static struct interface_descriptor comboot_resolv_desc = + INTF_DESC ( struct comboot_resolver, intf, comboot_resolv_op ); + +static struct comboot_resolver comboot_resolver = { + .intf = INTF_INIT ( comboot_resolv_desc ), +}; + +int comboot_resolv ( const char *name, struct in_addr *address ) { + int rc; + + comboot_resolver.rc = -EINPROGRESS; + comboot_resolver.addr.s_addr = 0; + + if ( ( rc = resolv ( &comboot_resolver.intf, name, NULL ) ) != 0 ) + return rc; + + while ( comboot_resolver.rc == -EINPROGRESS ) + step(); + + if ( ! comboot_resolver.addr.s_addr ) + return -EAFNOSUPPORT; + + *address = comboot_resolver.addr; + return comboot_resolver.rc; +} diff --git a/src/arch/x86/interface/vmware/guestinfo.c b/src/arch/x86/interface/vmware/guestinfo.c new file mode 100644 index 00000000..a0530c8d --- /dev/null +++ b/src/arch/x86/interface/vmware/guestinfo.c @@ -0,0 +1,271 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +/** @file + * + * VMware GuestInfo settings + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** GuestInfo GuestRPC channel */ +static int guestinfo_channel; + +/** + * Fetch value of typed GuestInfo setting + * + * @v settings Settings block + * @v setting Setting to fetch + * @v type Setting type to attempt (or NULL for default) + * @v data Buffer to fill with setting data + * @v len Length of buffer + * @ret found Setting found in GuestInfo + * @ret len Length of setting data, or negative error + */ +static int guestinfo_fetch_type ( struct settings *settings, + struct setting *setting, + const struct setting_type *type, + void *data, size_t len, int *found ) { + const char *parent_name = settings->parent->name; + char command[ 24 /* "info-get guestinfo.ipxe." */ + + strlen ( parent_name ) + 1 /* "." */ + + strlen ( setting->name ) + 1 /* "." */ + + ( type ? strlen ( type->name ) : 0 ) + 1 /* NUL */ ]; + struct setting *predefined; + char *info; + int info_len; + int check_len; + int ret; + + /* Construct info-get command */ + snprintf ( command, sizeof ( command ), + "info-get guestinfo.ipxe.%s%s%s%s%s", + parent_name, ( parent_name[0] ? "." : "" ), setting->name, + ( type ? "." : "" ), ( type ? 
type->name : "" ) ); + + /* Check for existence and obtain length of GuestInfo value */ + info_len = guestrpc_command ( guestinfo_channel, command, NULL, 0 ); + if ( info_len < 0 ) { + ret = info_len; + goto err_get_info_len; + } + + /* Mark as found */ + *found = 1; + + /* Determine default type if necessary */ + if ( ! type ) { + predefined = find_setting ( setting->name ); + type = ( predefined ? predefined->type : &setting_type_string ); + } + assert ( type != NULL ); + + /* Allocate temporary block to hold GuestInfo value */ + info = zalloc ( info_len + 1 /* NUL */ ); + if ( ! info ) { + DBGC ( settings, "GuestInfo %p could not allocate %d bytes\n", + settings, info_len ); + ret = -ENOMEM; + goto err_alloc; + } + info[info_len] = '\0'; + + /* Fetch GuestInfo value */ + check_len = guestrpc_command ( guestinfo_channel, command, + info, info_len ); + if ( check_len < 0 ) { + ret = check_len; + goto err_get_info; + } + if ( check_len != info_len ) { + DBGC ( settings, "GuestInfo %p length mismatch (expected %d, " + "got %d)\n", settings, info_len, check_len ); + ret = -EIO; + goto err_get_info; + } + DBGC2 ( settings, "GuestInfo %p found %s = \"%s\"\n", + settings, &command[9] /* Skip "info-get " */, info ); + + /* Parse GuestInfo value according to type */ + ret = setting_parse ( type, info, data, len ); + if ( ret < 0 ) { + DBGC ( settings, "GuestInfo %p could not parse \"%s\" as %s: " + "%s\n", settings, info, type->name, strerror ( ret ) ); + goto err_parse; + } + + err_parse: + err_get_info: + free ( info ); + err_alloc: + err_get_info_len: + return ret; +} + +/** + * Fetch value of GuestInfo setting + * + * @v settings Settings block + * @v setting Setting to fetch + * @v data Buffer to fill with setting data + * @v len Length of buffer + * @ret len Length of setting data, or negative error + */ +static int guestinfo_fetch ( struct settings *settings, + struct setting *setting, + void *data, size_t len ) { + struct setting_type *type; + int found = 0; + int 
ret; + + /* Try default type first */ + ret = guestinfo_fetch_type ( settings, setting, NULL, + data, len, &found ); + if ( found ) + return ret; + + /* Otherwise, try all possible types */ + for_each_table_entry ( type, SETTING_TYPES ) { + ret = guestinfo_fetch_type ( settings, setting, type, + data, len, &found ); + if ( found ) + return ret; + } + + /* Not found */ + return -ENOENT; +} + +/** GuestInfo settings operations */ +static struct settings_operations guestinfo_settings_operations = { + .fetch = guestinfo_fetch, +}; + +/** GuestInfo settings */ +static struct settings guestinfo_settings = { + .refcnt = NULL, + .siblings = LIST_HEAD_INIT ( guestinfo_settings.siblings ), + .children = LIST_HEAD_INIT ( guestinfo_settings.children ), + .op = &guestinfo_settings_operations, +}; + +/** Initialise GuestInfo settings */ +static void guestinfo_init ( void ) { + int rc; + + /* Open GuestRPC channel */ + guestinfo_channel = guestrpc_open(); + if ( guestinfo_channel < 0 ) { + rc = guestinfo_channel; + DBG ( "GuestInfo could not open channel: %s\n", + strerror ( rc ) ); + return; + } + + /* Register root GuestInfo settings */ + if ( ( rc = register_settings ( &guestinfo_settings, NULL, + "vmware" ) ) != 0 ) { + DBG ( "GuestInfo could not register settings: %s\n", + strerror ( rc ) ); + return; + } +} + +/** GuestInfo settings initialiser */ +struct init_fn guestinfo_init_fn __init_fn ( INIT_NORMAL ) = { + .initialise = guestinfo_init, +}; + +/** + * Create per-netdevice GuestInfo settings + * + * @v netdev Network device + * @ret rc Return status code + */ +static int guestinfo_net_probe ( struct net_device *netdev ) { + struct settings *settings; + int rc; + + /* Do nothing unless we have a GuestInfo channel available */ + if ( guestinfo_channel < 0 ) + return 0; + + /* Allocate and initialise settings block */ + settings = zalloc ( sizeof ( *settings ) ); + if ( ! 
settings ) { + rc = -ENOMEM; + goto err_alloc; + } + settings_init ( settings, &guestinfo_settings_operations, NULL, NULL ); + + /* Register settings */ + if ( ( rc = register_settings ( settings, netdev_settings ( netdev ), + "vmware" ) ) != 0 ) { + DBGC ( settings, "GuestInfo %p could not register for %s: %s\n", + settings, netdev->name, strerror ( rc ) ); + goto err_register; + } + DBGC ( settings, "GuestInfo %p registered for %s\n", + settings, netdev->name ); + + return 0; + + err_register: + free ( settings ); + err_alloc: + return rc; +} + +/** + * Remove per-netdevice GuestInfo settings + * + * @v netdev Network device + */ +static void guestinfo_net_remove ( struct net_device *netdev ) { + struct settings *parent = netdev_settings ( netdev ); + struct settings *settings; + + list_for_each_entry ( settings, &parent->children, siblings ) { + if ( settings->op == &guestinfo_settings_operations ) { + DBGC ( settings, "GuestInfo %p unregistered for %s\n", + settings, netdev->name ); + unregister_settings ( settings ); + free ( settings ); + return; + } + } +} + +/** GuestInfo per-netdevice driver */ +struct net_driver guestinfo_net_driver __net_driver = { + .name = "GuestInfo", + .probe = guestinfo_net_probe, + .remove = guestinfo_net_remove, +}; diff --git a/src/arch/x86/interface/vmware/guestrpc.c b/src/arch/x86/interface/vmware/guestrpc.c new file mode 100644 index 00000000..ef7ee815 --- /dev/null +++ b/src/arch/x86/interface/vmware/guestrpc.c @@ -0,0 +1,332 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * VMware GuestRPC mechanism + * + */ + +#include +#include +#include +#include +#include +#include + +/* Disambiguate the various error causes */ +#define EPROTO_OPEN __einfo_error ( EINFO_EPROTO_OPEN ) +#define EINFO_EPROTO_OPEN \ + __einfo_uniqify ( EINFO_EPROTO, 0x00, "GuestRPC open failed" ) +#define EPROTO_COMMAND_LEN __einfo_error ( EINFO_EPROTO_COMMAND_LEN ) +#define EINFO_EPROTO_COMMAND_LEN \ + __einfo_uniqify ( EINFO_EPROTO, 0x01, "GuestRPC command length failed" ) +#define EPROTO_COMMAND_DATA __einfo_error ( EINFO_EPROTO_COMMAND_DATA ) +#define EINFO_EPROTO_COMMAND_DATA \ + __einfo_uniqify ( EINFO_EPROTO, 0x02, "GuestRPC command data failed" ) +#define EPROTO_REPLY_LEN __einfo_error ( EINFO_EPROTO_REPLY_LEN ) +#define EINFO_EPROTO_REPLY_LEN \ + __einfo_uniqify ( EINFO_EPROTO, 0x03, "GuestRPC reply length failed" ) +#define EPROTO_REPLY_DATA __einfo_error ( EINFO_EPROTO_REPLY_DATA ) +#define EINFO_EPROTO_REPLY_DATA \ + __einfo_uniqify ( EINFO_EPROTO, 0x04, "GuestRPC reply data failed" ) +#define EPROTO_REPLY_FINISH __einfo_error ( EINFO_EPROTO_REPLY_FINISH ) +#define EINFO_EPROTO_REPLY_FINISH \ + __einfo_uniqify ( EINFO_EPROTO, 0x05, "GuestRPC reply finish failed" ) +#define EPROTO_CLOSE __einfo_error ( EINFO_EPROTO_CLOSE ) +#define EINFO_EPROTO_CLOSE \ + __einfo_uniqify ( EINFO_EPROTO, 0x06, "GuestRPC close failed" ) + +/** + * Open GuestRPC channel + * + * @ret channel Channel number, or negative error 
+ */ +int guestrpc_open ( void ) { + uint16_t channel; + uint32_t discard_b; + uint32_t status; + + /* Issue GuestRPC command */ + status = vmware_cmd_guestrpc ( 0, GUESTRPC_OPEN, GUESTRPC_MAGIC, + &channel, &discard_b ); + if ( status != GUESTRPC_OPEN_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC open failed: status %08x\n", + status ); + return -EPROTO_OPEN; + } + + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d opened\n", channel ); + return channel; +} + +/** + * Send GuestRPC command length + * + * @v channel Channel number + * @v len Command length + * @ret rc Return status code + */ +static int guestrpc_command_len ( int channel, size_t len ) { + uint16_t discard_d; + uint32_t discard_b; + uint32_t status; + + /* Issue GuestRPC command */ + status = vmware_cmd_guestrpc ( channel, GUESTRPC_COMMAND_LEN, len, + &discard_d, &discard_b ); + if ( status != GUESTRPC_COMMAND_LEN_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d send command " + "length %zd failed: status %08x\n", + channel, len, status ); + return -EPROTO_COMMAND_LEN; + } + + return 0; +} + +/** + * Send GuestRPC command data + * + * @v channel Channel number + * @v data Command data + * @ret rc Return status code + */ +static int guestrpc_command_data ( int channel, uint32_t data ) { + uint16_t discard_d; + uint32_t discard_b; + uint32_t status; + + /* Issue GuestRPC command */ + status = vmware_cmd_guestrpc ( channel, GUESTRPC_COMMAND_DATA, data, + &discard_d, &discard_b ); + if ( status != GUESTRPC_COMMAND_DATA_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d send command " + "data %08x failed: status %08x\n", + channel, data, status ); + return -EPROTO_COMMAND_DATA; + } + + return 0; +} + +/** + * Receive GuestRPC reply length + * + * @v channel Channel number + * @ret reply_id Reply ID + * @ret len Reply length, or negative error + */ +static int guestrpc_reply_len ( int channel, uint16_t *reply_id ) { + uint32_t len; + uint32_t status; + + /* Issue GuestRPC command */ + status = 
vmware_cmd_guestrpc ( channel, GUESTRPC_REPLY_LEN, 0, + reply_id, &len ); + if ( status != GUESTRPC_REPLY_LEN_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d receive reply " + "length failed: status %08x\n", channel, status ); + return -EPROTO_REPLY_LEN; + } + + return len; +} + +/** + * Receive GuestRPC reply data + * + * @v channel Channel number + * @v reply_id Reply ID + * @ret data Reply data + * @ret rc Return status code + */ +static int guestrpc_reply_data ( int channel, uint16_t reply_id, + uint32_t *data ) { + uint16_t discard_d; + uint32_t status; + + /* Issue GuestRPC command */ + status = vmware_cmd_guestrpc ( channel, GUESTRPC_REPLY_DATA, reply_id, + &discard_d, data ); + if ( status != GUESTRPC_REPLY_DATA_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d receive reply " + "%d data failed: status %08x\n", + channel, reply_id, status ); + return -EPROTO_REPLY_DATA; + } + + return 0; +} + +/** + * Finish receiving GuestRPC reply + * + * @v channel Channel number + * @v reply_id Reply ID + * @ret rc Return status code + */ +static int guestrpc_reply_finish ( int channel, uint16_t reply_id ) { + uint16_t discard_d; + uint32_t discard_b; + uint32_t status; + + /* Issue GuestRPC command */ + status = vmware_cmd_guestrpc ( channel, GUESTRPC_REPLY_FINISH, reply_id, + &discard_d, &discard_b ); + if ( status != GUESTRPC_REPLY_FINISH_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d finish reply %d " + "failed: status %08x\n", channel, reply_id, status ); + return -EPROTO_REPLY_FINISH; + } + + return 0; +} + +/** + * Close GuestRPC channel + * + * @v channel Channel number + */ +void guestrpc_close ( int channel ) { + uint16_t discard_d; + uint32_t discard_b; + uint32_t status; + + /* Issue GuestRPC command */ + status = vmware_cmd_guestrpc ( channel, GUESTRPC_CLOSE, 0, + &discard_d, &discard_b ); + if ( status != GUESTRPC_CLOSE_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d close failed: " + "status %08x\n", channel, status 
); + return; + } + + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d closed\n", channel ); +} + +/** + * Issue GuestRPC command + * + * @v channel Channel number + * @v command Command + * @v reply Reply buffer + * @v reply_len Length of reply buffer + * @ret len Length of reply, or negative error + * + * The actual length of the reply will be returned even if the buffer + * was too small. + */ +int guestrpc_command ( int channel, const char *command, char *reply, + size_t reply_len ) { + const uint8_t *command_bytes = ( ( const void * ) command ); + uint8_t *reply_bytes = ( ( void * ) reply ); + size_t command_len = strlen ( command ); + int orig_reply_len = reply_len; + uint16_t status; + uint8_t *status_bytes = ( ( void * ) &status ); + size_t status_len = sizeof ( status ); + uint32_t data; + uint16_t reply_id; + int len; + int remaining; + unsigned int i; + int rc; + + DBGC2 ( GUESTRPC_MAGIC, "GuestRPC channel %d issuing command:\n", + channel ); + DBGC2_HDA ( GUESTRPC_MAGIC, 0, command, command_len ); + + /* Sanity check */ + assert ( ( reply != NULL ) || ( reply_len == 0 ) ); + + /* Send command length */ + if ( ( rc = guestrpc_command_len ( channel, command_len ) ) < 0 ) + return rc; + + /* Send command data */ + while ( command_len ) { + data = 0; + for ( i = sizeof ( data ) ; i ; i-- ) { + if ( command_len ) { + data = ( ( data & ~0xff ) | + *(command_bytes++) ); + command_len--; + } + data = ( ( data << 24 ) | ( data >> 8 ) ); + } + if ( ( rc = guestrpc_command_data ( channel, data ) ) < 0 ) + return rc; + } + + /* Receive reply length */ + if ( ( len = guestrpc_reply_len ( channel, &reply_id ) ) < 0 ) { + rc = len; + return rc; + } + + /* Receive reply */ + for ( remaining = len ; remaining > 0 ; remaining -= sizeof ( data ) ) { + if ( ( rc = guestrpc_reply_data ( channel, reply_id, + &data ) ) < 0 ) { + return rc; + } + for ( i = sizeof ( data ) ; i ; i-- ) { + if ( status_len ) { + *(status_bytes++) = ( data & 0xff ); + status_len--; + len--; + } else if 
( reply_len ) { + *(reply_bytes++) = ( data & 0xff ); + reply_len--; + } + data = ( ( data << 24 ) | ( data >> 8 ) ); + } + } + + /* Finish receiving RPC reply */ + if ( ( rc = guestrpc_reply_finish ( channel, reply_id ) ) < 0 ) + return rc; + + DBGC2 ( GUESTRPC_MAGIC, "GuestRPC channel %d received reply (id %d, " + "length %d):\n", channel, reply_id, len ); + DBGC2_HDA ( GUESTRPC_MAGIC, 0, &status, sizeof ( status ) ); + DBGC2_HDA ( GUESTRPC_MAGIC, sizeof ( status ), reply, + ( ( len < orig_reply_len ) ? len : orig_reply_len ) ); + + /* Check reply status */ + if ( status != GUESTRPC_SUCCESS ) { + DBGC ( GUESTRPC_MAGIC, "GuestRPC channel %d command failed " + "(status %04x, reply id %d, reply length %d):\n", + channel, status, reply_id, len ); + DBGC_HDA ( GUESTRPC_MAGIC, 0, command, command_len ); + DBGC_HDA ( GUESTRPC_MAGIC, 0, &status, sizeof ( status ) ); + DBGC_HDA ( GUESTRPC_MAGIC, sizeof ( status ), reply, + ( ( len < orig_reply_len ) ? len : orig_reply_len )); + return -EIO; + } + + return len; +} diff --git a/src/arch/x86/interface/vmware/vmconsole.c b/src/arch/x86/interface/vmware/vmconsole.c new file mode 100644 index 00000000..f7df4f75 --- /dev/null +++ b/src/arch/x86/interface/vmware/vmconsole.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * VMware logfile console + * + */ + +#include +#include +#include +#include +#include +#include + +/** VMware logfile console buffer size */ +#define VMCONSOLE_BUFSIZE 128 + +/* Set default console usage if applicable */ +#if ! ( defined ( CONSOLE_VMWARE ) && CONSOLE_EXPLICIT ( CONSOLE_VMWARE ) ) +#undef CONSOLE_VMWARE +#define CONSOLE_VMWARE ( CONSOLE_USAGE_ALL & ~CONSOLE_USAGE_TUI ) +#endif + +/** VMware logfile console GuestRPC channel */ +static int vmconsole_channel; + +/** VMware logfile console line buffer */ +static struct { + char prefix[4]; + char message[VMCONSOLE_BUFSIZE]; +} vmconsole_buffer = { + .prefix = "log ", +}; + +/** VMware logfile console ANSI escape sequence handlers */ +static struct ansiesc_handler vmconsole_handlers[] = { + { 0, NULL } +}; + +/** VMware logfile line console */ +static struct line_console vmconsole_line = { + .buffer = vmconsole_buffer.message, + .len = sizeof ( vmconsole_buffer.message ), + .ctx = { + .handlers = vmconsole_handlers, + }, +}; + +/** VMware logfile console recursion marker */ +static int vmconsole_entered; + +/** + * Print a character to VMware logfile console + * + * @v character Character to be printed + */ +static void vmconsole_putchar ( int character ) { + int rc; + + /* Ignore if we are already mid-logging */ + if ( vmconsole_entered ) + return; + + /* Fill line buffer */ + if ( line_putchar ( &vmconsole_line, character ) == 0 ) + return; + + /* Guard against re-entry */ + vmconsole_entered = 1; + + /* 
Send log message */ + if ( ( rc = guestrpc_command ( vmconsole_channel, + vmconsole_buffer.prefix, NULL, 0 ) ) <0){ + DBG ( "VMware console could not send log message: %s\n", + strerror ( rc ) ); + } + + /* Clear re-entry flag */ + vmconsole_entered = 0; +} + +/** VMware logfile console driver */ +struct console_driver vmconsole __console_driver = { + .putchar = vmconsole_putchar, + .disabled = CONSOLE_DISABLED, + .usage = CONSOLE_VMWARE, +}; + +/** + * Initialise VMware logfile console + * + */ +static void vmconsole_init ( void ) { + int rc; + + /* Attempt to open console */ + vmconsole_channel = guestrpc_open(); + if ( vmconsole_channel < 0 ) { + rc = vmconsole_channel; + DBG ( "VMware console could not be initialised: %s\n", + strerror ( rc ) ); + return; + } + + /* Mark console as available */ + vmconsole.disabled = 0; +} + +/** + * VMware logfile console initialisation function + */ +struct init_fn vmconsole_init_fn __init_fn ( INIT_CONSOLE ) = { + .initialise = vmconsole_init, +}; diff --git a/src/arch/x86/interface/vmware/vmware.c b/src/arch/x86/interface/vmware/vmware.c new file mode 100644 index 00000000..a415465f --- /dev/null +++ b/src/arch/x86/interface/vmware/vmware.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * VMware backdoor mechanism + * + * Based on the unofficial documentation at + * + * http://sites.google.com/site/chitchatvmback/backdoor + * + */ + +#include +#include +#include + +/** + * Detect VMware presence + * + * @ret rc Return status code + */ +int vmware_present ( void ) { + uint32_t version; + uint32_t magic; + uint32_t product_type; + + /* Perform backdoor call */ + vmware_cmd_get_version ( &version, &magic, &product_type ); + + /* Check for VMware presence */ + if ( magic != VMW_MAGIC ) { + DBGC ( VMW_MAGIC, "VMware not present\n" ); + return -ENOENT; + } + + DBGC ( VMW_MAGIC, "VMware product type %04x version %08x detected\n", + product_type, version ); + return 0; +} diff --git a/src/arch/x86/prefix/bootpart.S b/src/arch/x86/prefix/bootpart.S new file mode 100644 index 00000000..6d0c6034 --- /dev/null +++ b/src/arch/x86/prefix/bootpart.S @@ -0,0 +1,218 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define BOOT_SEG 0x07c0 +#define EXEC_SEG 0x0100 +#define STACK_SEG 0x0200 +#define STACK_SIZE 0x2000 + + .text + .arch i386 + .section ".prefix", "awx", @progbits + .code16 + +/* + * Find active partition + * + * Parameters: + * %dl : BIOS drive number + * %bp : Active partition handler routine + */ +find_active_partition: + /* Set up stack at STACK_SEG:STACK_SIZE */ + movw $STACK_SEG, %ax + movw %ax, %ss + movw $STACK_SIZE, %sp + + /* Relocate self to EXEC_SEG */ + pushw $BOOT_SEG + popw %ds + pushw $EXEC_SEG + popw %es + xorw %si, %si + xorw %di, %di + movw $0x200, %cx + rep movsb + ljmp $EXEC_SEG, $1f +1: pushw %ds + popw %es + pushw %cs + popw %ds + + /* Check for LBA extensions */ + movb $0x41, %ah + movw $0x55aa, %bx + stc + int $0x13 + jc 1f + cmpw $0xaa55, %bx + jne 1f + 
movw $read_lba, read_sectors +1: + /* Read and process root partition table */ + xorb %dh, %dh + movw $0x0001, %cx + xorl %esi, %esi + xorl %edi, %edi + call process_table + + /* Print failure message */ + movw $10f, %si + jmp boot_error +10: .asciz "Could not locate active partition\r\n" + +/* + * Print failure message and boot next device + * + * Parameters: + * %si : Failure string + */ +boot_error: + cld + movw $0x0007, %bx + movb $0x0e, %ah +1: lodsb + testb %al, %al + je 99f + int $0x10 + jmp 1b +99: /* Boot next device */ + int $0x18 + +/* + * Process partition table + * + * Parameters: + * %dl : BIOS drive number + * %dh : Head + * %cl : Sector (bits 0-5), high two bits of cylinder (bits 6-7) + * %ch : Low eight bits of cylinder + * %esi:%edi : LBA address + * %bp : Active partition handler routine + * + * Returns: + * CF set on error + */ +process_table: + pushal + call read_boot_sector + jc 99f + movw $446, %bx +1: call process_partition + addw $16, %bx + cmpw $510, %bx + jne 1b +99: popal + ret + +/* + * Process partition + * + * Parameters: + * %dl : BIOS drive number + * %dh : Head + * %cl : Sector (bits 0-5), high two bits of cylinder (bits 6-7) + * %ch : Low eight bits of cylinder + * %esi:%edi : LBA address + * %bx : Offset within partition table + * %bp : Active partition handler routine + */ +process_partition: + pushal + /* Load C/H/S values from partition entry */ + movb %es:1(%bx), %dh + movw %es:2(%bx), %cx + /* Update LBA address from partition entry */ + addl %es:8(%bx), %edi + adcl $0, %esi + /* Check active flag */ + testb $0x80, %es:(%bx) + jz 1f + call read_boot_sector + jc 99f + jmp *%bp +1: /* Check for extended partition */ + movb %es:4(%bx), %al + cmpb $0x05, %al + je 2f + cmpb $0x0f, %al + je 2f + cmpb $0x85, %al + jne 99f +2: call process_table +99: popal + /* Reload original partition table */ + call read_boot_sector + ret + +/* + * Read single sector to %es:0000 and verify 0x55aa signature + * + * Parameters: + * %dl : BIOS drive 
number + * %dh : Head + * %cl : Sector (bits 0-5), high two bits of cylinder (bits 6-7) + * %ch : Low eight bits of cylinder + * %esi:%edi : LBA address + * + * Returns: + * CF set on error + */ +read_boot_sector: + pushw %ax + movw $1, %ax + call *read_sectors + jc 99f + cmpw $0xaa55, %es:(510) + je 99f + stc +99: popw %ax + ret + +/* + * Read sectors to %es:0000 + * + * Parameters: + * %dl : BIOS drive number + * %dh : Head + * %cl : Sector (bits 0-5), high two bits of cylinder (bits 6-7) + * %ch : Low eight bits of cylinder + * %esi:%edi : LBA address + * %ax : Number of sectors (max 127) + * + * Returns: + * CF set on error + */ +read_sectors: .word read_chs + +read_chs: + /* Read sectors using C/H/S address */ + pushal + xorw %bx, %bx + movb $0x02, %ah + stc + int $0x13 + sti + popal + ret + +read_lba: + /* Read sectors using LBA address */ + pushal + movw %ax, (lba_desc + 2) + pushw %es + popw (lba_desc + 6) + movl %edi, (lba_desc + 8) + movl %esi, (lba_desc + 12) + movw $lba_desc, %si + movb $0x42, %ah + int $0x13 + popal + ret + +lba_desc: + .byte 0x10 + .byte 0 + .word 1 + .word 0x0000 + .word 0x0000 + .long 0, 0 diff --git a/src/arch/x86/prefix/dskprefix.S b/src/arch/x86/prefix/dskprefix.S new file mode 100644 index 00000000..0503f113 --- /dev/null +++ b/src/arch/x86/prefix/dskprefix.S @@ -0,0 +1,383 @@ +/* NOTE: this boot sector contains instructions that need at least an 80186. + * Yes, as86 has a bug somewhere in the valid instruction set checks. + * + */ + +/* floppyload.S Copyright (C) 1991, 1992 Linus Torvalds + * modified by Drew Eckhardt + * modified by Bruce Evans (bde) + * + * floppyprefix.S is loaded at 0x0000:0x7c00 by the bios-startup routines. + * + * It then loads the system at SYSSEG<<4, using BIOS interrupts. + * + * The loader has been made as simple as possible, and continuous read errors + * will result in a unbreakable loop. Reboot by hand. It loads pretty fast by + * getting whole tracks at a time whenever possible. 
+ */ + +FILE_LICENCE ( GPL2_ONLY ) + +#include + +.equ BOOTSEG, 0x07C0 /* original address of boot-sector */ + +.equ SYSSEG, 0x1000 /* system loaded at SYSSEG<<4 */ + + .org 0 + .arch i386 + .text + .section ".prefix", "ax", @progbits + .code16 + .globl _dsk_start +_dsk_start: + + jmp $BOOTSEG, $go /* reload cs:ip to match relocation addr */ +go: + movw $0x2000-12, %di /* 0x2000 is arbitrary value >= length */ + /* of bootsect + room for stack + 12 for */ + /* saved disk parm block */ + + movw $BOOTSEG, %ax + movw %ax,%ds + movw %ax,%es + movw %ax,%ss /* put stack at BOOTSEG:0x4000-12. */ + movw %di,%sp + +/* Many BIOS's default disk parameter tables will not recognize multi-sector + * reads beyond the maximum sector number specified in the default diskette + * parameter tables - this may mean 7 sectors in some cases. + * + * Since single sector reads are slow and out of the question, we must take care + * of this by creating new parameter tables (for the first disk) in RAM. We + * will set the maximum sector count to 36 - the most we will encounter on an + * ED 2.88. High doesn't hurt. Low does. + * + * Segments are as follows: ds=es=ss=cs - BOOTSEG + */ + + xorw %cx,%cx + movw %cx,%es /* access segment 0 */ + movw $0x78, %bx /* 0:bx is parameter table address */ + pushw %ds /* save ds */ +/* 0:bx is parameter table address */ + ldsw %es:(%bx),%si /* loads ds and si */ + + movw %ax,%es /* ax is BOOTSECT (loaded above) */ + movb $6, %cl /* copy 12 bytes */ + cld + pushw %di /* keep a copy for later */ + rep + movsw /* ds:si is source, es:di is dest */ + popw %di + + movb $36,%es:4(%di) + + movw %cx,%ds /* access segment 0 */ + xchgw %di,(%bx) + movw %es,%si + xchgw %si,2(%bx) + popw %ds /* restore ds */ + movw %di, dpoff /* save old parameters */ + movw %si, dpseg /* to restore just before finishing */ + pushw %ds + popw %es /* reload es */ + +/* Note that es is already set up. Also cx is 0 from rep movsw above. 
*/ + + xorb %ah,%ah /* reset FDC */ + xorb %dl,%dl + int $0x13 + +/* Get disk drive parameters, specifically number of sectors/track. + * + * It seems that there is no BIOS call to get the number of sectors. Guess + * 36 sectors if sector 36 can be read, 18 sectors if sector 18 can be read, + * 15 if sector 15 can be read. Otherwise guess 9. + */ + + movw $disksizes, %si /* table of sizes to try */ + +probe_loop: + lodsb + cbtw /* extend to word */ + movw %ax, sectors + cmpw $disksizes+4, %si + jae got_sectors /* if all else fails, try 9 */ + xchgw %cx,%ax /* cx = track and sector */ + xorw %dx,%dx /* drive 0, head 0 */ + movw $0x0200, %bx /* address after boot sector */ + /* (512 bytes from origin, es = cs) */ + movw $0x0201, %ax /* service 2, 1 sector */ + int $0x13 + jc probe_loop /* try next value */ + +got_sectors: + movw $msg1end-msg1, %cx + movw $msg1, %si + call print_str + +/* ok, we've written the Loading... message, now we want to load the system */ + + movw $SYSSEG, %ax + movw %ax,%es /* segment of SYSSEG<<4 */ + pushw %es + call read_it + +/* This turns off the floppy drive motor, so that we enter the kernel in a + * known state, and don't have to worry about it later. + */ + movw $0x3f2, %dx + xorb %al,%al + outb %al,%dx + + call print_nl + pop %es /* = SYSSEG */ + +/* Restore original disk parameters */ + movw $0x78, %bx + movw dpoff, %di + movw dpseg, %si + xorw %ax,%ax + movw %ax,%ds + movw %di,(%bx) + movw %si,2(%bx) + + /* Everything now loaded. %es = SYSSEG, so %es:0000 points to + * start of loaded image. + */ + + /* Jump to loaded copy */ + ljmp $SYSSEG, $start_runtime + +endseg: .word SYSSEG + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADDW" + .long endseg + .long 16 + .long 0 + .previous + +/* This routine loads the system at address SYSSEG<<4, making sure no 64kB + * boundaries are crossed. We try to load it as fast as possible, loading whole + * tracks whenever we can. 
+ * + * in: es - starting address segment (normally SYSSEG) + */ +read_it: + movw $0,sread /* load whole image including prefix */ + movw %es,%ax + testw $0x0fff, %ax +die: jne die /* es must be at 64kB boundary */ + xorw %bx,%bx /* bx is starting address within segment */ +rp_read: + movw %es,%ax + movw %bx,%dx + movb $4, %cl + shrw %cl,%dx /* bx is always divisible by 16 */ + addw %dx,%ax + cmpw endseg, %ax /* have we loaded all yet? */ + jb ok1_read + ret +ok1_read: + movw sectors, %ax + subw sread, %ax + movw %ax,%cx + shlw $9, %cx + addw %bx,%cx + jnc ok2_read + je ok2_read + xorw %ax,%ax + subw %bx,%ax + shrw $9, %ax +ok2_read: + call read_track + movw %ax,%cx + addw sread, %ax + cmpw sectors, %ax + jne ok3_read + movw $1, %ax + subw head, %ax + jne ok4_read + incw track +ok4_read: + movw %ax, head + xorw %ax,%ax +ok3_read: + movw %ax, sread + shlw $9, %cx + addw %cx,%bx + jnc rp_read + movw %es,%ax + addb $0x10, %ah + movw %ax,%es + xorw %bx,%bx + jmp rp_read + +read_track: + pusha + pushw %ax + pushw %bx + pushw %bp /* just in case the BIOS is buggy */ + movw $0x0e2e, %ax /* 0x2e = . */ + movw $0x0007, %bx + int $0x10 + popw %bp + popw %bx + popw %ax + + movw track, %dx + movw sread, %cx + incw %cx + movb %dl,%ch + movw head, %dx + movb %dl,%dh + andw $0x0100, %dx + movb $2, %ah + + pushw %dx /* save for error dump */ + pushw %cx + pushw %bx + pushw %ax + + int $0x13 + jc bad_rt + addw $8, %sp + popa + ret + +bad_rt: pushw %ax /* save error code */ + call print_all /* ah = error, al = read */ + + xorb %ah,%ah + xorb %dl,%dl + int $0x13 + + addw $10, %sp + popa + jmp read_track + +/* print_all is for debugging purposes. It will print out all of the registers. 
+ * The assumption is that this is called from a routine, with a stack frame like + * dx + * cx + * bx + * ax + * error + * ret <- sp + */ + +print_all: + call print_nl /* nl for readability */ + movw $5, %cx /* error code + 4 registers */ + movw %sp,%bp + +print_loop: + pushw %cx /* save count left */ + + cmpb $5, %cl + jae no_reg /* see if register name is needed */ + + movw $0x0007, %bx /* page 0, attribute 7 (normal) */ + movw $0xe05+0x41-1, %ax + subb %cl,%al + int $0x10 + + movb $0x58, %al /* 'X' */ + int $0x10 + + movb $0x3A, %al /* ':' */ + int $0x10 + +no_reg: + addw $2, %bp /* next register */ + call print_hex /* print it */ + movb $0x20, %al /* print a space */ + int $0x10 + popw %cx + loop print_loop + call print_nl /* nl for readability */ + ret + +print_str: + movw $0x0007, %bx /* page 0, attribute 7 (normal) */ + movb $0x0e, %ah /* write char, tty mode */ +prloop: + lodsb + int $0x10 + loop prloop + ret + +print_nl: + movw $0x0007, %bx /* page 0, attribute 7 (normal) */ + movw $0xe0d, %ax /* CR */ + int $0x10 + movb $0xa, %al /* LF */ + int $0x10 + ret + +/* print_hex prints the word pointed to by ss:bp in hexadecimal. 
*/ + +print_hex: + movw (%bp),%dx /* load word into dx */ + movb $4, %cl + movb $0x0e, %ah /* write char, tty mode */ + movw $0x0007, %bx /* page 0, attribute 7 (normal) */ + call print_digit + call print_digit + call print_digit +/* fall through */ +print_digit: + rol %cl,%dx /* rotate so that lowest 4 bits are used */ + movb $0x0f, %al /* mask for nybble */ + andb %dl,%al + addb $0x90, %al /* convert al to ascii hex (four instructions) */ + daa + adcb $0x40, %al + daa + int $0x10 + ret + +sread: .word 0 /* sectors read of current track */ +head: .word 0 /* current head */ +track: .word 0 /* current track */ + +sectors: + .word 0 + +dpseg: .word 0 +dpoff: .word 0 + +disksizes: + .byte 36,18,15,9 + +msg1: + .ascii "Loading ROM image" +msg1end: + + .org 510, 0 + .word 0xAA55 + +start_runtime: + /* Install iPXE */ + call install + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Run iPXE */ + virtcall main + + /* Uninstall iPXE */ + call uninstall + + /* Boot next device */ + int $0x18 + diff --git a/src/arch/x86/prefix/exeprefix.S b/src/arch/x86/prefix/exeprefix.S new file mode 100644 index 00000000..c351456e --- /dev/null +++ b/src/arch/x86/prefix/exeprefix.S @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2011 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + +/* Initial temporary stack size */ +#define EXE_STACK_SIZE 0x400 + +/* Temporary decompression area (avoid DOS high memory area) */ +#define EXE_DECOMPRESS_ADDRESS 0x110000 + +/* Fields within the Program Segment Prefix */ +#define PSP_CMDLINE_LEN 0x80 +#define PSP_CMDLINE_START 0x81 + + .text + .arch i386 + .org 0 + .code16 + .section ".prefix", "awx", @progbits + +signature: + /* "MZ" signature */ + .ascii "MZ" + +last_block: + /* Number of bytes in last block that are really used */ + .word 0 + +blocks: + /* Number of 512-byte blocks */ + .word 0 + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADDW" + .long blocks + .long 512 + .long 0 + .previous + +num_reloc: + /* Number of relocation entries stored after the header */ + .word 0 + +header_pgh: + /* Number of paragraphs in the header */ + .word ( ( _exe_start - signature ) / 16 ) + +min_bss_pgh: + /* Minimum number of paragraphs of additional (BSS) memory */ + .word ( EXE_STACK_SIZE / 16 ) + +max_bss_pgh: + /* Maximum number of paragraphs of additional (BSS) memory */ + .word ( EXE_STACK_SIZE / 16 ) + +init_ss: + /* Initial stack segment (relative to start of executable) */ + .word -( ( _exe_start - signature ) / 16 ) + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADDW" + .long init_ss + .long 16 + .long 0 + .previous + +init_sp: + /* Initial stack pointer */ + .word EXE_STACK_SIZE + +checksum: + /* Checksum (ignored) */ + .word 0 + +init_ip: + /* Initial 
instruction pointer */ + .word _exe_start + +init_cs: + /* Initial code segment (relative to start of executable) */ + .word -( ( _exe_start - signature ) / 16 ) + +reloc_table: + /* Relocation table offset */ + .word 0 + +overlay: + /* Overlay number */ + .word 0 + + .align 16, 0 + + .globl _exe_start +_exe_start: + /* Install iPXE. Use a fixed temporary decompression area to + * avoid trashing the DOS high memory area. + */ + call alloc_basemem + xorl %esi, %esi + movl $EXE_DECOMPRESS_ADDRESS, %edi + orl $0xffffffff, %ebp /* Allow arbitrary relocation */ + call install_prealloc + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Terminate command line with a NUL */ + movzbw PSP_CMDLINE_LEN, %si + movb $0, PSP_CMDLINE_START(%si) + + /* Calculate command line physical address */ + xorl %esi, %esi + movw %ds, %si + shll $4, %esi + addl $PSP_CMDLINE_START, %esi + + /* Set up %ds for access to .data16 */ + movw %bx, %ds + + /* Record command line address */ + movl %esi, cmdline_phys + + /* Run iPXE */ + virtcall main + + /* Uninstall iPXE */ + call uninstall + + /* Exit back to DOS. 
This is very unlikely to work */ + movw $0x4c00, %ax + int $0x21 diff --git a/src/arch/x86/prefix/hdprefix.S b/src/arch/x86/prefix/hdprefix.S new file mode 100644 index 00000000..28c8a532 --- /dev/null +++ b/src/arch/x86/prefix/hdprefix.S @@ -0,0 +1,115 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + + .text + .arch i386 + .section ".prefix", "awx", @progbits + .code16 + .org 0 + .globl _hd_start +_hd_start: + + movw $load_image, %bp + jmp find_active_partition + +#include "bootpart.S" + +load_image: + /* Get disk geometry */ + pushal + pushw %es + movb $0x08, %ah + int $0x13 + jc load_failed + movb %cl, max_sector + movb %dh, max_head + popw %es + popal + +1: /* Read to end of current track (or end of image) */ + movb %cl, %al + negb %al + addb max_sector, %al + incb %al + andb $0x3f, %al + movzbl %al, %eax + movl load_length, %ebx + cmpl %eax, %ebx + ja 2f + movl %ebx, %eax +2: call *read_sectors + jc load_failed + + /* Update %es */ + movw %es, %bx + shll $5, %eax + addw %ax, %bx + movw %bx, %es + shrl $5, %eax + + /* Update LBA address */ + addl %eax, %edi + adcl $0, %esi + + /* Update CHS address */ + andb $0xc0, %cl + orb $0x01, %cl + incb %dh + cmpb max_head, %dh + jbe 3f + xorb %dh, %dh + incb %ch + jnc 3f + addb $0xc0, %cl +3: + /* Loop until whole image is read */ + subl %eax, load_length + ja 1b + ljmp $BOOT_SEG, $start_image + +max_sector: + .byte 0 +max_head: + .byte 0 +load_length: + .long 0 + + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADDL" + .long load_length + .long 512 + .long 0 + .previous + + +load_failed: + movw $10f, %si + jmp boot_error +10: .asciz "Could not load iPXE\r\n" + + .org 510 + .byte 0x55, 0xaa + +start_image: + /* Install iPXE */ + call install + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Run iPXE */ + virtcall main + + /* Uninstall iPXE */ + call 
uninstall + + /* Boot next device */ + int $0x18 diff --git a/src/arch/x86/prefix/isaromprefix.S b/src/arch/x86/prefix/isaromprefix.S new file mode 100644 index 00000000..fb49819e --- /dev/null +++ b/src/arch/x86/prefix/isaromprefix.S @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define BUSTYPE "ISAR" +#define _rom_start _isarom_start +#include "romprefix.S" diff --git a/src/arch/x86/prefix/kkkpxeprefix.S b/src/arch/x86/prefix/kkkpxeprefix.S new file mode 100644 index 00000000..6e43cd26 --- /dev/null +++ b/src/arch/x86/prefix/kkkpxeprefix.S @@ -0,0 +1,17 @@ +/***************************************************************************** + * PXE prefix that keeps the whole PXE stack present and provides an exit hook + * + * This prefix is essentially intended solely for the case of ipxelinux.0 + ***************************************************************************** + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +/* Provide the PXENV_FILE_EXIT_HOOK API call */ +REQUIRING_SYMBOL ( _kkkpxe_start ) +REQUIRE_OBJECT ( pxe_exit_hook ) + +#define PXELOADER_KEEP_UNDI +#define PXELOADER_KEEP_PXE +#define _pxe_start _kkkpxe_start +#include "pxeprefix.S" diff --git a/src/arch/x86/prefix/kkpxeprefix.S b/src/arch/x86/prefix/kkpxeprefix.S new file mode 100644 index 00000000..3c17dbdb --- /dev/null +++ b/src/arch/x86/prefix/kkpxeprefix.S @@ -0,0 +1,11 @@ +/***************************************************************************** + * PXE prefix that keeps the whole PXE stack present + ***************************************************************************** + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define PXELOADER_KEEP_UNDI +#define PXELOADER_KEEP_PXE +#define _pxe_start _kkpxe_start +#include "pxeprefix.S" diff --git a/src/arch/x86/prefix/kpxeprefix.S b/src/arch/x86/prefix/kpxeprefix.S new file mode 100644 index 00000000..200006d8 --- /dev/null +++ b/src/arch/x86/prefix/kpxeprefix.S @@ -0,0 +1,10 @@ +/***************************************************************************** + * PXE prefix that keep the UNDI portion of the PXE stack present + ***************************************************************************** + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + 
+#define PXELOADER_KEEP_UNDI +#define _pxe_start _kpxe_start +#include "pxeprefix.S" diff --git a/src/arch/x86/prefix/libprefix.S b/src/arch/x86/prefix/libprefix.S new file mode 100644 index 00000000..ffb21105 --- /dev/null +++ b/src/arch/x86/prefix/libprefix.S @@ -0,0 +1,1077 @@ +/* + * Copyright (C) 2006 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + + .arch i386 + +/* Image compression enabled */ +#define COMPRESS 1 + +/* Protected mode flag */ +#define CR0_PE 1 + +/* Allow for DBG()-style messages within libprefix */ +#ifdef NDEBUG + .macro progress message, regs:vararg + .endm +#else + .macro dumpreg reg, others:vararg + pushl %eax + movl \reg, %eax + pushw %di + xorw %di, %di + call print_space + call print_hex_dword + popw %di + popl %eax + .ifnb \others + dumpreg \others + .endif + .endm + + .macro progress message, regs:vararg + pushfl + pushw %ds + pushw %si + pushw %di + pushw %cs + popw %ds + xorw %di, %di + movw $progress_\@, %si + call print_message + popw %di + popw %si + .ifnb \regs + dumpreg \regs + .endif + pushw %di + pushw %ax + xorw %di, %di + movb $( '\n' ), %al + call print_character + popw %ax + popw %di + popw %ds + popfl + .section ".prefix.data", "aw", @progbits +progress_\@: + .asciz "\message" + .size progress_\@, . - progress_\@ + .previous + .endm +#endif + +/***************************************************************************** + * Utility function: print character (with LF -> LF,CR translation) + * + * Parameters: + * %al : character to print + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:di : next character in output buffer (if applicable) + ***************************************************************************** + */ + .section ".prefix.print_character", "awx", @progbits + .code16 + .globl print_character +print_character: + /* Preserve registers */ + pushw %ax + pushw %bx + pushw %bp + /* If %di is non-zero, write character to buffer and exit */ + testw %di, %di + jz 1f + movb %al, %ds:(%di) + incw %di + jmp 3f +1: /* Print character */ + movw $0x0007, %bx /* page 0, attribute 7 (normal) */ + movb $0x0e, %ah /* write char, tty mode */ + cmpb $0x0a, %al /* '\n'? 
*/ + jne 2f + int $0x10 + movb $0x0d, %al +2: int $0x10 + /* Restore registers and return */ +3: popw %bp + popw %bx + popw %ax + ret + .size print_character, . - print_character + +/***************************************************************************** + * Utility function: print space + * + * Parameters: + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:di : next character in output buffer (if applicable) + ***************************************************************************** + */ + .section ".prefix.print_space", "awx", @progbits + .code16 + .globl print_space +print_space: + /* Preserve registers */ + pushw %ax + /* Print space */ + movb $( ' ' ), %al + call print_character + /* Restore registers and return */ + popw %ax + ret + .size print_space, . - print_space + +/***************************************************************************** + * Utility function: print a NUL-terminated string + * + * Parameters: + * %ds:si : string to print + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:si : character after terminating NUL + * %ds:di : next character in output buffer (if applicable) + ***************************************************************************** + */ + .section ".prefix.print_message", "awx", @progbits + .code16 + .globl print_message +print_message: + /* Preserve registers */ + pushw %ax + /* Print string */ +1: lodsb + testb %al, %al + je 2f + call print_character + jmp 1b +2: /* Restore registers and return */ + popw %ax + ret + .size print_message, . 
- print_message

/*****************************************************************************
 * Utility functions: print hex digit/byte/word/dword
 *
 * Each entry point prints the top half of its operand and then falls
 * through to the next-smaller printer for the bottom half, restoring
 * the operand's original byte order on the way.
 *
 * Parameters:
 *   %al (low nibble) : digit to print
 *   %al : byte to print
 *   %ax : word to print
 *   %eax : dword to print
 *   %ds:di : output buffer (or %di=0 to print to console)
 * Returns:
 *   %ds:di : next character in output buffer (if applicable)
 *****************************************************************************
 */
	.section ".prefix.print_hex", "awx", @progbits
	.code16
	.globl print_hex_dword
print_hex_dword:
	rorl	$16, %eax	/* print high word first */
	call	print_hex_word
	rorl	$16, %eax	/* restore original %eax, low word now printed below */
	/* Fall through */
	.size print_hex_dword, . - print_hex_dword
	.globl print_hex_word
print_hex_word:
	xchgb	%al, %ah	/* print high byte first */
	call	print_hex_byte
	xchgb	%al, %ah
	/* Fall through */
	.size print_hex_word, . - print_hex_word
	.globl print_hex_byte
print_hex_byte:
	rorb	$4, %al		/* print high nibble first */
	call	print_hex_nibble
	rorb	$4, %al
	/* Fall through */
	.size print_hex_byte, . - print_hex_byte
	.globl print_hex_nibble
print_hex_nibble:
	/* Preserve registers */
	pushw	%ax
	/* Print digit (technique by Norbert Juffa): cmp sets CF for
	 * 0-9; sbb+das then maps 0-9 to '0'-'9' and 10-15 to 'A'-'F'
	 * without a branch or lookup table.
	 */
	andb	$0x0f, %al
	cmpb	$10, %al
	sbbb	$0x69, %al
	das
	call	print_character
	/* Restore registers and return */
	popw	%ax
	ret
	.size print_hex_nibble, . 
- print_hex_nibble + +/***************************************************************************** + * Utility function: print PCI bus:dev.fn + * + * Parameters: + * %ax : PCI bus:dev.fn to print + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:di : next character in output buffer (if applicable) + ***************************************************************************** + */ + .section ".prefix.print_pci_busdevfn", "awx", @progbits + .code16 + .globl print_pci_busdevfn +print_pci_busdevfn: + /* Preserve registers */ + pushw %ax + /* Print bus */ + xchgb %al, %ah + call print_hex_byte + /* Print ":" */ + movb $( ':' ), %al + call print_character + /* Print device */ + movb %ah, %al + shrb $3, %al + call print_hex_byte + /* Print "." */ + movb $( '.' ), %al + call print_character + /* Print function */ + movb %ah, %al + andb $0x07, %al + call print_hex_nibble + /* Restore registers and return */ + popw %ax + ret + .size print_pci_busdevfn, . - print_pci_busdevfn + +/***************************************************************************** + * Utility function: clear current line + * + * Parameters: + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:di : next character in output buffer (if applicable) + ***************************************************************************** + */ + .section ".prefix.print_kill_line", "awx", @progbits + .code16 + .globl print_kill_line +print_kill_line: + /* Preserve registers */ + pushw %ax + pushw %cx + /* Print CR */ + movb $( '\r' ), %al + call print_character + /* Print 79 spaces */ + movw $79, %cx +1: call print_space + loop 1b + /* Print CR */ + call print_character + /* Restore registers and return */ + popw %cx + popw %ax + ret + .size print_kill_line, . 
- print_kill_line

/****************************************************************************
 * copy_bytes
 *
 * Copy bytes
 *
 * Parameters:
 *   %ds:esi : source address
 *   %es:edi : destination address
 *   %ecx : length
 * Returns:
 *   %ds:esi : next source address
 *   %es:edi : next destination address
 * Corrupts:
 *   None
 ****************************************************************************
 */
	.section ".prefix.copy_bytes", "awx", @progbits
	.code16
copy_bytes:
	pushl	%ecx
	/* addr32 uses the full 32-bit %esi/%edi offsets even in .code16;
	 * transfers beyond 64kB presumably rely on the caller having set
	 * up suitable (flat) segment limits — see process_bytes.
	 */
	rep addr32 movsb
	popl	%ecx
	ret
	.size copy_bytes, . - copy_bytes

/****************************************************************************
 * zero_bytes
 *
 * Zero bytes
 *
 * Parameters:
 *   %es:edi : destination address
 *   %ecx : length
 * Returns:
 *   %es:edi : next destination address
 * Corrupts:
 *   None
 ****************************************************************************
 */
	.section ".prefix.zero_bytes", "awx", @progbits
	.code16
zero_bytes:
	pushl	%ecx
	pushw	%ax
	xorw	%ax, %ax
	/* 32-bit addressing, as in copy_bytes above */
	rep addr32 stosb
	popw	%ax
	popl	%ecx
	ret
	.size zero_bytes, . 
- zero_bytes

/****************************************************************************
 * process_bytes
 *
 * Call memcpy()-like function
 *
 * Parameters:
 *   %esi : source physical address
 *   %edi : destination physical address
 *   %ecx : length
 *   %bx : memcpy()-like function to call, passing parameters:
 *	   %ds:esi : source address
 *	   %es:edi : destination address
 *	   %ecx : length
 *	 and returning:
 *	   %ds:esi : next source address
 *	   %es:edi : next destination address
 * Returns:
 *   %esi : next source physical address
 *   %edi : next destination physical address
 *   CF : as returned by memcpy()-like function
 * Corrupts:
 *   None
 ****************************************************************************
 */
	.section ".prefix.process_bytes", "awx", @progbits
	.code16
process_bytes:

#ifndef KEEP_IT_REAL

	/* Preserve registers */
	pushl	%eax
	pushl	%ebp

	/* Construct GDT on stack (since .prefix may not be writable).
	 * In each descriptor's high dword the access byte occupies bits
	 * 8-15: 0x93 = present writable data, 0x9b = present readable
	 * code.  (The corrupted values 0x008f0930/0x008f09b0 placed
	 * 0x09 — a not-present system type — in the access byte and are
	 * fixed below to 0x008f9300/0x008f9b00, matching PM_DS.)
	 */
	.equ	GDT_LEN, 0x20
	.equ	PM_DS, 0x18	/* Flat data segment */
	pushl	$0x00cf9300
	pushl	$0x0000ffff
	.equ	PM_SS, 0x10	/* Stack segment based at %ss:0000 */
	pushl	$0x008f9300
	pushw	%ss
	pushw	$0xffff
	.equ	PM_CS, 0x08	/* Code segment based at %cs:0000 */
	pushl	$0x008f9b00
	pushw	%cs
	pushw	$0xffff
	pushl	$0		/* Base and length */
	pushw	%ss
	pushw	$( GDT_LEN - 1 )
	movzwl	%sp, %ebp
	shll	$4, 0x02(%bp)	/* Convert GDT base from %ss offset to physical */
	addl	%ebp, 0x02(%bp)
	shll	$4, 0x0a(%bp)	/* Convert PM_CS base (%cs:0000) to physical */
	shll	$4, 0x12(%bp)	/* Convert PM_SS base (%ss:0000) to physical */
	subw	$8, %sp
	sgdt	-8(%bp)		/* Save previous GDTR for restoration on exit */

	/* Switch to protected mode */
	pushw	%gs
	pushw	%fs
	pushw	%es
	pushw	%ds
	pushw	%ss
	pushw	%cs		/* Far return address used by the lret below */
	pushw	$2f
	cli
	data32 lgdt (%bp)
	movl	%cr0, %eax
	orb	$CR0_PE, %al
	movl	%eax, %cr0
	ljmp	$PM_CS, $1f	/* Load new %cs; flush prefetched real-mode code */
1:	movw	$PM_SS, %ax
	movw	%ax, %ss
	movw	$PM_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs

#ifdef NDEBUG
	/* Call memcpy()-like function (in debug builds this is instead
	 * done later, in flat real mode, so INT 10 output works).
	 */
	call	*%bx
#endif

	/* Return to (flat) real mode.  The callee's CF return value is
	 * preserved around the flag-clobbering andb.
	 */
	movl	%cr0, %eax
	pushfw
	andb	$0!CR0_PE, %al	/* gas "!": 0|~CR0_PE, i.e. mask off PE bit */
	popfw
+ movl %eax, %cr0 + lret +2: /* lret will ljmp to here */ + popw %ss + popw %ds + popw %es + popw %fs + popw %gs + +#ifndef NDEBUG + /* Call memcpy()-like function in flat real mode (to allow for + * debug output via INT 10). + */ + pushw %ds + pushw %es + xorw %ax, %ax + movw %ax, %ds + movw %ax, %es + call *%bx + popw %es + popw %ds +#endif + + /* Restore GDT */ + data32 lgdt -8(%bp) + leaw GDT_LEN(%bp), %sp + + /* Restore registers and return */ + popl %ebp + popl %eax + ret + +#else /* KEEP_IT_REAL */ + + /* Preserve registers */ + pushl %eax + pushw %ds + pushw %es + + /* Convert %esi and %edi to %ds:esi and %es:edi */ + shrl $4, %esi + movw %si, %ds + xorw %si, %si + shll $4, %esi + shrl $4, %edi + movw %di, %es + xorw %di, %di + shll $4, %edi + + /* Call memcpy()-like function */ + call *%bx + + /* Convert %ds:esi and %es:edi back to physical addresses */ + pushfw + xorl %eax, %eax + movw %ds, %ax + shll $4, %eax + addl %eax, %esi + xorl %eax, %eax + movw %es, %ax + shll $4, %eax + addl %eax, %edi + popfw + + /* Restore registers and return */ + popw %es + popw %ds + popl %eax + ret + +#endif /* KEEP_IT_REAL */ + + .size process_bytes, . 
- process_bytes + +/**************************************************************************** + * install_block + * + * Install block to specified address + * + * Parameters: + * %esi : source physical address (must be a multiple of 16) + * %edi : destination physical address (must be a multiple of 16) + * %ecx : length of (decompressed) data + * %edx : total length of block (including any uninitialised data portion) + * Returns: + * %esi : next source physical address (will be a multiple of 16) + * %edi : next destination physical address (will be a multiple of 16) + * CF set on failure + * Corrupts: + * none + **************************************************************************** + */ + .section ".prefix.install_block", "awx", @progbits + .code16 +install_block: + /* Preserve registers */ + pushl %ecx + pushw %bx + + /* Decompress (or copy) source to destination */ +#if COMPRESS + movw $decompress16, %bx +#else + movw $copy_bytes, %bx +#endif + call process_bytes + jc 99f + + /* Zero .bss portion */ + negl %ecx + addl %edx, %ecx + movw $zero_bytes, %bx + call process_bytes + + /* Round up %esi and %edi to start of next blocks */ + addl $0xf, %esi + andl $~0xf, %esi + addl $0xf, %edi + andl $~0xf, %edi /* Will also clear CF */ + +99: /* Restore registers and return */ + popw %bx + popl %ecx + ret + .size install_block, . - install_block + +/**************************************************************************** + * alloc_basemem + * + * Allocate space for .text16 and .data16 from top of base memory. + * Memory is allocated using the BIOS free base memory counter at + * 0x40:13. 
 *
 * Parameters:
 *   none
 * Returns:
 *   %ax : .text16 segment address
 *   %bx : .data16 segment address
 * Corrupts:
 *   none
 ****************************************************************************
 */
	.section ".prefix.alloc_basemem", "awx", @progbits
	.code16
	.globl alloc_basemem
alloc_basemem:
	/* Preserve registers */
	pushw	%fs

	/* FBMS => %ax as segment address.  The BIOS data area word at
	 * 0040:0013 holds free base memory in kB; shifting left by 6
	 * (x64) converts kB to a real-mode segment address.
	 */
	pushw	$0x40
	popw	%fs
	movw	%fs:0x13, %ax
	shlw	$6, %ax

	/* Calculate .data16 segment address */
	subw	$_data16_memsz_ppgh, %ax	/* presumably size in paragraphs (linker symbol) */
	pushw	%ax

	/* Calculate .text16 segment address */
	subw	$_text16_memsz_ppgh, %ax
	pushw	%ax

	/* Update FBMS (shrink free base memory below our allocation) */
	shrw	$6, %ax
	movw	%ax, %fs:0x13

	/* Retrieve .text16 and .data16 segment addresses
	 * (.text16 was pushed last, so it pops first into %ax)
	 */
	popw	%ax
	popw	%bx

	/* Restore registers and return */
	popw	%fs
	ret
	.size alloc_basemem, . - alloc_basemem

/****************************************************************************
 * free_basemem
 *
 * Free space allocated with alloc_basemem.
 *
 * Parameters:
 *   none (.text16 segment address is implicit in %cs)
 * Returns:
 *   %ax : 0 if successfully freed
 * Corrupts:
 *   none
 ****************************************************************************
 */
	.section ".text16.free_basemem", "ax", @progbits
	.code16
	.globl free_basemem
free_basemem:
	/* Preserve registers */
	pushw	%fs
	pushw	%ax

	/* Check FBMS counter: only free if we are still the topmost
	 * base-memory allocation (FBMS still points at our .text16).
	 */
	movw	%cs, %ax
	shrw	$6, %ax
	pushw	$0x40
	popw	%fs
	cmpw	%ax, %fs:0x13
	jne	1f

	/* Check hooked interrupt count: cannot free while BIOS
	 * interrupt vectors still point into our code.
	 */
	cmpw	$0, %cs:hooked_bios_interrupts
	jne	1f

	/* OK to free memory */
	movw	%cs, %ax
	addw	$_text16_memsz_ppgh, %ax
	addw	$_data16_memsz_ppgh, %ax
	shrw	$6, %ax
	movw	%ax, %fs:0x13
	xorw	%ax, %ax

	/* NOTE(review): the "popw %ax" below restores the caller's %ax
	 * and so discards the 0-on-success status just placed in %ax,
	 * contradicting the documented return value — verify against
	 * upstream (a different register was probably intended here).
	 */
1:	/* Restore registers and return */
	popw	%ax
	popw	%fs
	ret
	.size free_basemem, . 
- free_basemem + + .section ".text16.data.hooked_bios_interrupts", "aw", @progbits + .globl hooked_bios_interrupts +hooked_bios_interrupts: + .word 0 + .size hooked_bios_interrupts, . - hooked_bios_interrupts + +/**************************************************************************** + * install + * + * Install all text and data segments. + * + * Parameters: + * none + * Returns: + * %ax : .text16 segment address + * %bx : .data16 segment address + * Corrupts: + * none + **************************************************************************** + */ + .section ".prefix.install", "awx", @progbits + .code16 + .globl install +install: + progress "\ninstall:" + /* Preserve registers */ + pushl %esi + pushl %edi + pushl %ebp + /* Allocate space for .text16 and .data16 */ + call alloc_basemem + /* Image source = %cs:0000 */ + xorl %esi, %esi + /* Image destination = default */ + xorl %edi, %edi + /* Allow arbitrary relocation */ + orl $0xffffffff, %ebp + /* Install text and data segments */ + call install_prealloc + /* Restore registers and return */ + popl %ebp + popl %edi + popl %esi + ret + .size install, . - install + +/**************************************************************************** + * install_prealloc + * + * Install all text and data segments. 
+ * + * Parameters: + * %ax : .text16 segment address + * %bx : .data16 segment address + * %esi : Image source physical address (or zero for %cs:0000) + * %edi : Decompression temporary area physical address (or zero for default) + * %ebp : Maximum end address for relocation + * - 0xffffffff for no maximum + * - 0x00000000 to inhibit use of INT 15,e820 and INT 15,e801 + * Corrupts: + * none + **************************************************************************** + */ + .section ".prefix.install_prealloc", "awx", @progbits + .code16 + .globl install_prealloc +install_prealloc: + progress "\ninstall_prealloc:", %eax, %ebx, %esi, %edi, %ebp + /* Save registers on external stack */ + pushal + pushw %ds + pushw %es + cld /* Sanity: clear the direction flag asap */ + + /* Switch to temporary stack in .bss16 */ + pushw %ss + popw %ds + movl %esp, %ecx + movw %bx, %ss + movl $_data16_memsz, %esp + pushw %ds + pushl %ecx + + /* Set up %ds for (read-only) access to .prefix */ + pushw %cs + popw %ds + + /* Save decompression temporary area physical address */ + pushl %edi + + /* Install .text16.early and calculate %ecx as offset to next block */ + pushl %esi + xorl %esi, %esi + movw %cs, %si + shll $4, %esi + pushl %esi /* Save original %cs:0000 */ + addl $_text16_early_lma, %esi + movzwl %ax, %edi + shll $4, %edi + movl $_text16_early_filesz, %ecx + movl $_text16_early_memsz, %edx + progress " .text16.early ", %esi, %edi, %ecx, %edx + call install_block /* .text16.early */ + jc install_block_death + popl %ecx /* Calculate offset to next block */ + subl %esi, %ecx + negl %ecx + popl %esi + +#ifndef KEEP_IT_REAL + + /* Access high memory by enabling the A20 gate. (We will + * already have 4GB segment limits as a result of calling + * install_block.) 
+ */ + progress " access_highmem" + pushw %cs + pushw $1f + pushw %ax + pushw $access_highmem + lret +1: /* Die if we could not access high memory */ + jc access_highmem_death + +#endif + + /* Open payload (which may not yet be in memory) */ + progress " open_payload ", %esi, %ecx + pushw %cs + pushw $1f + pushw %ax + pushw $open_payload + lret +1: /* Die if we could not access the payload */ + jc open_payload_death + + /* Calculate physical address of payload (i.e. first source) */ + testl %esi, %esi + jnz 1f + movw %cs, %si + shll $4, %esi +1: addl %ecx, %esi + + /* Install .text16.late and .data16 */ + movl $_text16_late_filesz, %ecx + movl $_text16_late_memsz, %edx + progress " .text16.late ", %esi, %edi, %ecx, %edx + call install_block /* .text16.late */ + jc install_block_death + movzwl %bx, %edi + shll $4, %edi + movl $_data16_filesz, %ecx + movl $_data16_filesz, %edx /* do not zero our temporary stack */ + progress " .data16 ", %esi, %edi, %ecx, %edx + call install_block /* .data16 */ + jc install_block_death + + /* Set up %ds for access to .data16 */ + movw %bx, %ds + + /* Restore decompression temporary area physical address */ + popl %edi + +#ifndef KEEP_IT_REAL + + /* Find a suitable decompression temporary area, if none specified */ + pushl %eax + testl %edi, %edi + jnz 1f + /* Use INT 15,88 to find the highest available address via INT + * 15,88. This limits us to around 64MB, which should avoid + * all of the POST-time memory map failure modes. + */ + movb $0x88, %ah + int $0x15 + movw %ax, %di + addl $0x400, %edi + subl $_textdata_memsz_kb, %edi + andw $~0x03, %di + shll $10, %edi + /* Sanity check: if we have ended up below 1MB, use 1MB */ + cmpl $0x100000, %edi + jae 1f + movl $0x100000, %edi +1: popl %eax + + /* Install .text and .data to temporary area in high memory, + * prior to reading the E820 memory map and relocating + * properly. 
+ */ + pushl %edi + movl $_textdata_filesz, %ecx + movl $_textdata_memsz, %edx + progress " .textdata ", %esi, %edi, %ecx, %edx + call install_block + jc install_block_death + popl %edi + +#endif /* KEEP_IT_REAL */ + + /* Switch back to original stack and zero .bss16 */ + addr32 lss %ss:(%esp), %esp + pushl %edi + pushw %es + movw %bx, %es + movl $_data16_filesz, %edi + movl $_data16_memsz, %ecx + subl %edi, %ecx + call zero_bytes + popw %es + popl %edi + +#ifndef KEEP_IT_REAL + + /* Initialise librm at current location */ + progress " init_librm ", %eax, %ebx, %edi + movw %ax, (init_librm_vector+2) + lcall *init_librm_vector + + /* Prepare for return to .prefix segment */ + pushw %cs + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16.install_prealloc", "ax", @progbits +1: + /* Inhibit INT 15,e820 and INT 15,e801 if applicable */ + testl %ebp, %ebp + jnz 1f + incb memmap_post + decl %ebp +1: + /* Call relocate() to determine target address for relocation. + * relocate() will return with %esi, %edi and %ecx set up + * ready for the copy to the new location. + */ + virtcall relocate + + /* Jump back to .prefix segment */ + pushw $1f + lret + .section ".prefix.install_prealloc", "awx", @progbits +1: + /* Copy code to new location */ + progress " copy ", %esi, %edi, %ecx + pushl %edi + pushw %bx + movw $copy_bytes, %bx + call process_bytes + popw %bx + popl %edi + + /* Initialise librm at new location */ + progress " init_librm ", %eax, %ebx, %edi + lcall *init_librm_vector + +#else /* KEEP_IT_REAL */ + + /* Initialise libkir */ + movw %ax, (init_libkir_vector+2) + lcall *init_libkir_vector + +#endif /* KEEP_IT_REAL */ + + /* Close access to payload */ + progress " close_payload" + movw %ax, (close_payload_vector+2) + lcall *close_payload_vector + + /* Restore registers */ + popw %es + popw %ds + popal + ret + .size install_prealloc, . - install_prealloc + + /* Vectors for far calls to .text16 functions. 
Must be in
+ * .data16, since .prefix may not be writable.
+ */
+	.section ".data16.install_prealloc", "aw", @progbits
+#ifdef KEEP_IT_REAL
+init_libkir_vector:
+	.word init_libkir
+	.word 0
+	.size init_libkir_vector, . - init_libkir_vector
+#else
+init_librm_vector:
+	.word init_librm
+	.word 0
+	.size init_librm_vector, . - init_librm_vector
+#endif
+close_payload_vector:
+	.word close_payload
+	.word 0
+	.size close_payload_vector, . - close_payload_vector
+
+	/* Dummy routines to open and close payload.  These are weak
+	 * symbols: a prefix (e.g. mromprefix.S) may provide its own
+	 * implementations.  The dummies simply report success (CF
+	 * clear) and return.
+	 */
+	.section ".text16.early.data.open_payload", "aw", @progbits
+	.weak open_payload
+	.weak close_payload
+open_payload:
+close_payload:
+	clc
+	lret
+	.size open_payload, . - open_payload
+	.size close_payload, . - close_payload
+
+	/* Report installation failure
+	 *
+	 * Prints three hex dwords - the error code in %eax (set by the
+	 * *_death entry points below) followed by the contents of %esi
+	 * and %ecx as context from the failing operation - then the
+	 * failure message, and halts.  Never returns.
+	 */
+	.section ".prefix.install_death", "ax", @progbits
+install_death:
+	pushw %cs
+	popw %ds
+	xorw %di, %di	/* NOTE(review): %di looks like a display offset
+			 * consumed by the print helpers - confirm */
+	call print_hex_dword
+	call print_space
+	movl %esi, %eax
+	call print_hex_dword
+	call print_space
+	movl %ecx, %eax
+	call print_hex_dword
+	movw $install_death_message, %si
+	call print_message
+2:	/* Halt system */
+	cli
+	hlt
+	jmp 2b
+	.size install_death, . - install_death
+	.section ".prefix.data.install_death_message", "aw", @progbits
+install_death_message:
+	.asciz "\nInstallation failed - cannot continue\n"
+	.size install_death_message, . - install_death_message
+
+	/* Report failure to install a block (error code 0x1b101b10) */
+	.section ".prefix.install_block_death", "ax", @progbits
+install_block_death:
+	movl $0x1b101b10, %eax
+	jmp install_death
+	.size install_block_death, . - install_block_death
+
+	/* Report failure to access high memory (error code 0x0a200a20) */
+	.section ".prefix.access_highmem_death", "ax", @progbits
+access_highmem_death:
+	movl $0x0a200a20, %eax
+	jmp install_death
+	.size access_highmem_death, . 
- access_highmem_death + + /* Report failure to open payload */ + .section ".prefix.open_payload_death", "ax", @progbits +open_payload_death: + xorl %eax, %eax + jmp install_death + .size open_payload_death, . - open_payload_death + +/**************************************************************************** + * uninstall + * + * Uninstall all text and data segments. + * + * Parameters: + * none (.text16 segment address is implicit in %cs) + * Returns: + * none + * Corrupts: + * none + **************************************************************************** + */ + .section ".text16.uninstall", "ax", @progbits + .code16 + .globl uninstall +uninstall: + call free_basemem + ret + .size uninstall, . - uninstall + + + + /* File split information for the compressor */ +#if COMPRESS +#define PACK_OR_COPY "PACK" +#else +#define PACK_OR_COPY "COPY" +#endif + .section ".zinfo", "a", @progbits + .ascii "COPY" + .long _prefix_lma + .long _prefix_filesz + .long _max_align + .ascii PACK_OR_COPY + .long _text16_early_lma + .long _text16_early_filesz + .long _max_align + .ascii "PAYL" + .long 0 + .long 0 + .long _payload_align + .ascii "COPY" + .long _pprefix_lma + .long _pprefix_filesz + .long _max_align + .ascii PACK_OR_COPY + .long _text16_late_lma + .long _text16_late_filesz + .long _max_align + .ascii PACK_OR_COPY + .long _data16_lma + .long _data16_filesz + .long _max_align + .ascii PACK_OR_COPY + .long _textdata_lma + .long _textdata_filesz + .long _max_align + + .weak _payload_align + .equ _payload_align, 1 diff --git a/src/arch/x86/prefix/lkrnprefix.S b/src/arch/x86/prefix/lkrnprefix.S new file mode 100644 index 00000000..922181f0 --- /dev/null +++ b/src/arch/x86/prefix/lkrnprefix.S @@ -0,0 +1,235 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + +#define BZI_LOAD_HIGH_ADDR 0x100000 + + .text + .arch i386 + .code16 + .section ".prefix", "ax", @progbits + .globl _lkrn_start +_lkrn_start: + 
+/***************************************************************************** + * + * Kernel header + * + * We place our prefix (i.e. our .prefix and .text16.early sections) + * within the bzImage real-mode portion which gets loaded at + * 1000:0000, and our payload (i.e. everything else) within the + * bzImage protected-mode portion which gets loaded at 0x100000 + * upwards. + * + */ + + .org 0x1f1 +setup_sects: + .byte -1 /* Allow for initial "boot sector" */ + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADHL" + .long setup_sects + .long 512 + .long 0 + .previous +root_flags: + .word 0 +syssize: + .long 0 + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADPL" + .long syssize + .long 16 + .long 0 + .previous +ram_size: + .word 0 +vid_mode: + .word 0 +root_dev: + .word 0 +boot_flag: + .word 0xaa55 +jump: + /* Manually specify a two-byte jmp instruction here rather + * than leaving it up to the assembler. + */ + .byte 0xeb, ( setup - header ) +header: + .byte 'H', 'd', 'r', 'S' +version: + .word 0x0207 /* 2.07 */ +realmode_swtch: + .long 0 +start_sys: + .word 0 +kernel_version: + .word version_string - 0x200 +type_of_loader: + .byte 0 +loadflags: + .byte 0x01 /* LOADED_HIGH */ +setup_move_size: + .word 0 +code32_start: + .long 0 +ramdisk_image: + .long 0 +ramdisk_size: + .long 0 +bootsect_kludge: + .long 0 +heap_end_ptr: + .word 0 +ext_loader_ver: + .byte 0 +ext_loader_type: + .byte 0 +cmd_line_ptr: + .long 0 +initrd_addr_max: + .long 0xffffffff +kernel_alignment: + .long 0 +relocatable_kernel: + .byte 0 +min_alignment: + .byte 0 +xloadflags: + .word 0 +cmdline_size: + .long 0x7ff +hardware_subarch: + .long 0 +hardware_subarch_data: + .byte 0, 0, 0, 0, 0, 0, 0, 0 + +version_string: + .asciz VERSION + +/***************************************************************************** + * + * Setup code + * + */ + +setup: + /* Fix up code segment */ + pushw %ds + pushw $1f + lret +1: + /* Set up stack just below 
0x7c00 and clear direction flag */ + xorw %ax, %ax + movw %ax, %ss + movw $0x7c00, %sp + cld + + /* Retrieve command-line pointer */ + movl cmd_line_ptr, %edx + testl %edx, %edx + jz no_cmd_line + + /* Set up %es:%di to point to command line */ + movl %edx, %edi + andl $0xf, %edi + rorl $4, %edx + movw %dx, %es + + /* Find length of command line */ + pushw %di + movw $0xffff, %cx + repnz scasb + notw %cx + popw %si + + /* Make space for command line on stack */ + movw %sp, %di + subw %cx, %di + andw $~0xf, %di + movw %di, %sp + + /* Copy command line to stack */ + pushw %ds + pushw %es + popw %ds + pushw %ss + popw %es + rep movsb + popw %ds + + /* Store new command-line pointer */ + movzwl %sp, %edx +no_cmd_line: + + /* Calculate maximum relocation address */ + movl ramdisk_image, %ebp + testl %ebp, %ebp + jnz 1f + orl $0xffffffff, %ebp /* Allow arbitrary relocation if no initrd */ +1: + /* Install iPXE */ + call alloc_basemem + xorl %esi, %esi + xorl %edi, %edi + call install_prealloc + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Retrieve initrd pointer and size */ + movl ramdisk_image, %ebp + movl ramdisk_size, %ecx + + /* Set up %ds for access to .data16 */ + movw %bx, %ds + + /* Store command-line pointer */ + movl %edx, cmdline_phys + + /* Store initrd pointer and size */ + movl %ebp, initrd_phys + movl %ecx, initrd_len + + /* Run iPXE */ + virtcall main + + /* Uninstall iPXE */ + call uninstall + + /* Boot next device */ + int $0x18 + +/***************************************************************************** + * + * Open payload (called by libprefix) + * + * Parameters: + * %ds:0000 : Prefix + * %esi : Buffer for copy of image source (or zero if no buffer available) + * %ecx : Expected offset within buffer of first payload block + * Returns: + * %esi : Valid image source address (buffered or unbuffered) + * %ecx : 
Actual offset within buffer of first payload block + * CF set on error + */ + + .section ".text16.early", "awx", @progbits + .globl open_payload +open_payload: + + /* Our payload will always end up at BZI_LOAD_HIGH_ADDR */ + movl $BZI_LOAD_HIGH_ADDR, %esi + xorl %ecx, %ecx + lret + + /* Payload must be aligned to a whole number of setup sectors */ + .globl _payload_align + .equ _payload_align, 512 diff --git a/src/arch/x86/prefix/mbr.S b/src/arch/x86/prefix/mbr.S new file mode 100644 index 00000000..032c0e77 --- /dev/null +++ b/src/arch/x86/prefix/mbr.S @@ -0,0 +1,16 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .arch i386 + .section ".prefix", "awx", @progbits + .code16 + .org 0 + + .globl mbr +mbr: + movw $exec_sector, %bp + jmp find_active_partition +exec_sector: + ljmp $0x0000, $0x7c00 + +#include "bootpart.S" diff --git a/src/arch/x86/prefix/mromprefix.S b/src/arch/x86/prefix/mromprefix.S new file mode 100644 index 00000000..2b5c6bf6 --- /dev/null +++ b/src/arch/x86/prefix/mromprefix.S @@ -0,0 +1,535 @@ +/* + * Copyright (C) 2010 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define PCIBIOS_READ_CONFIG_WORD 0xb109 +#define PCIBIOS_READ_CONFIG_DWORD 0xb10a +#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c +#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d +#define PCI_COMMAND 0x04 +#define PCI_COMMAND_MEM 0x02 +#define PCI_BAR_0 0x10 +#define PCI_BAR_5 0x24 +#define PCI_BAR_EXPROM 0x30 + +#define PCIR_SIGNATURE ( 'P' + ( 'C' << 8 ) + ( 'I' << 16 ) + ( 'R' << 24 ) ) + +#define ROMPREFIX_EXCLUDE_PAYLOAD 1 +#define ROMPREFIX_MORE_IMAGES 1 +#define _pcirom_start _mrom_start +#include "pciromprefix.S" + + .text + .arch i386 + .code16 + +/* Obtain access to payload by exposing the expansion ROM BAR at the + * address currently used by a suitably large memory BAR on the same + * device. The memory BAR is temporarily disabled. Using a memory + * BAR on the same device means that we don't have to worry about the + * configuration of any intermediate PCI bridges. 
+ * + * Parameters: + * %ds:0000 : Prefix + * %esi : Buffer for copy of image source (or zero if no buffer available) + * %ecx : Expected offset within buffer of first payload block + * Returns: + * %esi : Valid image source address (buffered or unbuffered) + * %ecx : Actual offset within buffer of first payload block + * CF set on error + */ + .section ".text16.early", "awx", @progbits + .globl open_payload +open_payload: + /* Preserve registers */ + pushl %eax + pushw %bx + pushl %edx + pushl %edi + pushw %bp + pushw %es + pushw %ds + + /* Retrieve bus:dev.fn from .prefix */ + movw init_pci_busdevfn, %bx + + /* Set up %ds for access to .text16.early */ + pushw %cs + popw %ds + + /* Set up %es for access to flat address space */ + xorw %ax, %ax + movw %ax, %es + + /* Store bus:dev.fn to .text16.early */ + movw %bx, payload_pci_busdevfn + + /* Get expansion ROM BAR current value */ + movw $PCI_BAR_EXPROM, %di + call pci_read_bar + movl %eax, rom_bar_orig_value + + /* Get expansion ROM BAR size */ + call pci_size_mem_bar_low + movl %ecx, rom_bar_size + + /* Find a suitable memory BAR to use */ + movw $PCI_BAR_0, %di /* %di is PCI BAR register */ + xorw %bp, %bp /* %bp is increment */ +find_mem_bar: + /* Move to next BAR */ + addw %bp, %di + cmpw $PCI_BAR_5, %di + jle 1f + stc + movl $0xbabababa, %esi /* Report "No suitable BAR" */ + movl rom_bar_size, %ecx + jmp 99f +1: movw $4, %bp + + /* Get BAR current value */ + call pci_read_bar + + /* Skip non-existent BARs */ + notl %eax + testl %eax, %eax + notl %eax + jz find_mem_bar + + /* Skip I/O BARs */ + testb $0x01, %al + jnz find_mem_bar + + /* Set increment to 8 for 64-bit BARs */ + testb $0x04, %al + jz 1f + movw $8, %bp +1: + /* Skip 64-bit BARs with high dword set; we couldn't use this + * address for the (32-bit) expansion ROM BAR anyway + */ + testl %edx, %edx + jnz find_mem_bar + + /* Get low dword of BAR size */ + call pci_size_mem_bar_low + + /* Skip BARs smaller than the expansion ROM BAR */ + cmpl %ecx, 
rom_bar_size + ja find_mem_bar + + /* We have a memory BAR with a 32-bit address that is large + * enough to use. Store BAR number and original value. + */ + movw %di, stolen_bar_register + movl %eax, stolen_bar_orig_value + + /* Remove flags from BAR address */ + xorb %al, %al + + /* Write zero to our stolen BAR. This doesn't technically + * disable it, but it's a pretty safe bet that the PCI bridge + * won't pass through accesses to this region anyway. Note + * that the high dword (if any) must already be zero. + */ + xorl %ecx, %ecx + call pci_write_config_dword + + /* Enable expansion ROM BAR at stolen BAR's address */ + movl %eax, %ecx + orb $0x1, %cl + movw $PCI_BAR_EXPROM, %di + call pci_write_config_dword + + /* Locate our ROM image */ +1: movl $0xaa55, %ecx /* 55aa signature */ + addr32 es cmpw %cx, (%eax) + jne 2f + movl $PCIR_SIGNATURE, %ecx /* PCIR signature */ + addr32 es movzwl 0x18(%eax), %edx + addr32 es cmpl %ecx, (%eax,%edx) + jne 2f + addr32 es cmpl $_build_id, build_id(%eax) /* iPXE build ID */ + je 3f + movl $0x80, %ecx /* Last image */ + addr32 es testb %cl, 0x15(%eax,%edx) + jnz 2f + addr32 es movzwl 0x10(%eax,%edx), %ecx /* PCIR image length */ + shll $9, %ecx + addl %ecx, %eax + jmp 1b +2: /* Failure */ + stc + movl %eax, %esi /* Report failure address */ + jmp 99f +3: + + /* Copy payload to buffer, or set buffer address to BAR address */ + testl %esi, %esi + jz 1f + /* We have a buffer; copy payload to it. Since .mrom is + * designed specifically for real hardware, we assume that + * flat real mode is working properly. (In the unlikely event + * that this code is run inside a hypervisor that doesn't + * properly support flat real mode, it will die horribly.) 
+ */ + pushl %esi + movl %esi, %edi + movl %eax, %esi + addr32 es movzbl 2(%esi), %ecx + shll $7, %ecx + addr32 es movzwl mpciheader_image_length(%esi,%ecx,4), %edx + shll $7, %edx + addl %edx, %ecx + addr32 es rep movsl + popl %esi + jmp 2f +1: /* We have no buffer; set %esi to the BAR address */ + movl %eax, %esi +2: + + /* Locate first payload block (after the dummy ROM header) */ + addr32 es movzbl 2(%esi), %ecx + shll $9, %ecx + addl $_pprefix_skip, %ecx + + clc + /* Restore registers and return */ +99: popw %ds + popw %es + popw %bp + popl %edi + popl %edx + popw %bx + popl %eax + lret + .size open_payload, . - open_payload + + .section ".text16.early.data", "aw", @progbits +payload_pci_busdevfn: + .word 0 + .size payload_pci_busdevfn, . - payload_pci_busdevfn + + .section ".text16.early.data", "aw", @progbits +rom_bar_orig_value: + .long 0 + .size rom_bar_orig_value, . - rom_bar_orig_value + + .section ".text16.early.data", "aw", @progbits +rom_bar_size: + .long 0 + .size rom_bar_size, . - rom_bar_size + + .section ".text16.early.data", "aw", @progbits +stolen_bar_register: + .word 0 + .size stolen_bar_register, . - stolen_bar_register + + .section ".text16.early.data", "aw", @progbits +stolen_bar_orig_value: + .long 0 + .size stolen_bar_orig_value, . 
- stolen_bar_orig_value
+
+/* Restore original BAR values
+ *
+ * Undoes the BAR theft performed by open_payload: restores the
+ * expansion ROM BAR and the stolen memory BAR to the values recorded
+ * in rom_bar_orig_value and stolen_bar_orig_value.
+ *
+ * Parameters:
+ *   none
+ * Returns:
+ *   none
+ */
+	.section ".text16.early", "awx", @progbits
+	.globl close_payload
+close_payload:
+	/* Preserve registers */
+	pushw %bx
+	pushw %di
+	pushl %ecx
+	pushw %ds
+
+	/* Set up %ds for access to .text16.early */
+	pushw %cs
+	popw %ds
+
+	/* Retrieve stored bus:dev.fn */
+	movw payload_pci_busdevfn, %bx
+
+	/* Restore expansion ROM BAR original value */
+	movw $PCI_BAR_EXPROM, %di
+	movl rom_bar_orig_value, %ecx
+	call pci_write_config_dword
+
+	/* Restore stolen BAR original value */
+	movw stolen_bar_register, %di
+	movl stolen_bar_orig_value, %ecx
+	call pci_write_config_dword
+
+	/* Restore registers and return */
+	popw %ds
+	popl %ecx
+	popw %di
+	popw %bx
+	lret
+	.size close_payload, . - close_payload
+
+/* Get PCI BAR value
+ *
+ * Reads the low dword of the BAR and, if the BAR type bits indicate a
+ * 64-bit memory BAR (type field 0x4), also reads the high dword from
+ * the following config register.
+ *
+ * Parameters:
+ *   %bx : PCI bus:dev.fn
+ *   %di : PCI BAR register number
+ * Returns:
+ *   %edx:%eax : PCI BAR value
+ */
+	.section ".text16.early", "awx", @progbits
+pci_read_bar:
+	/* Preserve registers */
+	pushl %ecx
+	pushw %di
+
+	/* Read low dword value */
+	call pci_read_config_dword
+	movl %ecx, %eax
+
+	/* Read high dword value, if applicable (64-bit memory BAR) */
+	xorl %edx, %edx
+	andb $0x07, %cl
+	cmpb $0x04, %cl
+	jne 1f
+	addw $4, %di
+	call pci_read_config_dword
+	movl %ecx, %edx
+1:
+	/* Restore registers and return */
+	popw %di
+	popl %ecx
+	ret
+	.size pci_read_bar, . 
- pci_read_bar
+
+/* Get low dword of PCI memory BAR size
+ *
+ * Performs the standard PCI BAR sizing sequence: write all-ones to
+ * the BAR, read it back, and take the two's complement of the result
+ * (with the memory BAR flag bits masked off).  Memory decoding is
+ * disabled around the probe, and the BAR's original value is
+ * restored afterwards.
+ *
+ * Parameters:
+ *   %bx : PCI bus:dev.fn
+ *   %di : PCI BAR register number
+ *   %eax : Low dword of current PCI BAR value
+ * Returns:
+ *   %ecx : PCI BAR size
+ */
+	.section ".text16.early", "awx", @progbits
+pci_size_mem_bar_low:
+	/* Preserve registers */
+	pushw %dx
+
+	/* Disable memory accesses */
+	xorw %dx, %dx
+	call pci_set_mem_access
+
+	/* Write all ones to BAR */
+	xorl %ecx, %ecx
+	decl %ecx
+	call pci_write_config_dword
+
+	/* Read back BAR */
+	call pci_read_config_dword
+
+	/* Calculate size (mask out the low flag bits so they do not
+	 * perturb the two's-complement calculation)
+	 */
+	notl %ecx
+	orb $0x0f, %cl
+	incl %ecx
+
+	/* Restore original value */
+	pushl %ecx
+	movl %eax, %ecx
+	call pci_write_config_dword
+	popl %ecx
+
+	/* Enable memory accesses */
+	movw $PCI_COMMAND_MEM, %dx
+	call pci_set_mem_access
+
+	/* Restore registers and return */
+	popw %dx
+	ret
+	.size pci_size_mem_bar_low, . - pci_size_mem_bar_low
+
+/* Read PCI config dword (PCI BIOS INT 1Ah,B10Ah)
+ *
+ * Parameters:
+ *   %bx : PCI bus:dev.fn
+ *   %di : PCI register number
+ * Returns:
+ *   %ecx : Dword value
+ */
+	.section ".text16.early", "awx", @progbits
+pci_read_config_dword:
+	/* Preserve registers */
+	pushl %eax
+	pushl %ebx
+	pushl %edx
+
+	/* Issue INT 0x1a,b10a */
+	movw $PCIBIOS_READ_CONFIG_DWORD, %ax
+	int $0x1a
+
+	/* Restore registers and return */
+	popl %edx
+	popl %ebx
+	popl %eax
+	ret
+	.size pci_read_config_dword, . - pci_read_config_dword
+
+/* Write PCI config dword (PCI BIOS INT 1Ah,B10Dh)
+ *
+ * Parameters:
+ *   %bx : PCI bus:dev.fn
+ *   %di : PCI register number
+ *   %ecx : Dword value to write
+ * Returns:
+ *   none
+ */
+	.section ".text16.early", "awx", @progbits
+pci_write_config_dword:
+	/* Preserve registers */
+	pushal
+
+	/* Issue INT 0x1a,b10d */
+	movw $PCIBIOS_WRITE_CONFIG_DWORD, %ax
+	int $0x1a
+
+	/* Restore registers and return */
+	popal
+	ret
+	.size pci_write_config_dword, . 
- pci_write_config_dword
+
+/* Enable/disable memory access response in PCI command word
+ *
+ * Read-modify-writes the PCI command register via the PCI BIOS
+ * (INT 1Ah,B109h/B10Ch), clearing PCI_COMMAND_MEM and then OR-ing in
+ * the caller-supplied value.
+ *
+ * Parameters:
+ *   %bx : PCI bus:dev.fn
+ *   %dx : PCI_COMMAND_MEM, or zero
+ * Returns:
+ *   none
+ */
+	.section ".text16.early", "awx", @progbits
+pci_set_mem_access:
+	/* Preserve registers */
+	pushal
+
+	/* Read current value of command register (INT 1Ah,B109h may
+	 * clobber registers, so save %bx/%dx around the call)
+	 */
+	pushw %bx
+	pushw %dx
+	movw $PCI_COMMAND, %di
+	movw $PCIBIOS_READ_CONFIG_WORD, %ax
+	int $0x1a
+	popw %dx
+	popw %bx
+
+	/* Set memory access enable as appropriate */
+	andw $~PCI_COMMAND_MEM, %cx
+	orw %dx, %cx
+
+	/* Write new value of command register */
+	movw $PCIBIOS_WRITE_CONFIG_WORD, %ax
+	int $0x1a
+
+	/* Restore registers and return */
+	popal
+	ret
+	.size pci_set_mem_access, . - pci_set_mem_access
+
+/* Update image source address for UNDI loader
+ *
+ * Parameters:
+ *   %esi : Image source address
+ * Returns:
+ *   %esi : Image source address (zeroed; see comment below)
+ */
+	.section ".prefix", "ax", @progbits
+	.globl undiloader_source
+undiloader_source:
+	/* Always use expansion ROM BAR directly when installing via
+	 * the UNDI loader entry point, since the PMM-allocated block
+	 * may collide with whatever is calling the UNDI loader entry
+	 * point.
+	 */
+	xorl %esi, %esi
+	ret
+
+/* Payload prefix
+ *
+ * We include a dummy ROM header to cover the "hidden" portion of the
+ * overall ROM image.
+ */
+	.globl _payload_align
+	.equ _payload_align, 512
+	.section ".pprefix", "ax", @progbits
+	.org 0x00
+mromheader:
+	.word 0xaa55	/* BIOS extension signature */
+	.byte 0x01	/* Dummy size (BIOS bug workaround) */
+	.org 0x18
+	.word mpciheader	/* Pointer to PCI data structure */
+	.org 0x1a
+	.word 0
+	.size mromheader, . 
- mromheader + + .align 4 +mpciheader: + .ascii "PCIR" /* Signature */ + .word pci_vendor_id /* Vendor identification */ + .word pci_device_id /* Device identification */ + .word 0x0000 /* Device list pointer */ + .word mpciheader_len /* PCI data structure length */ + .byte 0x03 /* PCI data structure revision */ + .byte 0x00, 0x00, 0x02 /* Class code */ +mpciheader_image_length: + .word 0 /* Image length */ + .word 0x0001 /* Revision level */ + .byte 0xff /* Code type */ + .byte 0x80 /* Last image indicator */ +mpciheader_runtime_length: + .word 0 /* Maximum run-time image length */ + .word 0x0000 /* Configuration utility code header */ + .word 0x0000 /* DMTF CLP entry point */ + .equ mpciheader_len, . - mpciheader + .size mpciheader, . - mpciheader + + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "APPW" + .long mpciheader_image_length + .long 512 + .long 0 + .ascii "APPW" + .long mpciheader_runtime_length + .long 512 + .long 0 + .previous + +/* Fix up additional image source size + * + */ + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADPW" + .long extra_size + .long 512 + .long 0 + .previous diff --git a/src/arch/x86/prefix/nbiprefix.S b/src/arch/x86/prefix/nbiprefix.S new file mode 100644 index 00000000..de38e4af --- /dev/null +++ b/src/arch/x86/prefix/nbiprefix.S @@ -0,0 +1,84 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + + .text + .arch i386 + .code16 + .section ".prefix", "ax", @progbits + .org 0 + +nbi_header: + +/***************************************************************************** + * NBI file header + ***************************************************************************** + */ +file_header: + .long 0x1b031336 /* Signature */ + .byte 0x04 /* 16 bytes header, no vendor info */ + .byte 0 + .byte 0 + .byte 0 /* No flags */ + .word 0x0000, 0x07c0 /* Load header to 0x07c0:0x0000 */ + .word _nbi_start, 0x07c0 /* Start execution at 0x07c0:entry */ + .size file_header, . 
- file_header + +/***************************************************************************** + * NBI segment header + ***************************************************************************** + */ +segment_header: + .byte 0x04 /* 16 bytes header, no vendor info */ + .byte 0 + .byte 0 + .byte 0x04 /* Last segment */ + .long 0x00007e00 +imglen: .long -512 +memlen: .long -512 + .size segment_header, . - segment_header + + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADDL" + .long imglen + .long 1 + .long 0 + .ascii "ADDL" + .long memlen + .long 1 + .long 0 + .previous + +/***************************************************************************** + * NBI entry point + ***************************************************************************** + */ + .globl _nbi_start +_nbi_start: + /* Install iPXE */ + call install + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Run iPXE */ + virtcall main + + /* Uninstall iPXE */ + call uninstall + + /* Reboot system */ + int $0x19 + + .previous + .size _nbi_start, . - _nbi_start + +nbi_header_end: + .org 512 diff --git a/src/arch/x86/prefix/nullprefix.S b/src/arch/x86/prefix/nullprefix.S new file mode 100644 index 00000000..bd0ff339 --- /dev/null +++ b/src/arch/x86/prefix/nullprefix.S @@ -0,0 +1,15 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .org 0 + .text + .arch i386 + + .section ".prefix", "ax", @progbits + .code16 +_prefix: + + .section ".text16", "ax", @progbits +prefix_exit: + +prefix_exit_end: + .previous diff --git a/src/arch/x86/prefix/pciromprefix.S b/src/arch/x86/prefix/pciromprefix.S new file mode 100644 index 00000000..5a5a4964 --- /dev/null +++ b/src/arch/x86/prefix/pciromprefix.S @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2014 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define BUSTYPE "PCIR" +#define _rom_start _pcirom_start +#include "romprefix.S" diff --git a/src/arch/x86/prefix/pxeprefix.S b/src/arch/x86/prefix/pxeprefix.S new file mode 100644 index 00000000..52ea1803 --- /dev/null +++ b/src/arch/x86/prefix/pxeprefix.S @@ -0,0 +1,860 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define PXENV_UNDI_SHUTDOWN 0x0005 +#define PXENV_UNDI_GET_NIC_TYPE 0x0012 +#define PXENV_UNDI_GET_IFACE_INFO 0x0013 +#define PXENV_STOP_UNDI 0x0015 +#define PXENV_UNLOAD_STACK 0x0070 +#define PXENV_GET_CACHED_INFO 0x0071 +#define PXENV_PACKET_TYPE_DHCP_ACK 0x0002 +#define PXENV_FILE_CMDLINE 0x00e8 + +#define PXE_HACK_EB54 0x0001 + + .text + .arch i386 + .org 0 + .code16 + +#include +#include + +#define STACK_MAGIC ( 'L' + ( 'R' << 8 ) + ( 'E' << 16 ) + ( 'T' << 24 ) ) +#define EB_MAGIC_1 ( 'E' + ( 't' << 8 ) + ( 'h' << 16 ) + ( 'e' << 24 ) ) +#define EB_MAGIC_2 ( 'r' + ( 'b' << 8 ) + ( 'o' << 16 ) + ( 'o' << 24 ) ) + +/* Prefix memory layout: + * + * iPXE binary image + * Temporary 
stack + * Temporary copy of DHCPACK packet + * Temporary copy of command line + */ +#define PREFIX_STACK_SIZE 2048 +#define PREFIX_TEMP_DHCPACK PREFIX_STACK_SIZE +#define PREFIX_TEMP_DHCPACK_SIZE ( 1260 /* sizeof ( BOOTPLAYER_t ) */ ) +#define PREFIX_TEMP_CMDLINE ( PREFIX_TEMP_DHCPACK + PREFIX_TEMP_DHCPACK_SIZE ) +#define PREFIX_TEMP_CMDLINE_SIZE 4096 + +/***************************************************************************** + * Entry point: set operating context, print welcome message + ***************************************************************************** + */ + .section ".prefix", "ax", @progbits + .globl _pxe_start +_pxe_start: + jmp $0x7c0, $1f +1: + /* Preserve registers for possible return to PXE */ + pushfl + pushal + pushw %gs + pushw %fs + pushw %es + pushw %ds + + /* Store magic word on PXE stack and remember PXE %ss:esp */ + pushl $STACK_MAGIC + movw %ss, %cs:pxe_ss + movl %esp, %cs:pxe_esp + + /* Set up segments */ + movw %cs, %ax + movw %ax, %ds + movw $0x40, %ax /* BIOS data segment access */ + movw %ax, %fs + /* Set up temporary stack immediately after the iPXE image */ + movw %cs, %ax + addw image_size_pgh, %ax + movw %ax, %ss + movl $PREFIX_STACK_SIZE, %esp + /* Clear direction flag, for the sake of sanity */ + cld + /* Print welcome message */ + movw $10f, %si + xorw %di, %di + call print_message + .section ".prefix.data", "aw", @progbits +10: .asciz "PXE->EB:" + .previous + + /* Image size (for stack placement calculation) */ + .section ".prefix.data", "aw", @progbits +image_size_pgh: + .word 0 + .previous + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADDW" + .long image_size_pgh + .long 16 + .long 0 + .previous + +/***************************************************************************** + * Find us a usable !PXE or PXENV+ entry point + ***************************************************************************** + */ +detect_pxe: + /* Plan A: !PXE pointer from the stack */ + lgsl pxe_esp, %ebp /* 
%gs:%bp -> original stack */ + lesw %gs:52(%bp), %bx + call is_valid_ppxe + je have_ppxe + + /* Plan B: PXENV+ pointer from initial ES:BX */ + movw %gs:32(%bp),%bx + movw %gs:8(%bp),%es + call is_valid_pxenv + je have_pxenv + + /* Plan C: PXENV+ structure via INT 1Ah */ + movw $0x5650, %ax + int $0x1a + jc 1f + cmpw $0x564e, %ax + jne 1f + call is_valid_pxenv + je have_pxenv +1: + /* Plan D: scan base memory for !PXE */ + call memory_scan_ppxe + je have_ppxe + + /* Plan E: scan base memory for PXENV+ */ + call memory_scan_pxenv + jne stack_not_found + +have_pxenv: + movw %bx, pxenv_offset + movw %es, pxenv_segment + + cmpw $0x201, %es:6(%bx) /* API version >= 2.01 */ + jb 1f + cmpb $0x2c, %es:8(%bx) /* ... and structure long enough */ + jb 2f + + lesw %es:0x28(%bx), %bx /* Find !PXE from PXENV+ */ + call is_valid_ppxe + je have_ppxe +2: + call memory_scan_ppxe /* We are *supposed* to have !PXE... */ + je have_ppxe +1: + lesw pxenv_segoff, %bx /* Nope, we're stuck with PXENV+ */ + + /* Record entry point and UNDI segments */ + pushl %es:0x0a(%bx) /* Entry point */ + pushw %es:0x24(%bx) /* UNDI code segment */ + pushw %es:0x26(%bx) /* UNDI code size */ + pushw %es:0x20(%bx) /* UNDI data segment */ + pushw %es:0x22(%bx) /* UNDI data size */ + + /* Print "PXENV+ at
" */ + movw $10f, %si + jmp check_have_stack + .section ".prefix.data", "aw", @progbits +10: .asciz " PXENV+ at " + .previous + +have_ppxe: + movw %bx, ppxe_offset + movw %es, ppxe_segment + + pushl %es:0x10(%bx) /* Entry point */ + pushw %es:0x30(%bx) /* UNDI code segment */ + pushw %es:0x36(%bx) /* UNDI code size */ + pushw %es:0x28(%bx) /* UNDI data segment */ + pushw %es:0x2e(%bx) /* UNDI data size */ + + /* Print "!PXE at
" */ + movw $10f, %si + jmp check_have_stack + .section ".prefix.data", "aw", @progbits +10: .asciz " !PXE at " + .previous + +is_valid_ppxe: + cmpl $0x45585021, %es:(%bx) + jne 1f + movzbw %es:4(%bx), %cx + cmpw $0x58, %cx + jae is_valid_checksum +1: + ret + +is_valid_pxenv: + cmpl $0x4e455850, %es:(%bx) + jne 1b + cmpw $0x2b56, %es:4(%bx) + jne 1b + movzbw %es:8(%bx), %cx + cmpw $0x28, %cx + jb 1b + +is_valid_checksum: + pushw %ax + movw %bx, %si + xorw %ax, %ax +2: + es lodsb + addb %al, %ah + loopw 2b + popw %ax + ret + +memory_scan_ppxe: + movw $is_valid_ppxe, %dx + jmp memory_scan_common + +memory_scan_pxenv: + movw $is_valid_pxenv, %dx + +memory_scan_common: + movw %fs:(0x13), %ax + shlw $6, %ax + decw %ax +1: incw %ax + cmpw $( 0xa000 - 1 ), %ax + ja 2f + movw %ax, %es + xorw %bx, %bx + call *%dx + jne 1b +2: ret + +/***************************************************************************** + * Sanity check: we must have an entry point + ***************************************************************************** + */ +check_have_stack: + /* Save common values pushed onto the stack */ + popl undi_data_segoff + popl undi_code_segoff + popl entry_segoff + + /* Print have !PXE/PXENV+ message; structure pointer in %es:%bx */ + call print_message + call print_segoff + movb $( ',' ), %al + call print_character + + /* Check for entry point */ + movl entry_segoff, %eax + testl %eax, %eax + jnz 99f + /* No entry point: print message and skip everything else */ +stack_not_found: + movw $10f, %si + call print_message + jmp finished + .section ".prefix.data", "aw", @progbits +10: .asciz " No PXE stack found!\n" + .previous +99: + +/***************************************************************************** + * Calculate base memory usage by UNDI + ***************************************************************************** + */ +find_undi_basemem_usage: + movw undi_code_segment, %ax + movw undi_code_size, %bx + movw undi_data_segment, %cx + movw undi_data_size, 
%dx + cmpw %ax, %cx + ja 1f + xchgw %ax, %cx + xchgw %bx, %dx +1: /* %ax:%bx now describes the lower region, %cx:%dx the higher */ + shrw $6, %ax /* Round down to nearest kB */ + movw %ax, undi_fbms_start + addw $0x0f, %dx /* Round up to next segment */ + shrw $4, %dx + addw %dx, %cx + addw $((1024 / 16) - 1), %cx /* Round up to next kB */ + shrw $6, %cx + movw %cx, undi_fbms_end + +/***************************************************************************** + * Print information about detected PXE stack + ***************************************************************************** + */ +print_structure_information: + /* Print entry point */ + movw $10f, %si + call print_message + les entry_segoff, %bx + call print_segoff + .section ".prefix.data", "aw", @progbits +10: .asciz " entry point at " + .previous + /* Print UNDI code segment */ + movw $10f, %si + call print_message + les undi_code_segoff, %bx + call print_segoff + .section ".prefix.data", "aw", @progbits +10: .asciz "\n UNDI code segment " + .previous + /* Print UNDI data segment */ + movw $10f, %si + call print_message + les undi_data_segoff, %bx + call print_segoff + .section ".prefix.data", "aw", @progbits +10: .asciz ", data segment " + .previous + /* Print UNDI memory usage */ + movw $10f, %si + call print_message + movw undi_fbms_start, %ax + call print_word + movb $( '-' ), %al + call print_character + movw undi_fbms_end, %ax + call print_word + movw $20f, %si + call print_message + .section ".prefix.data", "aw", @progbits +10: .asciz " (" +20: .asciz "kB)\n" + .previous + +/***************************************************************************** + * Determine physical device + ***************************************************************************** + */ +get_physical_device: + /* Issue PXENV_UNDI_GET_NIC_TYPE */ + movw $PXENV_UNDI_GET_NIC_TYPE, %bx + call pxe_call + jnc 1f + call print_pxe_error + jmp no_physical_device +1: /* Determine physical device type */ + movb ( 
pxe_parameter_structure + 0x02 ), %al + cmpb $2, %al + je pci_physical_device + jmp no_physical_device + +pci_physical_device: + /* Record PCI bus:dev.fn and vendor/device IDs */ + movl ( pxe_parameter_structure + 0x03 ), %eax + movl %eax, pci_vendor + movw ( pxe_parameter_structure + 0x0b ), %ax + movw %ax, pci_busdevfn + movw $10f, %si + call print_message + call print_pci_busdevfn + jmp 99f + .section ".prefix.data", "aw", @progbits +10: .asciz " UNDI device is PCI " + .previous + +no_physical_device: + /* No device found, or device type not understood */ + movw $10f, %si + call print_message + .section ".prefix.data", "aw", @progbits +10: .asciz " Unable to determine UNDI physical device" + .previous + +99: + +/***************************************************************************** + * Determine interface type + ***************************************************************************** + */ +get_iface_type: + /* Issue PXENV_UNDI_GET_IFACE_INFO */ + movw $PXENV_UNDI_GET_IFACE_INFO, %bx + call pxe_call + jnc 1f + call print_pxe_error + jmp 99f +1: /* Print interface type */ + movw $10f, %si + call print_message + leaw ( pxe_parameter_structure + 0x02 ), %si + call print_message + .section ".prefix.data", "aw", @progbits +10: .asciz ", type " + .previous + /* Check for "Etherboot" interface type */ + cmpl $EB_MAGIC_1, ( pxe_parameter_structure + 0x02 ) + jne 99f + cmpl $EB_MAGIC_2, ( pxe_parameter_structure + 0x06 ) + jne 99f + movw $10f, %si + call print_message + .section ".prefix.data", "aw", @progbits +10: .asciz " (workaround enabled)" + .previous + /* Flag Etherboot workarounds as required */ + orw $PXE_HACK_EB54, pxe_hacks + +99: movb $0x0a, %al + call print_character + +/***************************************************************************** + * Get cached DHCP_ACK packet + ***************************************************************************** + */ +get_dhcpack: + /* Issue PXENV_GET_CACHED_INFO */ + xorl %esi, %esi + movw %ss, %si + 
movw %si, ( pxe_parameter_structure + 0x08 ) + movw $PREFIX_TEMP_DHCPACK, ( pxe_parameter_structure + 0x06 ) + movw $PREFIX_TEMP_DHCPACK_SIZE, ( pxe_parameter_structure +0x04 ) + movw $PXENV_PACKET_TYPE_DHCP_ACK, ( pxe_parameter_structure + 0x02 ) + movw $PXENV_GET_CACHED_INFO, %bx + call pxe_call + jnc 1f + call print_pxe_error + jmp 99f +1: /* Store physical address of packet */ + shll $4, %esi + addl $PREFIX_TEMP_DHCPACK, %esi + movl %esi, pxe_cached_dhcpack +99: + .section ".prefix.data", "aw", @progbits +pxe_cached_dhcpack: + .long 0 + .previous + +/***************************************************************************** + * Check for a command line + ***************************************************************************** + */ +get_cmdline: + /* Issue PXENV_FILE_CMDLINE */ + xorl %esi, %esi + movw %ss, %si + movw %si, ( pxe_parameter_structure + 0x06 ) + movw $PREFIX_TEMP_CMDLINE, ( pxe_parameter_structure + 0x04 ) + movw $PREFIX_TEMP_CMDLINE_SIZE, ( pxe_parameter_structure + 0x02 ) + movw $PXENV_FILE_CMDLINE, %bx + call pxe_call + jc 99f /* Suppress errors; this is an iPXE extension API call */ + /* Check for non-NULL command line */ + movw ( pxe_parameter_structure + 0x02 ), %ax + testw %ax, %ax + jz 99f + /* Record command line */ + shll $4, %esi + addl $PREFIX_TEMP_CMDLINE, %esi + movl %esi, pxe_cmdline +99: + .section ".prefix.data", "aw", @progbits +pxe_cmdline: + .long 0 + .previous + +/***************************************************************************** + * Leave NIC in a safe state + ***************************************************************************** + */ +#ifndef PXELOADER_KEEP_PXE +shutdown_nic: + /* Issue PXENV_UNDI_SHUTDOWN */ + movw $PXENV_UNDI_SHUTDOWN, %bx + call pxe_call + jnc 1f + call print_pxe_error +1: +unload_base_code: + /* Etherboot treats PXENV_UNLOAD_STACK as PXENV_STOP_UNDI, so + * we must not issue this call if the underlying stack is + * Etherboot and we were not intending to issue a PXENV_STOP_UNDI. 
+ */ +#ifdef PXELOADER_KEEP_UNDI + testw $PXE_HACK_EB54, pxe_hacks + jnz 99f +#endif /* PXELOADER_KEEP_UNDI */ + /* Issue PXENV_UNLOAD_STACK */ + movw $PXENV_UNLOAD_STACK, %bx + call pxe_call + jnc 1f + call print_pxe_error + jmp 99f +1: /* Free base memory used by PXE base code */ + movw undi_fbms_start, %ax + movw %fs:(0x13), %bx + call free_basemem +99: + andw $~( UNDI_FL_INITIALIZED | UNDI_FL_KEEP_ALL ), flags +#endif /* PXELOADER_KEEP_PXE */ + +/***************************************************************************** + * Unload UNDI driver + ***************************************************************************** + */ +#ifndef PXELOADER_KEEP_UNDI +unload_undi: + /* Issue PXENV_STOP_UNDI */ + movw $PXENV_STOP_UNDI, %bx + call pxe_call + jnc 1f + call print_pxe_error + jmp 99f +1: /* Free base memory used by UNDI */ + movw undi_fbms_end, %ax + movw undi_fbms_start, %bx + call free_basemem + /* Clear UNDI_FL_STARTED */ + andw $~UNDI_FL_STARTED, flags +99: +#endif /* PXELOADER_KEEP_UNDI */ + +/***************************************************************************** + * Print remaining free base memory + ***************************************************************************** + */ +print_free_basemem: + movw $10f, %si + call print_message + movw %fs:(0x13), %ax + call print_word + movw $20f, %si + call print_message + .section ".prefix.data", "aw", @progbits +10: .asciz " " +20: .asciz "kB free base memory after PXE unload\n" + .previous + +/***************************************************************************** + * Exit point + ***************************************************************************** + */ +finished: + jmp run_ipxe + +/***************************************************************************** + * Subroutine: print segment:offset address + * + * Parameters: + * %es:%bx : segment:offset address to print + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:di : next character in output buffer 
(if applicable) + ***************************************************************************** + */ +print_segoff: + /* Preserve registers */ + pushw %ax + /* Print ":offset" */ + movw %es, %ax + call print_hex_word + movb $( ':' ), %al + call print_character + movw %bx, %ax + call print_hex_word + /* Restore registers and return */ + popw %ax + ret + +/***************************************************************************** + * Subroutine: print decimal word + * + * Parameters: + * %ax : word to print + * %ds:di : output buffer (or %di=0 to print to console) + * Returns: + * %ds:di : next character in output buffer (if applicable) + ***************************************************************************** + */ +print_word: + /* Preserve registers */ + pushw %ax + pushw %bx + pushw %cx + pushw %dx + /* Build up digit sequence on stack */ + movw $10, %bx + xorw %cx, %cx +1: xorw %dx, %dx + divw %bx, %ax + pushw %dx + incw %cx + testw %ax, %ax + jnz 1b + /* Print digit sequence */ +1: popw %ax + call print_hex_nibble + loop 1b + /* Restore registers and return */ + popw %dx + popw %cx + popw %bx + popw %ax + ret + +/***************************************************************************** + * Subroutine: zero 1kB block of base memory + * + * Parameters: + * %bx : block to zero (in kB) + * Returns: + * Nothing + ***************************************************************************** + */ +zero_kb: + /* Preserve registers */ + pushw %ax + pushw %cx + pushw %di + pushw %es + /* Zero block */ + movw %bx, %ax + shlw $6, %ax + movw %ax, %es + movw $0x400, %cx + xorw %di, %di + xorw %ax, %ax + rep stosb + /* Restore registers and return */ + popw %es + popw %di + popw %cx + popw %ax + ret + +/***************************************************************************** + * Subroutine: free and zero base memory + * + * Parameters: + * %ax : Desired new free base memory counter (in kB) + * %bx : Expected current free base memory counter (in kB) + * %fs : 
BIOS data segment (0x40) + * Returns: + * None + * + * The base memory from %bx kB to %ax kB is unconditionally zeroed. + * It will be freed if and only if the expected current free base + * memory counter (%bx) matches the actual current free base memory + * counter in 0x40:0x13; if this does not match then the memory will + * be leaked. + ***************************************************************************** + */ +free_basemem: + /* Zero base memory */ + pushw %bx +1: cmpw %bx, %ax + je 2f + call zero_kb + incw %bx + jmp 1b +2: popw %bx + /* Free base memory */ + cmpw %fs:(0x13), %bx /* Update FBMS only if "old" value */ + jne 1f /* is correct */ +1: movw %ax, %fs:(0x13) + ret + +/***************************************************************************** + * Subroutine: make a PXE API call. Works with either !PXE or PXENV+ API. + * + * Parameters: + * %bx : PXE API call number + * %ds:pxe_parameter_structure : Parameters for PXE API call + * Returns: + * %ax : PXE status code (not exit code) + * CF set if %ax is non-zero + ***************************************************************************** + */ +pxe_call: + /* Preserve registers */ + pushw %di + pushw %es + /* Set up registers for PXENV+ API. 
%bx already set up */ + pushw %ds + popw %es + movw $pxe_parameter_structure, %di + /* Set up stack for !PXE API */ + pushw %es + pushw %di + pushw %bx + /* Make the API call */ + lcall *entry_segoff + /* Reset the stack */ + addw $6, %sp + movw pxe_parameter_structure, %ax + clc + testw %ax, %ax + jz 1f + stc +1: /* Clear direction flag, for the sake of sanity */ + cld + /* Restore registers and return */ + popw %es + popw %di + ret + +/***************************************************************************** + * Subroutine: print PXE API call error message + * + * Parameters: + * %ax : PXE status code + * %bx : PXE API call number + * Returns: + * Nothing + ***************************************************************************** + */ +print_pxe_error: + pushw %si + movw $10f, %si + call print_message + xchgw %ax, %bx + call print_hex_word + movw $20f, %si + call print_message + xchgw %ax, %bx + call print_hex_word + movw $30f, %si + call print_message + popw %si + ret + .section ".prefix.data", "aw", @progbits +10: .asciz " UNDI API call " +20: .asciz " failed: status code " +30: .asciz "\n" + .previous + +/***************************************************************************** + * PXE data structures + ***************************************************************************** + */ + .section ".prefix.data" + +pxe_esp: .long 0 +pxe_ss: .word 0 + +pxe_parameter_structure: .fill 64 + +undi_code_segoff: +undi_code_size: .word 0 +undi_code_segment: .word 0 + +undi_data_segoff: +undi_data_size: .word 0 +undi_data_segment: .word 0 + +pxe_hacks: .word 0 + +/* The following fields are part of a struct undi_device */ + +undi_device: + +pxenv_segoff: +pxenv_offset: .word 0 +pxenv_segment: .word 0 + +ppxe_segoff: +ppxe_offset: .word 0 +ppxe_segment: .word 0 + +entry_segoff: +entry_offset: .word 0 +entry_segment: .word 0 + +undi_fbms_start: .word 0 +undi_fbms_end: .word 0 + +pci_busdevfn: .word UNDI_NO_PCI_BUSDEVFN +isapnp_csn: .word UNDI_NO_ISAPNP_CSN 
+isapnp_read_port: .word UNDI_NO_ISAPNP_READ_PORT + +pci_vendor: .word 0 +pci_device: .word 0 +flags: + .word ( UNDI_FL_INITIALIZED | UNDI_FL_STARTED | UNDI_FL_KEEP_ALL ) + + .equ undi_device_size, ( . - undi_device ) + +/***************************************************************************** + * Run iPXE main code + ***************************************************************************** + */ + .section ".prefix" +run_ipxe: + /* Install iPXE */ + call install + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + +#ifdef PXELOADER_KEEP_UNDI + /* Copy our undi_device structure to the preloaded_undi variable */ + movw %bx, %es + movw $preloaded_undi, %di + movw $undi_device, %si + movw $undi_device_size, %cx + rep movsb +#endif + + /* Retrieve PXE %ss:esp */ + movw pxe_ss, %di + movl pxe_esp, %ebp + + /* Retrieve PXE command line, if any */ + movl pxe_cmdline, %esi + + /* Retrieve cached DHCPACK, if any */ + movl pxe_cached_dhcpack, %ecx + + /* Jump to .text16 segment with %ds pointing to .data16 */ + movw %bx, %ds + pushw %ax + pushw $1f + lret + .section ".text16", "ax", @progbits +1: + /* Update the exit hook */ + movw %cs, ( pxe_exit_hook + 2 ) + + /* Store command-line pointer */ + movl %esi, cmdline_phys + + /* Store cached DHCPACK pointer */ + movl %ecx, cached_dhcpack_phys + + /* Run main program */ + virtcall main + + /* Uninstall iPXE */ + call uninstall + + /* Restore PXE stack */ + movw %di, %ss + movl %ebp, %esp + + /* Jump to hook if applicable */ + ljmpw *pxe_exit_hook + + .section ".data16", "aw", @progbits + .globl pxe_exit_hook +pxe_exit_hook: + .word exit_ipxe, 0 + .previous + +exit_ipxe: + /* Check PXE stack magic */ + popl %eax + cmpl $STACK_MAGIC, %eax + jne 1f + + /* PXE stack OK: return to caller */ + popw %ds + popw %es + popw %fs + popw %gs + popal + popfl + xorw %ax, %ax /* Return success */ + lret + +1: /* PXE stack corrupt or removed: use INT 18 */ + int $0x18 + .previous diff --git 
a/src/arch/x86/prefix/romprefix.S b/src/arch/x86/prefix/romprefix.S new file mode 100644 index 00000000..3abef0ea --- /dev/null +++ b/src/arch/x86/prefix/romprefix.S @@ -0,0 +1,911 @@ +/* At entry, the processor is in 16 bit real mode and the code is being + * executed from an address it was not linked to. Code must be pic and + * 32 bit sensitive until things are fixed up. + * + * Also be very careful as the stack is at the rear end of the interrupt + * table so using a noticeable amount of stack space is a no-no. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include +#include +#include + +#define PNP_SIGNATURE ( '$' + ( 'P' << 8 ) + ( 'n' << 16 ) + ( 'P' << 24 ) ) +#define PMM_SIGNATURE ( '$' + ( 'P' << 8 ) + ( 'M' << 16 ) + ( 'M' << 24 ) ) +#define PCI_SIGNATURE ( 'P' + ( 'C' << 8 ) + ( 'I' << 16 ) + ( ' ' << 24 ) ) +#define STACK_MAGIC ( 'L' + ( 'R' << 8 ) + ( 'E' << 16 ) + ( 'T' << 24 ) ) +#define PMM_ALLOCATE 0x0000 +#define PMM_FIND 0x0001 +#define PMM_HANDLE_BASE ( ( ( 'F' - 'A' + 1 ) << 26 ) + \ + ( ( 'E' - 'A' + 1 ) << 21 ) + \ + ( ( 'N' - 'A' + 1 ) << 16 ) ) +#define PMM_HANDLE_BASE_IMAGE_SOURCE \ + ( PMM_HANDLE_BASE | 0x00001000 ) +#define PMM_HANDLE_BASE_DECOMPRESS_TO \ + ( PMM_HANDLE_BASE | 0x00002000 ) +#define PCI_FUNC_MASK 0x07 + +/* ROM banner timeout, converted to a number of (18Hz) timer ticks. 
*/ +#define ROM_BANNER_TIMEOUT_TICKS ( ( 18 * ROM_BANNER_TIMEOUT ) / 10 ) + +/* Allow payload to be excluded from ROM size + */ +#if ROMPREFIX_EXCLUDE_PAYLOAD +#define ZINFO_TYPE_ADxB "ADHB" +#define ZINFO_TYPE_ADxW "ADHW" +#else +#define ZINFO_TYPE_ADxB "ADDB" +#define ZINFO_TYPE_ADxW "ADDW" +#endif + +/* Allow ROM to be marked as containing multiple images + */ +#if ROMPREFIX_MORE_IMAGES +#define INDICATOR 0x00 +#else +#define INDICATOR 0x80 +#endif + +/* Default to building a PCI ROM if no bus type is specified + */ +#ifndef BUSTYPE +#define BUSTYPE "PCIR" +#endif + + .text + .code16 + .arch i386 + .section ".prefix", "ax", @progbits + .globl _rom_start +_rom_start: + + .org 0x00 +romheader: + .word 0xAA55 /* BIOS extension signature */ +romheader_size: .byte 0 /* Size in 512-byte blocks */ + jmp init /* Initialisation vector */ +checksum: + .byte 0 + .org 0x10 + .word ipxeheader + .org 0x16 + .word undiheader +.ifeqs BUSTYPE, "PCIR" + .org 0x18 + .word pciheader +.endif + .org 0x1a + .word pnpheader + .size romheader, . - romheader + + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii ZINFO_TYPE_ADxB + .long romheader_size + .long 512 + .long 0 + .previous + +.ifeqs BUSTYPE, "PCIR" + .align 4 +pciheader: + .ascii "PCIR" /* Signature */ + .word pci_vendor_id /* Vendor identification */ + .word pci_device_id /* Device identification */ + .word ( pci_devlist - pciheader ) /* Device list pointer */ + .word pciheader_len /* PCI data structure length */ + .byte 0x03 /* PCI data structure revision */ + .byte 0x00, 0x00, 0x02 /* Class code */ +pciheader_image_length: + .word 0 /* Image length */ + .word 0x0001 /* Revision level */ + .byte 0x00 /* Code type */ + .byte INDICATOR /* Last image indicator */ +pciheader_runtime_length: + .word 0 /* Maximum run-time image length */ + .word 0x0000 /* Configuration utility code header */ + .word 0x0000 /* DMTF CLP entry point */ + .equ pciheader_len, . - pciheader + .size pciheader, . 
- pciheader + + /* PCI additional device list (filled in by linker) */ + .section ".pci_devlist.00000000", "a", @progbits +pci_devlist: + .previous + .section ".pci_devlist.ffffffff", "a", @progbits +pci_devlist_end: + .short 0x0000 /* List terminator */ + .previous + /* Ensure that terminator is always present */ + .reloc pciheader, RELOC_TYPE_NONE, pci_devlist_end + + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii ZINFO_TYPE_ADxW + .long pciheader_image_length + .long 512 + .long 0 + .ascii "ADHW" + .long pciheader_runtime_length + .long 512 + .long 0 + .previous +.endif /* PCIR */ + + /* PnP doesn't require any particular alignment, but IBM + * BIOSes will scan on 16-byte boundaries rather than using + * the offset stored at 0x1a + */ + .align 16 +pnpheader: + .ascii "$PnP" /* Signature */ + .byte 0x01 /* Structure revision */ + .byte ( pnpheader_len / 16 ) /* Length (in 16 byte increments) */ + .word 0x0000 /* Offset of next header */ + .byte 0x00 /* Reserved */ + .byte 0x00 /* Checksum */ + .long 0x00000000 /* Device identifier */ + .word mfgstr /* Manufacturer string */ + .word prodstr /* Product name */ + .byte 0x02 /* Device base type code */ + .byte 0x00 /* Device sub-type code */ + .byte 0x00 /* Device interface type code */ + .byte 0xf4 /* Device indicator */ + .word 0x0000 /* Boot connection vector */ + .word 0x0000 /* Disconnect vector */ + .word bev_entry /* Boot execution vector */ + .word 0x0000 /* Reserved */ + .word 0x0000 /* Static resource information vector*/ + .equ pnpheader_len, . - pnpheader + .size pnpheader, . - pnpheader + +/* Manufacturer string */ +mfgstr: + .asciz "http://ipxe.org" + .size mfgstr, . - mfgstr + +/* Product string + * + * Defaults to PRODUCT_SHORT_NAME. If the ROM image is writable at + * initialisation time, it will be filled in to include the PCI + * bus:dev.fn number of the card as well. 
+ */ +prodstr: + .ascii PRODUCT_SHORT_NAME +.ifeqs BUSTYPE, "PCIR" +prodstr_separator: + .byte 0 + .ascii "(PCI " +prodstr_pci_id: + .ascii "xx:xx.x)" /* Filled in by init code */ +.endif /* PCIR */ + .byte 0 + .size prodstr, . - prodstr + + .globl undiheader + .weak undiloader + .align 4 +undiheader: + .ascii "UNDI" /* Signature */ + .byte undiheader_len /* Length of structure */ + .byte 0 /* Checksum */ + .byte 0 /* Structure revision */ + .byte 0,1,2 /* PXE version: 2.1.0 */ + .word undiloader /* Offset to loader routine */ + .word _data16_memsz /* Stack segment size */ + .word _data16_memsz /* Data segment size */ + .word _text16_memsz /* Code segment size */ + .ascii BUSTYPE /* Bus type */ + .equ undiheader_len, . - undiheader + .size undiheader, . - undiheader + + .align 4 +ipxeheader: + .ascii "iPXE" /* Signature */ + .byte ipxeheader_len /* Length of structure */ + .byte 0 /* Checksum */ +shrunk_rom_size: + .byte 0 /* Shrunk size (in 512-byte blocks) */ + .byte 0 /* Reserved */ +build_id: + .long _build_id /* Randomly-generated build ID */ + .equ ipxeheader_len, . - ipxeheader + .size ipxeheader, . - ipxeheader + + .section ".zinfo.fixup", "a", @progbits /* Compressor fixups */ + .ascii "ADHB" + .long shrunk_rom_size + .long 512 + .long 0 + .previous + +/* Initialisation (called once during POST) + * + * Determine whether or not this is a PnP system via a signature + * check. If it is PnP, return to the PnP BIOS indicating that we are + * a boot-capable device; the BIOS will call our boot execution vector + * if it wants to boot us. If it is not PnP, hook INT 19. + */ +init: + /* Preserve registers, clear direction flag, set %ds=%cs */ + pushaw + pushw %ds + pushw %es + pushw %fs + pushw %gs + cld + pushw %cs + popw %ds + + /* Print message as early as possible */ + movw $init_message, %si + xorw %di, %di + call print_message + + /* Store PCI 3.0 runtime segment address for later use, if + * applicable. 
+ */ +.ifeqs BUSTYPE, "PCIR" + movw %bx, %gs +.endif + + /* Store PCI bus:dev.fn address, print PCI bus:dev.fn, and add + * PCI bus:dev.fn to product name string, if applicable. + */ +.ifeqs BUSTYPE, "PCIR" + xorw %di, %di + call print_space + movw %ax, init_pci_busdevfn + call print_pci_busdevfn + movw $prodstr_pci_id, %di + call print_pci_busdevfn + movb $( ' ' ), prodstr_separator +.endif + + /* Print segment address */ + xorw %di, %di + call print_space + movw %cs, %ax + call print_hex_word + + /* Check for PCI BIOS version, if applicable */ +.ifeqs BUSTYPE, "PCIR" + pushl %ebx + pushl %edx + pushl %edi + stc + movw $0xb101, %ax + int $0x1a + jc no_pci3 + cmpl $PCI_SIGNATURE, %edx + jne no_pci3 + testb %ah, %ah + jnz no_pci3 + movw $init_message_pci, %si + xorw %di, %di + call print_message + movb %bh, %al + call print_hex_nibble + movb $( '.' ), %al + call print_character + movb %bl, %al + call print_hex_byte + cmpb $3, %bh + jb no_pci3 + /* PCI >=3.0: leave %gs as-is if sane */ + movw %gs, %ax + cmpw $0xa000, %ax /* Insane if %gs < 0xa000 */ + jb pci3_insane + movw %cs, %bx /* Sane if %cs == %gs */ + cmpw %bx, %ax + je 1f + movzbw romheader_size, %cx /* Sane if %cs+len <= %gs */ + shlw $5, %cx + addw %cx, %bx + cmpw %bx, %ax + jae 1f + movw %cs, %bx /* Sane if %gs+len <= %cs */ + addw %cx, %ax + cmpw %bx, %ax + jbe 1f +pci3_insane: /* PCI 3.0 with insane %gs value: print error and ignore %gs */ + movb $( '!' ), %al + call print_character + movw %gs, %ax + call print_hex_word +no_pci3: + /* PCI <3.0: set %gs (runtime segment) = %cs (init-time segment) */ + pushw %cs + popw %gs +1: popl %edi + popl %edx + popl %ebx +.endif /* PCIR */ + + /* Check for PnP BIOS. Although %es:di should point to the + * PnP BIOS signature on entry, some BIOSes fail to do this. 
+ */ + movw $( 0xf000 - 1 ), %bx +pnp_scan: + incw %bx + jz no_pnp + movw %bx, %es + cmpl $PNP_SIGNATURE, %es:0 + jne pnp_scan + xorw %dx, %dx + xorw %si, %si + movzbw %es:5, %cx +1: es lodsb + addb %al, %dl + loop 1b + jnz pnp_scan + /* Is PnP: print PnP message */ + movw $init_message_pnp, %si + xorw %di, %di + call print_message + jmp pnp_done +no_pnp: /* Not PnP-compliant - hook INT 19 */ +#ifdef NONPNP_HOOK_INT19 + movw $init_message_int19, %si + xorw %di, %di + call print_message + xorw %ax, %ax + movw %ax, %es + pushl %es:( 0x19 * 4 ) + popl orig_int19 + pushw %gs /* %gs contains runtime %cs */ + pushw $int19_entry + popl %es:( 0x19 * 4 ) +#endif /* NONPNP_HOOK_INT19 */ +pnp_done: + + /* Check for PMM */ + movw $( 0xe000 - 1 ), %bx +pmm_scan: + incw %bx + jz no_pmm + movw %bx, %es + cmpl $PMM_SIGNATURE, %es:0 + jne pmm_scan + xorw %dx, %dx + xorw %si, %si + movzbw %es:5, %cx +1: es lodsb + addb %al, %dl + loop 1b + jnz pmm_scan + /* PMM found: print PMM message */ + movw $init_message_pmm, %si + xorw %di, %di + call print_message + /* We have PMM and so a 1kB stack: preserve whole registers */ + pushal + /* Allocate image source PMM block. Round up the size to the + * nearest 4kB (8 512-byte sectors) to work around AMI BIOS bugs. + */ + movzbl romheader_size, %ecx + addw extra_size, %cx + addw $0x0007, %cx /* Round up to multiple of 8 512-byte sectors */ + andw $0xfff8, %cx + shll $5, %ecx + movl $PMM_HANDLE_BASE_IMAGE_SOURCE, %ebx + movw $get_pmm_image_source, %bp + call get_pmm + movl %esi, image_source + jz 1f + /* Copy ROM to image source PMM block */ + pushw %es + xorw %ax, %ax + movw %ax, %es + movl %esi, %edi + xorl %esi, %esi + movzbl romheader_size, %ecx + shll $7, %ecx + addr32 rep movsl /* PMM presence implies flat real mode */ + popw %es + /* Shrink ROM */ + movb shrunk_rom_size, %al + movb %al, romheader_size +1: /* Allocate decompression PMM block. 
Allow 4kB for page + * alignment and round up the size to the nearest 128kB, then + * use the size within the PMM handle; this allows the same + * decompression area to be shared between multiple iPXE ROMs + * even with differing build IDs + */ + movl $_textdata_memsz_pgh, %ecx + addl $( 0x00000100 /* 4kB */ + 0x00001fff /* 128kB - 1 */ ), %ecx + andl $( 0xffffe000 /* ~( 128kB - 1 ) */ ), %ecx + movl %ecx, %ebx + shrw $12, %bx + orl $PMM_HANDLE_BASE_DECOMPRESS_TO, %ebx + movw $get_pmm_decompress_to, %bp + call get_pmm + addl $( 0x00000fff /* 4kB - 1 */ ), %esi + andl $( 0xfffff000 /* ~( 4kB - 1 ) */ ), %esi + movl %esi, decompress_to + /* Restore registers */ + popal +no_pmm: + + /* Update checksum */ + xorw %bx, %bx + xorw %si, %si + movzbw romheader_size, %cx + shlw $9, %cx +1: lodsb + addb %al, %bl + loop 1b + subb %bl, checksum + + /* Copy self to option ROM space, if applicable. Required for + * PCI3.0, which loads us to a temporary location in low + * memory. Will be a no-op for lower PCI versions. + */ +.ifeqs BUSTYPE, "PCIR" + /* Get runtime segment address and length */ + movw %gs, %ax + movw %ax, %es + movzbw romheader_size, %cx + /* Print runtime segment address */ + xorw %di, %di + call print_space + call print_hex_word + /* Fail if we have insufficient space in final location */ + movw %cs, %si + cmpw %si, %ax + je 1f + cmpw pciheader_runtime_length, %cx + jbe 1f + movb $( '!' 
), %al + call print_character + xorw %cx, %cx +1: /* Copy to final location */ + shlw $9, %cx + xorw %si, %si + xorw %di, %di + cs rep movsb +.endif + + /* Skip prompt if this is not the first PCI function, if applicable */ +.ifeqs BUSTYPE, "PCIR" + testb $PCI_FUNC_MASK, init_pci_busdevfn + jnz no_shell +.endif + /* Prompt for POST-time shell */ + movw $init_message_prompt, %si + xorw %di, %di + call print_message + movw $prodstr, %si + call print_message + movw $init_message_dots, %si + call print_message + /* Wait for Ctrl-B */ + movw $0xff02, %bx + call wait_for_key + /* Clear prompt */ + pushf + xorw %di, %di + call print_kill_line + movw $init_message_done, %si + call print_message + popf + jnz no_shell + /* Ctrl-B was pressed: invoke iPXE. The keypress will be + * picked up by the initial shell prompt, and we will drop + * into a shell. + */ + xorl %ebp, %ebp /* Inhibit use of INT 15,e820 and INT 15,e801 */ + pushw %cs + call exec +no_shell: + movb $( '\n' ), %al + xorw %di, %di + call print_character + + /* Restore registers */ + popw %gs + popw %fs + popw %es + popw %ds + popaw + + /* Indicate boot capability to PnP BIOS, if present */ + movw $0x20, %ax + lret + .size init, . 
- init + +/* Attempt to find or allocate PMM block + * + * Parameters: + * %ecx : size of block to allocate, in paragraphs + * %ebx : PMM handle base + * %bp : routine to check acceptability of found blocks + * %es:0000 : PMM structure + * Returns: + * %ebx : PMM handle + * %esi : allocated block address, or zero (with ZF set) if allocation failed + */ +get_pmm: + /* Preserve registers */ + pushl %eax + pushw %di + movw $( ' ' ), %di +get_pmm_find: + /* Try to find existing block */ + pushl %ebx /* PMM handle */ + pushw $PMM_FIND + lcall *%es:7 + addw $6, %sp + pushw %dx + pushw %ax + popl %esi + /* Treat 0xffffffff (not supported) as 0x00000000 (not found) */ + incl %esi + jz get_pmm_allocate + decl %esi + jz get_pmm_allocate + /* Block found - check acceptability */ + call *%bp + jnc get_pmm_done + /* Block not acceptable - increment handle and retry */ + incl %ebx + jmp get_pmm_find +get_pmm_allocate: + /* Block not found - try to allocate new block */ + pushw $0x0002 /* Extended memory */ + pushl %ebx /* PMM handle */ + pushl %ecx /* Length */ + pushw $PMM_ALLOCATE + lcall *%es:7 + addw $12, %sp + pushw %dx + pushw %ax + popl %esi + movw $( '+' ), %di /* Indicate allocation attempt */ +get_pmm_done: + /* Print block address */ + movw %di, %ax + xorw %di, %di + call print_character + movl %esi, %eax + call print_hex_dword + /* Treat 0xffffffff (not supported) as 0x00000000 (allocation + * failed), and set ZF to indicate a zero result. + */ + incl %esi + jz 1f + decl %esi +1: /* Restore registers and return */ + popw %di + popl %eax + ret + .size get_pmm, . - get_pmm + + /* Check acceptability of image source block */ +get_pmm_image_source: + pushw %es + xorw %ax, %ax + movw %ax, %es + movl build_id, %eax + addr32 cmpl %es:build_id(%esi), %eax + je 1f + stc +1: popw %es + ret + .size get_pmm_image_source, . - get_pmm_image_source + + /* Check acceptability of decompression block */ +get_pmm_decompress_to: + clc + ret + .size get_pmm_decompress_to, . 
- get_pmm_decompress_to + +/* + * Note to hardware vendors: + * + * If you wish to brand this boot ROM, please do so by defining the + * strings PRODUCT_NAME and PRODUCT_SHORT_NAME in config/branding.h. + * + * While nothing in the GPL prevents you from removing all references + * to iPXE or http://ipxe.org, we prefer you not to do so. + * + * If you have an OEM-mandated branding requirement that cannot be + * satisfied simply by defining PRODUCT_NAME and PRODUCT_SHORT_NAME, + * please contact us. + * + * [ Including an ASCII NUL in PRODUCT_NAME is considered to be + * bypassing the spirit of this request! ] + */ +init_message: + .ascii "\n" + .ascii PRODUCT_NAME + .ascii "\n" + .ascii PRODUCT_SHORT_NAME + .ascii " (" + .ascii PRODUCT_URI + .asciz ")" + .size init_message, . - init_message +.ifeqs BUSTYPE, "PCIR" +init_message_pci: + .asciz " PCI" + .size init_message_pci, . - init_message_pci +.endif /* PCIR */ +init_message_pnp: + .asciz " PnP" + .size init_message_pnp, . - init_message_pnp +init_message_pmm: + .asciz " PMM" + .size init_message_pmm, . - init_message_pmm +init_message_int19: + .asciz " INT19" + .size init_message_int19, . - init_message_int19 +init_message_prompt: + .asciz "\nPress Ctrl-B to configure " + .size init_message_prompt, . - init_message_prompt +init_message_dots: + .asciz "..." + .size init_message_dots, . - init_message_dots +init_message_done: + .asciz "\n\n" + .size init_message_done, . - init_message_done + +/* PCI bus:dev.fn + * + */ +.ifeqs BUSTYPE, "PCIR" +init_pci_busdevfn: + .word 0 + .size init_pci_busdevfn, . - init_pci_busdevfn +.endif /* PCIR */ + +/* Image source area + * + * May be either zero (indicating to use option ROM space as source), + * or within a PMM-allocated block. + */ + .globl image_source +image_source: + .long 0 + .size image_source, . - image_source + +/* Additional image source size (in 512-byte sectors) + * + */ +extra_size: + .word 0 + .size extra_size, . 
- extra_size + +/* Temporary decompression area + * + * May be either zero (indicating to use default decompression area in + * high memory), or within a PMM-allocated block. + */ + .globl decompress_to +decompress_to: + .long 0 + .size decompress_to, . - decompress_to + +/* Boot Execution Vector entry point + * + * Called by the PnP BIOS when it wants to boot us. + */ +bev_entry: + orl $0xffffffff, %ebp /* Allow arbitrary relocation */ + pushw %cs + call exec + lret + .size bev_entry, . - bev_entry + +/* INT19 entry point + * + * Called via the hooked INT 19 if we detected a non-PnP BIOS. We + * attempt to return via the original INT 19 vector (if we were able + * to store it). + */ +int19_entry: + pushw %cs + popw %ds + /* Prompt user to press B to boot */ + movw $int19_message_prompt, %si + xorw %di, %di + call print_message + movw $prodstr, %si + call print_message + movw $int19_message_dots, %si + call print_message + movw $0xdf4e, %bx + call wait_for_key + pushf + xorw %di, %di + call print_kill_line + movw $int19_message_done, %si + call print_message + popf + jz 1f + /* Leave keypress in buffer and start iPXE. The keypress will + * cause the usual initial Ctrl-B prompt to be skipped. + */ + orl $0xffffffff, %ebp /* Allow arbitrary relocation */ + pushw %cs + call exec +1: /* Try to call original INT 19 vector */ + movl %cs:orig_int19, %eax + testl %eax, %eax + je 2f + ljmp *%cs:orig_int19 +2: /* No chained vector: issue INT 18 as a last resort */ + int $0x18 + .size int19_entry, . - int19_entry +orig_int19: + .long 0 + .size orig_int19, . - orig_int19 + +int19_message_prompt: + .asciz "Press N to skip booting from " + .size int19_message_prompt, . - int19_message_prompt +int19_message_dots: + .asciz "..." + .size int19_message_dots, . - int19_message_dots +int19_message_done: + .asciz "\n\n" + .size int19_message_done, . 
- int19_message_done + +/* Execute as a boot device + * + */ +exec: /* Set %ds = %cs */ + pushw %cs + popw %ds + + /* Print message as soon as possible */ + movw $prodstr, %si + xorw %di, %di + call print_message + movw $exec_message_pre_install, %si + call print_message + + /* Store magic word on BIOS stack and remember BIOS %ss:sp */ + pushl $STACK_MAGIC + movw %ss, %cx + movw %sp, %dx + + /* Obtain a reasonably-sized temporary stack */ + xorw %bx, %bx + movw %bx, %ss + movw $0x7c00, %sp + + /* Install iPXE */ + call alloc_basemem + movl image_source, %esi + movl decompress_to, %edi + call install_prealloc + + /* Print message indicating successful installation */ + movw $exec_message_post_install, %si + xorw %di, %di + call print_message + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Retrieve PCI bus:dev.fn, if applicable */ +.ifeqs BUSTYPE, "PCIR" + movw init_pci_busdevfn, %ax +.endif + + /* Set up %ds for access to .data16 */ + movw %bx, %ds + + /* Store PCI bus:dev.fn, if applicable */ +.ifeqs BUSTYPE, "PCIR" +#ifdef AUTOBOOT_ROM_FILTER + movw %ax, autoboot_busdevfn +#endif /* AUTOBOOT_ROM_FILTER */ +.endif + + /* Run iPXE */ + virtcall main + + /* Set up flat real mode for return to BIOS */ + call flatten_real_mode + + /* Uninstall iPXE */ + call uninstall + + /* Restore BIOS stack */ + movw %cx, %ss + movw %dx, %sp + + /* Check magic word on BIOS stack */ + popl %eax + cmpl $STACK_MAGIC, %eax + jne 1f + /* BIOS stack OK: return to caller */ + lret +1: /* BIOS stack corrupt: use INT 18 */ + int $0x18 + .previous + +exec_message_pre_install: + .asciz " starting execution..." + .size exec_message_pre_install, . - exec_message_pre_install +exec_message_post_install: + .asciz "ok\n" + .size exec_message_post_install, . 
- exec_message_post_install + +/* Wait for key press specified by %bl (masked by %bh) + * + * Used by init and INT19 code when prompting user. If the specified + * key is pressed, it is left in the keyboard buffer. + * + * Returns with ZF set iff specified key is pressed. + */ +wait_for_key: + /* Preserve registers */ + pushw %cx + pushw %ax +1: /* Empty the keyboard buffer before waiting for input */ + movb $0x01, %ah + int $0x16 + jz 2f + xorw %ax, %ax + int $0x16 + jmp 1b +2: /* Wait for a key press */ + movw $ROM_BANNER_TIMEOUT_TICKS, %cx +3: decw %cx + js 99f /* Exit with ZF clear */ + /* Wait for timer tick to be updated */ + call wait_for_tick + /* Check to see if a key was pressed */ + movb $0x01, %ah + int $0x16 + jz 3b + /* Check to see if key was the specified key */ + andb %bh, %al + cmpb %al, %bl + je 99f /* Exit with ZF set */ + /* Not the specified key: remove from buffer and stop waiting */ + pushfw + xorw %ax, %ax + int $0x16 + popfw /* Exit with ZF clear */ +99: /* Restore registers and return */ + popw %ax + popw %cx + ret + .size wait_for_key, . - wait_for_key + +/* Wait for timer tick + * + * Used by wait_for_key + */ +wait_for_tick: + pushl %eax + pushw %fs + movw $0x40, %ax + movw %ax, %fs + movl %fs:(0x6c), %eax +1: pushf + sti + hlt + popf + cmpl %fs:(0x6c), %eax + je 1b + popw %fs + popl %eax + ret + .size wait_for_tick, . - wait_for_tick + +/* Drag in objects via _rom_start */ +REQUIRING_SYMBOL ( _rom_start ) + +/* Drag in ROM configuration */ +REQUIRE_OBJECT ( config_romprefix ) diff --git a/src/arch/x86/prefix/undiloader.S b/src/arch/x86/prefix/undiloader.S new file mode 100644 index 00000000..1d77110e --- /dev/null +++ b/src/arch/x86/prefix/undiloader.S @@ -0,0 +1,73 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include + + .text + .code16 + .arch i386 + .section ".prefix", "ax", @progbits + +/* UNDI loader + * + * Called by an external program to load our PXE stack. 
+ */ + .globl undiloader +undiloader: + /* Save registers */ + pushl %esi + pushl %edi + pushl %ebp + pushw %ds + pushw %es + pushw %bx + + /* ROM segment address to %ds */ + pushw %cs + popw %ds + + /* UNDI loader parameter structure address into %es:%di */ + movw %sp, %bx + movw %ss:22(%bx), %di + movw %ss:24(%bx), %es + + /* Install to specified real-mode addresses */ + pushw %di + movw %es:12(%di), %bx + movw %es:14(%di), %ax + movl image_source, %esi + call undiloader_source + xorl %edi, %edi + orl $0xffffffff, %ebp /* Allow arbitrary relocation */ + call install_prealloc + popw %di + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "ax", @progbits +1: + /* Call UNDI loader C code */ + virtcall pxe_loader_call + +1: /* Restore registers and return */ + popw %bx + popw %es + popw %ds + popl %ebp + popl %edi + popl %esi + lret + +/* Update image source address for UNDI loader + * + * Parameters: + * %esi : Image source address + * Returns: + * %esi : Image source address + */ + .section ".prefix", "ax", @progbits + .globl undiloader_source + .weak undiloader_source +undiloader_source: + ret diff --git a/src/arch/x86/prefix/unlzma.S b/src/arch/x86/prefix/unlzma.S new file mode 100644 index 00000000..ce18c756 --- /dev/null +++ b/src/arch/x86/prefix/unlzma.S @@ -0,0 +1,994 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/**************************************************************************** + * + * This file provides the decompress() and decompress16() functions + * which can be called in order to decompress an LZMA-compressed + * image. The code is modelled on the public-domain "XZ Embedded" + * implementation as used by the Linux kernel. Symbol names are + * chosen to match the XZ Embedded implementation where possible, for + * ease of reference. + * + * This code is optimised for size rather than speed, since the amount + * of data to be decompressed is trivially small by modern standards. + * + * The same basic assembly code is used to compile both decompress() + * and decompress16(). + * + * Note that these functions require large amounts of stack space. + * + **************************************************************************** + */ + + .text + .arch i586 + .section ".prefix.lib", "ax", @progbits + +#ifdef CODE16 +#define ADDR16 +#define ADDR32 addr32 +#define decompress decompress16 + .code16 +#else /* CODE16 */ +#define ADDR16 addr16 +#define ADDR32 + .code32 +#endif /* CODE16 */ + +#define CRCPOLY 0xedb88320 +#define CRCSEED 0xffffffff + +/**************************************************************************** + * Debugging + **************************************************************************** + * + * This code will usually run in 16-bit protected mode, in which case + * only the 0xe9 debug port (present on some virtual machines) can be + * used. 
+ * + * To debug on real hardware, build with DEBUG=libprefix. This will + * cause this code to be called in flat real mode, and so DEBUG_INT10 + * may be used. + */ + +/* Enable debugging via 0xe9 debug port */ +#define DEBUG_E9 0 + +/* Enable debugging via BIOS INT 10 (works only when in flat real mode) */ +#define DEBUG_INT10 0 + +#if ( DEBUG_E9 || DEBUG_INT10 ) + .macro print_character, reg + pushfl + pushw %ax + pushw %bx + pushw %bp + movb \reg, %al + movw $0x0007, %bx + movb $0x0e, %ah +#if DEBUG_E9 + outb %al, $0xe9 +#endif +#if DEBUG_INT10 + cmpb $('\n'), %al + jne L\@ + int $0x10 + movb $('\r'), %al +L\@: int $0x10 +#endif + popw %bp + popw %bx + popw %ax + popfl + .endm + + .macro print_hex_nibble + pushfl + pushw %ax + cmpb $10, %al + sbb $0x69, %al + das + print_character %al + popw %ax + popfl + .endm + + .macro print_hex_byte, reg + pushfl + pushw %ax + movb \reg, %al + pushw %ax + shrb $4, %al + print_hex_nibble + popw %ax + andb $0x0f, %al + print_hex_nibble + popw %ax + popfl + .endm + + .macro print_hex_word, reg + pushw %ax + movw \reg, %ax + print_hex_byte %ah + print_hex_byte %al + popw %ax + .endm + + .macro print_hex_dword, reg + pushl %eax + movl \reg, %eax + rorl $16, %eax + print_hex_word %ax + rorl $16, %eax + print_hex_word %ax + popl %eax + .endm +#else + .macro print_character, char + .endm + .macro print_hex_byte, reg + .endm + .macro print_hex_word, reg + .endm + .macro print_hex_dword, reg + .endm +#endif + +/**************************************************************************** + * LZMA parameters and data structures + **************************************************************************** + */ + +/* LZMA decompressor states (as used in XZ Embedded) */ +#define STATE_LIT_LIT 0x00 +#define STATE_MATCH_LIT_LIT 0x01 +#define STATE_REP_LIT_LIT 0x02 +#define STATE_SHORTREP_LIT_LIT 0x03 +#define STATE_MATCH_LIT 0x04 +#define STATE_REP_LIT 0x05 +#define STATE_SHORTREP_LIT 0x06 +#define STATE_LIT_MATCH 0x07 +#define 
STATE_LIT_LONGREP 0x08 +#define STATE_LIT_SHORTREP 0x09 +#define STATE_NONLIT_MATCH 0x0a +#define STATE_NONLIT_REP 0x0b + +/* LZMA maximum decompressor state in which most recent symbol was a literal */ +#define STATE_LIT_MAX 0x06 + +/* LZMA number of literal context bits ("lc=" parameter) */ +#define LZMA_LC 2 + + .struct 0 +lzma_len_dec: +choice: .word 0 +choice2: .word 0 +low: .rept ( 1 << 3 ) + .word 0 + .endr +mid: .rept ( 1 << 3 ) + .word 0 + .endr +high: .rept ( 1 << 8 ) + .word 0 + .endr + .equ sizeof__lzma_len_dec, . - lzma_len_dec + .previous + + .struct 0 +lzma_dec: +out_start: .long 0 +rc_code: .long 0 +rc_range: .long 0 +len: .word 0 +reps: +rep0: .long 0 +rep1: .long 0 +rep2: .long 0 +rep3: .long 0 +probs: +is_match: .word 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +is_rep: .word 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +is_rep0: .word 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +is_rep1: .word 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +is_rep2: .word 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +is_rep0_long: .word 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +dist_slot: .rept ( 4 * ( 1 << 6 ) ) + .word 0 + .endr +dist_special: .rept ( ( 1 << ( 14 / 2 ) ) - 14 ) + .word 0 + .endr +dist_align: .rept ( 1 << 4 ) + .word 0 + .endr +match_len_dec: .space sizeof__lzma_len_dec +rep_len_dec: .space sizeof__lzma_len_dec +literal: .rept ( ( 1 << LZMA_LC ) * 0x300 ) + .word 0 + .endr + .align 4 + .equ sizeof__lzma_dec, . 
- lzma_dec + .previous + + /* Some binutils versions seem not to handle .struct/.previous */ + .section ".prefix.lib", "ax", @progbits + +/***************************************************************************** + * Normalise range encoder + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %eax : current range + ***************************************************************************** + */ +rc_normalise: + /* Check if rc_range is less than 1<<24 */ + testb $0xff, (rc_range+3)(%ebp) + jnz 1f + /* If it is, shift in a new byte from the compressed input data */ + shll $8, rc_range(%ebp) + shll $8, rc_code(%ebp) + ADDR32 lodsb + movb %al, (rc_code+0)(%ebp) +1: /* Return current range */ + movl rc_range(%ebp), %eax + ret + .size rc_normalise, . - rc_normalise + +/***************************************************************************** + * Decode single range-encoded bit using a probability estimate + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %ebx : probability estimate pointer (offset from %ebp) + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * CF : decoded bit + * ZF : inverse of decoded bit + * Corrupts: + * none + ***************************************************************************** + */ +rc_bit: + /* Preserve registers */ + pushl %eax + pushl %edx + /* Perform normalisation */ + call rc_normalise + /* Calculate bound in %eax and probability estimate in %dx */ + shrl $11, %eax + movzwl (%ebp,%ebx), %edx + mul %edx /* will zero %edx */ + movw (%ebp,%ebx), %dx + /* Compare code against bound */ + cmpl %eax, rc_code(%ebp) + jae 2f +1: /* Code is less than bound */ + movl %eax, rc_range(%ebp) + negw %dx + addw $(1<<11), %dx + shrw $5, %dx + addw %dx, (%ebp,%ebx) + xorw %ax, %ax /* Clear CF, set ZF */ + jmp 99f +2: /* Code is 
greater than or equal to bound */ + subl %eax, rc_range(%ebp) + subl %eax, rc_code(%ebp) + shrw $5, %dx + subw %dx, (%ebp,%ebx) + incw %dx /* Clear ZF (%dx is 11-bit; can never wrap) */ + stc /* Set CF */ +99: /* Restore registers and return */ + popl %edx + popl %eax + ret + .size rc_bit, . - rc_bit + +/***************************************************************************** + * Decode MSB-first bittree + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %ebx : probability estimate set pointer (offset from %ebp) + * %cx : number of bits to decode + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %eax : decoded bittree + * Corrupts: + * none + ***************************************************************************** + */ +rc_bittree: + /* Preserve registers */ + pushl %edi + pushw %cx + movl %ebx, %edi + /* Initialise registers */ + movl $1, %eax +1: /* Decode bit */ + leaw (%edi,%eax,2), %bx /* high word always zero anyway */ + call rc_bit + rclw %ax + ADDR16 loop 1b + /* Restore registers, clear unwanted high bit of result, and return */ + movl %edi, %ebx + popw %cx + popl %edi + btrw %cx, %ax + ret + .size rc_bittree, . 
- rc_bittree + +/***************************************************************************** + * Decode LSB-first bittree + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %ebx : probability estimate set pointer (offset from %ebp) + * %cx : number of bits to decode + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %eax : decoded bittree + * Corrupts: + * none + ***************************************************************************** + */ +rc_bittree_reverse: + /* Preserve registers */ + pushw %cx + /* Decode bittree */ + call rc_bittree +1: /* Reverse result */ + rcrb %al + rclb %ah + ADDR16 loop 1b + shrw $8, %ax + /* Restore registers and return */ + popw %cx + ret + .size rc_bittree_reverse, . - rc_bittree_reverse + +/***************************************************************************** + * Decode MSB-first bittree with optional match byte + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %ebx : probability estimate set pointer (offset from %ebp) + * %cl : match byte + * %ch : 1 to use match byte, 0 to ignore match byte + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %eax : decoded bittree + * Corrupts: + * none + ***************************************************************************** + */ +rc_bittree_match: + /* Preserve registers */ + pushl %edi + pushw %cx + pushw %dx + movl %ebx, %edi + /* Initialise registers */ + movl $1, %eax +1: /* Decode bit */ + rolb $1, %cl + movw %cx, %dx + andb %dh, %dl /* match_bit in %dl */ + movw %dx, %bx + addb %bl, %bh + xorb %bl, %bl + addw %ax, %bx /* offset + match_bit + symbol */ + leaw (%edi,%ebx,2), %bx /* high word always zero anyway */ + call rc_bit + rclw %ax + movb %al, %dh + notb %dh + xorb %dh, %dl + andb %dl, %ch /* offset &= ( match_bit ^ bit ) */ + testb %ah, %ah + jz 1b + /* Restore registers, clear unwanted high bit of 
result, and return */ + movl %edi, %ebx + popw %dx + popw %cx + popl %edi + xorb %ah, %ah + ret + .size rc_bittree_match, . - rc_bittree_match + +/***************************************************************************** + * Decode direct bits (no probability estimates) + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %cx : number of bits to decode + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %eax : decoded bits + * Corrupts: + * none + ***************************************************************************** + */ +rc_direct: + /* Preserve registers */ + pushl %ebx + pushw %cx + pushl %edx + /* Initialise registers */ + xorl %edx, %edx +1: /* Perform normalisation */ + call rc_normalise + /* Decode bit */ + shrl $1, %eax + movl %eax, rc_range(%ebp) + movl rc_code(%ebp), %ebx + subl %eax, %ebx + js 2f + movl %ebx, rc_code(%ebp) +2: rcll %ebx + rcll %edx + xorb $1, %dl + ADDR16 loop 1b + /* Restore registers and return */ + movl %edx, %eax + popl %edx + popw %cx + popl %ebx + ret + .size rc_direct, . - rc_direct + +/***************************************************************************** + * Decode an LZMA literal + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %es:%edi : uncompressed output data pointer + * %edx : LZMA state + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %es:%edi : uncompressed output data pointer (updated) + * %edx : LZMA state + * CF : end of payload marker found (always zero) + * Corrupts: + * %eax + * %ebx + * %ecx + ***************************************************************************** + * + * Literals are coded as an eight-bit tree, using a match byte if the + * previous symbol was not a literal. 
+ * + */ +lzma_literal: + /* Get most recent output byte, if available */ + xorl %ebx, %ebx + cmpl %edi, out_start(%ebp) + je 1f + movb %es:-1(%edi), %bh +1: /* Locate probability estimate set */ + shrb $( 8 - LZMA_LC ), %bh + shlb $1, %bh + leaw literal(%ebx,%ebx,2), %bx + /* Get match byte, if applicable */ + xorw %cx, %cx + cmpb $STATE_LIT_MAX, %dl + jbe 1f + movl rep0(%ebp), %eax + notl %eax + movb %es:(%edi,%eax), %cl + movb $1, %ch +1: /* Decode bittree */ + call rc_bittree_match + /* Store output byte */ + ADDR32 stosb + print_hex_byte %al + print_character $(' ') + /* Update LZMA state */ + subb $3, %dl + jns 1f + xorb %dl, %dl +1: cmpb $7, %dl + jb 1f + subb $3, %dl +1: /* Clear CF and return */ + clc + ret + .size lzma_literal, . - lzma_literal + +/***************************************************************************** + * Decode an LZMA length + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %ebx : length parameter pointer (offset from %ebp) + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * Corrupts: + * %ebx + ***************************************************************************** + * + * Lengths are encoded as: + * + * "0" + 3 bits : lengths 2-9 ("low") + * "10" + 3 bits : lengths 10-17 ("mid") + * "11" + 8 bits : lengths 18-273 ("high") + */ +lzma_len: + /* Preserve registers */ + pushl %eax + pushl %ecx + pushl %edi + movl %ebx, %edi + /* Start by assuming three bits and a base length of 2 */ + movw $3, %cx + movw $2, len(%ebp) + /* Check low-length choice bit */ + leal choice(%edi), %ebx + call rc_bit + leal low(%edi), %ebx + jz 1f + /* Check high-length choice bit */ + leal choice2(%edi), %ebx + call rc_bit + leal mid(%edi), %ebx + movb $10, len(%ebp) + jz 1f + leal high(%edi), %ebx + movb $8, %cl + movb $18, len(%ebp) +1: /* Get encoded length */ + call rc_bittree + addw %ax, len(%ebp) + /* Restore registers and return */ + movl %edi, %ebx + popl 
%edi + popl %ecx + popl %eax + ret + .size lzma_len, . - lzma_len + +/***************************************************************************** + * Copy (possibly repeated) matched data + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %es:%edi : uncompressed output data pointer + * %cl : repeated match distance index (for repeated matches) + * %eax : match distance (for non-repeated matches) + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %es:%edi : uncompressed output data pointer + * CF : match distance is out of range + * Corrupts: + * %eax + * %ebx + * %ecx + ***************************************************************************** + */ +match: /* Update repeated match list */ + print_character $('[') + movl $3, %ecx + jmp 1f +match_rep: + print_character $('[') + print_character $('R') + print_hex_byte %cl + print_character $('=') + movzbl %cl, %ecx + movl reps(%ebp,%ecx,4), %eax + jcxz 2f +1: movl (reps-4)(%ebp,%ecx,4), %ebx + movl %ebx, reps(%ebp,%ecx,4) + loop 1b + movl %eax, rep0(%ebp) +2: /* Preserve registers */ + pushl %esi + /* Get stored match length */ + movzwl len(%ebp), %ecx + print_hex_dword %eax + print_character $('+') + print_hex_word %cx + print_character $(']') + print_character $(' ') + /* Abort with CF set if match distance is out of range */ + movl out_start(%ebp), %esi + negl %esi + leal -1(%edi,%esi), %esi + cmpl %eax, %esi + jc 99f + /* Perform copy */ + notl %eax + leal (%edi,%eax), %esi + ADDR32 es rep movsb +99: /* Restore registers and return */ + popl %esi + ret + .size match, . 
- match + +/***************************************************************************** + * Decode an LZMA match + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %es:%edi : uncompressed output data pointer + * %edx : LZMA state + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %es:%edi : uncompressed output data pointer + * %edx : LZMA state + * CF : end of payload marker found + * Corrupts: + * %eax + * %ebx + * %ecx + ***************************************************************************** + * + * Matches are encoded as an LZMA length followed by a 6-bit "distance + * slot" code, 0-26 fixed-probability bits, and 0-5 context encoded + * bits. + */ +lzma_match: + /* Preserve registers */ + pushl %edi + /* Update LZMA state */ + cmpb $STATE_LIT_MAX, %dl + movb $STATE_LIT_MATCH, %dl + jbe 1f + movb $STATE_NONLIT_MATCH, %dl +1: /* Decode length */ + movl $match_len_dec, %ebx + call lzma_len + /* Decode distance slot */ + movw len(%ebp), %bx + subw $2, %bx + cmpw $4, %bx + jb 1f + movw $3, %bx +1: shlw $7, %bx + addw $dist_slot, %bx + movw $6, %cx + call rc_bittree + /* Distance slots 0-3 are literal distances */ + cmpb $4, %al + jb 99f + /* Determine initial bits: 10/11 for even/odd distance codes */ + movl %eax, %edi + andw $1, %di + orw $2, %di + /* Determine number of context-encoded bits */ + movw %ax, %cx + shrb $1, %cl + decb %cl + /* Select context to be used in absence of fixed-probability bits */ + movl %edi, %ebx + shlw %cl, %bx + subw %ax, %bx + leaw (dist_special-2)(%ebx,%ebx), %bx + /* Decode fixed-probability bits, if any */ + cmpb $6, %cl + jb 1f + subb $4, %cl + shll %cl, %edi + call rc_direct + orl %eax, %edi + /* Select context to be used in presence of fixed-probability bits */ + movb $4, %cl + movl $dist_align, %ebx +1: /* Decode context-encoded bits */ + shll %cl, %edi + call rc_bittree_reverse + orl %edi, %eax +99: /* Restore registers and tail-call */ + 
popl %edi + jmp match + .size lzma_match, . - lzma_match + +/***************************************************************************** + * Decode an LZMA repeated match + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %es:%edi : uncompressed output data pointer + * %edx : LZMA state + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %es:%edi : uncompressed output data pointer + * %edx : LZMA state + * CF : end of payload marker found + * Corrupts: + * %eax + * %ebx + * %ecx + ***************************************************************************** + * + * Repeated matches are encoded as: + * + * "00" : shortrep0 (implicit length 1) + * "01" + len : longrep0 + * "10" + len : longrep1 + * "110" + len : longrep2 + * "111" + len : longrep3 + */ +lzma_rep_match: + /* Initially assume longrep0 */ + movw $(STATE_LIT_LONGREP << 8), %cx + /* Get is_rep0 bit */ + leal is_rep0(,%edx,2), %ebx + call rc_bit + jnz 1f + /* Get is_rep0_long bit */ + leal is_rep0_long(,%edx,2), %ebx + call rc_bit + jnz 98f + movw $1, len(%ebp) + movb $STATE_LIT_SHORTREP, %ch + jmp 99f +1: /* Get is_rep1 bit */ + incb %cl + leal is_rep1(,%edx,2), %ebx + call rc_bit + jz 98f + /* Get is_rep2 bit */ + incb %cl + leal is_rep2(,%edx,2), %ebx + call rc_bit + adcb $0, %cl +98: /* Decode length */ + movl $rep_len_dec, %ebx + call lzma_len +99: /* Update LZMA state */ + cmpb $STATE_LIT_MAX, %dl + movb %ch, %dl + jbe 1f + movb $STATE_NONLIT_REP, %dl +1: /* Tail call */ + jmp match_rep + .size lzma_match, . 
- lzma_match + +/***************************************************************************** + * Decode one LZMA symbol + * + * Parameters: + * %ss:%ebp : LZMA parameter block + * %ds:%esi : compressed input data pointer + * %es:%edi : uncompressed output data pointer + * %edx : LZMA state + * Returns: + * %ds:%esi : compressed input data pointer (possibly updated) + * %es:%edi : uncompressed output data pointer (updated) + * %edx : LZMA state + * CF : end of payload marker found + * Corrupts: + * %eax + * %ebx + * %ecx + ***************************************************************************** + */ +lzma_decode: + /* Get is_match bit */ + leal is_match(,%edx,2), %ebx + call rc_bit + jz lzma_literal + /* Get is_rep bit */ + leal is_rep(,%edx,2), %ebx + call rc_bit + jz lzma_match + jmp lzma_rep_match + .size lzma_decode, . - lzma_decode + +/**************************************************************************** + * Undo effect of branch-call-jump (BCJ) filter + * + * Parameters: + * %es:%esi : start of uncompressed output data (note %es) + * %es:%edi : end of uncompressed output data + * Returns: + * Corrupts: + * %eax + * %ebx + * %ecx + * %edx + * %esi + ***************************************************************************** + */ +bcj_filter: + /* Store (negative) start of data in %edx */ + movl %esi, %edx + negl %edx + /* Calculate limit in %ecx */ + leal -5(%edi,%edx), %ecx +1: /* Calculate offset in %ebx */ + leal (%esi,%edx), %ebx + /* Check for end of data */ + cmpl %ecx, %ebx + ja 99f + /* Check for an opcode which would be followed by a rel32 address */ + ADDR32 es lodsb + andb $0xfe, %al + cmpb $0xe8, %al + jne 1b + /* Get current jump target value in %eax */ + ADDR32 es lodsl + /* Convert absolute addresses in the range [0,limit) back to + * relative addresses in the range [-offset,limit-offset). 
+ */ + cmpl %ecx, %eax + jae 2f + subl %ebx,%es:-4(%esi) +2: /* Convert negative numbers in the range [-offset,0) back to + * positive numbers in the range [limit-offset,limit). + */ + notl %eax /* Range is now [0,offset) */ + cmpl %ebx, %eax + jae 1b + addl %ecx,%es:-4(%esi) + jmp 1b +99: /* Return */ + ret + .size bcj_filter, . - bcj_filter + +/**************************************************************************** + * Verify CRC32 + * + * Parameters: + * %ds:%esi : Start of compressed input data + * %edx : Length of compressed input data (including CRC) + * Returns: + * CF clear if CRC32 is zero + * All other registers are preserved + * Corrupts: + * %eax + * %ebx + * %ecx + * %edx + * %esi + **************************************************************************** + */ +verify_crc32: + /* Calculate CRC */ + addl %esi, %edx + movl $CRCSEED, %ebx +1: ADDR32 lodsb + xorb %al, %bl + movw $8, %cx +2: rcrl %ebx + jnc 3f + xorl $CRCPOLY, %ebx +3: ADDR16 loop 2b + cmpl %esi, %edx + jne 1b + /* Set CF if result is nonzero */ + testl %ebx, %ebx + jz 1f + stc +1: /* Return */ + ret + .size verify_crc32, . - verify_crc32 + +/**************************************************************************** + * decompress (real-mode or 16/32-bit protected-mode near call) + * + * Decompress data + * + * Parameters (passed via registers): + * %ds:%esi : Start of compressed input data + * %es:%edi : Start of output buffer + * Returns: + * %ds:%esi - End of compressed input data + * %es:%edi - End of decompressed output data + * CF set if CRC32 was incorrect + * All other registers are preserved + * + * NOTE: It would be possible to build a smaller version of the + * decompression code for -DKEEP_IT_REAL by using 16-bit registers + * where possible. 
+ **************************************************************************** + */ + .globl decompress +decompress: + /* Preserve registers */ + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + pushl %ebp + /* Verify CRC32 */ + ADDR32 lodsl + movl %eax, %edx + pushl %esi + call verify_crc32 + popl %esi + jc 99f + /* Allocate parameter block */ + subl $sizeof__lzma_dec, %esp + movl %esp, %ebp + /* Zero parameter block and set all probabilities to 0.5 */ + pushl %edi + pushw %es + pushw %ss + popw %es + movl %ebp, %edi + xorl %eax, %eax + movl $( sizeof__lzma_dec / 4 ), %ecx + ADDR32 rep stosl + leal probs(%ebp), %edi + movw $( ( 1 << 11 ) / 2 ), %ax + movl $( ( sizeof__lzma_dec - probs ) / 2 ), %ecx + ADDR32 rep stosw + popw %es + popl %edi + /* Initialise remaining parameters */ + movl %edi, out_start(%ebp) + print_character $('\n') + ADDR32 lodsb /* discard initial byte */ + print_hex_byte %al + ADDR32 lodsl + bswapl %eax + print_hex_dword %eax + print_character $('\n') + movl %eax, rc_code(%ebp) + decl rc_range(%ebp) + movl $STATE_LIT_LIT, %edx +1: /* Decompress until we reach end of buffer */ + call lzma_decode + jnc 1b + call rc_normalise + print_character $('\n') + /* Undo BCJ filter */ + pushl %esi + movl out_start(%ebp), %esi + call bcj_filter + popl %esi + /* Skip CRC */ + ADDR32 lodsl + /* Free parameter block (and clear CF) */ + addl $sizeof__lzma_dec, %esp +99: /* Restore registers and return */ + popl %ebp + popl %edx + popl %ecx + popl %ebx + popl %eax + ret + + /* Specify minimum amount of stack space required */ + .globl _min_decompress_stack + .equ _min_decompress_stack, ( sizeof__lzma_dec + 512 /* margin */ ) diff --git a/src/arch/x86/prefix/unlzma16.S b/src/arch/x86/prefix/unlzma16.S new file mode 100644 index 00000000..32b43f0d --- /dev/null +++ b/src/arch/x86/prefix/unlzma16.S @@ -0,0 +1,9 @@ +/* + * 16-bit version of the decompressor + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#define CODE16 +#include "unlzma.S" diff --git 
a/src/arch/x86/prefix/usbdisk.S b/src/arch/x86/prefix/usbdisk.S new file mode 100644 index 00000000..977de6dd --- /dev/null +++ b/src/arch/x86/prefix/usbdisk.S @@ -0,0 +1,78 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +#include <config/console.h> + + .text + .arch i386 + .section ".prefix", "awx", @progbits + .code16 + .org 0 + +#include "mbr.S" + +/* Partition table: 64 heads, 32 sectors/track (ZIP-drive compatible) */ +#define HEADS 64 +#define SECTORS 32 +#define CYLADDR(cyl) ((((cyl) * HEADS + (((cyl) == 0) & 1)) * SECTORS) * 512) + +#ifdef CONSOLE_INT13 +#define LOGPART 1 +#define LOGSTART 0 +#define LOGCOUNT 1 +#define BOOTSTART 1 +#define BOOTCOUNT 2 +#else /* CONSOLE_INT13 */ +#define LOGPART 0 +#define BOOTSTART 0 +#define BOOTCOUNT 2 +#endif /* CONSOLE_INT13 */ + + /* Construct a C/H/S address */ + .macro chs cylinder, head, sector + .byte \head + .byte (((\cylinder & 0x300) >> 2) | \sector) + .byte (\cylinder & 0x0ff) + .endm + + /* Construct a linear address */ + .macro linear cylinders, heads, sectors + .long ((((\cylinders * HEADS) + \heads) * SECTORS) + \sectors - 1) + .endm + + /* Construct a partition table entry */ + .macro partition bootflag, type, start, count + .byte \bootflag + chs \start, ((\start == 0) & 1), 1 + .byte \type + chs (\start + \count - 1), (HEADS - 1), SECTORS + linear \start, ((\start == 0) & 1), 1 + linear \count, 0, (1 - (((\start == 0) & 1) * SECTORS)) + .endm + + /* Partition table */ + .org 446 + .space 16 + .space 16 + + /* Partition 3: log partition (for CONSOLE_INT13) */ + .if LOGPART + partition 0x00, 0xe0, LOGSTART, LOGCOUNT + .else + .space 16 + .endif + + /* Partition 4: boot partition */ + partition 0x80, 0xeb, BOOTSTART, BOOTCOUNT + + /* Disk signature */ + .org 510 + .byte 0x55, 0xaa + +/* Skip to start of log partition */ + .if LOGPART + .org CYLADDR(LOGSTART) + .ascii "iPXE LOG\n\n" + .endif + +/* Skip to start of boot partition */ + .org CYLADDR(BOOTSTART) diff --git a/src/arch/x86/scripts/pcbios.lds 
b/src/arch/x86/scripts/pcbios.lds new file mode 100644 index 00000000..c9a91c02 --- /dev/null +++ b/src/arch/x86/scripts/pcbios.lds @@ -0,0 +1,280 @@ +/* -*- ld-script -*- */ + +/* + * Linker script for i386 images + * + */ + +SECTIONS { + + /* Each section starts at a virtual address of zero. + * + * We guarantee alignment of virtual addresses to any alignment + * specified by the constituent object files (e.g. via + * __attribute__((aligned(x)))). Load addresses are guaranteed + * only up to _max_align. Provided that all loader and relocation + * code honours _max_align, this means that physical addresses are + * also guaranteed up to _max_align. + * + * Note that when using -DKEEP_IT_REAL, the UNDI segments are only + * guaranteed to be loaded on a paragraph boundary (i.e. 16-byte + * alignment). Using _max_align>16 will therefore not guarantee + * >16-byte alignment of physical addresses when -DKEEP_IT_REAL is + * used (though virtual addresses will still be fully aligned). + * + */ + + PROVIDE ( _max_align = 16 ); + + /* + * Values used in page table calculations + * + * On older versions of ld (without the SANE_EXPR feature), + * numeric literals within a section description tend to be + * interpreted as section-relative symbols. + * + */ + _page_size = 4096; + _page_size_1 = ( _page_size - 1 ); + _pte_size = 8; + _pte_count = ( _page_size / _pte_size ); + _pte_count_1 = ( _pte_count - 1 ); + + /* + * Allow decompressor to require a minimum amount of temporary stack + * space. 
+ * + */ + PROVIDE ( _min_decompress_stack = 0 ); + + /* + * The prefix + * + */ + + .prefix 0x0 : AT ( _prefix_lma ) { + _prefix = .; + *(.prefix) + *(SORT(.pci_devlist.*)) + *(.prefix.*) + _mprefix = .; + } .bss.prefix (NOLOAD) : AT ( _end_lma ) { + _eprefix = .; + } + _prefix_filesz = ABSOLUTE ( _mprefix ) - ABSOLUTE ( _prefix ); + _prefix_memsz = ABSOLUTE ( _eprefix ) - ABSOLUTE ( _prefix ); + + /* + * The 16-bit (real-mode) code section + * + */ + + .text16.early 0x0 : AT ( _text16_early_lma ) { + _text16 = .; + KEEP(*(.text16.null)) + KEEP(*(.text16.null.*)) + . += 1; /* Prevent NULL being valid */ + *(.text16.early) + *(.text16.early.*) + _etext16_early = .; + } .text16.late ALIGN ( _max_align ) : AT ( _text16_late_lma ) { + _text16_late = .; + *(.text16) + *(.text16.*) + _mtext16 = .; + } .bss.text16 (NOLOAD) : AT ( _end_lma ) { + _etext16 = .; + } + _text16_early_filesz = ABSOLUTE ( _etext16_early ) - ABSOLUTE ( _text16 ); + _text16_early_memsz = ABSOLUTE ( _etext16_early ) - ABSOLUTE ( _text16 ); + _text16_late_filesz = ABSOLUTE ( _mtext16 ) - ABSOLUTE ( _text16_late ); + _text16_late_memsz = ABSOLUTE ( _etext16 ) - ABSOLUTE ( _text16_late ); + _text16_memsz = ABSOLUTE ( _etext16 ) - ABSOLUTE ( _text16 ); + + /* + * The 16-bit (real-mode) data section + * + */ + + .data16 0x0 : AT ( _data16_lma ) { + _data16 = .; + . += 1; /* Prevent NULL being valid */ + *(.rodata16) + *(.rodata16.*) + *(.data16) + *(.data16.*) + _mdata16 = .; + } .bss.data16 (NOLOAD) : AT ( _end_lma ) { + *(.bss16) + *(.bss16.*) + *(.stack16) + *(.stack16.*) + . = MAX ( ., _mdata16 + _min_decompress_stack ); + _edata16 = .; + } + _data16_filesz = ABSOLUTE ( _mdata16 ) - ABSOLUTE ( _data16 ); + _data16_memsz = ABSOLUTE ( _edata16 ) - ABSOLUTE ( _data16 ); + + /* + * The 32-bit sections + * + */ + + .textdata 0x0 : AT ( _textdata_lma ) { + _textdata = .; + KEEP(*(.text.null_trap)) + KEEP(*(.text.null_trap.*)) + . 
+= 1; /* Prevent NULL being valid */ + *(.text) + *(.text.*) + *(.rodata) + *(.rodata.*) + *(.data) + *(.data.*) + KEEP(*(SORT(.tbl.*))) /* Various tables. See include/tables.h */ + KEEP(*(.provided)) + KEEP(*(.provided.*)) + _mtextdata = .; + } .bss.textdata (NOLOAD) : AT ( _end_lma ) { + *(.bss) + *(.bss.*) + *(COMMON) + *(.stack) + *(.stack.*) + _pages = .; + *(.pages) + *(.pages.*) + _use_page_tables = ABSOLUTE ( . ) - ABSOLUTE ( _pages ); + _textdata_paged_len = + ABSOLUTE ( ABSOLUTE ( . ) - ABSOLUTE ( _textdata ) ); + _textdata_ptes = + ABSOLUTE ( ( _textdata_paged_len + _page_size_1 ) / _page_size ); + _textdata_pdes = + ABSOLUTE ( ( _textdata_ptes + _pte_count_1 ) / _pte_count ); + . += ( _use_page_tables ? ( _textdata_pdes * _page_size ) : 0 ); + _epages = .; + _etextdata = .; + } + _textdata_filesz = ABSOLUTE ( _mtextdata ) - ABSOLUTE ( _textdata ); + _textdata_memsz = ABSOLUTE ( _etextdata ) - ABSOLUTE ( _textdata ); + + /* + * Payload prefix + * + * If present, this will be placed between .text16.early and .text16.late. 
+ * + */ + .pprefix 0x0 : AT ( _pprefix_lma ) { + _pprefix = .; + KEEP(*(.pprefix)) + KEEP(*(.pprefix.*)) + _mpprefix = .; + } .bss.pprefix (NOLOAD) : AT ( _end_lma ) { + _epprefix = .; + } + _pprefix_filesz = ABSOLUTE ( _mpprefix ) - ABSOLUTE ( _pprefix ); + _pprefix_memsz = ABSOLUTE ( _epprefix ) - ABSOLUTE ( _pprefix ); + + /* + * Compressor information block + * + */ + + .zinfo 0x0 : AT ( _zinfo_lma ) { + _zinfo = .; + KEEP(*(.zinfo)) + KEEP(*(.zinfo.*)) + _mzinfo = .; + } .bss.zinfo (NOLOAD) : AT ( _end_lma ) { + _ezinfo = .; + } + _zinfo_filesz = ABSOLUTE ( _mzinfo ) - ABSOLUTE ( _zinfo ); + _zinfo_memsz = ABSOLUTE ( _ezinfo ) - ABSOLUTE ( _zinfo ); + + /* + * Weak symbols that need zero values if not otherwise defined + * + */ + + .weak 0x0 : AT ( _end_lma ) { + _weak = .; + *(.weak) + *(.weak.*) + _eweak = .; + } + _assert = ASSERT ( ( _weak == _eweak ), ".weak is non-zero length" ); + + /* + * Dispose of the comment and note sections to make the link map + * easier to read + * + */ + + /DISCARD/ : { + *(.comment) + *(.comment.*) + *(.note) + *(.note.*) + *(.eh_frame) + *(.eh_frame.*) + *(.rel) + *(.rel.*) + *(.einfo) + *(.einfo.*) + *(.discard) + *(.discard.*) + } + + /* + * Load address calculations. In older versions of ld, ALIGN() + * can operate only on the location counter, so we use that. + * + */ + + . = 0; + + . = ALIGN ( _max_align ); + _prefix_lma = .; + . += _prefix_filesz; + + . = ALIGN ( _max_align ); + _text16_early_lma = .; + . += _text16_early_filesz; + + . = ALIGN ( _max_align ); + . = ALIGN ( _payload_align ); + _pprefix_lma = .; + . += _pprefix_filesz; + + . = ALIGN ( _max_align ); + _payload_lma = .; + _pprefix_skip = ABSOLUTE ( _payload_lma ) - ABSOLUTE ( _pprefix_lma ); + _text16_late_lma = .; + . += _text16_late_filesz; + + . = ALIGN ( _max_align ); + _data16_lma = .; + . += _data16_filesz; + + . = ALIGN ( _max_align ); + _textdata_lma = .; + . += _textdata_filesz; + + _filesz = .; /* Do not include zinfo block in file size */ + + . 
= ALIGN ( _max_align ); + _zinfo_lma = .; + . += _zinfo_filesz; + + . = ALIGN ( _max_align ); + _end_lma = .; + + /* + * Values calculated to save code from doing it + * + */ + _text16_memsz_ppgh = ( ( ( _text16_memsz + 63 ) / 64 ) * 4 ); + _data16_memsz_ppgh = ( ( ( _data16_memsz + 63 ) / 64 ) * 4 ); + _textdata_memsz_pgh = ( ( _textdata_memsz + 15 ) / 16 ); + _textdata_memsz_kb = ( ( _textdata_memsz + 1023 ) / 1024 ); +} diff --git a/src/arch/x86/tests/comboot/shuffle-simple.asm b/src/arch/x86/tests/comboot/shuffle-simple.asm new file mode 100644 index 00000000..fa574bd7 --- /dev/null +++ b/src/arch/x86/tests/comboot/shuffle-simple.asm @@ -0,0 +1,39 @@ + bits 16 + org 100h + + jmp start + +shuffle_start: + push 0xB800 + pop es + mov cx, 80*24*2 + mov ax, 'AA' + xor di, di + rep stosw +.lbl: jmp .lbl +shuffle_end: + nop +shuffle_len equ (shuffle_end - shuffle_start + 1) + +start: + ; calculate physical address of shuffled part + xor eax, eax + push ds + pop ax + shl eax, 4 + add ax, shuffle_start + mov dword [source], eax + + mov ax, 0012h + mov di, shuffle_descriptors + mov cx, num_shuffle_descriptors + mov ebp, 0x7c00 + int 22h + int3 + +shuffle_descriptors: + dd 0x7C00 +source: dd 0 + dd shuffle_len + +num_shuffle_descriptors equ 1 diff --git a/src/arch/x86/tests/comboot/version.asm b/src/arch/x86/tests/comboot/version.asm new file mode 100644 index 00000000..01140423 --- /dev/null +++ b/src/arch/x86/tests/comboot/version.asm @@ -0,0 +1,136 @@ + bits 16 + org 100h + +_start: + ; first check for SYSLINUX + mov ah, 30h + int 21h + + cmp eax, 59530000h + jne .not_syslinux + cmp ebx, 4c530000h + jne .not_syslinux + cmp ecx, 4e490000h + jne .not_syslinux + cmp edx, 58550000h + jne .not_syslinux + + ; now get syslinux version + mov ax, 0001h + int 22h + + push cx + push dx + push di + push si + push es + + ; print version string + mov dx, str_version + mov ah, 09h + int 21h + + pop es + pop bx + push es + mov ax, 0002h + int 22h + + ; print copyright string + mov dx, 
str_copyright + mov ah, 09h + int 21h + + pop es + pop bx + mov ax, 0002h + int 22h + + ; print syslinux derivative id + mov dx, str_derivative + mov ah, 09h + int 21h + + pop ax + call print_hex_byte + + ; print version number + mov dx, str_version_num + mov ah, 09h + int 21h + + pop cx + push cx + mov ax, cx + and ax, 0FFh + call print_dec_word + + mov dl, '.' + mov ah, 02h + int 21h + + pop cx + mov ax, cx + shr ax, 8 + call print_dec_word + + ret + + +.not_syslinux: + mov dx, str_not_syslinux + mov ah, 09h + int 21h + ret + +; input: al = byte to print in hex +print_hex_byte: + push ax + shr al, 4 + call print_hex_nybble + pop ax + call print_hex_nybble + ret + +; input: bottom half of al = nybble to print in hex +print_hex_nybble: + push ax + mov bl, al + and bx, 1111b + mov dl, [str_hex + bx] + mov ah, 02h + int 21h + pop ax + ret + +str_hex: db "0123456789abcdef" + +; input: ax = word to print +print_dec_word: + mov cx, 10 + mov word [.count], 0 +.loop: + xor dx, dx + div cx + inc word [.count] + push dx + test ax, ax + jnz .loop + +.print: + pop dx + add dx, '0' + mov ah, 02h + int 21h + dec word [.count] + jnz .print + + ret + +.count: dw 0 + +str_not_syslinux: db "Not SYSLINUX or derivative (running on DOS?)$" +str_version: db "Version: $" +str_copyright: db 10, "Copyright: $" +str_derivative: db 10, "Derivative ID: 0x$" +str_version_num: db 10, "Version number: $" diff --git a/src/arch/x86/transitions/liba20.S b/src/arch/x86/transitions/liba20.S new file mode 100644 index 00000000..6c1e1f62 --- /dev/null +++ b/src/arch/x86/transitions/liba20.S @@ -0,0 +1,313 @@ +/* + * Copyright (C) 2010 Michael Brown <mbrown@fensystems.co.uk>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .arch i386 + +/**************************************************************************** + * test_a20_short, test_a20_long + * + * Check to see if A20 line is enabled + * + * Parameters: + * none + * Returns: + * CF set if A20 line is not enabled + * Corrupts: + * none + **************************************************************************** + */ +#define TEST_A20_SHORT_MAX_RETRIES 0x20 +#define TEST_A20_LONG_MAX_RETRIES 0x200000 + .section ".text16.early", "awx", @progbits + .code16 +test_a20_short: + pushl %ecx + movl $TEST_A20_SHORT_MAX_RETRIES, %ecx + jmp 1f + .size test_a20_short, . - test_a20_short +test_a20_long: + pushl %ecx + movl $TEST_A20_LONG_MAX_RETRIES, %ecx +1: pushw %ax + pushw %ds + pushw %es + + /* Set up segment registers for access across the 1MB boundary */ + xorw %ax, %ax + movw %ax, %ds + decw %ax + movw %ax, %es + +2: /* Modify and check test pattern; succeed if we see a difference */ + pushfw + cli + xchgw %ds:0, %cx + movw %es:0x10, %ax + xchgw %ds:0, %cx + popfw + cmpw %ax, %cx + clc + jnz 99f + + /* Delay and retry */ + outb %al, $0x80 + addr32 loop 2b + stc + +99: /* Restore registers and return */ + popw %es + popw %ds + popw %ax + popl %ecx + ret + .size test_a20_long, . 
- test_a20_long + +/**************************************************************************** + * enable_a20_bios + * + * Try enabling A20 line via BIOS + * + * Parameters: + * none + * Returns: + * CF set if A20 line is not enabled + * Corrupts: + * none + **************************************************************************** + */ + .section ".text16.early", "awx", @progbits + .code16 +enable_a20_bios: + + /* Preserve registers. Be very paranoid, since some BIOSes + * are reported to clobber %ebx + */ + pushal + + /* Attempt INT 15,2401 */ + movw $0x2401, %ax + int $0x15 + jc 99f + + /* Check that success was really successful */ + call test_a20_short + +99: /* Restore registers and return */ + popal + ret + .size enable_a20_bios, . - enable_a20_bios + +/**************************************************************************** + * enable_a20_kbc + * + * Try enabling A20 line via keyboard controller + * + * Parameters: + * none + * Returns: + * CF set if A20 line is not enabled + * Corrupts: + * none + **************************************************************************** + */ +#define KC_RDWR 0x60 +#define KC_RDWR_SET_A20 0xdf +#define KC_CMD 0x64 +#define KC_CMD_WOUT 0xd1 +#define KC_CMD_NULL 0xff +#define KC_STATUS 0x64 +#define KC_STATUS_OBUF_FULL 0x01 +#define KC_STATUS_IBUF_FULL 0x02 +#define KC_MAX_RETRIES 100000 + .section ".text16.early", "awx", @progbits + .code16 +enable_a20_kbc: + /* Preserve registers */ + pushw %ax + + /* Try keyboard controller */ + call empty_kbc + movb $KC_CMD_WOUT, %al + outb %al, $KC_CMD + call empty_kbc + movb $KC_RDWR_SET_A20, %al + outb %al, $KC_RDWR + call empty_kbc + movb $KC_CMD_NULL, %al + outb %al, $KC_CMD + call empty_kbc + + /* Check to see if it worked */ + call test_a20_long + + /* Restore registers and return */ + popw %ax + ret + .size enable_a20_kbc, . 
- enable_a20_kbc + + .section ".text16.early", "awx", @progbits + .code16 +empty_kbc: + /* Preserve registers */ + pushl %ecx + pushw %ax + + /* Wait for KBC to become empty */ + movl $KC_MAX_RETRIES, %ecx +1: outb %al, $0x80 + inb $KC_STATUS, %al + testb $( KC_STATUS_OBUF_FULL | KC_STATUS_IBUF_FULL ), %al + jz 99f + testb $KC_STATUS_OBUF_FULL, %al + jz 2f + outb %al, $0x80 + inb $KC_RDWR, %al +2: addr32 loop 1b + +99: /* Restore registers and return */ + popw %ax + popl %ecx + ret + .size empty_kbc, . - empty_kbc + +/**************************************************************************** + * enable_a20_fast + * + * Try enabling A20 line via "Fast Gate A20" + * + * Parameters: + * none + * Returns: + * CF set if A20 line is not enabled + * Corrupts: + * none + **************************************************************************** + */ +#define SCP_A 0x92 + .section ".text16.early", "awx", @progbits + .code16 +enable_a20_fast: + /* Preserve registers */ + pushw %ax + + /* Try "Fast Gate A20" */ + inb $SCP_A, %al + orb $0x02, %al + andb $~0x01, %al + outb %al, $SCP_A + + /* Check to see if it worked */ + call test_a20_long + + /* Restore registers and return */ + popw %ax + ret + .size enable_a20_fast, . 
- enable_a20_fast + +/**************************************************************************** + * enable_a20 + * + * Try enabling A20 line via any available method + * + * Parameters: + * none + * Returns: + * CF set if A20 line is not enabled + * Corrupts: + * none + **************************************************************************** + */ +#define ENABLE_A20_RETRIES 255 + .section ".text16.early", "awx", @progbits + .code16 + .globl enable_a20 +enable_a20: + /* Preserve registers */ + pushl %ecx + pushw %ax + + /* Check to see if A20 is already enabled */ + call test_a20_short + jnc 99f + + /* Use known working method, if we have one */ + movw %cs:enable_a20_method, %ax + testw %ax, %ax + jz 1f + call *%ax + jmp 99f +1: + /* Try all methods in turn until one works */ + movl $ENABLE_A20_RETRIES, %ecx +2: movw $enable_a20_bios, %ax + movw %ax, %cs:enable_a20_method + call *%ax + jnc 99f + movw $enable_a20_kbc, %ax + movw %ax, %cs:enable_a20_method + call *%ax + jnc 99f + movw $enable_a20_fast, %ax + movw %ax, %cs:enable_a20_method + call *%ax + jnc 99f + addr32 loop 2b + /* Failure; exit with carry set */ + movw $0, %cs:enable_a20_method + stc + +99: /* Restore registers and return */ + popw %ax + popl %ecx + ret + + .section ".text16.early.data", "aw", @progbits + .align 2 +enable_a20_method: + .word 0 + .size enable_a20_method, . - enable_a20_method + +/**************************************************************************** + * access_highmem (real mode far call) + * + * Open up access to high memory with A20 enabled + * + * Parameters: + * none + * Returns: + * CF set if high memory could not be accessed + * Corrupts: + * none + **************************************************************************** + */ + .section ".text16.early", "awx", @progbits + .code16 + .globl access_highmem +access_highmem: + /* Enable A20 line */ + call enable_a20 + lret + .size access_highmem, . 
- access_highmem diff --git a/src/arch/x86/transitions/libkir.S b/src/arch/x86/transitions/libkir.S new file mode 100644 index 00000000..fa9459d5 --- /dev/null +++ b/src/arch/x86/transitions/libkir.S @@ -0,0 +1,256 @@ +/* + * libkir: a transition library for -DKEEP_IT_REAL + * + * Michael Brown + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +/**************************************************************************** + * This file defines libkir: an interface between external and + * internal environments when -DKEEP_IT_REAL is used, so that both + * internal and external environments are in real mode. It deals with + * switching data segments and the stack. It provides the following + * functions: + * + * ext_to_kir & switch between external and internal (kir) + * kir_to_ext environments, preserving all non-segment + * registers + * + * kir_call issue a call to an internal routine from external + * code + * + * libkir is written to avoid assuming that segments are anything + * other than opaque data types, and also avoids assuming that the + * stack pointer is 16-bit. This should enable it to run just as well + * in 16:16 or 16:32 protected mode as in real mode. 
+ **************************************************************************** + */ + +/* Breakpoint for when debugging under bochs */ +#define BOCHSBP xchgw %bx, %bx + + .text + .arch i386 + .section ".text16", "awx", @progbits + .code16 + +/**************************************************************************** + * init_libkir (real-mode or 16:xx protected-mode far call) + * + * Initialise libkir ready for transitions to the kir environment + * + * Parameters: + * %cs : .text16 segment + * %ds : .data16 segment + **************************************************************************** + */ + .globl init_libkir +init_libkir: + /* Record segment registers */ + pushw %ds + popw %cs:kir_ds + lret + +/**************************************************************************** + * ext_to_kir (real-mode or 16:xx protected-mode near call) + * + * Switch from external stack and segment registers to internal stack + * and segment registers. %ss:sp is restored from the saved kir_ds + * and kir_sp. %ds, %es, %fs and %gs are all restored from the saved + * kir_ds. All other registers are preserved. + * + * %cs:0000 must point to the start of the runtime image code segment + * on entry. 
+ * + * Parameters: none + **************************************************************************** + */ + + .globl ext_to_kir +ext_to_kir: + /* Record external segment registers */ + movw %ds, %cs:ext_ds + pushw %cs + popw %ds /* Set %ds = %cs for easier access to variables */ + movw %es, %ds:ext_es + movw %fs, %ds:ext_fs + movw %gs, %ds:ext_gs + + /* Preserve registers */ + movw %ax, %ds:save_ax + + /* Extract near return address from stack */ + popw %ds:save_retaddr + + /* Record external %ss:esp */ + movw %ss, %ds:ext_ss + movl %esp, %ds:ext_esp + + /* Load internal segment registers and stack pointer */ + movw %ds:kir_ds, %ax + movw %ax, %ss + movzwl %ds:kir_sp, %esp + movw %ax, %ds + movw %ax, %es + movw %ax, %fs + movw %ax, %gs +1: + + /* Place return address on new stack */ + pushw %cs:save_retaddr + + /* Restore registers and return */ + movw %cs:save_ax, %ax + ret + +/**************************************************************************** + * kir_to_ext (real-mode or 16:xx protected-mode near call) + * + * Switch from internal stack and segment registers to external stack + * and segment registers. %ss:%esp is restored from the saved ext_ss + * and ext_esp. Other segment registers are restored from the + * corresponding locations. All other registers are preserved. + * + * Note that it is actually %ss that is recorded as kir_ds, on the + * assumption that %ss == %ds when kir_to_ext is called. 
+ * + * Parameters: none + **************************************************************************** + */ + + .globl kir_to_ext +kir_to_ext: + /* Record near return address */ + pushw %cs + popw %ds /* Set %ds = %cs for easier access to variables */ + popw %ds:save_retaddr + + /* Record internal segment registers and %sp */ + movw %ss, %ds:kir_ds + movw %sp, %ds:kir_sp + + /* Load external segment registers and stack pointer */ + movw %ds:ext_ss, %ss + movl %ds:ext_esp, %esp + movw %ds:ext_gs, %gs + movw %ds:ext_fs, %fs + movw %ds:ext_es, %es + movw %ds:ext_ds, %ds + + /* Return */ + pushw %cs:save_retaddr + ret + +/**************************************************************************** + * kir_call (real-mode or 16:xx protected-mode far call) + * + * Call a specific C function in the internal code. The prototype of + * the C function must be + * void function ( struct i386_all_resg *ix86 ); + * ix86 will point to a struct containing the real-mode registers + * at entry to kir_call. + * + * All registers will be preserved across kir_call(), unless the C + * function explicitly overwrites values in ix86. Interrupt status + * will also be preserved. + * + * Parameters: + * function : (32-bit) virtual address of C function to call + * + * Example usage: + * pushl $pxe_api_call + * lcall $UNDI_CS, $kir_call + * addw $4, %sp + * to call in to the C function + * void pxe_api_call ( struct i386_all_regs *ix86 ); + **************************************************************************** + */ + + .globl kir_call +kir_call: + /* Preserve flags. Must do this before any operation that may + * affect flags. + */ + pushfl + popl %cs:save_flags + + /* Disable interrupts. We do funny things with the stack, and + * we're not re-entrant. + */ + cli + + /* Extract address of internal routine from stack. We must do + * this without using (%bp), because we may be called with + * either a 16-bit or a 32-bit stack segment. 
+ */ + popl %cs:save_retaddr /* Scratch location */ + popl %cs:save_function + subl $8, %esp /* Restore %esp */ + + /* Switch to internal stack. Note that the external stack is + * inaccessible once we're running internally (since we have + * no concept of 48-bit far pointers) + */ + call ext_to_kir + + /* Store external registers on internal stack */ + pushl %cs:save_flags + pushal + pushl %cs:ext_fs_and_gs + pushl %cs:ext_ds_and_es + pushl %cs:ext_cs_and_ss + + /* Push &ix86 on stack and call function */ + sti + pushl %esp + data32 call *%cs:save_function + popl %eax /* discard */ + + /* Restore external registers from internal stack */ + popl %cs:ext_cs_and_ss + popl %cs:ext_ds_and_es + popl %cs:ext_fs_and_gs + popal + popl %cs:save_flags + + /* Switch to external stack */ + call kir_to_ext + + /* Restore flags */ + pushl %cs:save_flags + popfl + + /* Return */ + lret + +/**************************************************************************** + * Stored internal and external stack and segment registers + **************************************************************************** + */ + +ext_cs_and_ss: +ext_cs: .word 0 +ext_ss: .word 0 +ext_ds_and_es: +ext_ds: .word 0 +ext_es: .word 0 +ext_fs_and_gs: +ext_fs: .word 0 +ext_gs: .word 0 +ext_esp: .long 0 + + .globl kir_ds +kir_ds: .word 0 + .globl kir_sp +kir_sp: .word _estack + +/**************************************************************************** + * Temporary variables + **************************************************************************** + */ +save_ax: .word 0 +save_retaddr: .long 0 +save_flags: .long 0 +save_function: .long 0 diff --git a/src/arch/x86/transitions/libpm.S b/src/arch/x86/transitions/libpm.S new file mode 100644 index 00000000..e69de29b diff --git a/src/arch/x86/transitions/librm.S b/src/arch/x86/transitions/librm.S new file mode 100644 index 00000000..9d3eff95 --- /dev/null +++ b/src/arch/x86/transitions/librm.S @@ -0,0 +1,1621 @@ +/* + * librm: a library for interfacing to 
real-mode code + * + * Michael Brown + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + +/* Drag in general configuration */ +#include + +/* Drag in local definitions */ +#include "librm.h" + +/* CR0: protection enabled */ +#define CR0_PE ( 1 << 0 ) + +/* CR0: paging */ +#define CR0_PG ( 1 << 31 ) + +/* CR4: physical address extensions */ +#define CR4_PAE ( 1 << 5 ) + +/* Extended feature enable MSR (EFER) */ +#define MSR_EFER 0xc0000080 + +/* EFER: long mode enable */ +#define EFER_LME ( 1 << 8 ) + +/* Page: present */ +#define PG_P 0x01 + +/* Page: read/write */ +#define PG_RW 0x02 + +/* Page: user/supervisor */ +#define PG_US 0x04 + +/* Page: page size */ +#define PG_PS 0x80 + +/* Size of various paging-related data structures */ +#define SIZEOF_PTE_LOG2 3 +#define SIZEOF_PTE ( 1 << SIZEOF_PTE_LOG2 ) +#define SIZEOF_PT_LOG2 12 +#define SIZEOF_PT ( 1 << SIZEOF_PT_LOG2 ) +#define SIZEOF_4KB_PAGE_LOG2 12 +#define SIZEOF_4KB_PAGE ( 1 << SIZEOF_4KB_PAGE_LOG2 ) +#define SIZEOF_2MB_PAGE_LOG2 21 +#define SIZEOF_2MB_PAGE ( 1 << SIZEOF_2MB_PAGE_LOG2 ) +#define SIZEOF_LOW_4GB_LOG2 32 +#define SIZEOF_LOW_4GB ( 1 << SIZEOF_LOW_4GB_LOG2 ) + +/* Size of various C data structures */ +#define SIZEOF_I386_SEG_REGS 12 +#define SIZEOF_I386_REGS 32 +#define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS ) +#define SIZEOF_I386_FLAGS 4 +#define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS ) +#define SIZEOF_X86_64_REGS 128 + +/* Size of an address */ +#ifdef __x86_64__ +#define SIZEOF_ADDR 8 +#else +#define SIZEOF_ADDR 4 +#endif + +/* Default code size */ +#ifdef __x86_64__ +#define CODE_DEFAULT code64 +#else +#define CODE_DEFAULT code32 +#endif + +/* Selectively assemble code for 32-bit/64-bit builds */ +#ifdef __x86_64__ +#define if32 if 0 +#define if64 if 1 +#else +#define if32 if 1 +#define if64 if 0 +#endif + +/**************************************************************************** + * Global descriptor table + * + * Call init_librm to 
set up the GDT before attempting to use any + * protected-mode code. + * + * NOTE: This must be located before prot_to_real, otherwise gas + * throws a "can't handle non absolute segment in `ljmp'" error due to + * not knowing the value of REAL_CS when the ljmp is encountered. + * + * Note also that putting ".word gdt_end - gdt - 1" directly into + * gdt_limit, rather than going via gdt_length, will also produce the + * "non absolute segment" error. This is most probably a bug in gas. + **************************************************************************** + */ + .section ".data16.gdt", "aw", @progbits + .align 16 +gdt: +gdtr: /* The first GDT entry is unused, the GDTR can fit here. */ +gdt_limit: .word gdt_length - 1 +gdt_base: .long 0 + .word 0 /* padding */ + + .org gdt + VIRTUAL_CS, 0 +virtual_cs: /* 32 bit protected mode code segment, virtual addresses */ + .word 0xffff, 0 + .byte 0, 0x9f, 0xcf, 0 + + .org gdt + VIRTUAL_DS, 0 +virtual_ds: /* 32 bit protected mode data segment, virtual addresses */ + .word 0xffff, 0 + .byte 0, 0x93, 0xcf, 0 + + .org gdt + PHYSICAL_CS, 0 +physical_cs: /* 32 bit protected mode code segment, physical addresses */ + .word 0xffff, 0 + .byte 0, 0x9f, 0xcf, 0 + + .org gdt + PHYSICAL_DS, 0 +physical_ds: /* 32 bit protected mode data segment, physical addresses */ + .word 0xffff, 0 + .byte 0, 0x93, 0xcf, 0 + + .org gdt + REAL_CS, 0 +real_cs: /* 16 bit real mode code segment */ + .word 0xffff, 0 + .byte 0, 0x9b, 0x00, 0 + + .org gdt + REAL_DS, 0 +real_ds: /* 16 bit real mode data segment */ + .word 0xffff, 0 + .byte 0, 0x93, 0x00, 0 + + .org gdt + P2R_DS, 0 +p2r_ds: /* 16 bit real mode data segment for prot_to_real transition */ + .word 0xffff, ( P2R_DS << 4 ) + .byte 0, 0x93, 0x00, 0 + + .org gdt + LONG_CS, 0 +long_cs: /* 64 bit long mode code segment */ + .word 0, 0 + .byte 0, 0x9a, 0x20, 0 + +gdt_end: + .equ gdt_length, gdt_end - gdt + +/**************************************************************************** + * Stored 
real-mode and protected-mode stack pointers
+ *
+ * The real-mode stack pointer is stored here whenever real_to_prot
+ * is called and restored whenever prot_to_real is called. The
+ * converse happens for the protected-mode stack pointer.
+ *
+ * Despite initial appearances this scheme is, in fact re-entrant,
+ * because program flow dictates that we always return via the point
+ * we left by. For example:
+ *    PXE API call entry
+ *  1   real => prot
+ *        ...
+ *        Print a text string
+ *        ...
+ *  2     prot => real
+ *          INT 10
+ *  3     real => prot
+ *        ...
+ *        ...
+ *  4   prot => real
+ *    PXE API call exit
+ *
+ * At point 1, the RM mode stack value, say RPXE, is stored in
+ * rm_ss,sp. We want this value to still be present in rm_ss,sp when
+ * we reach point 4.
+ *
+ * At point 2, the RM stack value is restored from RPXE. At point 3,
+ * the RM stack value is again stored in rm_ss,sp. This *does*
+ * overwrite the RPXE that we have stored there, but it's the same
+ * value, since the code between points 2 and 3 has managed to return
+ * to us.
+ ****************************************************************************
+ */
+	.section ".bss.rm_ss_sp", "aw", @nobits
+	.globl rm_sp
+rm_sp:	.word 0
+	.globl rm_ss
+rm_ss:	.word 0
+
+	.section ".data.pm_esp", "aw", @progbits
+pm_esp:	.long VIRTUAL(_estack)
+
+/****************************************************************************
+ * Temporary static data buffer
+ *
+ * This is used to reduce the amount of real-mode stack space consumed
+ * during mode transitions, since we are sometimes called with very
+ * little real-mode stack space available.
+ ****************************************************************************
+ */
+	/* Temporary static buffer usage by virt_call */
+	.struct 0
+VC_TMP_GDT:	.space 6
+VC_TMP_IDT:	.space 6
+VC_TMP_PAD:	.space 4 /* for alignment */
+.if64
+VC_TMP_CR3:	.space 4
+VC_TMP_CR4:	.space 4
+VC_TMP_EMER:	.space 8
+.endif
+#ifdef TIVOLI_VMM_WORKAROUND
+VC_TMP_FXSAVE:	.space 512
+#endif
+VC_TMP_END:
+	.previous
+
+	/* Temporary static buffer usage by real_call */
+	.struct 0
+RC_TMP_FUNCTION: .space 4
+RC_TMP_END:
+	.previous
+
+	/* Shared temporary static buffer */
+	.section ".bss16.rm_tmpbuf", "aw", @nobits
+	.align 16
+rm_tmpbuf:
+	.space	VC_TMP_END
+	.size	rm_tmpbuf, . - rm_tmpbuf
+
+/****************************************************************************
+ * Virtual address offsets
+ *
+ * These are used by the protected-mode code to map between virtual
+ * and physical addresses, and to access variables in the .text16 or
+ * .data16 segments.
+ ****************************************************************************
+ */
+	.struct 0
+VA_VIRT_OFFSET:	.space SIZEOF_ADDR
+VA_TEXT16:	.space SIZEOF_ADDR
+VA_DATA16:	.space SIZEOF_ADDR
+VA_SIZE:
+	.previous
+
+	/* Internal copies, used only by librm itself */
+	.section ".bss16.rm_virt_addrs", "aw", @nobits
+rm_virt_addrs:	.space VA_SIZE
+	.equ	rm_virt_offset, ( rm_virt_addrs + VA_VIRT_OFFSET )
+	.equ	rm_text16, ( rm_virt_addrs + VA_TEXT16 )
+	.equ	rm_data16, ( rm_virt_addrs + VA_DATA16 )
+
+	/* Externally visible variables, used by C code */
+	.section ".bss.virt_addrs", "aw", @nobits
+virt_addrs:	.space VA_SIZE
+	.globl	virt_offset
+	.equ	virt_offset, ( virt_addrs + VA_VIRT_OFFSET )
+	.globl	text16
+	.equ	text16, ( virt_addrs + VA_TEXT16 )
+	.globl	data16
+	.equ	data16, ( virt_addrs + VA_DATA16 )
+
+/****************************************************************************
+ * init_librm (real-mode far call, 16-bit real-mode far return address)
+ *
+ * Initialise the GDT ready for transitions to protected mode.
+ *
+ * Parameters:
+ *   %cs : .text16 segment
+ *   %ds : .data16 segment
+ *   %edi : Physical base of protected-mode code
+ ****************************************************************************
+ */
+	.section ".text16.init_librm", "ax", @progbits
+	.code16
+	.globl init_librm
+init_librm:
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ebx
+	pushl	%edi
+
+	/* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
+	subl	$VIRTUAL(_textdata), %edi
+	movl	%edi, rm_virt_offset
+.if64 ;	setae	(rm_virt_offset+4) ; .endif
+	movl	%edi, %eax
+	movw	$virtual_cs, %bx
+	call	set_seg_base
+	movw	$virtual_ds, %bx
+	call	set_seg_base
+
+	/* Store rm_cs and rm_text16, set up real_cs segment */
+	xorl	%eax, %eax
+	movw	%cs, %ax
+	movw	%ax, %cs:rm_cs
+	shll	$4, %eax
+	movw	$real_cs, %bx
+	call	set_seg_base
+.if32 ;	subl	%edi, %eax ; .endif
+	movl	%eax, rm_text16
+
+	/* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
+	xorl	%eax, %eax
+	movw	%ds, %ax
+	movw	%ax, %cs:rm_ds
+	shll	$4, %eax
+	movw	$real_ds, %bx
+	call	set_seg_base
+	movl	%eax, gdt_base
+	addl	$gdt, gdt_base
+.if32 ;	subl	%edi, %eax ; .endif
+	movl	%eax, rm_data16
+
+	/* Configure virt_call for protected mode, if applicable */
+.if64 ;	movl	$VIRTUAL(vc_pmode), %cs:vc_jmp_offset ; .endif
+
+	/* Switch to protected mode */
+	virtcall init_librm_pmode
+	.section ".text.init_librm", "ax", @progbits
+	.code32
+init_librm_pmode:
+
+	/* Store virt_offset, text16, and data16 */
+	pushw	%ds
+	movw	$REAL_DS, %ax
+	movw	%ax, %ds
+	movl	$rm_virt_addrs, %esi
+	movl	$VIRTUAL(virt_addrs), %edi
+	movl	$( VA_SIZE / 4 ), %ecx
+	rep movsl
+	popw	%ds
+
+.if64 ;	/* Initialise long mode, if applicable */
+	movl	VIRTUAL(virt_offset), %edi
+	leal	VIRTUAL(p2l_ljmp_target)(%edi), %eax
+	movl	%eax, VIRTUAL(p2l_ljmp_offset)
+	call	init_pages
+.endif
+	/* Return to real mode */
+	ret
+	.section ".text16.init_librm", "ax", @progbits
+	.code16
+init_librm_rmode:
+
+	/* Configure virt_call for long mode, if applicable */
+.if64 ;	movl	$VIRTUAL(vc_lmode), %cs:vc_jmp_offset ; .endif
+
+	/* Initialise IDT */
+	virtcall init_idt
+
+	/* Restore registers */
+	popl	%edi
+	popl	%ebx
+	popl	%eax
+	lret
+
+	.section ".text16.set_seg_base", "ax", @progbits
+	.code16
+set_seg_base:
+1:	movw	%ax, 2(%bx)
+	rorl	$16, %eax
+	movb	%al, 4(%bx)
+	movb	%ah, 7(%bx)
+	roll	$16, %eax
+	ret
+
+/****************************************************************************
+ * real_to_prot (real-mode near call, 32-bit virtual return address)
+ *
+ * Switch from 16-bit real-mode to 32-bit protected mode with virtual
+ * addresses. The real-mode %ss:sp is stored in rm_ss and rm_sp, and
+ * the protected-mode %esp is restored from the saved pm_esp.
+ * Interrupts are disabled. All other registers may be destroyed.
+ *
+ * The return address for this function should be a 32-bit virtual
+ * address.
+ *
+ * Parameters:
+ *   %ecx : number of bytes to move from RM stack to PM stack
+ *   %edx : number of bytes to copy from RM temporary buffer to PM stack
+ *
+ ****************************************************************************
+ */
+	.section ".text16.real_to_prot", "ax", @progbits
+	.code16
+real_to_prot:
+	/* Enable A20 line */
+	call	enable_a20
+	/* A failure at this point is fatal, and there's nothing we
+	 * can do about it other than lock the machine to make the
+	 * problem immediately visible.
+	 */
+1:	jc	1b
+
+	/* Make sure we have our data segment available */
+	movw	%cs:rm_ds, %ds
+
+	/* Add protected-mode return address to length of data to be copied */
+	addw	$4, %cx /* %ecx must be less than 64kB anyway */
+
+	/* Real-mode %ss:%sp => %ebp and virtual address => %esi */
+	xorl	%eax, %eax
+	movw	%ss, %ax
+	shll	$4, %eax
+	movzwl	%sp, %ebp
+	addr32 leal (%eax,%ebp), %esi
+	subl	rm_virt_offset, %esi
+	shll	$12, %eax
+	orl	%eax, %ebp
+
+	/* Real-mode data segment virtual address => %ebx */
+	movl	rm_data16, %ebx
+.if64 ;	subl	rm_virt_offset, %ebx ; .endif
+
+	/* Load protected-mode global descriptor table */
+	data32 lgdt gdtr
+
+	/* Zero segment registers. This wastes around 12 cycles on
+	 * real hardware, but saves a substantial number of emulated
+	 * instructions under KVM.
+	 */
+	xorw	%ax, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movw	%ax, %ss
+
+	/* Switch to protected mode (with paging disabled if applicable) */
+	cli
+	movl	%cr0, %eax
+.if64 ;	andl	$~CR0_PG, %eax ; .endif
+	orb	$CR0_PE, %al
+	movl	%eax, %cr0
+	data32 ljmp $VIRTUAL_CS, $VIRTUAL(r2p_pmode)
+	.section ".text.real_to_prot", "ax", @progbits
+	.code32
+r2p_pmode:
+	/* Set up protected-mode data segments and stack pointer */
+	movw	$VIRTUAL_DS, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movw	%ax, %ss
+	movl	VIRTUAL(pm_esp), %esp
+
+	/* Load protected-mode interrupt descriptor table */
+	lidt	VIRTUAL(idtr32)
+
+	/* Record real-mode %ss:sp (after removal of data) */
+	addl	%ecx, %ebp
+	movl	%ebp, VIRTUAL(rm_sp)
+
+	/* Move data from RM stack to PM stack */
+	subl	%edx, %esp
+	subl	%ecx, %esp
+	movl	%esp, %edi
+	rep movsb
+
+	/* Copy data from RM temporary buffer to PM stack */
+	leal	rm_tmpbuf(%ebx), %esi
+	movl	%edx, %ecx
+	rep movsb
+
+	/* Return to virtual address */
+	ret
+
+/****************************************************************************
+ * prot_to_real (protected-mode near call, 32-bit real-mode return address)
+
 *
+ * Switch from 32-bit protected mode with virtual addresses to 16-bit
+ * real mode. The protected-mode %esp is stored in pm_esp and the
+ * real-mode %ss:sp is restored from the saved rm_ss and rm_sp. The
+ * high word of the real-mode %esp is set to zero. All real-mode data
+ * segment registers are loaded from the saved rm_ds. Interrupts are
+ * *not* enabled, since we want to be able to use prot_to_real in an
+ * ISR. All other registers may be destroyed.
+ *
+ * The return address for this function should be a 32-bit (sic)
+ * real-mode offset within .code16.
+ *
+ * Parameters:
+ *   %ecx : number of bytes to move from PM stack to RM stack
+ *   %edx : number of bytes to move from PM stack to RM temporary buffer
+ *   %esi : real-mode global and interrupt descriptor table registers
+ *
+ ****************************************************************************
+ */
+	.section ".text.prot_to_real", "ax", @progbits
+	.code32
+prot_to_real:
+	/* Copy real-mode global descriptor table register to RM code segment */
+	movl	VIRTUAL(text16), %edi
+.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
+	leal	rm_gdtr(%edi), %edi
+	movsw
+	movsl
+
+	/* Load real-mode interrupt descriptor table register */
+	lidt	(%esi)
+
+	/* Add return address to data to be moved to RM stack */
+	addl	$4, %ecx
+
+	/* Real-mode %ss:sp => %ebp and virtual address => %edi */
+	movl	VIRTUAL(rm_sp), %ebp
+	subl	%ecx, %ebp
+	movzwl	VIRTUAL(rm_ss), %eax
+	shll	$4, %eax
+	movzwl	%bp, %edi
+	addl	%eax, %edi
+	subl	VIRTUAL(virt_offset), %edi
+
+	/* Move data from PM stack to RM stack */
+	movl	%esp, %esi
+	rep movsb
+
+	/* Move data from PM stack to RM temporary buffer */
+	movl	VIRTUAL(data16), %edi
+.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
+	addl	$rm_tmpbuf, %edi
+	movl	%edx, %ecx
+	rep movsb
+
+	/* Record protected-mode %esp (after removal of data) */
+	movl	%esi, VIRTUAL(pm_esp)
+
+	/* Load real-mode segment limits */
+	movw	$P2R_DS, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movw	%ax, %ss
+	ljmp	$REAL_CS, $p2r_rmode
+	.section ".text16.prot_to_real", "ax", @progbits
+	.code16
+p2r_rmode:
+	/* Load real-mode GDT */
+	data32 lgdt %cs:rm_gdtr
+	/* Switch to real mode. ("0!CR0_PE" uses gas's infix "!"
+	 * (or-not) operator: 0 | ~CR0_PE, i.e. clear the PE bit.)
+	 */
+	movl	%cr0, %eax
+	andb	$0!CR0_PE, %al
+	movl	%eax, %cr0
+p2r_ljmp_rm_cs:
+	ljmp	$0, $1f
+1:
+	/* Set up real-mode data segments and stack pointer */
+	movw	%cs:rm_ds, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movl	%ebp, %eax
+	shrl	$16, %eax
+	movw	%ax, %ss
+	movzwl	%bp, %esp
+
+	/* Return to real-mode address */
+	data32 ret
+
+
+	/* Real-mode code and data segments. Assigned by the call to
+	 * init_librm. rm_cs doubles as the segment part of the jump
+	 * instruction used by prot_to_real. Both are located in
+	 * .text16 rather than .data16: rm_cs since it forms part of
+	 * the jump instruction within the code segment, and rm_ds
+	 * since real-mode code needs to be able to locate the data
+	 * segment with no other reference available.
+	 */
+	.globl rm_cs
+	.equ	rm_cs, ( p2r_ljmp_rm_cs + 3 )
+
+	.section ".text16.data.rm_ds", "aw", @progbits
+	.globl rm_ds
+rm_ds:	.word 0
+
+	/* Real-mode global and interrupt descriptor table registers */
+	.section ".text16.data.rm_gdtr", "aw", @progbits
+rm_gdtr:
+	.word 0 /* Limit */
+	.long 0 /* Base */
+
+/****************************************************************************
+ * phys_to_prot (protected-mode near call, 32-bit physical return address)
+ *
+ * Switch from 32-bit protected mode with physical addresses to 32-bit
+ * protected mode with virtual addresses. %esp is adjusted to a
+ * virtual address. All other registers are preserved.
+ *
+ * The return address for this function should be a 32-bit physical
+ * (sic) address.
+ *
+ ****************************************************************************
+ */
+	.section ".text.phys_to_prot", "ax", @progbits
+	.code32
+	.globl phys_to_prot
+phys_to_prot:
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ebp
+
+	/* Switch to virtual code segment */
+	cli
+	ljmp	$VIRTUAL_CS, $VIRTUAL(1f)
+1:
+	/* Switch to virtual data segment and adjust %esp */
+	movw	$VIRTUAL_DS, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movw	%ax, %ss
+	movl	VIRTUAL(virt_offset), %ebp
+	subl	%ebp, %esp
+
+	/* Adjust return address to a virtual address */
+	subl	%ebp, 8(%esp)
+
+	/* Restore registers and return */
+	popl	%ebp
+	popl	%eax
+	ret
+
+.if32 /* Expose as _phys_to_virt for use by COMBOOT, if applicable */
+	.globl _phys_to_virt
+	.equ	_phys_to_virt, phys_to_prot
+.endif
+
+/****************************************************************************
+ * prot_to_phys (protected-mode near call, 32-bit virtual return address)
+ *
+ * Switch from 32-bit protected mode with virtual addresses to 32-bit
+ * protected mode with physical addresses. %esp is adjusted to a
+ * physical address. All other registers are preserved.
+ *
+ * The return address for this function should be a 32-bit virtual
+ * (sic) address.
+ *
+ ****************************************************************************
+ */
+	.section ".text.prot_to_phys", "ax", @progbits
+	.code32
+prot_to_phys:
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ebp
+
+	/* Adjust return address to a physical address */
+	movl	VIRTUAL(virt_offset), %ebp
+	addl	%ebp, 8(%esp)
+
+	/* Switch to physical code segment */
+	cli
+	pushl	$PHYSICAL_CS
+	leal	VIRTUAL(1f)(%ebp), %eax
+	pushl	%eax
+	lret
+1:
+	/* Switch to physical data segment and adjust %esp */
+	movw	$PHYSICAL_DS, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+	movw	%ax, %ss
+	addl	%ebp, %esp
+
+	/* Restore registers and return */
+	popl	%ebp
+	popl	%eax
+	ret
+
+.if32 /* Expose as _virt_to_phys for use by COMBOOT, if applicable */
+	.globl _virt_to_phys
+	.equ	_virt_to_phys, prot_to_phys
+.endif
+
+/****************************************************************************
+ * intr_to_prot (protected-mode near call, 32-bit virtual return address)
+ *
+ * Switch from 32-bit protected mode with a virtual code segment and
+ * either a physical or virtual stack segment to 32-bit protected mode
+ * with normal virtual addresses. %esp is adjusted if necessary to a
+ * virtual address. All other registers are preserved.
+ *
+ * The return address for this function should be a 32-bit virtual
+ * address.
+ *
+ ****************************************************************************
+ */
+	.section ".text.intr_to_prot", "ax", @progbits
+	.code32
+	.globl intr_to_prot
+intr_to_prot:
+	/* Preserve registers */
+	pushl	%eax
+
+	/* Check whether stack segment is physical or virtual */
+	movw	%ss, %ax
+	cmpw	$VIRTUAL_DS, %ax
+	movw	$VIRTUAL_DS, %ax
+
+	/* Reload data segment registers */
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %fs
+	movw	%ax, %gs
+
+	/* Reload stack segment and adjust %esp if necessary */
+	je	1f
+	movw	%ax, %ss
+	subl	VIRTUAL(virt_offset), %esp
+1:
+	/* Restore registers and return */
+	popl	%eax
+	ret
+
+	/* Expose as _intr_to_virt for use by GDB */
+	.globl _intr_to_virt
+	.equ	_intr_to_virt, intr_to_prot
+
+/****************************************************************************
+ * prot_to_long (protected-mode near call, 32-bit virtual return address)
+ *
+ * Switch from 32-bit protected mode with virtual addresses to 64-bit
+ * long mode. The protected-mode %esp is adjusted to a physical
+ * address. All other registers are preserved.
+ *
+ * The return address for this function should be a 32-bit (sic)
+ * virtual address.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.prot_to_long", "ax", @progbits
+	.code32
+prot_to_long:
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+
+	/* Set up PML4 */
+	movl	VIRTUAL(pml4), %eax
+	movl	%eax, %cr3
+
+	/* Enable PAE */
+	movl	%cr4, %eax
+	orb	$CR4_PAE, %al
+	movl	%eax, %cr4
+
+	/* Enable long mode */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	orw	$EFER_LME, %ax
+	wrmsr
+
+	/* Enable paging */
+	movl	%cr0, %eax
+	orl	$CR0_PG, %eax
+	movl	%eax, %cr0
+
+	/* Restore registers */
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+
+	/* Construct 64-bit return address */
+	pushl	(%esp)
+	movl	$0xffffffff, 4(%esp)
+p2l_ljmp:
+	/* Switch to long mode (using a physical %rip) */
+	ljmp	$LONG_CS, $0
+	.code64
+p2l_lmode:
+	/* Adjust and zero-extend %esp to a physical address */
+	addl	virt_offset, %esp
+
+	/* Use long-mode IDT */
+	lidt	idtr64
+
+	/* Return to virtual address */
+	ret
+
+	/* Long mode jump offset and target. Required since an ljmp
+	 * in protected mode will zero-extend the offset, and so
+	 * cannot reach an address within the negative 2GB as used by
+	 * -mcmodel=kernel. Assigned by the call to init_librm.
+	 */
+	.equ	p2l_ljmp_offset, ( p2l_ljmp + 1 )
+	.equ	p2l_ljmp_target, p2l_lmode
+
+	.endif
+
+/****************************************************************************
+ * long_to_prot (long-mode near call, 64-bit virtual return address)
+ *
+ * Switch from 64-bit long mode to 32-bit protected mode with virtual
+ * addresses. The long-mode %rsp is adjusted to a virtual address.
+ * All other registers are preserved.
+ *
+ * The return address for this function should be a 64-bit (sic)
+ * virtual address.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_to_prot", "ax", @progbits
+	.code64
+long_to_prot:
+	/* Switch to protected mode */
+	ljmp	*l2p_vector
+	.code32
+l2p_pmode:
+	/* Adjust %esp to a virtual address */
+	subl	VIRTUAL(virt_offset), %esp
+
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+
+	/* Disable paging */
+	movl	%cr0, %eax
+	andl	$~CR0_PG, %eax
+	movl	%eax, %cr0
+
+	/* Disable PAE (in case external non-PAE-aware code enables paging) */
+	movl	%cr4, %eax
+	andb	$~CR4_PAE, %al
+	movl	%eax, %cr4
+
+	/* Disable long mode */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	andw	$~EFER_LME, %ax
+	wrmsr
+
+	/* Restore registers */
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+
+	/* Use protected-mode IDT */
+	lidt	VIRTUAL(idtr32)
+
+	/* Return */
+	ret	$4
+
+	/* Long mode jump vector. Required since there is no "ljmp
+	 * immediate" instruction in long mode.
+	 */
+	.section ".data.l2p_vector", "aw", @progbits
+l2p_vector:
+	.long	VIRTUAL(l2p_pmode), VIRTUAL_CS
+
+	.endif
+
+/****************************************************************************
+ * long_preserve_regs (long-mode near call, 64-bit virtual return address)
+ *
+ * Preserve registers that are accessible only in long mode. This
+ * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
+ * %rsi, %rdi, and %rbp.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_preserve_regs", "ax", @progbits
+	.code64
+long_preserve_regs:
+	/* Preserve registers */
+	pushq	%rax
+	pushq	%rcx
+	pushq	%rdx
+	pushq	%rbx
+	pushq	%rsp
+	pushq	%rbp
+	pushq	%rsi
+	pushq	%rdi
+	pushq	%r8
+	pushq	%r9
+	pushq	%r10
+	pushq	%r11
+	pushq	%r12
+	pushq	%r13
+	pushq	%r14
+	pushq	%r15
+
+	/* Return */
+	jmp	*SIZEOF_X86_64_REGS(%rsp)
+
+	.endif
+
+/****************************************************************************
+ * long_restore_regs (long-mode near call, 64-bit virtual return address)
+ *
+ * Restore registers that are accessible only in long mode. This
+ * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
+ * %rsi, %rdi, and %rbp.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_restore_regs", "ax", @progbits
+	.code64
+long_restore_regs:
+	/* Move return address above register dump */
+	popq	SIZEOF_X86_64_REGS(%rsp)
+
+	/* Restore registers */
+	popq	%r15
+	popq	%r14
+	popq	%r13
+	popq	%r12
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+	movl	%edi, (%rsp)
+	popq	%rdi
+	movl	%esi, (%rsp)
+	popq	%rsi
+	movl	%ebp, (%rsp)
+	popq	%rbp
+	leaq	8(%rsp), %rsp /* discard */
+	movl	%ebx, (%rsp)
+	popq	%rbx
+	movl	%edx, (%rsp)
+	popq	%rdx
+	movl	%ecx, (%rsp)
+	popq	%rcx
+	movl	%eax, (%rsp)
+	popq	%rax
+
+	/* Return */
+	ret
+
+	.endif
+
+/****************************************************************************
+ * virt_call (real-mode near call, 16-bit real-mode near return address)
+ *
+ * Call a specific C function in 32-bit protected mode or 64-bit long
+ * mode (as applicable). The prototype of the C function must be
+ *   void function ( struct i386_all_regs *ix86 );
+ * ix86 will point to a struct containing the real-mode registers
+ * at entry to virt_call().
+ *
+ * All registers will be preserved across virt_call(), unless the C
+ * function explicitly overwrites values in ix86. Interrupt status
+ * and GDT will also be preserved. Gate A20 will be enabled.
+ *
+ * Note that virt_call() does not rely on the real-mode stack
+ * remaining intact in order to return, since everything relevant is
+ * copied to the protected-mode stack for the duration of the call.
+ * In particular, this means that a real-mode prefix can make a call
+ * to main() which will return correctly even if the prefix's stack
+ * gets vapourised during the Etherboot run. (The prefix cannot rely
+ * on anything else on the stack being preserved, so should move any
+ * critical data to registers before calling main()).
+ *
+ * Parameters:
+ *   function : 32-bit virtual address of function to call
+ *
+ * Example usage:
+ *	pushl	$pxe_api_call
+ *	call	virt_call
+ * to call in to the C function
+ *	void pxe_api_call ( struct i386_all_regs *ix86 );
+ ****************************************************************************
+ */
+	.struct 0
+VC_OFFSET_IX86:		.space SIZEOF_I386_ALL_REGS
+VC_OFFSET_PADDING:	.space 2 /* for alignment */
+VC_OFFSET_RETADDR:	.space 2
+VC_OFFSET_PARAMS:
+VC_OFFSET_FUNCTION:	.space 4
+VC_OFFSET_END:
+	.previous
+
+	.section ".text16.virt_call", "ax", @progbits
+	.code16
+	.globl virt_call
+virt_call:
+	/* Preserve registers and flags on external RM stack */
+	pushw	%ss /* padding */
+	pushfl
+	pushal
+	pushw	%gs
+	pushw	%fs
+	pushw	%es
+	pushw	%ds
+	pushw	%ss
+	pushw	%cs
+
+	/* Claim ownership of temporary static buffer */
+	cli
+	movw	%cs:rm_ds, %ds
+
+#ifdef TIVOLI_VMM_WORKAROUND
+	/* Preserve FPU, MMX and SSE state in temporary static buffer */
+	fxsave	( rm_tmpbuf + VC_TMP_FXSAVE )
+#endif
+	/* Preserve GDT and IDT in temporary static buffer */
+	sidt	( rm_tmpbuf + VC_TMP_IDT )
+	sgdt	( rm_tmpbuf + VC_TMP_GDT )
+
+.if64 ;	/* Preserve control registers, if applicable */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	movl	%eax, ( rm_tmpbuf + VC_TMP_EMER + 0 )
+	movl	%edx, ( rm_tmpbuf + VC_TMP_EMER + 4 )
+	movl	%cr4, %eax
+	movl	%eax, ( rm_tmpbuf + VC_TMP_CR4 )
+	movl	%cr3, %eax
+	movl	%eax, ( rm_tmpbuf + VC_TMP_CR3 )
+.endif
+	/* For sanity's sake, clear the direction flag as soon as possible */
+	cld
+
+	/* Switch to protected mode and move register dump to PM stack */
+	movl	$VC_OFFSET_END, %ecx
+	movl	$VC_TMP_END, %edx
+	pushl	$VIRTUAL(vc_pmode)
+vc_jmp:	jmp	real_to_prot
+	.section ".text.virt_call", "ax", @progbits
+	.code32
+vc_pmode:
+	/* Call function (in protected mode) */
+	pushl	%esp
+	call	*(VC_OFFSET_FUNCTION+4)(%esp)
+	popl	%eax /* discard */
+
+.if64 ;	/* Switch to long mode */
+	jmp	1f
+vc_lmode:
+	call	prot_to_long
+	.code64
+
+	/* Call function (in long mode) */
+	movq	%rsp, %rdi
+	movslq	VC_OFFSET_FUNCTION(%rsp), %rax
+	callq	*%rax
+
+	/* Switch to protected mode */
+	call	long_to_prot
+1:	.code32
+.endif
+	/* Switch to real mode and move register dump back to RM stack */
+	movl	$VC_OFFSET_END, %ecx
+	movl	$VC_TMP_END, %edx
+	leal	VC_TMP_GDT(%esp, %ecx), %esi
+	pushl	$vc_rmode
+	jmp	prot_to_real
+	.section ".text16.virt_call", "ax", @progbits
+	.code16
+vc_rmode:
+.if64 ;	/* Restore control registers, if applicable */
+	movw	%sp, %bp
+	movl	( rm_tmpbuf + VC_TMP_CR3 ), %eax
+	movl	%eax, %cr3
+	movl	( rm_tmpbuf + VC_TMP_CR4 ), %eax
+	movl	%eax, %cr4
+	movl	( rm_tmpbuf + VC_TMP_EMER + 0 ), %eax
+	movl	( rm_tmpbuf + VC_TMP_EMER + 4 ), %edx
+	movl	$MSR_EFER, %ecx
+	wrmsr
+.endif
+
+#ifdef TIVOLI_VMM_WORKAROUND
+	/* Restore FPU, MMX and SSE state from temporary static buffer */
+	fxrstor	( rm_tmpbuf + VC_TMP_FXSAVE )
+#endif
+	/* Restore registers and flags and return */
+	popl	%eax /* skip %cs and %ss */
+	popw	%ds
+	popw	%es
+	popw	%fs
+	popw	%gs
+	popal
+	/* popal skips %esp. We therefore want to do "movl -20(%sp),
+	 * %esp", but -20(%sp) is not a valid 80386 expression.
+	 * Fortunately, prot_to_real() zeroes the high word of %esp, so
+	 * we can just use -20(%esp) instead.
+	 */
+	addr32 movl -20(%esp), %esp
+	popfl
+	popw	%ss /* padding */
+
+	/* Return and discard function parameters */
+	ret	$( VC_OFFSET_END - VC_OFFSET_PARAMS )
+
+
+	/* Protected-mode jump target */
+	.equ	vc_jmp_offset, ( vc_jmp - 4 )
+
+/****************************************************************************
+ * real_call (protected-mode near call, 32-bit virtual return address)
+ * real_call (long-mode near call, 64-bit virtual return address)
+ *
+ * Call a real-mode function from protected-mode or long-mode code.
+ *
+ * The non-segment register values will be passed directly to the
+ * real-mode code. The segment registers will be set as per
+ * prot_to_real. The non-segment register values set by the real-mode
+ * function will be passed back to the protected-mode or long-mode
+ * caller. A result of this is that this routine cannot be called
+ * directly from C code, since it clobbers registers that the C ABI
+ * expects the callee to preserve.
+ *
+ * librm.h defines a convenient macro REAL_CODE() for using real_call.
+ * See librm.h and realmode.h for details and examples.
+ *
+ * Parameters:
+ *   function : offset within .text16 of real-mode function to call
+ *
+ * Returns: none
+ ****************************************************************************
+ */
+	.struct 0
+RC_OFFSET_REGS:		.space SIZEOF_I386_REGS
+RC_OFFSET_REGS_END:
+RC_OFFSET_FUNCTION_COPY:.space 4
+.if64
+RC_OFFSET_LREGS:	.space SIZEOF_X86_64_REGS
+RC_OFFSET_LREG_RETADDR:	.space SIZEOF_ADDR
+.endif
+RC_OFFSET_RETADDR:	.space SIZEOF_ADDR
+RC_OFFSET_PARAMS:
+RC_OFFSET_FUNCTION:	.space SIZEOF_ADDR
+RC_OFFSET_END:
+	.previous
+
+	.section ".text.real_call", "ax", @progbits
+	.CODE_DEFAULT
+	.globl real_call
+real_call:
+.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
+	call	long_preserve_regs
+	call	long_to_prot
+	.code32
+.endif
+	/* Create register dump and function pointer copy on PM stack */
+	pushl	( RC_OFFSET_FUNCTION - RC_OFFSET_FUNCTION_COPY - 4 )(%esp)
+	pushal
+
+	/* Switch to real mode and move register dump to RM stack */
+	movl	$RC_OFFSET_REGS_END, %ecx
+	movl	$RC_TMP_END, %edx
+	pushl	$rc_rmode
+	movl	$VIRTUAL(rm_default_gdtr_idtr), %esi
+	jmp	prot_to_real
+	.section ".text16.real_call", "ax", @progbits
+	.code16
+rc_rmode:
+	/* Call real-mode function */
+	popal
+	call	*( rm_tmpbuf + RC_TMP_FUNCTION )
+	pushal
+
+	/* For sanity's sake, clear the direction flag as soon as possible */
+	cld
+
+	/* Switch to protected mode and move register dump back to PM stack */
+	movl	$RC_OFFSET_REGS_END, %ecx
+	xorl	%edx, %edx
+	pushl	$VIRTUAL(rc_pmode)
+	jmp	real_to_prot
+	.section ".text.real_call", "ax", @progbits
+	.code32
+rc_pmode:
+	/* Restore registers */
+	popal
+
+.if64 ;	/* Switch to long mode and restore registers, if applicable */
+	call	prot_to_long
+	.code64
+	call	long_restore_regs
+.endif
+	/* Return and discard function parameters */
+	ret	$( RC_OFFSET_END - RC_OFFSET_PARAMS )
+
+
+	/* Default real-mode global and interrupt descriptor table registers */
+	.section ".data.rm_default_gdtr_idtr", "aw", @progbits
+rm_default_gdtr_idtr:
+	.word 0		/* Global descriptor table limit */
+	.long 0		/* Global descriptor table base */
+	.word 0x03ff	/* Interrupt descriptor table limit */
+	.long 0		/* Interrupt descriptor table base */
+
+/****************************************************************************
+ * phys_call (protected-mode near call, 32-bit virtual return address)
+ * phys_call (long-mode near call, 64-bit virtual return address)
+ *
+ * Call a function with flat 32-bit physical addressing
+ *
+ * The non-segment register values will be passed directly to the
+ * function. The segment registers will be set for flat 32-bit
+ * physical addressing. The non-segment register values set by the
+ * function will be passed back to the caller.
+ *
+ * librm.h defines a convenient macro PHYS_CODE() for using phys_call.
+ *
+ * Parameters:
+ *   function : virtual (sic) address of function to call
+ *
+ ****************************************************************************
+ */
+	.struct 0
+.if64
+PHC_OFFSET_LREGS:	.space SIZEOF_X86_64_REGS
+PHC_OFFSET_LREG_RETADDR:.space SIZEOF_ADDR
+.endif
+PHC_OFFSET_RETADDR:	.space SIZEOF_ADDR
+PHC_OFFSET_PARAMS:
+PHC_OFFSET_FUNCTION:	.space SIZEOF_ADDR
+PHC_OFFSET_END:
+	.previous
+
+	.section ".text.phys_call", "ax", @progbits
+	.CODE_DEFAULT
+	.globl phys_call
+phys_call:
+.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
+	call	long_preserve_regs
+	call	long_to_prot
+	.code32
+.endif
+	/* Adjust function pointer to a physical address */
+	pushl	%ebp
+	movl	VIRTUAL(virt_offset), %ebp
+	addl	%ebp, ( PHC_OFFSET_FUNCTION + 4 /* saved %ebp */ )(%esp)
+	popl	%ebp
+
+	/* Switch to physical addresses */
+	call	prot_to_phys
+
+	/* Call function */
+	call	*PHC_OFFSET_FUNCTION(%esp)
+
+	/* For sanity's sake, clear the direction flag as soon as possible */
+	cld
+
+	/* Switch to virtual addresses */
+	call	phys_to_prot
+
+.if64 ;	/* Switch to long mode and restore registers, if applicable */
+	call	prot_to_long
+	.code64
+	call	long_restore_regs
+.endif
+	/* Return and discard function parameters */
+	ret	$( PHC_OFFSET_END - PHC_OFFSET_PARAMS )
+
+/****************************************************************************
+ * phys_to_long (protected-mode near call, 32-bit physical return address)
+ *
+ * Used by COMBOOT.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.phys_to_long", "ax", @progbits
+	.code32
+phys_to_long:
+
+	/* Switch to virtual addresses */
+	call	phys_to_prot
+
+	/* Convert to 32-bit virtual return address */
+	pushl	%eax
+	movl	VIRTUAL(virt_offset), %eax
+	subl	%eax, 4(%esp)
+	popl	%eax
+
+	/* Switch to long mode and return */
+	jmp	prot_to_long
+
+	/* Expose as _phys_to_virt for use by COMBOOT */
+	.globl _phys_to_virt
+	.equ	_phys_to_virt, phys_to_long
+
+	.endif
+
+/****************************************************************************
+ * long_to_phys (long-mode near call, 64-bit virtual return address)
+ *
+ * Used by COMBOOT.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_to_phys", "ax", @progbits
+	.code64
+long_to_phys:
+
+	/* Switch to protected mode */
+	call	long_to_prot
+	.code32
+
+	/* Convert to 32-bit virtual return address */
+	popl	(%esp)
+
+	/* Switch to physical addresses and return */
+	jmp	prot_to_phys
+
+	/* Expose as _virt_to_phys for use by COMBOOT */
+	.globl _virt_to_phys
+	.equ	_virt_to_phys, long_to_phys
+
+	.endif
+
+/****************************************************************************
+ * flatten_real_mode (real-mode near call)
+ *
+ * Switch to flat real mode
+ *
+ ****************************************************************************
+ */
+	.section ".text16.flatten_real_mode", "ax", @progbits
+	.code16
+	.globl flatten_real_mode
+flatten_real_mode:
+	/* Modify GDT to use flat real mode */
+	movb	$0x8f, real_cs + 6
+	movb	$0x8f, real_ds + 6
+	/* Call dummy protected-mode function */
+	virtcall flatten_dummy
+	/* Restore GDT */
+	movb	$0x00, real_cs + 6
+	movb	$0x00, real_ds + 6
+	/* Return */
+	ret
+
+	.section ".text.flatten_dummy", "ax", @progbits
+	.CODE_DEFAULT
+flatten_dummy:
+	ret
+
+/****************************************************************************
+ * Interrupt wrapper
+ *
+ * Used by the protected-mode and long-mode interrupt vectors to call
+ * the interrupt() function.
+ *
+ * May be entered with either physical or virtual stack segment.
+ ****************************************************************************
+ */
+	.section ".text.interrupt_wrapper", "ax", @progbits
+	.code32
+	.globl interrupt_wrapper
+interrupt_wrapper:
+	/* Preserve registers (excluding already-saved %eax) */
+	pushl	%ebx
+	pushl	%ecx
+	pushl	%edx
+	pushl	%esi
+	pushl	%edi
+	pushl	%ebp
+
+	/* Expand IRQ number to whole %eax register */
+	movzbl	%al, %eax
+
+.if64 ;	/* Skip transition to long mode, if applicable */
+	xorl	%edx, %edx
+	movw	%cs, %bx
+	cmpw	$LONG_CS, %bx
+	je	1f
+.endif
+	/* Preserve segment registers and original %esp */
+	pushl	%ds
+	pushl	%es
+	pushl	%fs
+	pushl	%gs
+	pushl	%ss
+	pushl	%esp
+
+	/* Switch to virtual addressing */
+	call	intr_to_prot
+
+	/* Pass 32-bit interrupt frame pointer in %edx */
+	movl	%esp, %edx
+	xorl	%ecx, %ecx
+.if64
+	/* Switch to long mode */
+	call	prot_to_long
+	.code64
+
+1:	/* Preserve long-mode registers */
+	pushq	%r8
+	pushq	%r9
+	pushq	%r10
+	pushq	%r11
+	pushq	%r12
+	pushq	%r13
+	pushq	%r14
+	pushq	%r15
+
+	/* Expand IRQ number to whole %rdi register */
+	movl	%eax, %edi
+
+	/* Pass 32-bit interrupt frame pointer (if applicable) in %rsi */
+	testl	%edx, %edx
+	je	1f
+	movl	%edx, %esi
+	addl	virt_offset, %esi
+1:
+	/* Pass 64-bit interrupt frame pointer in %rdx */
+	movq	%rsp, %rdx
+.endif
+	/* Call interrupt handler */
+	call	interrupt
+.if64
+	/* Restore long-mode registers */
+	popq	%r15
+	popq	%r14
+	popq	%r13
+	popq	%r12
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+
+	/* Skip transition back to protected mode, if applicable */
+	cmpw	$LONG_CS, %bx
+	je	1f
+
+	/* Switch to protected mode */
+	call	long_to_prot
+	.code32
+	/* Repeat the comparison: long_to_prot clobbers the flags
+	 * consumed by the REX-prefix "jne" decision below.
+	 */
+	cmpw	$LONG_CS, %bx
+.endif
+	/* Restore segment registers and original %esp */
+	lss	(%esp), %esp
+	popl	%ss
+	popl	%gs
+	popl	%fs
+	popl	%es
+	popl	%ds
+
+1:	/* Restore registers */
+	popl	%ebp
+	popl	%edi
+	popl	%esi
+	popl	%edx
+	popl	%ecx
+	popl	%ebx
+	popl	%eax
+
+	/* Return from interrupt (with REX prefix if required) */
+.if64 ;	jne	1f ;
.byte 0x48 ; .endif +1: iret + +/**************************************************************************** + * Page tables + * + **************************************************************************** + */ + .section ".pages", "aw", @nobits + .align SIZEOF_PT + + /* Page map level 4 entries (PML4Es) + * + * This comprises + * + * - PML4E[0x000] covering [0x0000000000000000-0x0000007fffffffff] + * - PML4E[0x1ff] covering [0xffffff8000000000-0xffffffffffffffff] + * + * These point to the PDPT. This creates some aliased + * addresses within unused portions of the 64-bit address + * space, but allows us to use just a single PDPT. + * + * - PDE[...] covering arbitrary 2MB portions of I/O space + * + * These are 2MB pages created by ioremap() to cover I/O + * device addresses. + */ +pml4e: + .space SIZEOF_PT + .size pml4e, . - pml4e + + .globl io_pages + .equ io_pages, pml4e + + /* Page directory pointer table entries (PDPTEs) + * + * This comprises: + * + * - PDPTE[0x000] covering [0x0000000000000000-0x000000003fffffff] + * - PDPTE[0x001] covering [0x0000000040000000-0x000000007fffffff] + * - PDPTE[0x002] covering [0x0000000080000000-0x00000000bfffffff] + * - PDPTE[0x003] covering [0x00000000c0000000-0x00000000ffffffff] + * + * These point to the appropriate page directories (in pde_low) + * used to identity-map the whole of the 32-bit address space. + * + * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff] + * + * This points back to the PML4, allowing the PML4 to be + * (ab)used to hold 2MB pages used for I/O device addresses. + * + * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff] + * + * This points back to the PDPT itself, allowing the PDPT to be + * (ab)used to hold PDEs covering .textdata. + * + * - PDE[N-M] covering [_textdata,_end) + * + * These are used to point to the page tables (in pte_textdata) + * used to map our .textdata section. 
Note that each PDE + * covers 2MB, so we are likely to use only a single PDE in + * practice. + */ +pdpte: + .space SIZEOF_PT + .size pdpte, . - pdpte + .equ pde_textdata, pdpte /* (ab)use */ + + /* Page directory entries (PDEs) for the low 4GB + * + * This comprises 2048 2MB pages to identity-map the whole of + * the 32-bit address space. + */ +pde_low: + .equ PDE_LOW_PTES, ( SIZEOF_LOW_4GB / SIZEOF_2MB_PAGE ) + .equ PDE_LOW_PTS, ( ( PDE_LOW_PTES * SIZEOF_PTE ) / SIZEOF_PT ) + .space ( PDE_LOW_PTS * SIZEOF_PT ) + .size pde_low, . - pde_low + + /* Page table entries (PTEs) for .textdata + * + * This comprises enough 4kB pages to map the whole of + * .textdata. The required number of PTEs is calculated by + * the linker script. + * + * Note that these mappings do not cover the PTEs themselves. + * This does not matter, since code running with paging + * enabled never needs to access these PTEs. + */ +pte_textdata: + /* Allocated by linker script; must be at the end of .textdata */ + + .section ".bss.pml4", "aw", @nobits +pml4: .long 0 + +/**************************************************************************** + * init_pages (protected-mode near call) + * + * Initialise the page tables ready for long mode. 
+ * + * Parameters: + * %edi : virt_offset + **************************************************************************** + */ + .section ".text.init_pages", "ax", @progbits + .code32 +init_pages: + /* Initialise PML4Es for low 4GB and negative 2GB */ + leal ( VIRTUAL(pdpte) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax + movl %eax, VIRTUAL(pml4e) + movl %eax, ( VIRTUAL(pml4e) + SIZEOF_PT - SIZEOF_PTE ) + + /* Initialise PDPTE for negative 1GB */ + movl %eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE ) + + /* Initialise PDPTE for I/O space */ + leal ( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax + movl %eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) ) + + /* Initialise PDPTEs for low 4GB */ + movl $PDE_LOW_PTS, %ecx + leal ( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \ + ( PG_P | PG_RW | PG_US ) )(%edi), %eax +1: subl $SIZEOF_PT, %eax + movl %eax, ( VIRTUAL(pdpte) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE) + loop 1b + + /* Initialise PDEs for low 4GB */ + movl $PDE_LOW_PTES, %ecx + leal ( 0 + ( PG_P | PG_RW | PG_US | PG_PS ) ), %eax +1: subl $SIZEOF_2MB_PAGE, %eax + movl %eax, ( VIRTUAL(pde_low) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE) + loop 1b + + /* Initialise PDEs for .textdata */ + movl $_textdata_pdes, %ecx + leal ( VIRTUAL(_etextdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax + movl $VIRTUAL(_textdata), %ebx + shrl $( SIZEOF_2MB_PAGE_LOG2 - SIZEOF_PTE_LOG2 ), %ebx + andl $( SIZEOF_PT - 1 ), %ebx +1: subl $SIZEOF_PT, %eax + movl %eax, (VIRTUAL(pde_textdata) - SIZEOF_PTE)(%ebx,%ecx,SIZEOF_PTE) + loop 1b + + /* Initialise PTEs for .textdata */ + movl $_textdata_ptes, %ecx + leal ( VIRTUAL(_textdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax + addl $_textdata_paged_len, %eax +1: subl $SIZEOF_4KB_PAGE, %eax + movl %eax, ( VIRTUAL(pte_textdata) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE) + loop 1b + + /* Record PML4 physical address */ + leal VIRTUAL(pml4e)(%edi), %eax + movl %eax, VIRTUAL(pml4) + + /* Return */ + ret diff --git a/src/arch/x86/transitions/librm_mgmt.c 
b/src/arch/x86/transitions/librm_mgmt.c new file mode 100644 index 00000000..f9e1d261 --- /dev/null +++ b/src/arch/x86/transitions/librm_mgmt.c @@ -0,0 +1,401 @@ +/* + * librm: a library for interfacing to real-mode code + * + * Michael Brown + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/* + * This file provides functions for managing librm. + * + */ + +/** The interrupt wrapper */ +extern char interrupt_wrapper[]; + +/** The interrupt vectors */ +static struct interrupt_vector intr_vec[NUM_INT]; + +/** The 32-bit interrupt descriptor table */ +static struct interrupt32_descriptor +idt32[NUM_INT] __attribute__ (( aligned ( 16 ) )); + +/** The 32-bit interrupt descriptor table register */ +struct idtr32 idtr32 = { + .limit = ( sizeof ( idt32 ) - 1 ), +}; + +/** The 64-bit interrupt descriptor table */ +static struct interrupt64_descriptor +idt64[NUM_INT] __attribute__ (( aligned ( 16 ) )); + +/** The interrupt descriptor table register */ +struct idtr64 idtr64 = { + .limit = ( sizeof ( idt64 ) - 1 ), +}; + +/** Length of stack dump */ +#define STACK_DUMP_LEN 128 + +/** Timer interrupt profiler */ +static struct profiler timer_irq_profiler __profiler = { .name = "irq.timer" }; + +/** Other interrupt profiler */ +static struct profiler other_irq_profiler __profiler = { .name = "irq.other" }; + +/** + * Allocate space on the real-mode stack and copy data there from a + * user buffer + * + * @v data User buffer + * @v size Size of stack data + * @ret sp New value of real-mode stack pointer + */ +uint16_t copy_user_to_rm_stack ( userptr_t data, size_t size ) { + userptr_t rm_stack; + rm_sp -= size; + rm_stack = real_to_user ( rm_ss, rm_sp ); + memcpy_user ( rm_stack, 0, data, 0, size ); + return rm_sp; +}; + +/** + * Deallocate space on the real-mode stack, optionally copying back + * data to a user buffer. 
+ * + * @v data User buffer + * @v size Size of stack data + */ +void remove_user_from_rm_stack ( userptr_t data, size_t size ) { + if ( data ) { + userptr_t rm_stack = real_to_user ( rm_ss, rm_sp ); + memcpy_user ( rm_stack, 0, data, 0, size ); + } + rm_sp += size; +}; + +/** + * Set interrupt vector + * + * @v intr Interrupt number + * @v vector Interrupt vector, or NULL to disable + */ +void set_interrupt_vector ( unsigned int intr, void *vector ) { + struct interrupt32_descriptor *idte32; + struct interrupt64_descriptor *idte64; + intptr_t addr = ( ( intptr_t ) vector ); + + /* Populate 32-bit interrupt descriptor */ + idte32 = &idt32[intr]; + idte32->segment = VIRTUAL_CS; + idte32->attr = ( vector ? ( IDTE_PRESENT | IDTE_TYPE_IRQ32 ) : 0 ); + idte32->low = ( addr >> 0 ); + idte32->high = ( addr >> 16 ); + + /* Populate 64-bit interrupt descriptor, if applicable */ + if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) { + idte64 = &idt64[intr]; + idte64->segment = LONG_CS; + idte64->attr = ( vector ? 
+ ( IDTE_PRESENT | IDTE_TYPE_IRQ64 ) : 0 ); + idte64->low = ( addr >> 0 ); + idte64->mid = ( addr >> 16 ); + idte64->high = ( ( ( uint64_t ) addr ) >> 32 ); + } +} + +/** + * Initialise interrupt descriptor table + * + */ +void init_idt ( void ) { + struct interrupt_vector *vec; + unsigned int intr; + + /* Initialise the interrupt descriptor table and interrupt vectors */ + for ( intr = 0 ; intr < NUM_INT ; intr++ ) { + vec = &intr_vec[intr]; + vec->push = PUSH_INSN; + vec->movb = MOVB_INSN; + vec->intr = intr; + vec->jmp = JMP_INSN; + vec->offset = ( ( intptr_t ) interrupt_wrapper - + ( intptr_t ) vec->next ); + set_interrupt_vector ( intr, vec ); + } + DBGC ( &intr_vec[0], "INTn vector at %p+%zxn (phys %#lx+%zxn)\n", + intr_vec, sizeof ( intr_vec[0] ), + virt_to_phys ( intr_vec ), sizeof ( intr_vec[0] ) ); + + /* Initialise the 32-bit interrupt descriptor table register */ + idtr32.base = virt_to_phys ( idt32 ); + + /* Initialise the 64-bit interrupt descriptor table register, + * if applicable. + */ + if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) + idtr64.base = virt_to_phys ( idt64 ); +} + +/** + * Determine interrupt profiler (for debugging) + * + * @v intr Interrupt number + * @ret profiler Profiler + */ +static struct profiler * interrupt_profiler ( int intr ) { + + switch ( intr ) { + case IRQ_INT ( 0 ) : + return &timer_irq_profiler; + default: + return &other_irq_profiler; + } +} + +/** + * Display interrupt stack dump (for debugging) + * + * @v intr Interrupt number + * @v frame32 32-bit interrupt wrapper stack frame (or NULL) + * @v frame64 64-bit interrupt wrapper stack frame (or NULL) + */ +static __attribute__ (( unused )) void +interrupt_dump ( int intr, struct interrupt_frame32 *frame32, + struct interrupt_frame64 *frame64 ) { + unsigned long sp; + void *stack; + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return; + + /* Print register dump */ + if ( ( sizeof ( physaddr_t ) <= sizeof ( uint32_t ) ) || frame32 ) { + sp = ( frame32->esp + sizeof ( *frame32 ) - + offsetof ( typeof ( *frame32 ), esp ) ); + DBGC ( &intr, "INT%d at %04x:%08x (stack %04x:%08lx):\n", + intr, frame32->cs, frame32->eip, frame32->ss, sp ); + DBGC ( &intr, "cs = %04x ds = %04x es = %04x fs = %04x " + "gs = %04x ss = %04x\n", frame32->cs, frame32->ds, + frame32->es, frame32->fs, frame32->gs, frame32->ss ); + DBGC ( &intr, "eax = %08x ebx = %08x ecx = %08x " + "edx = %08x flg = %08x\n", frame32->eax, frame32->ebx, + frame32->ecx, frame32->edx, frame32->eflags ); + DBGC ( &intr, "esi = %08x edi = %08x ebp = %08x " + "esp = %08lx eip = %08x\n", frame32->esi, frame32->edi, + frame32->ebp, sp, frame32->eip ); + stack = ( ( ( void * ) frame32 ) + sizeof ( *frame32 ) ); + } else { + DBGC ( &intr, "INT%d at %04llx:%016llx (stack " + "%04llx:%016llx):\n", intr, + ( ( unsigned long long ) frame64->cs ), + ( ( unsigned long long ) frame64->rip ), + ( ( unsigned long long ) frame64->ss ), + ( ( unsigned long long ) frame64->rsp ) ); + DBGC ( &intr, "rax = %016llx rbx = %016llx rcx = %016llx\n", + ( ( unsigned long long ) frame64->rax ), + ( ( unsigned long long ) frame64->rbx ), + ( ( unsigned long long ) frame64->rcx ) ); + DBGC ( &intr, "rdx = %016llx rsi = %016llx rdi = %016llx\n", + ( ( unsigned long long ) frame64->rdx ), + ( ( unsigned long long ) frame64->rsi ), + ( ( unsigned long long ) frame64->rdi ) ); + DBGC ( &intr, "rbp = %016llx rsp = %016llx flg = %016llx\n", + ( ( unsigned long long ) frame64->rbp ), + ( ( unsigned long long ) frame64->rsp ), + ( ( unsigned long long ) frame64->rflags ) ); + DBGC ( &intr, "r8 = %016llx r9 = %016llx r10 = %016llx\n", + ( ( unsigned long long ) frame64->r8 ), + ( ( unsigned long long ) frame64->r9 ), + ( ( unsigned long long ) frame64->r10 ) ); + DBGC ( &intr, "r11 = %016llx r12 = %016llx r13 = %016llx\n", + ( ( unsigned long long ) frame64->r11 ), 
+ ( ( unsigned long long ) frame64->r12 ), + ( ( unsigned long long ) frame64->r13 ) ); + DBGC ( &intr, "r14 = %016llx r15 = %016llx\n", + ( ( unsigned long long ) frame64->r14 ), + ( ( unsigned long long ) frame64->r15 ) ); + sp = frame64->rsp; + stack = phys_to_virt ( sp ); + } + + /* Print stack dump */ + DBGC_HDA ( &intr, sp, stack, STACK_DUMP_LEN ); +} + +/** + * Interrupt handler + * + * @v intr Interrupt number + * @v frame32 32-bit interrupt wrapper stack frame (or NULL) + * @v frame64 64-bit interrupt wrapper stack frame (or NULL) + * @v frame Interrupt wrapper stack frame + */ +void __attribute__ (( regparm ( 3 ) )) +interrupt ( int intr, struct interrupt_frame32 *frame32, + struct interrupt_frame64 *frame64 ) { + struct profiler *profiler = interrupt_profiler ( intr ); + uint32_t discard_eax; + + /* Trap CPU exceptions if debugging is enabled. Note that we + * cannot treat INT8+ as exceptions, since we are not + * permitted to rebase the PIC. + */ + if ( DBG_LOG && ( intr < IRQ_INT ( 0 ) ) ) { + interrupt_dump ( intr, frame32, frame64 ); + DBG ( "CPU exception: dropping to emergency shell\n" ); + shell(); + } + + /* Reissue interrupt in real mode */ + profile_start ( profiler ); + __asm__ __volatile__ ( REAL_CODE ( "movb %%al, %%cs:(1f + 1)\n\t" + "\n1:\n\t" + "int $0x00\n\t" ) + : "=a" ( discard_eax ) : "0" ( intr ) ); + profile_stop ( profiler ); + profile_exclude ( profiler ); +} + +/** + * Map pages for I/O + * + * @v bus_addr Bus address + * @v len Length of region + * @ret io_addr I/O address + */ +static void * ioremap_pages ( unsigned long bus_addr, size_t len ) { + unsigned long start; + unsigned int count; + unsigned int stride; + unsigned int first; + unsigned int i; + size_t offset; + void *io_addr; + + DBGC ( &io_pages, "IO mapping %08lx+%zx\n", bus_addr, len ); + + /* Sanity check */ + if ( ! 
len ) + return NULL; + + /* Round down start address to a page boundary */ + start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) ); + offset = ( bus_addr - start ); + assert ( offset < IO_PAGE_SIZE ); + + /* Calculate number of pages required */ + count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE ); + assert ( count != 0 ); + assert ( count < ( sizeof ( io_pages.page ) / + sizeof ( io_pages.page[0] ) ) ); + + /* Round up number of pages to a power of two */ + stride = ( 1 << ( fls ( count ) - 1 ) ); + assert ( count <= stride ); + + /* Allocate pages */ + for ( first = 0 ; first < ( sizeof ( io_pages.page ) / + sizeof ( io_pages.page[0] ) ) ; + first += stride ) { + + /* Calculate I/O address */ + io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset ); + + /* Check that page table entries are available */ + for ( i = first ; i < ( first + count ) ; i++ ) { + if ( io_pages.page[i] & PAGE_P ) { + io_addr = NULL; + break; + } + } + if ( ! io_addr ) + continue; + + /* Create page table entries */ + for ( i = first ; i < ( first + count ) ; i++ ) { + io_pages.page[i] = ( start | PAGE_P | PAGE_RW | + PAGE_US | PAGE_PWT | PAGE_PCD | + PAGE_PS ); + start += IO_PAGE_SIZE; + } + + /* Mark last page as being the last in this allocation */ + io_pages.page[ i - 1 ] |= PAGE_LAST; + + /* Return I/O address */ + DBGC ( &io_pages, "IO mapped %08lx+%zx to %p using PTEs " + "[%d-%d]\n", bus_addr, len, io_addr, first, + ( first + count - 1 ) ); + return io_addr; + } + + DBGC ( &io_pages, "IO could not map %08lx+%zx\n", bus_addr, len ); + return NULL; +} + +/** + * Unmap pages for I/O + * + * @v io_addr I/O address + */ +static void iounmap_pages ( volatile const void *io_addr ) { + volatile const void *invalidate = io_addr; + unsigned int first; + unsigned int i; + int is_last; + + DBGC ( &io_pages, "IO unmapping %p\n", io_addr ); + + /* Calculate first page table entry */ + first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE ); + + /* Clear page table entries */ + for ( i = first ; ; 
i++ ) { + + /* Sanity check */ + assert ( io_pages.page[i] & PAGE_P ); + + /* Check if this is the last page in this allocation */ + is_last = ( io_pages.page[i] & PAGE_LAST ); + + /* Clear page table entry */ + io_pages.page[i] = 0; + + /* Invalidate TLB for this page */ + __asm__ __volatile__ ( "invlpg (%0)" : : "r" ( invalidate ) ); + invalidate += IO_PAGE_SIZE; + + /* Terminate if this was the last page */ + if ( is_last ) + break; + } + + DBGC ( &io_pages, "IO unmapped %p using PTEs [%d-%d]\n", + io_addr, first, i ); +} + +PROVIDE_UACCESS_INLINE ( librm, phys_to_user ); +PROVIDE_UACCESS_INLINE ( librm, user_to_phys ); +PROVIDE_UACCESS_INLINE ( librm, virt_to_user ); +PROVIDE_UACCESS_INLINE ( librm, user_to_virt ); +PROVIDE_UACCESS_INLINE ( librm, userptr_add ); +PROVIDE_UACCESS_INLINE ( librm, memcpy_user ); +PROVIDE_UACCESS_INLINE ( librm, memmove_user ); +PROVIDE_UACCESS_INLINE ( librm, memset_user ); +PROVIDE_UACCESS_INLINE ( librm, strlen_user ); +PROVIDE_UACCESS_INLINE ( librm, memchr_user ); +PROVIDE_IOMAP_INLINE ( pages, io_to_bus ); +PROVIDE_IOMAP ( pages, ioremap, ioremap_pages ); +PROVIDE_IOMAP ( pages, iounmap, iounmap_pages ); diff --git a/src/arch/x86/transitions/librm_test.c b/src/arch/x86/transitions/librm_test.c new file mode 100644 index 00000000..77cf8022 --- /dev/null +++ b/src/arch/x86/transitions/librm_test.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Real mode transition self-tests + * + * This file allows for easy measurement of the time taken to perform + * real mode transitions, which may have a substantial overhead when + * running under a hypervisor. + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include + +/** Number of sample iterations for profiling */ +#define PROFILE_COUNT 4096 + +/** Protected-to-real mode transition profiler */ +static struct profiler p2r_profiler __profiler = { .name = "p2r" }; + +/** Real-to-protected mode transition profiler */ +static struct profiler r2p_profiler __profiler = { .name = "r2p" }; + +/** Real-mode call profiler */ +static struct profiler real_call_profiler __profiler = { .name = "real_call" }; + +/** Virtual call profiler */ +static struct profiler virt_call_profiler __profiler = { .name = "virt_call" }; + +/** + * Dummy function for profiling tests + */ +static __asmcall void librm_test_call ( struct i386_all_regs *ix86 __unused ) { + /* Do nothing */ +} + +/** + * Perform real mode transition self-tests + * + */ +static void librm_test_exec ( void ) { + unsigned int i; + unsigned long timestamp; + uint32_t timestamp_lo; + uint32_t timestamp_hi; + uint32_t started; + uint32_t stopped; + uint32_t discard_d; + + /* Profile mode transitions. We want to profile each + * direction of the transition separately, so perform an RDTSC + * while in real mode and tweak the profilers' start/stop + * times appropriately. 
+ */ + for ( i = 0 ; i < PROFILE_COUNT ; i++ ) { + profile_start ( &p2r_profiler ); + __asm__ __volatile__ ( REAL_CODE ( "rdtsc\n\t" ) + : "=a" ( timestamp_lo ), + "=d" ( timestamp_hi ) + : ); + timestamp = timestamp_lo; + if ( sizeof ( timestamp ) > sizeof ( timestamp_lo ) ) + timestamp |= ( ( ( uint64_t ) timestamp_hi ) << 32 ); + profile_start_at ( &r2p_profiler, timestamp ); + profile_stop ( &r2p_profiler ); + profile_stop_at ( &p2r_profiler, timestamp ); + } + + /* Profile complete real-mode call cycle */ + for ( i = 0 ; i < PROFILE_COUNT ; i++ ) { + profile_start ( &real_call_profiler ); + __asm__ __volatile__ ( REAL_CODE ( "" ) : ); + profile_stop ( &real_call_profiler ); + } + + /* Profile complete virtual call cycle */ + for ( i = 0 ; i < PROFILE_COUNT ; i++ ) { + __asm__ __volatile__ ( REAL_CODE ( "rdtsc\n\t" + "movl %k0, %k2\n\t" + VIRT_CALL ( librm_test_call ) + "rdtsc\n\t" ) + : "=a" ( stopped ), "=d" ( discard_d ), + "=R" ( started ) : ); + profile_start_at ( &virt_call_profiler, started ); + profile_stop_at ( &virt_call_profiler, stopped ); + } +} + +/** Real mode transition self-test */ +struct self_test librm_test __self_test = { + .name = "librm", + .exec = librm_test_exec, +}; + +REQUIRING_SYMBOL ( librm_test ); +REQUIRE_OBJECT ( test ); diff --git a/src/arch/x86_64/Makefile.pcbios b/src/arch/x86_64/Makefile.pcbios new file mode 100644 index 00000000..ba4c8d8d --- /dev/null +++ b/src/arch/x86_64/Makefile.pcbios @@ -0,0 +1,15 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Place .textdata in negative 2GB of address space +# +CFLAGS += -mcmodel=kernel +LDFLAGS += --section-start=.textdata=0xffffffffeb000000 + +# Assembly code does not respect a red zone. 
+# +CFLAGS += -mno-red-zone + +# Include generic BIOS Makefile +# +MAKEDEPS += arch/x86/Makefile.pcbios +include arch/x86/Makefile.pcbios diff --git a/src/arch/x86_64/core/gdbidt.S b/src/arch/x86_64/core/gdbidt.S new file mode 100644 index 00000000..89280bf8 --- /dev/null +++ b/src/arch/x86_64/core/gdbidt.S @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * GDB exception handlers + * + */ + +/* Size of a register */ +#define SIZEOF_REG 8 + +/* POSIX signal numbers for reporting traps to GDB */ +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGFPE 8 +#define SIGSTKFLT 16 + + .section ".text.gdbmach_interrupt", "ax", @progbits + .code64 + + .struct 0 +/* Register dump created for GDB stub */ +regs: +regs_rax: .space SIZEOF_REG +regs_rbx: .space SIZEOF_REG +regs_rcx: .space SIZEOF_REG +regs_rdx: .space SIZEOF_REG +regs_rsi: .space SIZEOF_REG +regs_rdi: .space SIZEOF_REG +regs_rbp: .space SIZEOF_REG +regs_rsp: .space SIZEOF_REG +regs_r8: .space SIZEOF_REG +regs_r9: .space SIZEOF_REG +regs_r10: .space SIZEOF_REG +regs_r11: .space SIZEOF_REG +regs_r12: .space SIZEOF_REG +regs_r13: .space SIZEOF_REG +regs_r14: .space SIZEOF_REG +regs_r15: .space SIZEOF_REG +regs_rip: .space SIZEOF_REG +regs_rflags: .space SIZEOF_REG +regs_cs: .space SIZEOF_REG +regs_ss: .space SIZEOF_REG +regs_ds: .space SIZEOF_REG +regs_es: .space SIZEOF_REG +regs_fs: .space SIZEOF_REG +regs_gs: .space SIZEOF_REG +regs_end: +/* GDB signal code */ +gdb: +gdb_code: .space SIZEOF_REG +gdb_end: +/* Long-mode exception frame */ +frame: +frame_rip: .space SIZEOF_REG +frame_cs: .space SIZEOF_REG +frame_rflags: .space SIZEOF_REG +frame_rsp: .space SIZEOF_REG +frame_ss: .space SIZEOF_REG +frame_end: + .previous + + .globl gdbmach_sigfpe +gdbmach_sigfpe: + push $SIGFPE + jmp gdbmach_interrupt + + .globl gdbmach_sigtrap +gdbmach_sigtrap: + push $SIGTRAP + jmp gdbmach_interrupt + + .globl gdbmach_sigstkflt +gdbmach_sigstkflt: + push $SIGSTKFLT + jmp gdbmach_interrupt + + .globl gdbmach_sigill +gdbmach_sigill: + push $SIGILL + jmp gdbmach_interrupt + +gdbmach_interrupt: + + /* Create register dump */ + pushq %gs + pushq %fs + pushq $0 /* %es unused in long mode */ + pushq $0 /* %ds unused in long mode */ + pushq ( frame_ss - regs_ss - SIZEOF_REG )(%rsp) + pushq ( frame_cs - regs_cs - SIZEOF_REG )(%rsp) 
+ pushq ( frame_rflags - regs_rflags - SIZEOF_REG )(%rsp) + pushq ( frame_rip - regs_rip - SIZEOF_REG )(%rsp) + pushq %r15 + pushq %r14 + pushq %r13 + pushq %r12 + pushq %r11 + pushq %r10 + pushq %r9 + pushq %r8 + pushq ( frame_rsp - regs_rsp - SIZEOF_REG )(%rsp) + pushq %rbp + pushq %rdi + pushq %rsi + pushq %rdx + pushq %rcx + pushq %rbx + pushq %rax + + /* Call GDB stub exception handler */ + movq gdb_code(%rsp), %rdi + movq %rsp, %rsi + call gdbmach_handler + + /* Restore from register dump */ + popq %rax + popq %rbx + popq %rcx + popq %rdx + popq %rsi + popq %rdi + popq %rbp + popq ( frame_rsp - regs_rsp - SIZEOF_REG )(%rsp) + popq %r8 + popq %r9 + popq %r10 + popq %r11 + popq %r12 + popq %r13 + popq %r14 + popq %r15 + popq ( frame_rip - regs_rip - SIZEOF_REG )(%rsp) + popq ( frame_rflags - regs_rflags - SIZEOF_REG )(%rsp) + popq ( frame_cs - regs_cs - SIZEOF_REG )(%rsp) + popq ( frame_ss - regs_ss - SIZEOF_REG )(%rsp) + addq $( regs_fs - regs_ds ), %rsp /* skip %ds, %es */ + popq %fs + popq %gs + + /* Skip code */ + addq $( gdb_end - gdb_code ), %rsp /* skip code */ + + /* Return */ + iretq diff --git a/src/arch/x86_64/core/setjmp.S b/src/arch/x86_64/core/setjmp.S new file mode 100644 index 00000000..e43200d7 --- /dev/null +++ b/src/arch/x86_64/core/setjmp.S @@ -0,0 +1,65 @@ +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .code64 + + /* Must match jmp_buf structure layout */ + .struct 0 +env_retaddr: .quad 0 +env_stack: .quad 0 +env_rbx: .quad 0 +env_rbp: .quad 0 +env_r12: .quad 0 +env_r13: .quad 0 +env_r14: .quad 0 +env_r15: .quad 0 + .previous + +/* + * Save stack context for non-local goto + */ + .globl setjmp +setjmp: + /* Save return address */ + movq 0(%rsp), %rax + movq %rax, env_retaddr(%rdi) + /* Save stack pointer */ + movq %rsp, env_stack(%rdi) + /* Save other registers */ + movq %rbx, env_rbx(%rdi) + movq %rbp, env_rbp(%rdi) + movq %r12, env_r12(%rdi) + movq %r13, env_r13(%rdi) + movq %r14, env_r14(%rdi) + movq %r15, env_r15(%rdi) + /* Return 
0 when returning as setjmp() */ + xorq %rax, %rax + ret + .size setjmp, . - setjmp + +/* + * Non-local jump to a saved stack context + */ + .globl longjmp +longjmp: + /* Get result in %rax */ + movq %rsi, %rax + /* Force result to non-zero */ + testq %rax, %rax + jnz 1f + incq %rax +1: /* Restore stack pointer */ + movq env_stack(%rdi), %rsp + /* Restore other registers */ + movq env_rbx(%rdi), %rbx + movq env_rbp(%rdi), %rbp + movq env_r12(%rdi), %r12 + movq env_r13(%rdi), %r13 + movq env_r14(%rdi), %r14 + movq env_r15(%rdi), %r15 + /* Replace return address on the new stack */ + popq %rcx /* discard */ + pushq env_retaddr(%rdi) + /* Return to setjmp() caller */ + ret + .size longjmp, . - longjmp diff --git a/src/arch/x86_64/include/bits/hyperv.h b/src/arch/x86_64/include/bits/hyperv.h new file mode 100644 index 00000000..fa8bb3f9 --- /dev/null +++ b/src/arch/x86_64/include/bits/hyperv.h @@ -0,0 +1,51 @@ +#ifndef _BITS_HYPERV_H +#define _BITS_HYPERV_H + +/** @file + * + * Hyper-V interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** + * Issue hypercall + * + * @v hv Hyper-V hypervisor + * @v code Call code + * @v in Input parameters + * @v out Output parameters + * @ret status Status code + */ +static inline __attribute__ (( always_inline )) int +hv_call ( struct hv_hypervisor *hv, unsigned int code, const void *in, + void *out ) { + void *hypercall = hv->hypercall; + register uint64_t rcx asm ( "rcx" ); + register uint64_t rdx asm ( "rdx" ); + register uint64_t r8 asm ( "r8" ); + uint64_t in_phys; + uint64_t out_phys; + uint16_t result; + + in_phys = ( ( __builtin_constant_p ( in ) && ( in == NULL ) ) + ? 0 : virt_to_phys ( in ) ); + out_phys = ( ( __builtin_constant_p ( out ) && ( out == NULL ) ) + ? 
0 : virt_to_phys ( out ) ); + rcx = code; + rdx = in_phys; + r8 = out_phys; + __asm__ __volatile__ ( "call *%4" + : "=a" ( result ), "+r" ( rcx ), "+r" ( rdx ), + "+r" ( r8 ) + : "m" ( hypercall ) + : "r9", "r10", "r11" ); + return result; +} + +#endif /* _BITS_HYPERV_H */ diff --git a/src/arch/x86_64/include/ipxe/msr.h b/src/arch/x86_64/include/ipxe/msr.h new file mode 100644 index 00000000..316243b6 --- /dev/null +++ b/src/arch/x86_64/include/ipxe/msr.h @@ -0,0 +1,43 @@ +#ifndef _IPXE_MSR_H +#define _IPXE_MSR_H + +/** @file + * + * Model-specific registers + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Read model-specific register + * + * @v msr Model-specific register + * @ret value Value + */ +static inline __attribute__ (( always_inline )) uint64_t +rdmsr ( unsigned int msr ) { + uint32_t high; + uint32_t low; + + __asm__ __volatile__ ( "rdmsr" : + "=d" ( high ), "=a" ( low ) : "c" ( msr ) ); + return ( ( ( ( uint64_t ) high ) << 32 ) | low ); +} + +/** + * Write model-specific register + * + * @v msr Model-specific register + * @v value Value + */ +static inline __attribute__ (( always_inline )) void +wrmsr ( unsigned int msr, uint64_t value ) { + uint32_t high = ( value >> 32 ); + uint32_t low = ( value >> 0 ); + + __asm__ __volatile__ ( "wrmsr" : : + "c" ( msr ), "d" ( high ), "a" ( low ) ); +} + +#endif /* _IPXE_MSR_H */ diff --git a/src/arch/x86_64/include/pcbios/ipxe/dhcp_arch.h b/src/arch/x86_64/include/pcbios/ipxe/dhcp_arch.h new file mode 100644 index 00000000..e22f50b3 --- /dev/null +++ b/src/arch/x86_64/include/pcbios/ipxe/dhcp_arch.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2010 VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +#ifndef _DHCP_ARCH_H +#define _DHCP_ARCH_H + +/** @file + * + * Architecture-specific DHCP options + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#define DHCP_ARCH_CLIENT_ARCHITECTURE DHCP_CLIENT_ARCHITECTURE_X86 + +#define DHCP_ARCH_CLIENT_NDI 1 /* UNDI */ , 2, 1 /* v2.1 */ + +#endif diff --git a/src/arch/x86_64/include/setjmp.h b/src/arch/x86_64/include/setjmp.h new file mode 100644 index 00000000..69835d9f --- /dev/null +++ b/src/arch/x86_64/include/setjmp.h @@ -0,0 +1,34 @@ +#ifndef _SETJMP_H +#define _SETJMP_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** A jump buffer */ +typedef struct { + /** Saved return address */ + uint64_t retaddr; + /** Saved stack pointer */ + uint64_t stack; + /** Saved %rbx */ + uint64_t rbx; + /** Saved %rbp */ + uint64_t rbp; + /** Saved %r12 */ + uint64_t r12; + /** Saved %r13 */ + uint64_t r13; + /** Saved %r14 */ + uint64_t r14; + /** Saved %r15 */ + uint64_t r15; +} jmp_buf[1]; + +extern int __asmcall __attribute__ (( returns_twice )) +setjmp ( jmp_buf env ); + +extern void __asmcall __attribute__ (( noreturn )) +longjmp ( jmp_buf env, int val ); + +#endif /* _SETJMP_H */ diff --git a/src/config/branding.h b/src/config/branding.h new file mode 100644 index 00000000..73f00af9 --- /dev/null +++ 
b/src/config/branding.h @@ -0,0 +1,174 @@ +#ifndef CONFIG_BRANDING_H +#define CONFIG_BRANDING_H + +/** @file + * + * Branding configuration + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/* + * Branding + * + * Vendors may use these strings to add their own branding to iPXE. + * PRODUCT_NAME is displayed prior to any iPXE branding in startup + * messages, and PRODUCT_SHORT_NAME is used where a brief product + * label is required (e.g. in BIOS boot selection menus). + * + * To minimise end-user confusion, it's probably a good idea to either + * make PRODUCT_SHORT_NAME a substring of PRODUCT_NAME or leave it as + * "iPXE". + * + */ +#define PRODUCT_NAME "" +#define PRODUCT_SHORT_NAME "iPXE" +#define PRODUCT_URI "http://ipxe.org" + +/* + * Tag line + * + * If your PRODUCT_SHORT_NAME is longer than the four characters used + * by "iPXE", then the standard tag line "Open Source Network Boot + * Firmware" is unlikely to fit neatly onto the screen. + */ +#define PRODUCT_TAG_LINE "Open Source Network Boot Firmware" + +/* + * Error messages + * + * iPXE error messages comprise a summary error message + * (e.g. "Permission denied") and a 32-bit error number. This number + * is incorporated into an error URI such as + * + * "No such file or directory (http://ipxe.org/2d0c613b)" + * + * or + * + * "Operation not supported (http://ipxe.org/3c092003)" + * + * Users may browse to the URI within the error message, which is + * provided by a database running on the iPXE web site + * (http://ipxe.org). This database provides details for all possible + * errors generated by iPXE, including: + * + * - the detailed error message (e.g. "Not an OCSP signing + * certificate") to complement the summary message (e.g. "Permission + * denied") which is compiled into the iPXE binary. + * + * - an instruction to the user to upgrade, if the error cannot be + * generated by the latest version of iPXE. + * + * - hints on how to fix the error (e.g. 
"This error indicates that + * the file was not found on the TFTP server. Check that you can + * retrieve the file using an alternative TFTP client, such as + * tftp-hpa on Linux.") + * + * - details of which source file within the iPXE codebase generated + * the error. + * + * - a direct link to the line(s) of code which generated the error. + * + * If you have a customer support team and would like your customers + * to contact your support team for all problems, instead of using the + * existing support infrastructure provided by http://ipxe.org, then + * you may define a custom URI to be included within error messages. + * + * Note that the custom URI is a printf() format string which must + * include a format specifier for the 32-bit error number. + */ +#define PRODUCT_ERROR_URI "http://ipxe.org/%08x" + +/* + * Command help messages + * + * iPXE command help messages include a URI constructed from the + * command name, such as + * + * "See http://ipxe.org/cmd/vcreate for further information" + * + * The iPXE web site includes documentation for the commands provided + * by the iPXE shell, including: + * + * - details of the command syntax (e.g. "vcreate --tag + * [--priority ] "). + * + * - example usages of the command (e.g. "vcreate --tag 123 net0") + * + * - a formal description of the command (e.g. "Create a VLAN network + * interface on an existing trunk network interface. The new network + * interface will be named by appending a hyphen and the VLAN tag + * value to the trunk network interface name.") + * + * - details of the possible exit statuses from the command. + * + * - links to documentation for related commands (e.g. "vdestroy") + * + * - links to documentation for relevant build options (e.g. "VLAN_CMD"). + * + * - general hints and tips on using the command. 
+ * + * If you want to provide your own documentation for all of the + * commands provided by the iPXE shell, rather than using the existing + * support infrastructure provided by http://ipxe.org, then you may + * define a custom URI to be included within command help messages. + * + * Note that the custom URI is a printf() format string which must + * include a format specifier for the command name. + * + * [ Please also note that the existing documentation is licensed + * under Creative Commons terms which require attribution to the + * iPXE project and prohibit the alteration or removal of any + * references to "iPXE". ] + */ +#define PRODUCT_COMMAND_URI "http://ipxe.org/cmd/%s" + +/* + * Setting help messages + * + * iPXE setting help messages include a URI constructed from the + * setting name, such as + * + * "http://ipxe.org/cfg/initiator-iqn" + * + * The iPXE web site includes documentation for the settings used by + * iPXE, including: + * + * - details of the corresponding DHCP option number. + * + * - details of the corresponding ISC dhcpd option name. + * + * - examples of using the setting from the iPXE command line, or in + * iPXE scripts. + * + * - examples of configuring the setting via a DHCP server. + * + * - a formal description of the setting. + * + * - links to documentation for related settings. + * + * - links to documentation for relevant build options. + * + * - general notes about the setting. + * + * If you want to provide your own documentation for all of the + * settings used by iPXE, rather than using the existing support + * infrastructure provided by http://ipxe.org, then you may define a + * custom URI to be included within setting help messages. + * + * Note that the custom URI is a printf() format string which must + * include a format specifier for the setting name. 
+ * + * [ Please also note that the existing documentation is licensed + * under Creative Commons terms which require attribution to the + * iPXE project and prohibit the alteration or removal of any + * references to "iPXE". ] + */ +#define PRODUCT_SETTING_URI "http://ipxe.org/cfg/%s" + +#include + +#endif /* CONFIG_BRANDING_H */ diff --git a/src/config/cloud/aws.ipxe b/src/config/cloud/aws.ipxe new file mode 100644 index 00000000..2c96e388 --- /dev/null +++ b/src/config/cloud/aws.ipxe @@ -0,0 +1,8 @@ +#!ipxe + +echo Amazon EC2 - iPXE boot via user-data +echo CPU: ${cpuvendor} ${cpumodel} +ifstat || +dhcp || +route || +chain -ar http://169.254.169.254/latest/user-data diff --git a/src/config/cloud/colour.h b/src/config/cloud/colour.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/cloud/console.h b/src/config/cloud/console.h new file mode 100644 index 00000000..dae18e55 --- /dev/null +++ b/src/config/cloud/console.h @@ -0,0 +1,31 @@ +/* + * Console configuration suitable for use in public cloud + * environments, or any environment where direct console access is not + * available. + * + */ + +/* Log to syslog(s) server + * + * The syslog server to be used must be specified via e.g. + * "set syslog 192.168.0.1". + */ +#define CONSOLE_SYSLOG +#define CONSOLE_SYSLOGS + +/* Log to serial port + * + * Note that the serial port output from an AWS EC2 virtual machine is + * generally available (as the "System Log") only after the instance + * has been stopped. + */ +#define CONSOLE_SERIAL + +/* Log to partition on local disk + * + * If all other log mechanisms fail then the VM boot disk containing + * the iPXE image can be detached and attached to another machine in + * the same cloud, allowing the log to be retrieved from the log + * partition. 
+ */ +#define CONSOLE_INT13 diff --git a/src/config/cloud/crypto.h b/src/config/cloud/crypto.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/cloud/gce.ipxe b/src/config/cloud/gce.ipxe new file mode 100644 index 00000000..88e12b56 --- /dev/null +++ b/src/config/cloud/gce.ipxe @@ -0,0 +1,8 @@ +#!ipxe + +echo Google Compute Engine - iPXE boot via metadata +echo CPU: ${cpuvendor} ${cpumodel} +ifstat || +dhcp || +route || +chain -ar http://metadata.google.internal/computeMetadata/v1/instance/attributes/ipxeboot diff --git a/src/config/cloud/general.h b/src/config/cloud/general.h new file mode 100644 index 00000000..99028c14 --- /dev/null +++ b/src/config/cloud/general.h @@ -0,0 +1,4 @@ +/* Allow retrieval of metadata (such as an iPXE boot script) from + * Google Compute Engine metadata server. + */ +#define HTTP_HACK_GCE diff --git a/src/config/cloud/serial.h b/src/config/cloud/serial.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/cloud/settings.h b/src/config/cloud/settings.h new file mode 100644 index 00000000..34deeb07 --- /dev/null +++ b/src/config/cloud/settings.h @@ -0,0 +1,4 @@ +/* It can often be useful to know the CPU on which a cloud instance is + * running (e.g. to isolate problems with Azure AMD instances). + */ +#define CPUID_SETTINGS diff --git a/src/config/cloud/sideband.h b/src/config/cloud/sideband.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/cloud/usb.h b/src/config/cloud/usb.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/config_asn1.c b/src/config/config_asn1.c new file mode 100644 index 00000000..c4419d04 --- /dev/null +++ b/src/config/config_asn1.c @@ -0,0 +1,39 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * ASN.1 file format configuration + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +#ifdef IMAGE_DER +REQUIRE_OBJECT ( der ); +#endif +#ifdef IMAGE_PEM +REQUIRE_OBJECT ( pem ); +#endif diff --git a/src/config/config_crypto.c b/src/config/config_crypto.c new file mode 100644 index 00000000..440bf4ce --- /dev/null +++ b/src/config/config_crypto.c @@ -0,0 +1,126 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * Cryptographic configuration + * + * Cryptographic configuration is slightly messy since we need to drag + * in objects based on combinations of build options. + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* RSA */ +#if defined ( CRYPTO_PUBKEY_RSA ) +REQUIRE_OBJECT ( oid_rsa ); +#endif + +/* MD4 */ +#if defined ( CRYPTO_DIGEST_MD4 ) +REQUIRE_OBJECT ( oid_md4 ); +#endif + +/* MD5 */ +#if defined ( CRYPTO_DIGEST_MD5 ) +REQUIRE_OBJECT ( oid_md5 ); +#endif + +/* SHA-1 */ +#if defined ( CRYPTO_DIGEST_SHA1 ) +REQUIRE_OBJECT ( oid_sha1 ); +#endif + +/* SHA-224 */ +#if defined ( CRYPTO_DIGEST_SHA224 ) +REQUIRE_OBJECT ( oid_sha224 ); +#endif + +/* SHA-256 */ +#if defined ( CRYPTO_DIGEST_SHA256 ) +REQUIRE_OBJECT ( oid_sha256 ); +#endif + +/* SHA-384 */ +#if defined ( CRYPTO_DIGEST_SHA384 ) +REQUIRE_OBJECT ( oid_sha384 ); +#endif + +/* SHA-512 */ +#if defined ( CRYPTO_DIGEST_SHA512 ) +REQUIRE_OBJECT ( oid_sha512 ); +#endif + +/* SHA-512/224 */ +#if defined ( CRYPTO_DIGEST_SHA512_224 ) +REQUIRE_OBJECT ( oid_sha512_224 ); +#endif + +/* SHA-512/256 */ +#if defined ( CRYPTO_DIGEST_SHA512_256 ) +REQUIRE_OBJECT ( oid_sha512_256 ); +#endif + +/* RSA and MD5 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_MD5 ) +REQUIRE_OBJECT ( rsa_md5 ); +#endif + +/* RSA and SHA-1 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_SHA1 ) +REQUIRE_OBJECT ( rsa_sha1 ); +#endif + +/* RSA and SHA-224 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_SHA224 ) +REQUIRE_OBJECT ( rsa_sha224 ); +#endif + +/* RSA and SHA-256 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_SHA256 ) +REQUIRE_OBJECT ( rsa_sha256 ); +#endif + +/* RSA and SHA-384 */ +#if defined ( 
CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_SHA384 ) +REQUIRE_OBJECT ( rsa_sha384 ); +#endif + +/* RSA and SHA-512 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_SHA512 ) +REQUIRE_OBJECT ( rsa_sha512 ); +#endif + +/* RSA, AES-CBC, and SHA-1 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_CIPHER_AES_CBC ) && \ + defined ( CRYPTO_DIGEST_SHA1 ) +REQUIRE_OBJECT ( rsa_aes_cbc_sha1 ); +#endif + +/* RSA, AES-CBC, and SHA-256 */ +#if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_CIPHER_AES_CBC ) && \ + defined ( CRYPTO_DIGEST_SHA256 ) +REQUIRE_OBJECT ( rsa_aes_cbc_sha256 ); +#endif diff --git a/src/config/config_efi.c b/src/config/config_efi.c new file mode 100644 index 00000000..92678d12 --- /dev/null +++ b/src/config/config_efi.c @@ -0,0 +1,51 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** @file + * + * EFI-specific configuration options + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in all requested console types + * + */ + +#ifdef CONSOLE_EFI +REQUIRE_OBJECT ( efi_console ); +#endif +#ifdef CONSOLE_EFIFB +REQUIRE_OBJECT ( efi_fbcon ); +#endif +#ifdef CONSOLE_FRAMEBUFFER +REQUIRE_OBJECT ( efi_fbcon ); +#endif +#ifdef DOWNLOAD_PROTO_FILE +REQUIRE_OBJECT ( efi_local ); +#endif diff --git a/src/config/config_fdt.c b/src/config/config_fdt.c new file mode 100644 index 00000000..e8d42593 --- /dev/null +++ b/src/config/config_fdt.c @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2019 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * Flattened Device Tree configuration options + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in devicetree sources + */ +#ifdef FDT_EFI +REQUIRE_OBJECT ( efi_fdt ); +#endif diff --git a/src/config/config_http.c b/src/config/config_http.c new file mode 100644 index 00000000..4373ea2c --- /dev/null +++ b/src/config/config_http.c @@ -0,0 +1,51 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * HTTP extensions + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in HTTP extensions + */ +#ifdef HTTP_AUTH_BASIC +REQUIRE_OBJECT ( httpbasic ); +#endif +#ifdef HTTP_AUTH_DIGEST +REQUIRE_OBJECT ( httpdigest ); +#endif +#ifdef HTTP_AUTH_NTLM +REQUIRE_OBJECT ( httpntlm ); +#endif +#ifdef HTTP_ENC_PEERDIST +REQUIRE_OBJECT ( peerdist ); +#endif +#ifdef HTTP_HACK_GCE +REQUIRE_OBJECT ( httpgce ); +#endif diff --git a/src/config/config_linux.c b/src/config/config_linux.c new file mode 100644 index 00000000..71eeff9e --- /dev/null +++ b/src/config/config_linux.c @@ -0,0 +1,41 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * Linux-specific configuration options + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in all requested console types + * + */ + +#ifdef CONSOLE_LINUX +REQUIRE_OBJECT ( linux_console ); +#endif diff --git a/src/config/config_pcbios.c b/src/config/config_pcbios.c new file mode 100644 index 00000000..698c68a8 --- /dev/null +++ b/src/config/config_pcbios.c @@ -0,0 +1,50 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * BIOS-specific configuration options + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in all requested console types + * + */ + +#ifdef CONSOLE_PCBIOS +REQUIRE_OBJECT ( bios_console ); +#endif +#ifdef CONSOLE_VESAFB +REQUIRE_OBJECT ( vesafb ); +#endif +#ifdef CONSOLE_FRAMEBUFFER +REQUIRE_OBJECT ( vesafb ); +#endif +#ifdef CONSOLE_INT13 +REQUIRE_OBJECT ( int13con ); +#endif diff --git a/src/config/config_pixbuf.c b/src/config/config_pixbuf.c new file mode 100644 index 00000000..f8ff59da --- /dev/null +++ b/src/config/config_pixbuf.c @@ -0,0 +1,39 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * Pixel buffer file format configuration + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +#ifdef IMAGE_PNM +REQUIRE_OBJECT ( pnm ); +#endif +#ifdef IMAGE_PNG +REQUIRE_OBJECT ( png ); +#endif diff --git a/src/config/config_timer.c b/src/config/config_timer.c new file mode 100644 index 00000000..d53c3993 --- /dev/null +++ b/src/config/config_timer.c @@ -0,0 +1,51 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * Timer configuration options + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in timers + */ +#ifdef TIMER_PCBIOS +REQUIRE_OBJECT ( bios_timer ); +#endif +#ifdef TIMER_RDTSC +REQUIRE_OBJECT ( rdtsc_timer ); +#endif +#ifdef TIMER_EFI +REQUIRE_OBJECT ( efi_timer ); +#endif +#ifdef TIMER_LINUX +REQUIRE_OBJECT ( linux_timer ); +#endif +#ifdef TIMER_ACPI +REQUIRE_OBJECT ( acpi_timer ); +#endif diff --git a/src/config/config_usb.c b/src/config/config_usb.c new file mode 100644 index 00000000..b679aeb2 --- /dev/null +++ b/src/config/config_usb.c @@ -0,0 +1,65 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * USB configuration options + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +/* + * Drag in USB controllers + */ +#ifdef USB_HCD_XHCI +REQUIRE_OBJECT ( xhci ); +#endif +#ifdef USB_HCD_EHCI +REQUIRE_OBJECT ( ehci ); +#endif +#ifdef USB_HCD_UHCI +REQUIRE_OBJECT ( uhci ); +#endif +#ifdef USB_HCD_USBIO +REQUIRE_OBJECT ( usbio ); +#endif + +/* + * Drag in USB peripherals + */ +#ifdef USB_KEYBOARD +REQUIRE_OBJECT ( usbkbd ); +#endif +#ifdef USB_BLOCK +REQUIRE_OBJECT ( usbblk ); +#endif + +/* + * Drag in USB external interfaces + */ +#ifdef USB_EFI +REQUIRE_OBJECT ( efi_usb ); +#endif diff --git a/src/config/dhcp.h b/src/config/dhcp.h new file mode 100644 index 00000000..bff5b56d --- /dev/null +++ b/src/config/dhcp.h @@ -0,0 +1,93 @@ +#ifndef CONFIG_DHCP_H +#define CONFIG_DHCP_H + +/** @file + * + * DHCP configuration + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/* + * DHCP and PXE Boot Server timeout parameters + * + * Initial and final timeout for DHCP discovery + * + * The PXE spec indicates discover request are sent 4 times, with + * timeouts of 4, 8, 16, 32 seconds. iPXE by default uses 1, 2, 4, 8. + */ +#define DHCP_DISC_START_TIMEOUT_SEC 1 +#define DHCP_DISC_END_TIMEOUT_SEC 10 +//#define DHCP_DISC_START_TIMEOUT_SEC 4 /* as per PXE spec */ +//#define DHCP_DISC_END_TIMEOUT_SEC 32 /* as per PXE spec */ + +/* + * Maximum number of discovery deferrals due to blocked links + * (e.g. from non-forwarding STP ports) + */ +#define DHCP_DISC_MAX_DEFERRALS 60 + +/* + * ProxyDHCP offers are given precedence by continue to wait for them + * after a valid DHCPOFFER is received. We'll wait through this + * timeout for it. The PXE spec indicates waiting through the 4 & 8 + * second timeouts, iPXE by default stops after 2. 
+ */ +#define DHCP_DISC_PROXY_TIMEOUT_SEC 2 +//#define DHCP_DISC_PROXY_TIMEOUT_SEC 11 /* as per PXE spec */ + +/* + * Per the PXE spec, requests are also tried 4 times, but at timeout + * intervals of 1, 2, 3, 4 seconds. To adapt this to an exponential + * backoff timer, we can either do 1, 2, 4, 8, ie. 4 retires with a + * longer interval or start at 0 (0.25s) for 0.25, 0.5, 1, 2, 4, + * ie. one extra try and shorter initial timeouts. iPXE by default + * does a combination of both, starting at 0 and going through the 8 + * second timeout. + */ +#define DHCP_REQ_START_TIMEOUT_SEC 0 +#define DHCP_REQ_END_TIMEOUT_SEC 10 +//#define DHCP_REQ_END_TIMEOUT_SEC 4 /* as per PXE spec */ + +/* + * A ProxyDHCP offer without PXE options also goes through a request + * phase using these same parameters, but note the early break below. + */ +#define DHCP_PROXY_START_TIMEOUT_SEC 0 +#define DHCP_PROXY_END_TIMEOUT_SEC 10 +//#define DHCP_PROXY_END_TIMEOUT_SEC 8 /* as per PXE spec */ + +/* + * A ProxyDHCP request timeout should not induce a failure condition, + * so we always want to break before the above set of timers expire. + * The iPXE default value of 2 breaks at the first timeout after 2 + * seconds, which will be after the 2 second timeout. + */ +#define DHCP_REQ_PROXY_TIMEOUT_SEC 2 +//#define DHCP_REQ_PROXY_TIMEOUT_SEC 7 /* as per PXE spec */ + +/* + * Per the PXE spec, a PXE boot server request is also be retried 4 + * times at timeouts of 1, 2, 3, 4. iPXE uses the same timeouts as + * discovery, 1, 2, 4, 8, but will move on to the next server if + * available after an elapsed time greater than 3 seconds, therefore + * effectively only sending 3 tries at timeouts of 1, 2, 4. + */ +#define PXEBS_START_TIMEOUT_SEC 1 +#define PXEBS_END_TIMEOUT_SEC 10 +//#define PXEBS_START_TIMEOUT_SEC 0 /* as per PXE spec */ +//#define PXEBS_END_TIMEOUT_SEC 8 /* as per PXE spec */ + +/* + * Increment to the next PXE Boot server, if available, after this + * this much time has elapsed. 
+ */ +#define PXEBS_MAX_TIMEOUT_SEC 3 +//#define PXEBS_MAX_TIMEOUT_SEC 7 /* as per PXE spec */ + +#include + +#endif /* CONFIG_DHCP_H */ diff --git a/src/config/fault.h b/src/config/fault.h new file mode 100644 index 00000000..5024a8ff --- /dev/null +++ b/src/config/fault.h @@ -0,0 +1,34 @@ +#ifndef CONFIG_FAULT_H +#define CONFIG_FAULT_H + +/** @file + * + * Fault injection + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/* Drop every N transmitted or received network packets */ +#define NETDEV_DISCARD_RATE 0 + +/* Drop every N transmitted or received PeerDist discovery packets */ +#define PEERDISC_DISCARD_RATE 0 + +/* Annul every N PeerDist download attempts */ +#define PEERBLK_ANNUL_RATE 0 + +/* Stall every N PeerDist download attempts */ +#define PEERBLK_STALL_RATE 0 + +/* Abort every N PeerDist download attempts */ +#define PEERBLK_ABORT_RATE 0 + +/* Corrupt every N received PeerDist packets */ +#define PEERBLK_CORRUPT_RATE 0 + +#include + +#endif /* CONFIG_FAULT_H */ diff --git a/src/config/fdt.h b/src/config/fdt.h new file mode 100644 index 00000000..4d13e053 --- /dev/null +++ b/src/config/fdt.h @@ -0,0 +1,16 @@ +#ifndef CONFIG_FDT_H +#define CONFIG_FDT_H + +/** @file + * + * Flattened Device Tree configuration + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#include + +#endif /* CONFIG_FDT_H */ diff --git a/src/config/named.h b/src/config/named.h new file mode 100644 index 00000000..ddde6f0a --- /dev/null +++ b/src/config/named.h @@ -0,0 +1,26 @@ +#ifndef CONFIG_NAMED_H +#define CONFIG_NAMED_H + +/** @file + * + * Named configurations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* config//
.h */ +#ifdef CONFIG +#define NAMED_CONFIG(_header) +#else +#define NAMED_CONFIG(_header) +#endif + +/* config/local//
.h */ +#ifdef LOCAL_CONFIG +#define LOCAL_NAMED_CONFIG(_header) +#else +#define LOCAL_NAMED_CONFIG(_header) +#endif + +#endif /* CONFIG_NAMED_H */ diff --git a/src/config/qemu/colour.h b/src/config/qemu/colour.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/qemu/console.h b/src/config/qemu/console.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/qemu/crypto.h b/src/config/qemu/crypto.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/qemu/general.h b/src/config/qemu/general.h new file mode 100644 index 00000000..a0844973 --- /dev/null +++ b/src/config/qemu/general.h @@ -0,0 +1,15 @@ +/* Disable entry during POST */ +#undef ROM_BANNER_TIMEOUT +#define ROM_BANNER_TIMEOUT 0 + +/* Extend banner timeout */ +#undef BANNER_TIMEOUT +#define BANNER_TIMEOUT 30 + +/* Work around missing EFI_PXE_BASE_CODE_PROTOCOL */ +#define EFI_DOWNGRADE_UX + +/* The Tivoli VMM workaround causes a KVM emulation failure on hosts + * without unrestricted_guest support + */ +#undef TIVOLI_VMM_WORKAROUND diff --git a/src/config/qemu/serial.h b/src/config/qemu/serial.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/qemu/settings.h b/src/config/qemu/settings.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/qemu/sideband.h b/src/config/qemu/sideband.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/qemu/usb.h b/src/config/qemu/usb.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/colour.h b/src/config/rpi/colour.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/console.h b/src/config/rpi/console.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/crypto.h b/src/config/rpi/crypto.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/general.h b/src/config/rpi/general.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/serial.h 
b/src/config/rpi/serial.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/settings.h b/src/config/rpi/settings.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/sideband.h b/src/config/rpi/sideband.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/rpi/usb.h b/src/config/rpi/usb.h new file mode 100644 index 00000000..f17ea0de --- /dev/null +++ b/src/config/rpi/usb.h @@ -0,0 +1,13 @@ +/* + * Use EFI_USB_IO_PROTOCOL + * + * The Raspberry Pi uses an embedded DesignWare USB controller for + * which we do not have a native driver. Use via the + * EFI_USB_IO_PROTOCOL driver instead. + * + */ +#undef USB_HCD_XHCI +#undef USB_HCD_EHCI +#undef USB_HCD_UHCI +#define USB_HCD_USBIO +#undef USB_EFI diff --git a/src/config/usb.h b/src/config/usb.h new file mode 100644 index 00000000..4252ec22 --- /dev/null +++ b/src/config/usb.h @@ -0,0 +1,41 @@ +#ifndef CONFIG_USB_H +#define CONFIG_USB_H + +/** @file + * + * USB configuration + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/* + * USB host controllers (all enabled by default) + * + */ +//#undef USB_HCD_XHCI /* xHCI USB host controller */ +//#undef USB_HCD_EHCI /* EHCI USB host controller */ +//#undef USB_HCD_UHCI /* UHCI USB host controller */ +//#define USB_HCD_USBIO /* Very slow EFI USB host controller */ + +/* + * USB peripherals + * + */ +//#undef USB_KEYBOARD /* USB keyboards */ +//#undef USB_BLOCK /* USB block devices */ + +/* + * USB external interfaces + * + */ +//#undef USB_EFI /* Provide EFI_USB_IO_PROTOCOL interface */ + +#include +#include NAMED_CONFIG(usb.h) +#include +#include LOCAL_NAMED_CONFIG(usb.h) + +#endif /* CONFIG_USB_H */ diff --git a/src/config/vbox/README b/src/config/vbox/README new file mode 100644 index 00000000..b6f2da95 --- /dev/null +++ b/src/config/vbox/README @@ -0,0 +1,18 @@ +Build using this command line: + +make CONFIG=vbox bin/intel--virtio-net--pcnet32.isarom + +Max size of a VirtualBox ROM is 56KB, 
57344 bytes. There should be no need +to pad the image as long as the binary is smaller or equal to this size. + +To use the ROM in VirtualBox you need to enable it using this command: + +vboxmanage setextradata global \ + VBoxInternal/Devices/pcbios/0/Config/LanBootRom \ + /absolute/path/to/intel--virtio-net--pcnet32.isarom + +NB: If you build the ROM using the .rom prefix then it'll be built as a PCI +ROM, which won't work properly in VirtualBox. The error message you'll see +is "No more network devices", which is somewhat confusing. If you enter the +shell and use the "autoboot" command things will work as intended. Remember +to always build as a .isarom to avoid this issue. diff --git a/src/config/vbox/colour.h b/src/config/vbox/colour.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/vbox/console.h b/src/config/vbox/console.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/vbox/crypto.h b/src/config/vbox/crypto.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/vbox/general.h b/src/config/vbox/general.h new file mode 100644 index 00000000..06b45f1a --- /dev/null +++ b/src/config/vbox/general.h @@ -0,0 +1,19 @@ +/* Disabled from config/defaults/pcbios.h */ + +#undef SANBOOT_PROTO_ISCSI +#undef SANBOOT_PROTO_AOE +#undef SANBOOT_PROTO_IB_SRP +#undef SANBOOT_PROTO_FCP + +/* Disabled from config/general.h */ + +#undef CRYPTO_80211_WEP +#undef CRYPTO_80211_WPA +#undef CRYPTO_80211_WPA2 +#undef IWMGMT_CMD +#undef MENU_CMD + +/* Ensure ROM banner is not displayed */ + +#undef ROM_BANNER_TIMEOUT +#define ROM_BANNER_TIMEOUT 0 diff --git a/src/config/vbox/serial.h b/src/config/vbox/serial.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/vbox/settings.h b/src/config/vbox/settings.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/vbox/sideband.h b/src/config/vbox/sideband.h new file mode 100644 index 00000000..e69de29b diff --git a/src/config/vbox/usb.h 
b/src/config/vbox/usb.h new file mode 100644 index 00000000..e69de29b diff --git a/src/core/acpi_settings.c b/src/core/acpi_settings.c new file mode 100644 index 00000000..7ba2e979 --- /dev/null +++ b/src/core/acpi_settings.c @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * ACPI settings + * + */ + +#include +#include +#include +#include +#include + +/** ACPI settings scope */ +static const struct settings_scope acpi_settings_scope; + +/** + * Check applicability of ACPI setting + * + * @v settings Settings block + * @v setting Setting + * @ret applies Setting applies within this settings block + */ +static int acpi_settings_applies ( struct settings *settings __unused, + const struct setting *setting ) { + + return ( setting->scope == &acpi_settings_scope ); +} + +/** + * Fetch value of ACPI setting + * + * @v settings Settings block + * @v setting Setting to fetch + * @v data Buffer to fill with setting data + * @v len Length of buffer + * @ret len Length of setting data, or negative error + */ +static int acpi_settings_fetch ( struct settings *settings, + struct setting *setting, + void *data, size_t len ) { + struct acpi_header acpi; + uint32_t tag_high; + uint32_t tag_low; + uint32_t tag_signature; + unsigned int tag_index; + size_t tag_offset; + size_t tag_len; + userptr_t table; + size_t offset; + size_t max_len; + int delta; + unsigned int i; + + /* Parse settings tag */ + tag_high = ( setting->tag >> 32 ); + tag_low = setting->tag; + tag_signature = bswap_32 ( tag_high ); + tag_index = ( ( tag_low >> 24 ) & 0xff ); + tag_offset = ( ( tag_low >> 8 ) & 0xffff ); + tag_len = ( ( tag_low >> 0 ) & 0xff ); + DBGC ( settings, "ACPI %s.%d offset %#zx length %#zx\n", + acpi_name ( tag_signature ), tag_index, tag_offset, tag_len ); + + /* Locate ACPI table */ + table = acpi_find ( tag_signature, tag_index ); + if ( ! 
table ) + return -ENOENT; + + /* Read table header */ + copy_from_user ( &acpi, table, 0, sizeof ( acpi ) ); + + /* Calculate starting offset and maximum available length */ + max_len = le32_to_cpu ( acpi.length ); + if ( tag_offset > max_len ) + return -ENOENT; + offset = tag_offset; + max_len -= offset; + + /* Restrict to requested length, if specified */ + if ( tag_len && ( tag_len < max_len ) ) + max_len = tag_len; + + /* Invert endianness for numeric settings */ + if ( setting->type && setting->type->numerate ) { + offset += ( max_len - 1 ); + delta = -1; + } else { + delta = +1; + } + + /* Read data */ + for ( i = 0 ; ( ( i < max_len ) && ( i < len ) ) ; i++ ) { + copy_from_user ( data, table, offset, 1 ); + data++; + offset += delta; + } + + /* Set type if not already specified */ + if ( ! setting->type ) + setting->type = &setting_type_hexraw; + + return max_len; +} + +/** ACPI settings operations */ +static struct settings_operations acpi_settings_operations = { + .applies = acpi_settings_applies, + .fetch = acpi_settings_fetch, +}; + +/** ACPI settings */ +static struct settings acpi_settings = { + .refcnt = NULL, + .siblings = LIST_HEAD_INIT ( acpi_settings.siblings ), + .children = LIST_HEAD_INIT ( acpi_settings.children ), + .op = &acpi_settings_operations, + .default_scope = &acpi_settings_scope, +}; + +/** Initialise ACPI settings */ +static void acpi_settings_init ( void ) { + int rc; + + if ( ( rc = register_settings ( &acpi_settings, NULL, + "acpi" ) ) != 0 ) { + DBG ( "ACPI could not register settings: %s\n", + strerror ( rc ) ); + return; + } +} + +/** ACPI settings initialiser */ +struct init_fn acpi_settings_init_fn __init_fn ( INIT_NORMAL ) = { + .initialise = acpi_settings_init, +}; diff --git a/src/core/blocktrans.c b/src/core/blocktrans.c new file mode 100644 index 00000000..3f32f9cf --- /dev/null +++ b/src/core/blocktrans.c @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Block device translator + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/** + * Reallocate block device translator data buffer + * + * @v xferbuf Data transfer buffer + * @v len New length (or zero to free buffer) + * @ret rc Return status code + */ +static int blktrans_xferbuf_realloc ( struct xfer_buffer *xferbuf, + size_t len ) { + struct block_translator *blktrans = + container_of ( xferbuf, struct block_translator, xferbuf ); + + /* Record length, if applicable */ + if ( blktrans->buffer ) { + + /* We have a (non-reallocatable) data buffer */ + return -ENOTSUP; + + } else { + + /* Record length (for block device capacity) */ + xferbuf->len = len; + return 0; + } +} + +/** + * Write data to block device translator data buffer + * + * @v xferbuf Data transfer buffer + * @v offset Starting offset + * @v data Data to copy + * @v len Length of data + */ +static void blktrans_xferbuf_write ( struct xfer_buffer *xferbuf, size_t offset, + const 
void *data, size_t len ) { + struct block_translator *blktrans = + container_of ( xferbuf, struct block_translator, xferbuf ); + + /* Write data to buffer, if applicable */ + if ( blktrans->buffer ) { + + /* Write data to buffer */ + copy_to_user ( blktrans->buffer, offset, data, len ); + + } else { + + /* Sanity check */ + assert ( len == 0 ); + } +} + +/** + * Read data from block device translator data buffer + * + * @v xferbuf Data transfer buffer + * @v offset Starting offset + * @v data Data to read + * @v len Length of data + */ +static void blktrans_xferbuf_read ( struct xfer_buffer *xferbuf, size_t offset, + void *data, size_t len ) { + struct block_translator *blktrans = + container_of ( xferbuf, struct block_translator, xferbuf ); + + /* Read data from buffer, if applicable */ + if ( blktrans->buffer ) { + + /* Read data from buffer */ + copy_from_user ( data, blktrans->buffer, offset, len ); + + } else { + + /* Sanity check */ + assert ( len == 0 ); + } +} + +/** Block device translator data transfer buffer operations */ +static struct xfer_buffer_operations blktrans_xferbuf_operations = { + .realloc = blktrans_xferbuf_realloc, + .write = blktrans_xferbuf_write, + .read = blktrans_xferbuf_read, +}; + +/** + * Close block device translator + * + * @v blktrans Block device translator + * @v rc Reason for close + */ +static void blktrans_close ( struct block_translator *blktrans, int rc ) { + struct block_device_capacity capacity; + + /* Report block device capacity, if applicable */ + if ( ( rc == 0 ) && ( blktrans->blksize ) ) { + + /* Construct block device capacity */ + capacity.blocks = + ( blktrans->xferbuf.len / blktrans->blksize ); + capacity.blksize = blktrans->blksize; + capacity.max_count = -1U; + + /* Report block device capacity */ + block_capacity ( &blktrans->block, &capacity ); + } + + /* Shut down interfaces */ + intf_shutdown ( &blktrans->xfer, rc ); + intf_shutdown ( &blktrans->block, rc ); +} + +/** + * Deliver data + * + * @v blktrans 
Block device translator + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int blktrans_deliver ( struct block_translator *blktrans, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + int rc; + + /* Deliver to buffer */ + if ( ( rc = xferbuf_deliver ( &blktrans->xferbuf, iob_disown ( iobuf ), + meta ) ) != 0 ) { + DBGC ( blktrans, "BLKTRANS %p could not deliver: %s\n", + blktrans, strerror ( rc ) ); + goto err; + } + + return 0; + + err: + blktrans_close ( blktrans, rc ); + return rc; +} + +/** + * Get underlying data transfer buffer + * + * @v blktrans Block device translator + * @ret xferbuf Data transfer buffer + */ +static struct xfer_buffer * +blktrans_buffer ( struct block_translator *blktrans ) { + + return &blktrans->xferbuf; +} + +/** Block device translator block device interface operations */ +static struct interface_operation blktrans_block_operations[] = { + INTF_OP ( intf_close, struct block_translator *, blktrans_close ), +}; + +/** Block device translator block device interface descriptor */ +static struct interface_descriptor blktrans_block_desc = + INTF_DESC_PASSTHRU ( struct block_translator, block, + blktrans_block_operations, xfer ); + +/** Block device translator data transfer interface operations */ +static struct interface_operation blktrans_xfer_operations[] = { + INTF_OP ( xfer_deliver, struct block_translator *, blktrans_deliver ), + INTF_OP ( xfer_buffer, struct block_translator *, blktrans_buffer ), + INTF_OP ( intf_close, struct block_translator *, blktrans_close ), +}; + +/** Block device translator data transfer interface descriptor */ +static struct interface_descriptor blktrans_xfer_desc = + INTF_DESC_PASSTHRU ( struct block_translator, xfer, + blktrans_xfer_operations, block ); + +/** + * Insert block device translator + * + * @v block Block device interface + * @v buffer Data buffer (or UNULL) + * @v size Length of data buffer, or block size + * @ret rc Return status 
code + */ +int block_translate ( struct interface *block, userptr_t buffer, size_t size ) { + struct block_translator *blktrans; + int rc; + + /* Allocate and initialise structure */ + blktrans = zalloc ( sizeof ( *blktrans ) ); + if ( ! blktrans ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &blktrans->refcnt, NULL ); + intf_init ( &blktrans->block, &blktrans_block_desc, &blktrans->refcnt ); + intf_init ( &blktrans->xfer, &blktrans_xfer_desc, &blktrans->refcnt ); + blktrans->xferbuf.op = &blktrans_xferbuf_operations; + blktrans->buffer = buffer; + if ( buffer ) { + blktrans->xferbuf.len = size; + } else { + blktrans->blksize = size; + } + + /* Attach to interfaces, mortalise self, and return */ + assert ( block->dest != &null_intf ); + intf_plug_plug ( &blktrans->xfer, block->dest ); + intf_plug_plug ( &blktrans->block, block ); + ref_put ( &blktrans->refcnt ); + + DBGC2 ( blktrans, "BLKTRANS %p created", blktrans ); + if ( buffer ) { + DBGC2 ( blktrans, " for %#lx+%#zx", + user_to_phys ( buffer, 0 ), size ); + } + DBGC2 ( blktrans, "\n" ); + return 0; + + ref_put ( &blktrans->refcnt ); + err_alloc: + return rc; +} diff --git a/src/core/dummy_sanboot.c b/src/core/dummy_sanboot.c new file mode 100644 index 00000000..e6293099 --- /dev/null +++ b/src/core/dummy_sanboot.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Dummy SAN device + * + */ + +#include +#include + +/** + * Hook dummy SAN device + * + * @v drive Drive number + * @v uris List of URIs + * @v count Number of URIs + * @v flags Flags + * @ret drive Drive number, or negative error + */ +static int dummy_san_hook ( unsigned int drive, struct uri **uris, + unsigned int count, unsigned int flags ) { + struct san_device *sandev; + int rc; + + /* Allocate SAN device */ + sandev = alloc_sandev ( uris, count, 0 ); + if ( ! sandev ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Register SAN device */ + if ( ( rc = register_sandev ( sandev, drive, flags ) ) != 0 ) { + DBGC ( sandev, "SAN %#02x could not register: %s\n", + sandev->drive, strerror ( rc ) ); + goto err_register; + } + + return drive; + + unregister_sandev ( sandev ); + err_register: + sandev_put ( sandev ); + err_alloc: + return rc; +} + +/** + * Unhook dummy SAN device + * + * @v drive Drive number + */ +static void dummy_san_unhook ( unsigned int drive ) { + struct san_device *sandev; + + /* Find drive */ + sandev = sandev_find ( drive ); + if ( ! 
sandev ) { + DBG ( "SAN %#02x does not exist\n", drive ); + return; + } + + /* Unregister SAN device */ + unregister_sandev ( sandev ); + + /* Drop reference to drive */ + sandev_put ( sandev ); +} + +/** + * Boot from dummy SAN device + * + * @v drive Drive number + * @v filename Filename (or NULL to use default) + * @ret rc Return status code + */ +static int dummy_san_boot ( unsigned int drive __unused, + const char *filename __unused ) { + + return -EOPNOTSUPP; +} + +/** + * Install ACPI table + * + * @v acpi ACPI description header + * @ret rc Return status code + */ +static int dummy_install ( struct acpi_header *acpi ) { + + DBGC ( acpi, "ACPI table %s:\n", acpi_name ( acpi->signature ) ); + DBGC_HDA ( acpi, 0, acpi, le32_to_cpu ( acpi->length ) ); + return 0; +} + +/** + * Describe dummy SAN device + * + * @ret rc Return status code + */ +static int dummy_san_describe ( void ) { + + return acpi_install ( dummy_install ); +} + +PROVIDE_SANBOOT ( dummy, san_hook, dummy_san_hook ); +PROVIDE_SANBOOT ( dummy, san_unhook, dummy_san_unhook ); +PROVIDE_SANBOOT ( dummy, san_boot, dummy_san_boot ); +PROVIDE_SANBOOT ( dummy, san_describe, dummy_san_describe ); diff --git a/src/core/fault.c b/src/core/fault.c new file mode 100644 index 00000000..63d3ccac --- /dev/null +++ b/src/core/fault.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Fault injection + * + */ + +/** + * Inject fault with a specified probability + * + * @v rate Reciprocal of fault probability (must be non-zero) + * @ret rc Return status code + */ +int inject_fault_nonzero ( unsigned int rate ) { + + /* Do nothing unless we want to inject a fault now */ + if ( ( random() % rate ) != 0 ) + return 0; + + /* Generate error number here so that faults can be injected + * into files that don't themselves have error file + * identifiers (via errfile.h). + */ + return -EFAULT; +} + +/** + * Corrupt data with a specified probability + * + * @v rate Reciprocal of fault probability (must be non-zero) + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +void inject_corruption_nonzero ( unsigned int rate, const void *data, + size_t len ) { + uint8_t *writable; + size_t offset; + + /* Do nothing if we have no data to corrupt */ + if ( ! len ) + return; + + /* Do nothing unless we want to inject a fault now */ + if ( ! inject_fault_nonzero ( rate ) ) + return; + + /* Get a writable pointer to the nominally read-only data */ + writable = ( ( uint8_t * ) data ); + + /* Pick a random victim byte and zap it */ + offset = ( random() % len ); + writable[offset] ^= random(); +} diff --git a/src/core/fdt.c b/src/core/fdt.c new file mode 100644 index 00000000..f439422c --- /dev/null +++ b/src/core/fdt.c @@ -0,0 +1,486 @@ +/* + * Copyright (C) 2019 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * Flattened Device Tree + * + */ + +/** The system flattened device tree (if present) */ +static struct fdt fdt; + +/** A position within a device tree */ +struct fdt_cursor { + /** Offset within structure block */ + unsigned int offset; + /** Tree depth */ + int depth; +}; + +/** A lexical descriptor */ +struct fdt_descriptor { + /** Node or property name (if applicable) */ + const char *name; + /** Property data (if applicable) */ + const void *data; + /** Length of property data (if applicable) */ + size_t len; +}; + +/** + * Check if device tree exists + * + * @v has_fdt Device tree exists + */ +static inline __attribute__ (( always_inline )) int fdt_exists ( void ) { + + return ( fdt.hdr != NULL ); +} + +/** + * Traverse device tree + * + * @v pos Position within device tree + * @v desc Lexical descriptor to fill in + * @ret rc Return status code + */ +static int fdt_traverse ( struct fdt_cursor *pos, 
+ struct fdt_descriptor *desc ) { + const fdt_token_t *token; + const void *data; + const struct fdt_prop *prop; + unsigned int name_off; + size_t remaining; + size_t len; + + /* Sanity checks */ + assert ( pos->offset < fdt.len ); + assert ( ( pos->offset & ( FDT_STRUCTURE_ALIGN - 1 ) ) == 0 ); + + /* Clear descriptor */ + memset ( desc, 0, sizeof ( *desc ) ); + + /* Locate token and calculate remaining space */ + token = ( fdt.raw + fdt.structure + pos->offset ); + remaining = ( fdt.len - pos->offset ); + if ( remaining < sizeof ( *token ) ) { + DBGC ( &fdt, "FDT truncated tree at +%#04x\n", pos->offset ); + return -EINVAL; + } + remaining -= sizeof ( *token ); + data = ( ( ( const void * ) token ) + sizeof ( *token ) ); + len = 0; + + /* Handle token */ + switch ( *token ) { + + case cpu_to_be32 ( FDT_BEGIN_NODE ): + + /* Start of node */ + desc->name = data; + len = ( strnlen ( desc->name, remaining ) + 1 /* NUL */ ); + if ( remaining < len ) { + DBGC ( &fdt, "FDT unterminated node name at +%#04x\n", + pos->offset ); + return -EINVAL; + } + pos->depth++; + break; + + case cpu_to_be32 ( FDT_END_NODE ): + + /* End of node */ + if ( pos->depth < 0 ) { + DBGC ( &fdt, "FDT spurious node end at +%#04x\n", + pos->offset ); + return -EINVAL; + } + pos->depth--; + if ( pos->depth < 0 ) { + /* End of (sub)tree */ + return -ENOENT; + } + break; + + case cpu_to_be32 ( FDT_PROP ): + + /* Property */ + prop = data; + if ( remaining < sizeof ( *prop ) ) { + DBGC ( &fdt, "FDT truncated property at +%#04x\n", + pos->offset ); + return -EINVAL; + } + desc->data = ( ( ( const void * ) prop ) + sizeof ( *prop ) ); + desc->len = be32_to_cpu ( prop->len ); + len = ( sizeof ( *prop ) + desc->len ); + if ( remaining < len ) { + DBGC ( &fdt, "FDT overlength property at +%#04x\n", + pos->offset ); + return -EINVAL; + } + name_off = be32_to_cpu ( prop->name_off ); + if ( name_off > fdt.strings_len ) { + DBGC ( &fdt, "FDT property name outside strings " + "block at +%#04x\n", pos->offset 
); + return -EINVAL; + } + desc->name = ( fdt.raw + fdt.strings + name_off ); + break; + + case cpu_to_be32 ( FDT_NOP ): + + /* Do nothing */ + break; + + default: + + /* Unrecognised or unexpected token */ + DBGC ( &fdt, "FDT unexpected token %#08x at +%#04x\n", + be32_to_cpu ( *token ), pos->offset ); + return -EINVAL; + } + + /* Update cursor */ + len = ( ( len + FDT_STRUCTURE_ALIGN - 1 ) & + ~( FDT_STRUCTURE_ALIGN - 1 ) ); + pos->offset += ( sizeof ( *token ) + len ); + + /* Sanity checks */ + assert ( pos->offset <= fdt.len ); + + return 0; +} + +/** + * Find child node + * + * @v offset Starting node offset + * @v name Node name + * @v child Child node offset to fill in + * @ret rc Return status code + */ +static int fdt_child ( unsigned int offset, const char *name, + unsigned int *child ) { + struct fdt_cursor pos; + struct fdt_descriptor desc; + unsigned int orig_offset; + int rc; + + /* Record original offset (for debugging) */ + orig_offset = offset; + + /* Initialise cursor */ + pos.offset = offset; + pos.depth = -1; + + /* Find child node */ + while ( 1 ) { + + /* Record current offset */ + *child = pos.offset; + + /* Traverse tree */ + if ( ( rc = fdt_traverse ( &pos, &desc ) ) != 0 ) { + DBGC ( &fdt, "FDT +%#04x has no child node \"%s\": " + "%s\n", orig_offset, name, strerror ( rc ) ); + return rc; + } + + /* Check for matching immediate child node */ + if ( ( pos.depth == 1 ) && desc.name && ( ! 
desc.data ) ) { + DBGC2 ( &fdt, "FDT +%#04x has child node \"%s\"\n", + orig_offset, desc.name ); + if ( strcmp ( name, desc.name ) == 0 ) { + DBGC2 ( &fdt, "FDT +%#04x found child node " + "\"%s\" at +%#04x\n", orig_offset, + desc.name, *child ); + return 0; + } + } + } +} + +/** + * Find node by path + * + * @v path Node path + * @v offset Offset to fill in + * @ret rc Return status code + */ +int fdt_path ( const char *path, unsigned int *offset ) { + char *tmp = ( ( char * ) path ); + char *del; + int rc; + + /* Initialise offset */ + *offset = 0; + + /* Traverse tree one path segment at a time */ + while ( *tmp ) { + + /* Skip any leading '/' */ + while ( *tmp == '/' ) + tmp++; + + /* Find next '/' delimiter and convert to NUL */ + del = strchr ( tmp, '/' ); + if ( del ) + *del = '\0'; + + /* Find child and restore delimiter */ + rc = fdt_child ( *offset, tmp, offset ); + if ( del ) + *del = '/'; + if ( rc != 0 ) + return rc; + + /* Move to next path component, if any */ + while ( *tmp && ( *tmp != '/' ) ) + tmp++; + } + + DBGC2 ( &fdt, "FDT found path \"%s\" at +%#04x\n", path, *offset ); + return 0; +} + +/** + * Find node by alias + * + * @v name Alias name + * @v offset Offset to fill in + * @ret rc Return status code + */ +int fdt_alias ( const char *name, unsigned int *offset ) { + const char *alias; + int rc; + + /* Locate "/aliases" node */ + if ( ( rc = fdt_child ( 0, "aliases", offset ) ) != 0 ) + return rc; + + /* Locate alias property */ + if ( ( alias = fdt_string ( *offset, name ) ) == NULL ) + return -ENOENT; + DBGC ( &fdt, "FDT alias \"%s\" is \"%s\"\n", name, alias ); + + /* Locate aliased node */ + if ( ( rc = fdt_path ( alias, offset ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Find property + * + * @v offset Starting node offset + * @v name Property name + * @v desc Lexical descriptor to fill in + * @ret rc Return status code + */ +static int fdt_property ( unsigned int offset, const char *name, + struct fdt_descriptor *desc ) { + 
struct fdt_cursor pos; + int rc; + + /* Initialise cursor */ + pos.offset = offset; + pos.depth = -1; + + /* Find property */ + while ( 1 ) { + + /* Traverse tree */ + if ( ( rc = fdt_traverse ( &pos, desc ) ) != 0 ) { + DBGC ( &fdt, "FDT +%#04x has no property \"%s\": %s\n", + offset, name, strerror ( rc ) ); + return rc; + } + + /* Check for matching immediate child property */ + if ( ( pos.depth == 0 ) && desc->data ) { + DBGC2 ( &fdt, "FDT +%#04x has property \"%s\" len " + "%#zx\n", offset, desc->name, desc->len ); + if ( strcmp ( name, desc->name ) == 0 ) { + DBGC2 ( &fdt, "FDT +%#04x found property " + "\"%s\"\n", offset, desc->name ); + DBGC2_HDA ( &fdt, 0, desc->data, desc->len ); + return 0; + } + } + } +} + +/** + * Find string property + * + * @v offset Starting node offset + * @v name Property name + * @ret string String property, or NULL on error + */ +const char * fdt_string ( unsigned int offset, const char *name ) { + struct fdt_descriptor desc; + int rc; + + /* Find property */ + if ( ( rc = fdt_property ( offset, name, &desc ) ) != 0 ) + return NULL; + + /* Check NUL termination */ + if ( strnlen ( desc.data, desc.len ) == desc.len ) { + DBGC ( &fdt, "FDT unterminated string property \"%s\"\n", + name ); + return NULL; + } + + return desc.data; +} + +/** + * Get MAC address from property + * + * @v offset Starting node offset + * @v netdev Network device + * @ret rc Return status code + */ +int fdt_mac ( unsigned int offset, struct net_device *netdev ) { + struct fdt_descriptor desc; + size_t len; + int rc; + + /* Find applicable MAC address property */ + if ( ( ( rc = fdt_property ( offset, "mac-address", &desc ) ) != 0 ) && + ( ( rc = fdt_property ( offset, "local-mac-address", + &desc ) ) != 0 ) ) { + return rc; + } + + /* Check length */ + len = netdev->ll_protocol->hw_addr_len; + if ( len != desc.len ) { + DBGC ( &fdt, "FDT malformed MAC address \"%s\":\n", + desc.name ); + DBGC_HDA ( &fdt, 0, desc.data, desc.len ); + return -ERANGE; + } + + 
/* Fill in MAC address */ + memcpy ( netdev->hw_addr, desc.data, len ); + + return 0; +} + +/** + * Register device tree + * + * @v fdt Device tree header + * @ret rc Return status code + */ +int register_fdt ( const struct fdt_header *hdr ) { + const uint8_t *end; + + /* Record device tree location */ + fdt.hdr = hdr; + fdt.len = be32_to_cpu ( hdr->totalsize ); + DBGC ( &fdt, "FDT version %d at %p+%#04zx\n", + be32_to_cpu ( hdr->version ), fdt.hdr, fdt.len ); + + /* Check signature */ + if ( hdr->magic != cpu_to_be32 ( FDT_MAGIC ) ) { + DBGC ( &fdt, "FDT has invalid magic value %#08x\n", + be32_to_cpu ( hdr->magic ) ); + goto err; + } + + /* Check version */ + if ( hdr->last_comp_version != cpu_to_be32 ( FDT_VERSION ) ) { + DBGC ( &fdt, "FDT unsupported version %d\n", + be32_to_cpu ( hdr->last_comp_version ) ); + goto err; + } + + /* Record structure block location */ + fdt.structure = be32_to_cpu ( hdr->off_dt_struct ); + fdt.structure_len = be32_to_cpu ( hdr->size_dt_struct ); + DBGC ( &fdt, "FDT structure block at +[%#04x,%#04zx)\n", + fdt.structure, ( fdt.structure + fdt.structure_len ) ); + if ( ( fdt.structure > fdt.len ) || + ( fdt.structure_len > ( fdt.len - fdt.structure ) ) ) { + DBGC ( &fdt, "FDT structure block exceeds table\n" ); + goto err; + } + if ( ( fdt.structure | fdt.structure_len ) & + ( FDT_STRUCTURE_ALIGN - 1 ) ) { + DBGC ( &fdt, "FDT structure block is misaligned\n" ); + goto err; + } + + /* Record strings block location */ + fdt.strings = be32_to_cpu ( hdr->off_dt_strings ); + fdt.strings_len = be32_to_cpu ( hdr->size_dt_strings ); + DBGC ( &fdt, "FDT strings block at +[%#04x,%#04zx)\n", + fdt.strings, ( fdt.strings + fdt.strings_len ) ); + if ( ( fdt.strings > fdt.len ) || + ( fdt.strings_len > ( fdt.len - fdt.strings ) ) ) { + DBGC ( &fdt, "FDT strings block exceeds table\n" ); + goto err; + } + + /* Shrink strings block to ensure NUL termination safety */ + end = ( fdt.raw + fdt.strings + fdt.strings_len ); + for ( ; fdt.strings_len ; 
fdt.strings_len-- ) { + if ( *(--end) == '\0' ) + break; + } + if ( fdt.strings_len != be32_to_cpu ( hdr->size_dt_strings ) ) { + DBGC ( &fdt, "FDT strings block shrunk to +[%#04x,%#04zx)\n", + fdt.strings, ( fdt.strings + fdt.strings_len ) ); + } + + /* Print model name (for debugging) */ + DBGC ( &fdt, "FDT model is \"%s\"\n", fdt_string ( 0, "model" ) ); + + return 0; + + err: + DBGC_HDA ( &fdt, 0, hdr, sizeof ( *hdr ) ); + fdt.hdr = NULL; + return -EINVAL; +} + +/* Drag in objects via register_fdt */ +REQUIRING_SYMBOL ( register_fdt ); + +/* Drag in device tree configuration */ +REQUIRE_OBJECT ( config_fdt ); diff --git a/src/core/iomap_virt.c b/src/core/iomap_virt.c new file mode 100644 index 00000000..c7f48727 --- /dev/null +++ b/src/core/iomap_virt.c @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * iPXE I/O mapping API using phys_to_virt() + * + */ + +#include + +PROVIDE_IOMAP_INLINE ( virt, ioremap ); +PROVIDE_IOMAP_INLINE ( virt, iounmap ); +PROVIDE_IOMAP_INLINE ( virt, io_to_bus ); diff --git a/src/core/netbios.c b/src/core/netbios.c new file mode 100644 index 00000000..0d4e2086 --- /dev/null +++ b/src/core/netbios.c @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2018 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * NetBIOS user names + * + */ + +#include +#include +#include + +/** + * Split NetBIOS [domain\]username into separate domain and username fields + * + * @v username NetBIOS [domain\]username string + * @ret domain Domain portion of string, or NULL if no domain present + * + * This function modifies the original string by removing the + * separator. The caller may restore the string using + * netbios_domain_undo(). 
+ */ +const char * netbios_domain ( char **username ) { + char *domain_username = *username; + char *sep; + + /* Find separator, if present */ + sep = strchr ( domain_username, '\\' ); + if ( ! sep ) + return NULL; + + /* Overwrite separator with NUL terminator and update username string */ + *sep = '\0'; + *username = ( sep + 1 ); + + return domain_username; +} diff --git a/src/core/null_acpi.c b/src/core/null_acpi.c new file mode 100644 index 00000000..90c78485 --- /dev/null +++ b/src/core/null_acpi.c @@ -0,0 +1,3 @@ +#include + +PROVIDE_ACPI_INLINE ( null, acpi_find_rsdt ); diff --git a/src/core/pool.c b/src/core/pool.c new file mode 100644 index 00000000..0163405f --- /dev/null +++ b/src/core/pool.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Pooled connections + * + */ + +#include +#include + +/** + * Recycle this connection after closing + * + * @v intf Data transfer interface + */ +void pool_recycle ( struct interface *intf ) { + + intf_poke ( intf, pool_recycle ); +} + +/** + * Reopen a defunct connection + * + * @v intf Data transfer interface + */ +void pool_reopen ( struct interface *intf ) { + + intf_poke ( intf, pool_reopen ); +} + +/** + * Add connection to pool + * + * @v pool Pooled connection + * @v list List of pooled connections + * @v expiry Expiry time + */ +void pool_add ( struct pooled_connection *pool, struct list_head *list, + unsigned long expiry ) { + + /* Sanity check */ + assert ( list_empty ( &pool->list ) ); + assert ( ! timer_running ( &pool->timer ) ); + + /* Add to list of pooled connections */ + list_add_tail ( &pool->list, list ); + + /* Start expiry timer */ + start_timer_fixed ( &pool->timer, expiry ); +} + +/** + * Remove connection from pool + * + * @v pool Pooled connection + */ +void pool_del ( struct pooled_connection *pool ) { + + /* Remove from list of pooled connections */ + list_del ( &pool->list ); + INIT_LIST_HEAD ( &pool->list ); + + /* Stop expiry timer */ + stop_timer ( &pool->timer ); + + /* Mark as a freshly recycled connection */ + pool->flags = POOL_RECYCLED; +} + +/** + * Close expired pooled connection + * + * @v timer Expiry timer + * @v over Failure indicator + */ +void pool_expired ( struct retry_timer *timer, int over __unused ) { + struct pooled_connection *pool = + container_of ( timer, struct pooled_connection, timer ); + + /* Sanity check */ + assert ( ! 
list_empty ( &pool->list ) ); + + /* Remove from connection pool */ + list_del ( &pool->list ); + INIT_LIST_HEAD ( &pool->list ); + + /* Close expired connection */ + pool->expired ( pool ); +} diff --git a/src/core/quiesce.c b/src/core/quiesce.c new file mode 100644 index 00000000..5d2a919d --- /dev/null +++ b/src/core/quiesce.c @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Quiesce system + * + */ + +#include + +/** Quiesce system */ +void quiesce ( void ) { + struct quiescer *quiescer; + + /* Call all quiescers */ + for_each_table_entry ( quiescer, QUIESCERS ) { + quiescer->quiesce(); + } +} + +/** Unquiesce system */ +void unquiesce ( void ) { + struct quiescer *quiescer; + + /* Call all quiescers */ + for_each_table_entry ( quiescer, QUIESCERS ) { + quiescer->unquiesce(); + } +} diff --git a/src/core/sanboot.c b/src/core/sanboot.c new file mode 100644 index 00000000..cabc4843 --- /dev/null +++ b/src/core/sanboot.c @@ -0,0 +1,995 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * SAN booting + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * Default SAN drive number + * + * The drive number is a meaningful concept only in a BIOS + * environment, where it represents the INT13 drive number (0x80 for + * the first hard disk). We retain it in other environments to allow + * for a simple way for iPXE commands to refer to SAN drives. + */ +#define SAN_DEFAULT_DRIVE 0x80 + +/** + * Timeout for block device commands (in ticks) + * + * Underlying devices should ideally never become totally stuck. + * However, if they do, then the blocking SAN APIs provide no means + * for the caller to cancel the operation, and the machine appears to + * hang. Use an overall timeout for all commands to avoid this + * problem and bounce timeout failures to the caller. + */ +#define SAN_COMMAND_TIMEOUT ( 15 * TICKS_PER_SEC ) + +/** + * Default number of times to retry commands + * + * We may need to retry commands. For example, the underlying + * connection may be closed by the SAN target due to an inactivity + * timeout, or the SAN target may return pointless "error" messages + * such as "SCSI power-on occurred". + */ +#define SAN_DEFAULT_RETRIES 10 + +/** + * Delay between reopening attempts + * + * Some SAN targets will always accept connections instantly and + * report a temporary unavailability by e.g. failing the TEST UNIT + * READY command. Avoid bombarding such targets by introducing a + * small delay between attempts. 
+ */ +#define SAN_REOPEN_DELAY_SECS 5 + +/** List of SAN devices */ +LIST_HEAD ( san_devices ); + +/** Number of times to retry commands */ +static unsigned long san_retries = SAN_DEFAULT_RETRIES; + +/** + * Find SAN device by drive number + * + * @v drive Drive number + * @ret sandev SAN device, or NULL + */ +struct san_device * sandev_find ( unsigned int drive ) { + struct san_device *sandev; + + list_for_each_entry ( sandev, &san_devices, list ) { + if ( sandev->drive == drive ) + return sandev; + } + return NULL; +} + +/** + * Free SAN device + * + * @v refcnt Reference count + */ +static void sandev_free ( struct refcnt *refcnt ) { + struct san_device *sandev = + container_of ( refcnt, struct san_device, refcnt ); + unsigned int i; + + assert ( ! timer_running ( &sandev->timer ) ); + assert ( ! sandev->active ); + assert ( list_empty ( &sandev->opened ) ); + for ( i = 0 ; i < sandev->paths ; i++ ) { + uri_put ( sandev->path[i].uri ); + assert ( sandev->path[i].desc == NULL ); + } + free ( sandev ); +} + +/** + * Close SAN device command + * + * @v sandev SAN device + * @v rc Reason for close + */ +static void sandev_command_close ( struct san_device *sandev, int rc ) { + + /* Stop timer */ + stop_timer ( &sandev->timer ); + + /* Restart interface */ + intf_restart ( &sandev->command, rc ); + + /* Record command status */ + sandev->command_rc = rc; +} + +/** + * Record SAN device capacity + * + * @v sandev SAN device + * @v capacity SAN device capacity + */ +static void sandev_command_capacity ( struct san_device *sandev, + struct block_device_capacity *capacity ) { + + /* Record raw capacity information */ + memcpy ( &sandev->capacity, capacity, sizeof ( sandev->capacity ) ); +} + +/** SAN device command interface operations */ +static struct interface_operation sandev_command_op[] = { + INTF_OP ( intf_close, struct san_device *, sandev_command_close ), + INTF_OP ( block_capacity, struct san_device *, + sandev_command_capacity ), +}; + +/** SAN device command 
interface descriptor */ +static struct interface_descriptor sandev_command_desc = + INTF_DESC ( struct san_device, command, sandev_command_op ); + +/** + * Handle SAN device command timeout + * + * @v retry Retry timer + */ +static void sandev_command_expired ( struct retry_timer *timer, + int over __unused ) { + struct san_device *sandev = + container_of ( timer, struct san_device, timer ); + + sandev_command_close ( sandev, -ETIMEDOUT ); +} + +/** + * Open SAN path + * + * @v sanpath SAN path + * @ret rc Return status code + */ +static int sanpath_open ( struct san_path *sanpath ) { + struct san_device *sandev = sanpath->sandev; + int rc; + + /* Sanity check */ + list_check_contains_entry ( sanpath, &sandev->closed, list ); + + /* Open interface */ + if ( ( rc = xfer_open_uri ( &sanpath->block, sanpath->uri ) ) != 0 ) { + DBGC ( sandev, "SAN %#02x.%d could not (re)open URI: " + "%s\n", sandev->drive, sanpath->index, strerror ( rc ) ); + return rc; + } + + /* Update ACPI descriptor, if applicable */ + if ( ! 
( sandev->flags & SAN_NO_DESCRIBE ) ) { + if ( sanpath->desc ) + acpi_del ( sanpath->desc ); + sanpath->desc = acpi_describe ( &sanpath->block ); + if ( sanpath->desc ) + acpi_add ( sanpath->desc ); + } + + /* Start process */ + process_add ( &sanpath->process ); + + /* Mark as opened */ + list_del ( &sanpath->list ); + list_add_tail ( &sanpath->list, &sandev->opened ); + + /* Record as in progress */ + sanpath->path_rc = -EINPROGRESS; + + return 0; +} + +/** + * Close SAN path + * + * @v sanpath SAN path + * @v rc Reason for close + */ +static void sanpath_close ( struct san_path *sanpath, int rc ) { + struct san_device *sandev = sanpath->sandev; + + /* Record status */ + sanpath->path_rc = rc; + + /* Mark as closed */ + list_del ( &sanpath->list ); + list_add_tail ( &sanpath->list, &sandev->closed ); + + /* Stop process */ + process_del ( &sanpath->process ); + + /* Restart interfaces, avoiding potential loops */ + if ( sanpath == sandev->active ) { + intfs_restart ( rc, &sandev->command, &sanpath->block, NULL ); + sandev->active = NULL; + sandev_command_close ( sandev, rc ); + } else { + intf_restart ( &sanpath->block, rc ); + } +} + +/** + * Handle closure of underlying block device interface + * + * @v sanpath SAN path + * @v rc Reason for close + */ +static void sanpath_block_close ( struct san_path *sanpath, int rc ) { + struct san_device *sandev = sanpath->sandev; + + /* Any closure is an error from our point of view */ + if ( rc == 0 ) + rc = -ENOTCONN; + DBGC ( sandev, "SAN %#02x.%d closed: %s\n", + sandev->drive, sanpath->index, strerror ( rc ) ); + + /* Close path */ + sanpath_close ( sanpath, rc ); +} + +/** + * Check flow control window + * + * @v sanpath SAN path + */ +static size_t sanpath_block_window ( struct san_path *sanpath __unused ) { + + /* We are never ready to receive data via this interface. + * This prevents objects that support both block and stream + * interfaces from attempting to send us stream data. 
+ */ + return 0; +} + +/** + * SAN path process + * + * @v sanpath SAN path + */ +static void sanpath_step ( struct san_path *sanpath ) { + struct san_device *sandev = sanpath->sandev; + + /* Ignore if we are already the active device */ + if ( sanpath == sandev->active ) + return; + + /* Wait until path has become available */ + if ( ! xfer_window ( &sanpath->block ) ) + return; + + /* Record status */ + sanpath->path_rc = 0; + + /* Mark as active path or close as applicable */ + if ( ! sandev->active ) { + DBGC ( sandev, "SAN %#02x.%d is active\n", + sandev->drive, sanpath->index ); + sandev->active = sanpath; + } else { + DBGC ( sandev, "SAN %#02x.%d is available\n", + sandev->drive, sanpath->index ); + sanpath_close ( sanpath, 0 ); + } +} + +/** SAN path block interface operations */ +static struct interface_operation sanpath_block_op[] = { + INTF_OP ( intf_close, struct san_path *, sanpath_block_close ), + INTF_OP ( xfer_window, struct san_path *, sanpath_block_window ), + INTF_OP ( xfer_window_changed, struct san_path *, sanpath_step ), +}; + +/** SAN path block interface descriptor */ +static struct interface_descriptor sanpath_block_desc = + INTF_DESC ( struct san_path, block, sanpath_block_op ); + +/** SAN path process descriptor */ +static struct process_descriptor sanpath_process_desc = + PROC_DESC_ONCE ( struct san_path, process, sanpath_step ); + +/** + * Restart SAN device interface + * + * @v sandev SAN device + * @v rc Reason for restart + */ +static void sandev_restart ( struct san_device *sandev, int rc ) { + struct san_path *sanpath; + + /* Restart all block device interfaces */ + while ( ( sanpath = list_first_entry ( &sandev->opened, + struct san_path, list ) ) ) { + sanpath_close ( sanpath, rc ); + } + + /* Clear active path */ + sandev->active = NULL; + + /* Close any outstanding command */ + sandev_command_close ( sandev, rc ); +} + +/** + * (Re)open SAN device + * + * @v sandev SAN device + * @ret rc Return status code + * + * This function 
will block until the device is available. + */ +int sandev_reopen ( struct san_device *sandev ) { + struct san_path *sanpath; + int rc; + + /* Unquiesce system */ + unquiesce(); + + /* Close any outstanding command and restart interfaces */ + sandev_restart ( sandev, -ECONNRESET ); + assert ( sandev->active == NULL ); + assert ( list_empty ( &sandev->opened ) ); + + /* Open all paths */ + while ( ( sanpath = list_first_entry ( &sandev->closed, + struct san_path, list ) ) ) { + if ( ( rc = sanpath_open ( sanpath ) ) != 0 ) + goto err_open; + } + + /* Wait for any device to become available, or for all devices + * to fail. + */ + while ( sandev->active == NULL ) { + step(); + if ( list_empty ( &sandev->opened ) ) { + /* Get status of the first device to be + * closed. Do this on the basis that earlier + * errors (e.g. "invalid IQN") are probably + * more interesting than later errors + * (e.g. "TCP timeout"). + */ + rc = -ENODEV; + list_for_each_entry ( sanpath, &sandev->closed, list ) { + rc = sanpath->path_rc; + break; + } + DBGC ( sandev, "SAN %#02x never became available: %s\n", + sandev->drive, strerror ( rc ) ); + goto err_none; + } + } + + assert ( ! 
list_empty ( &sandev->opened ) ); + return 0; + + err_none: + err_open: + sandev_restart ( sandev, rc ); + return rc; +} + +/** SAN device read/write command parameters */ +struct san_command_rw_params { + /** SAN device read/write operation */ + int ( * block_rw ) ( struct interface *control, struct interface *data, + uint64_t lba, unsigned int count, + userptr_t buffer, size_t len ); + /** Data buffer */ + userptr_t buffer; + /** Starting LBA */ + uint64_t lba; + /** Block count */ + unsigned int count; +}; + +/** SAN device command parameters */ +union san_command_params { + /** Read/write command parameters */ + struct san_command_rw_params rw; +}; + +/** + * Initiate SAN device read/write command + * + * @v sandev SAN device + * @v params Command parameters + * @ret rc Return status code + */ +static int sandev_command_rw ( struct san_device *sandev, + const union san_command_params *params ) { + struct san_path *sanpath = sandev->active; + size_t len = ( params->rw.count * sandev->capacity.blksize ); + int rc; + + /* Sanity check */ + assert ( sanpath != NULL ); + + /* Initiate read/write command */ + if ( ( rc = params->rw.block_rw ( &sanpath->block, &sandev->command, + params->rw.lba, params->rw.count, + params->rw.buffer, len ) ) != 0 ) { + DBGC ( sandev, "SAN %#02x.%d could not initiate read/write: " + "%s\n", sandev->drive, sanpath->index, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Initiate SAN device read capacity command + * + * @v sandev SAN device + * @v params Command parameters + * @ret rc Return status code + */ +static int +sandev_command_read_capacity ( struct san_device *sandev, + const union san_command_params *params __unused){ + struct san_path *sanpath = sandev->active; + int rc; + + /* Sanity check */ + assert ( sanpath != NULL ); + + /* Initiate read capacity command */ + if ( ( rc = block_read_capacity ( &sanpath->block, + &sandev->command ) ) != 0 ) { + DBGC ( sandev, "SAN %#02x.%d could not initiate read capacity: 
" + "%s\n", sandev->drive, sanpath->index, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Execute a single SAN device command and wait for completion + * + * @v sandev SAN device + * @v command Command + * @v params Command parameters (if required) + * @ret rc Return status code + */ +static int +sandev_command ( struct san_device *sandev, + int ( * command ) ( struct san_device *sandev, + const union san_command_params *params ), + const union san_command_params *params ) { + unsigned int retries = 0; + int rc; + + /* Sanity check */ + assert ( ! timer_running ( &sandev->timer ) ); + + /* Unquiesce system */ + unquiesce(); + + /* (Re)try command */ + do { + + /* Reopen block device if applicable */ + if ( sandev_needs_reopen ( sandev ) && + ( ( rc = sandev_reopen ( sandev ) ) != 0 ) ) { + + /* Delay reopening attempts */ + sleep_fixed ( SAN_REOPEN_DELAY_SECS ); + + /* Retry opening indefinitely for multipath devices */ + if ( sandev->paths <= 1 ) + retries++; + + continue; + } + + /* Initiate command */ + if ( ( rc = command ( sandev, params ) ) != 0 ) { + retries++; + continue; + } + + /* Start expiry timer */ + start_timer_fixed ( &sandev->timer, SAN_COMMAND_TIMEOUT ); + + /* Wait for command to complete */ + while ( timer_running ( &sandev->timer ) ) + step(); + + /* Check command status */ + if ( ( rc = sandev->command_rc ) != 0 ) { + retries++; + continue; + } + + return 0; + + } while ( retries <= san_retries ); + + /* Sanity check */ + assert ( ! 
timer_running ( &sandev->timer ) ); + + return rc; +} + +/** + * Reset SAN device + * + * @v sandev SAN device + * @ret rc Return status code + */ +int sandev_reset ( struct san_device *sandev ) { + int rc; + + DBGC ( sandev, "SAN %#02x reset\n", sandev->drive ); + + /* Close and reopen underlying block device */ + if ( ( rc = sandev_reopen ( sandev ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Read from or write to SAN device + * + * @v sandev SAN device + * @v lba Starting logical block address + * @v count Number of logical blocks + * @v buffer Data buffer + * @v block_rw Block read/write method + * @ret rc Return status code + */ +static int sandev_rw ( struct san_device *sandev, uint64_t lba, + unsigned int count, userptr_t buffer, + int ( * block_rw ) ( struct interface *control, + struct interface *data, + uint64_t lba, unsigned int count, + userptr_t buffer, size_t len ) ) { + union san_command_params params; + unsigned int remaining; + size_t frag_len; + int rc; + + /* Initialise command parameters */ + params.rw.block_rw = block_rw; + params.rw.buffer = buffer; + params.rw.lba = ( lba << sandev->blksize_shift ); + params.rw.count = sandev->capacity.max_count; + remaining = ( count << sandev->blksize_shift ); + + /* Read/write fragments */ + while ( remaining ) { + + /* Determine fragment length */ + if ( params.rw.count > remaining ) + params.rw.count = remaining; + + /* Execute command */ + if ( ( rc = sandev_command ( sandev, sandev_command_rw, + ¶ms ) ) != 0 ) + return rc; + + /* Move to next fragment */ + frag_len = ( sandev->capacity.blksize * params.rw.count ); + params.rw.buffer = userptr_add ( params.rw.buffer, frag_len ); + params.rw.lba += params.rw.count; + remaining -= params.rw.count; + } + + return 0; +} + +/** + * Read from SAN device + * + * @v sandev SAN device + * @v lba Starting logical block address + * @v count Number of logical blocks + * @v buffer Data buffer + * @ret rc Return status code + */ +int sandev_read ( struct 
san_device *sandev, uint64_t lba, + unsigned int count, userptr_t buffer ) { + int rc; + + /* Read from device */ + if ( ( rc = sandev_rw ( sandev, lba, count, buffer, block_read ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Write to SAN device + * + * @v sandev SAN device + * @v lba Starting logical block address + * @v count Number of logical blocks + * @v buffer Data buffer + * @ret rc Return status code + */ +int sandev_write ( struct san_device *sandev, uint64_t lba, + unsigned int count, userptr_t buffer ) { + int rc; + + /* Write to device */ + if ( ( rc = sandev_rw ( sandev, lba, count, buffer, block_write ) ) != 0 ) + return rc; + + /* Quiesce system. This is a heuristic designed to ensure + * that the system is quiesced before Windows starts up, since + * a Windows SAN boot will typically write a status flag to + * the disk as its last action before transferring control to + * the native drivers. + */ + quiesce(); + + return 0; +} + +/** + * Describe SAN device + * + * @v sandev SAN device + * @ret rc Return status code + * + * Allow connections to progress until all existent path descriptors + * are complete. + */ +static int sandev_describe ( struct san_device *sandev ) { + struct san_path *sanpath; + struct acpi_descriptor *desc; + int rc; + + /* Wait for all paths to be either described or closed */ + while ( 1 ) { + + /* Allow connections to progress */ + step(); + + /* Fail if any closed path has an incomplete descriptor */ + list_for_each_entry ( sanpath, &sandev->closed, list ) { + desc = sanpath->desc; + if ( ! desc ) + continue; + if ( ( rc = desc->model->complete ( desc ) ) != 0 ) { + DBGC ( sandev, "SAN %#02x.%d could not be " + "described: %s\n", sandev->drive, + sanpath->index, strerror ( rc ) ); + return rc; + } + } + + /* Succeed if no paths have an incomplete descriptor */ + rc = 0; + list_for_each_entry ( sanpath, &sandev->opened, list ) { + desc = sanpath->desc; + if ( ! 
desc ) + continue; + if ( ( rc = desc->model->complete ( desc ) ) != 0 ) + break; + } + if ( rc == 0 ) + return 0; + } +} + +/** + * Remove SAN device descriptors + * + * @v sandev SAN device + */ +static void sandev_undescribe ( struct san_device *sandev ) { + struct san_path *sanpath; + unsigned int i; + + /* Remove all ACPI descriptors */ + for ( i = 0 ; i < sandev->paths ; i++ ) { + sanpath = &sandev->path[i]; + if ( sanpath->desc ) { + acpi_del ( sanpath->desc ); + sanpath->desc = NULL; + } + } +} + +/** + * Configure SAN device as a CD-ROM, if applicable + * + * @v sandev SAN device + * @ret rc Return status code + * + * Both BIOS and UEFI require SAN devices to be accessed with a block + * size of 2048. While we could require the user to configure the + * block size appropriately, this is non-trivial and would impose a + * substantial learning effort on the user. Instead, we check for the + * presence of the ISO9660 primary volume descriptor and, if found, + * then we force a block size of 2048 and map read/write requests + * appropriately. + */ +static int sandev_parse_iso9660 ( struct san_device *sandev ) { + static const struct iso9660_primary_descriptor_fixed primary_check = { + .type = ISO9660_TYPE_PRIMARY, + .id = ISO9660_ID, + }; + union { + struct iso9660_primary_descriptor primary; + char bytes[ISO9660_BLKSIZE]; + } *scratch; + unsigned int blksize; + unsigned int blksize_shift; + unsigned int lba; + unsigned int count; + int rc; + + /* Calculate required blocksize shift for potential CD-ROM access */ + blksize = sandev->capacity.blksize; + blksize_shift = 0; + while ( blksize < ISO9660_BLKSIZE ) { + blksize <<= 1; + blksize_shift++; + } + if ( blksize > ISO9660_BLKSIZE ) { + /* Cannot be a CD-ROM. This is not an error. */ + rc = 0; + goto invalid_blksize; + } + lba = ( ISO9660_PRIMARY_LBA << blksize_shift ); + count = ( 1 << blksize_shift ); + + /* Allocate scratch area */ + scratch = malloc ( ISO9660_BLKSIZE ); + if ( ! 
scratch ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Read primary volume descriptor */ + if ( ( rc = sandev_read ( sandev, lba, count, + virt_to_user ( scratch ) ) ) != 0 ) { + DBGC ( sandev, "SAN %#02x could not read ISO9660 primary" + "volume descriptor: %s\n", + sandev->drive, strerror ( rc ) ); + goto err_rw; + } + + /* Configure as CD-ROM if applicable */ + if ( memcmp ( &scratch->primary.fixed, &primary_check, + sizeof ( primary_check ) ) == 0 ) { + DBGC ( sandev, "SAN %#02x contains an ISO9660 filesystem; " + "treating as CD-ROM\n", sandev->drive ); + sandev->blksize_shift = blksize_shift; + sandev->is_cdrom = 1; + } + + err_rw: + free ( scratch ); + err_alloc: + invalid_blksize: + return rc; +} + +/** + * Allocate SAN device + * + * @v uris List of URIs + * @v count Number of URIs + * @v priv_size Size of private data + * @ret sandev SAN device, or NULL + */ +struct san_device * alloc_sandev ( struct uri **uris, unsigned int count, + size_t priv_size ) { + struct san_device *sandev; + struct san_path *sanpath; + size_t size; + unsigned int i; + + /* Allocate and initialise structure */ + size = ( sizeof ( *sandev ) + ( count * sizeof ( sandev->path[0] ) ) ); + sandev = zalloc ( size + priv_size ); + if ( ! 
sandev ) + return NULL; + ref_init ( &sandev->refcnt, sandev_free ); + intf_init ( &sandev->command, &sandev_command_desc, &sandev->refcnt ); + timer_init ( &sandev->timer, sandev_command_expired, &sandev->refcnt ); + sandev->priv = ( ( ( void * ) sandev ) + size ); + sandev->paths = count; + INIT_LIST_HEAD ( &sandev->opened ); + INIT_LIST_HEAD ( &sandev->closed ); + for ( i = 0 ; i < count ; i++ ) { + sanpath = &sandev->path[i]; + sanpath->sandev = sandev; + sanpath->index = i; + sanpath->uri = uri_get ( uris[i] ); + list_add_tail ( &sanpath->list, &sandev->closed ); + intf_init ( &sanpath->block, &sanpath_block_desc, + &sandev->refcnt ); + process_init_stopped ( &sanpath->process, &sanpath_process_desc, + &sandev->refcnt ); + sanpath->path_rc = -EINPROGRESS; + } + + return sandev; +} + +/** + * Register SAN device + * + * @v sandev SAN device + * @v drive Drive number + * @v flags Flags + * @ret rc Return status code + */ +int register_sandev ( struct san_device *sandev, unsigned int drive, + unsigned int flags ) { + int rc; + + /* Check that drive number is not in use */ + if ( sandev_find ( drive ) != NULL ) { + DBGC ( sandev, "SAN %#02x is already in use\n", drive ); + rc = -EADDRINUSE; + goto err_in_use; + } + + /* Record drive number and flags */ + sandev->drive = drive; + sandev->flags = flags; + + /* Check that device is capable of being opened (i.e. that all + * URIs are well-formed and that at least one path is + * working). 
+ */ + if ( ( rc = sandev_reopen ( sandev ) ) != 0 ) + goto err_reopen; + + /* Describe device */ + if ( ( rc = sandev_describe ( sandev ) ) != 0 ) + goto err_describe; + + /* Read device capacity */ + if ( ( rc = sandev_command ( sandev, sandev_command_read_capacity, + NULL ) ) != 0 ) + goto err_capacity; + + /* Configure as a CD-ROM, if applicable */ + if ( ( rc = sandev_parse_iso9660 ( sandev ) ) != 0 ) + goto err_iso9660; + + /* Add to list of SAN devices */ + list_add_tail ( &sandev->list, &san_devices ); + DBGC ( sandev, "SAN %#02x registered\n", sandev->drive ); + + return 0; + + list_del ( &sandev->list ); + err_iso9660: + err_capacity: + err_describe: + err_reopen: + sandev_restart ( sandev, rc ); + sandev_undescribe ( sandev ); + err_in_use: + return rc; +} + +/** + * Unregister SAN device + * + * @v sandev SAN device + */ +void unregister_sandev ( struct san_device *sandev ) { + + /* Sanity check */ + assert ( ! timer_running ( &sandev->timer ) ); + + /* Remove from list of SAN devices */ + list_del ( &sandev->list ); + + /* Shut down interfaces */ + sandev_restart ( sandev, 0 ); + + /* Remove ACPI descriptors */ + sandev_undescribe ( sandev ); + + DBGC ( sandev, "SAN %#02x unregistered\n", sandev->drive ); +} + +/** The "san-drive" setting */ +const struct setting san_drive_setting __setting ( SETTING_SANBOOT_EXTRA, + san-drive ) = { + .name = "san-drive", + .description = "SAN drive number", + .tag = DHCP_EB_SAN_DRIVE, + .type = &setting_type_uint8, +}; + +/** + * Get default SAN drive number + * + * @ret drive Default drive number + */ +unsigned int san_default_drive ( void ) { + unsigned long drive; + + /* Use "san-drive" setting, if specified */ + if ( fetch_uint_setting ( NULL, &san_drive_setting, &drive ) >= 0 ) + return drive; + + /* Otherwise, default to booting from first hard disk */ + return SAN_DEFAULT_DRIVE; +} + +/** The "san-retries" setting */ +const struct setting san_retries_setting __setting ( SETTING_SANBOOT_EXTRA, + san-retries ) = 
{ + .name = "san-retries", + .description = "SAN retry count", + .tag = DHCP_EB_SAN_RETRY, + .type = &setting_type_int8, +}; + +/** + * Apply SAN boot settings + * + * @ret rc Return status code + */ +static int sandev_apply ( void ) { + + /* Apply "san-retries" setting */ + if ( fetch_uint_setting ( NULL, &san_retries_setting, + &san_retries ) < 0 ) { + san_retries = SAN_DEFAULT_RETRIES; + } + + return 0; +} + +/** Settings applicator */ +struct settings_applicator sandev_applicator __settings_applicator = { + .apply = sandev_apply, +}; diff --git a/src/core/uart.c b/src/core/uart.c new file mode 100644 index 00000000..b85fe076 --- /dev/null +++ b/src/core/uart.c @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * 16550-compatible UART + * + */ + +#include +#include +#include + +/** Timeout for transmit holding register to become empty */ +#define UART_THRE_TIMEOUT_MS 100 + +/** Timeout for transmitter to become empty */ +#define UART_TEMT_TIMEOUT_MS 1000 + +/** + * Transmit data + * + * @v uart UART + * @v data Data + */ +void uart_transmit ( struct uart *uart, uint8_t data ) { + unsigned int i; + uint8_t lsr; + + /* Wait for transmitter holding register to become empty */ + for ( i = 0 ; i < UART_THRE_TIMEOUT_MS ; i++ ) { + lsr = uart_read ( uart, UART_LSR ); + if ( lsr & UART_LSR_THRE ) + break; + mdelay ( 1 ); + } + + /* Transmit data (even if we timed out) */ + uart_write ( uart, UART_THR, data ); +} + +/** + * Flush data + * + * @v uart UART + */ +void uart_flush ( struct uart *uart ) { + unsigned int i; + uint8_t lsr; + + /* Wait for transmitter and receiver to become empty */ + for ( i = 0 ; i < UART_TEMT_TIMEOUT_MS ; i++ ) { + uart_read ( uart, UART_RBR ); + lsr = uart_read ( uart, UART_LSR ); + if ( ( lsr & UART_LSR_TEMT ) && ! ( lsr & UART_LSR_DR ) ) + break; + } +} + +/** + * Check for existence of UART + * + * @v uart UART + * @ret rc Return status code + */ +int uart_exists ( struct uart *uart ) { + + /* Fail if no UART port is defined */ + if ( ! 
uart->base ) + return -ENODEV; + + /* Fail if UART scratch register seems not to be present */ + uart_write ( uart, UART_SCR, 0x18 ); + if ( uart_read ( uart, UART_SCR ) != 0x18 ) + return -ENODEV; + uart_write ( uart, UART_SCR, 0xae ); + if ( uart_read ( uart, UART_SCR ) != 0xae ) + return -ENODEV; + + return 0; +} + +/** + * Initialise UART + * + * @v uart UART + * @v baud Baud rate, or zero to leave unchanged + * @v lcr Line control register value, or zero to leave unchanged + * @ret rc Return status code + */ +int uart_init ( struct uart *uart, unsigned int baud, uint8_t lcr ) { + uint8_t dlm; + uint8_t dll; + int rc; + + /* Check for existence of UART */ + if ( ( rc = uart_exists ( uart ) ) != 0 ) + return rc; + + /* Configure divisor and line control register, if applicable */ + if ( ! lcr ) + lcr = uart_read ( uart, UART_LCR ); + uart->lcr = lcr; + uart_write ( uart, UART_LCR, ( lcr | UART_LCR_DLAB ) ); + if ( baud ) { + uart->divisor = ( UART_MAX_BAUD / baud ); + dlm = ( ( uart->divisor >> 8 ) & 0xff ); + dll = ( ( uart->divisor >> 0 ) & 0xff ); + uart_write ( uart, UART_DLM, dlm ); + uart_write ( uart, UART_DLL, dll ); + } else { + dlm = uart_read ( uart, UART_DLM ); + dll = uart_read ( uart, UART_DLL ); + uart->divisor = ( ( dlm << 8 ) | dll ); + } + uart_write ( uart, UART_LCR, ( lcr & ~UART_LCR_DLAB ) ); + + /* Disable interrupts */ + uart_write ( uart, UART_IER, 0 ); + + /* Enable FIFOs */ + uart_write ( uart, UART_FCR, UART_FCR_FE ); + + /* Assert DTR and RTS */ + uart_write ( uart, UART_MCR, ( UART_MCR_DTR | UART_MCR_RTS ) ); + + /* Flush any stale data */ + uart_flush ( uart ); + + return 0; +} diff --git a/src/crypto/aes.c b/src/crypto/aes.c new file mode 100644 index 00000000..b9e206bf --- /dev/null +++ b/src/crypto/aes.c @@ -0,0 +1,808 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * AES algorithm + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** AES strides + * + * These are the strides (modulo 16) used to walk through the AES + * input state bytes in order of byte position after [Inv]ShiftRows. + */ +enum aes_stride { + /** Input stride for ShiftRows + * + * 0 4 8 c + * \ \ \ + * 1 5 9 d + * \ \ \ + * 2 6 a e + * \ \ \ + * 3 7 b f + */ + AES_STRIDE_SHIFTROWS = +5, + /** Input stride for InvShiftRows + * + * 0 4 8 c + * / / / + * 1 5 9 d + * / / / + * 2 6 a e + * / / / + * 3 7 b f + */ + AES_STRIDE_INVSHIFTROWS = -3, +}; + +/** A single AES lookup table entry + * + * This represents the product (in the Galois field GF(2^8)) of an + * eight-byte vector multiplier with a single scalar multiplicand. + * + * The vector multipliers used for AES will be {1,1,1,3,2,1,1,3} for + * MixColumns and {1,9,13,11,14,9,13,11} for InvMixColumns. 
This + * allows for the result of multiplying any single column of the + * [Inv]MixColumns matrix by a scalar value to be obtained simply by + * extracting the relevant four-byte subset from the lookup table + * entry. + * + * For example, to find the result of multiplying the second column of + * the MixColumns matrix by the scalar value 0x80: + * + * MixColumns column[0]: { 2, 1, 1, 3 } + * MixColumns column[1]: { 3, 2, 1, 1 } + * MixColumns column[2]: { 1, 3, 2, 1 } + * MixColumns column[3]: { 1, 1, 3, 2 } + * Vector multiplier: { 1, 1, 1, 3, 2, 1, 1, 3 } + * Scalar multiplicand: 0x80 + * Lookup table entry: { 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b } + * + * The second column of the MixColumns matrix is {3,2,1,1}. The + * product of this column with the scalar value 0x80 can be obtained + * by extracting the relevant four-byte subset of the lookup table + * entry: + * + * MixColumns column[1]: { 3, 2, 1, 1 } + * Vector multiplier: { 1, 1, 1, 3, 2, 1, 1, 3 } + * Lookup table entry: { 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b } + * Product: { 0x9b, 0x1b, 0x80, 0x80 } + * + * The column lookups require only seven bytes of the eight-byte + * entry: the remaining (first) byte is used to hold the scalar + * multiplicand itself (i.e. the first byte of the vector multiplier + * is always chosen to be 1). + */ +union aes_table_entry { + /** Viewed as an array of bytes */ + uint8_t byte[8]; +} __attribute__ (( packed )); + +/** An AES lookup table + * + * This represents the products (in the Galois field GF(2^8)) of a + * constant eight-byte vector multiplier with all possible 256 scalar + * multiplicands. + * + * The entries are indexed by the AES [Inv]SubBytes S-box output + * values (denoted S(N)). This allows for the result of multiplying + * any single column of the [Inv]MixColumns matrix by S(N) to be + * obtained simply by extracting the relevant four-byte subset from + * the Nth table entry. 
For example: + * + * Input byte (N): 0x3a + * SubBytes output S(N): 0x80 + * MixColumns column[1]: { 3, 2, 1, 1 } + * Vector multiplier: { 1, 1, 1, 3, 2, 1, 1, 3 } + * Table entry[0x3a]: { 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b } + * Product: { 0x9b, 0x1b, 0x80, 0x80 } + * + * Since the first byte of the eight-byte vector multiplier is always + * chosen to be 1, the value of S(N) may be lookup up by extracting + * the first byte of the Nth table entry. + */ +struct aes_table { + /** Table entries, indexed by S(N) */ + union aes_table_entry entry[256]; +} __attribute__ (( aligned ( 8 ) )); + +/** AES MixColumns lookup table */ +static struct aes_table aes_mixcolumns; + +/** AES InvMixColumns lookup table */ +static struct aes_table aes_invmixcolumns; + +/** + * Multiply [Inv]MixColumns matrix column by scalar multiplicand + * + * @v entry AES lookup table entry for scalar multiplicand + * @v column [Inv]MixColumns matrix column index + * @ret product Product of matrix column with scalar multiplicand + */ +static inline __attribute__ (( always_inline )) uint32_t +aes_entry_column ( const union aes_table_entry *entry, unsigned int column ) { + const union { + uint8_t byte; + uint32_t column; + } __attribute__ (( may_alias )) *product; + + /* Locate relevant four-byte subset */ + product = container_of ( &entry->byte[ 4 - column ], + typeof ( *product ), byte ); + + /* Extract this four-byte subset */ + return product->column; +} + +/** + * Multiply [Inv]MixColumns matrix column by S-boxed input byte + * + * @v table AES lookup table + * @v stride AES row shift stride + * @v in AES input state + * @v offset Output byte offset (after [Inv]ShiftRows) + * @ret product Product of matrix column with S(input byte) + * + * Note that the specified offset is not the offset of the input byte; + * it is the offset of the output byte which corresponds to the input + * byte. 
This output byte offset is used to calculate both the input + * byte offset and to select the appropriate matric column. + * + * With a compile-time constant offset, this function will optimise + * down to a single "movzbl" (to extract the input byte) and will + * generate a single x86 memory reference expression which can then be + * used directly within a single "xorl" instruction. + */ +static inline __attribute__ (( always_inline )) uint32_t +aes_column ( const struct aes_table *table, size_t stride, + const union aes_matrix *in, size_t offset ) { + const union aes_table_entry *entry; + unsigned int byte; + + /* Extract input byte corresponding to this output byte offset + * (i.e. perform [Inv]ShiftRows). + */ + byte = in->byte[ ( stride * offset ) & 0xf ]; + + /* Locate lookup table entry for this input byte (i.e. perform + * [Inv]SubBytes). + */ + entry = &table->entry[byte]; + + /* Multiply appropriate matrix column by this input byte + * (i.e. perform [Inv]MixColumns). + */ + return aes_entry_column ( entry, ( offset & 0x3 ) ); +} + +/** + * Calculate intermediate round output column + * + * @v table AES lookup table + * @v stride AES row shift stride + * @v in AES input state + * @v key AES round key + * @v column Column index + * @ret output Output column value + */ +static inline __attribute__ (( always_inline )) uint32_t +aes_output ( const struct aes_table *table, size_t stride, + const union aes_matrix *in, const union aes_matrix *key, + unsigned int column ) { + size_t offset = ( column * 4 ); + + /* Perform [Inv]ShiftRows, [Inv]SubBytes, [Inv]MixColumns, and + * AddRoundKey for this column. The loop is unrolled to allow + * for the required compile-time constant optimisations. 
+ */ + return ( aes_column ( table, stride, in, ( offset + 0 ) ) ^ + aes_column ( table, stride, in, ( offset + 1 ) ) ^ + aes_column ( table, stride, in, ( offset + 2 ) ) ^ + aes_column ( table, stride, in, ( offset + 3 ) ) ^ + key->column[column] ); +} + +/** + * Perform a single intermediate round + * + * @v table AES lookup table + * @v stride AES row shift stride + * @v in AES input state + * @v out AES output state + * @v key AES round key + */ +static inline __attribute__ (( always_inline )) void +aes_round ( const struct aes_table *table, size_t stride, + const union aes_matrix *in, union aes_matrix *out, + const union aes_matrix *key ) { + + /* Perform [Inv]ShiftRows, [Inv]SubBytes, [Inv]MixColumns, and + * AddRoundKey for all columns. The loop is unrolled to allow + * for the required compile-time constant optimisations. + */ + out->column[0] = aes_output ( table, stride, in, key, 0 ); + out->column[1] = aes_output ( table, stride, in, key, 1 ); + out->column[2] = aes_output ( table, stride, in, key, 2 ); + out->column[3] = aes_output ( table, stride, in, key, 3 ); +} + +/** + * Perform encryption intermediate rounds + * + * @v in AES input state + * @v out AES output state + * @v key Round keys + * @v rounds Number of rounds (must be odd) + * + * This function is deliberately marked as non-inlinable to ensure + * maximal availability of registers for GCC's register allocator, + * which has a tendency to otherwise spill performance-critical + * registers to the stack. 
+ */ +static __attribute__ (( noinline )) void +aes_encrypt_rounds ( union aes_matrix *in, union aes_matrix *out, + const union aes_matrix *key, unsigned int rounds ) { + union aes_matrix *tmp; + + /* Perform intermediate rounds */ + do { + /* Perform one intermediate round */ + aes_round ( &aes_mixcolumns, AES_STRIDE_SHIFTROWS, + in, out, key++ ); + + /* Swap input and output states for next round */ + tmp = in; + in = out; + out = tmp; + + } while ( --rounds ); +} + +/** + * Perform decryption intermediate rounds + * + * @v in AES input state + * @v out AES output state + * @v key Round keys + * @v rounds Number of rounds (must be odd) + * + * As with aes_encrypt_rounds(), this function is deliberately marked + * as non-inlinable. + * + * This function could potentially use the same binary code as is used + * for encryption. To compensate for the difference between ShiftRows + * and InvShiftRows, half of the input byte offsets would have to be + * modifiable at runtime (half by an offset of +4/-4, half by an + * offset of -4/+4 for ShiftRows/InvShiftRows). This can be + * accomplished in x86 assembly within the number of available + * registers, but GCC's register allocator struggles to do so, + * resulting in a significant performance decrease due to registers + * being spilled to the stack. We therefore use two separate but very + * similar binary functions based on the same C source. 
+ */ +static __attribute__ (( noinline )) void +aes_decrypt_rounds ( union aes_matrix *in, union aes_matrix *out, + const union aes_matrix *key, unsigned int rounds ) { + union aes_matrix *tmp; + + /* Perform intermediate rounds */ + do { + /* Perform one intermediate round */ + aes_round ( &aes_invmixcolumns, AES_STRIDE_INVSHIFTROWS, + in, out, key++ ); + + /* Swap input and output states for next round */ + tmp = in; + in = out; + out = tmp; + + } while ( --rounds ); +} + +/** + * Perform standalone AddRoundKey + * + * @v state AES state + * @v key AES round key + */ +static inline __attribute__ (( always_inline )) void +aes_addroundkey ( union aes_matrix *state, const union aes_matrix *key ) { + + state->column[0] ^= key->column[0]; + state->column[1] ^= key->column[1]; + state->column[2] ^= key->column[2]; + state->column[3] ^= key->column[3]; +} + +/** + * Perform final round + * + * @v table AES lookup table + * @v stride AES row shift stride + * @v in AES input state + * @v out AES output state + * @v key AES round key + */ +static void aes_final ( const struct aes_table *table, size_t stride, + const union aes_matrix *in, union aes_matrix *out, + const union aes_matrix *key ) { + const union aes_table_entry *entry; + unsigned int byte; + size_t out_offset; + size_t in_offset; + + /* Perform [Inv]ShiftRows and [Inv]SubBytes */ + for ( out_offset = 0, in_offset = 0 ; out_offset < 16 ; + out_offset++, in_offset = ( ( in_offset + stride ) & 0xf ) ) { + + /* Extract input byte (i.e. perform [Inv]ShiftRows) */ + byte = in->byte[in_offset]; + + /* Locate lookup table entry for this input byte + * (i.e. perform [Inv]SubBytes). 
+ */ + entry = &table->entry[byte]; + + /* Store output byte */ + out->byte[out_offset] = entry->byte[0]; + } + + /* Perform AddRoundKey */ + aes_addroundkey ( out, key ); +} + +/** + * Encrypt data + * + * @v ctx Context + * @v src Data to encrypt + * @v dst Buffer for encrypted data + * @v len Length of data + */ +static void aes_encrypt ( void *ctx, const void *src, void *dst, size_t len ) { + struct aes_context *aes = ctx; + union aes_matrix buffer[2]; + union aes_matrix *in = &buffer[0]; + union aes_matrix *out = &buffer[1]; + unsigned int rounds = aes->rounds; + + /* Sanity check */ + assert ( len == sizeof ( *in ) ); + + /* Initialise input state */ + memcpy ( in, src, sizeof ( *in ) ); + + /* Perform initial round (AddRoundKey) */ + aes_addroundkey ( in, &aes->encrypt.key[0] ); + + /* Perform intermediate rounds (ShiftRows, SubBytes, + * MixColumns, AddRoundKey). + */ + aes_encrypt_rounds ( in, out, &aes->encrypt.key[1], ( rounds - 2 ) ); + in = out; + + /* Perform final round (ShiftRows, SubBytes, AddRoundKey) */ + out = dst; + aes_final ( &aes_mixcolumns, AES_STRIDE_SHIFTROWS, in, out, + &aes->encrypt.key[ rounds - 1 ] ); +} + +/** + * Decrypt data + * + * @v ctx Context + * @v src Data to decrypt + * @v dst Buffer for decrypted data + * @v len Length of data + */ +static void aes_decrypt ( void *ctx, const void *src, void *dst, size_t len ) { + struct aes_context *aes = ctx; + union aes_matrix buffer[2]; + union aes_matrix *in = &buffer[0]; + union aes_matrix *out = &buffer[1]; + unsigned int rounds = aes->rounds; + + /* Sanity check */ + assert ( len == sizeof ( *in ) ); + + /* Initialise input state */ + memcpy ( in, src, sizeof ( *in ) ); + + /* Perform initial round (AddRoundKey) */ + aes_addroundkey ( in, &aes->decrypt.key[0] ); + + /* Perform intermediate rounds (InvShiftRows, InvSubBytes, + * InvMixColumns, AddRoundKey). 
+ */ + aes_decrypt_rounds ( in, out, &aes->decrypt.key[1], ( rounds - 2 ) ); + in = out; + + /* Perform final round (InvShiftRows, InvSubBytes, AddRoundKey) */ + out = dst; + aes_final ( &aes_invmixcolumns, AES_STRIDE_INVSHIFTROWS, in, out, + &aes->decrypt.key[ rounds - 1 ] ); +} + +/** + * Multiply a polynomial by (x) modulo (x^8 + x^4 + x^3 + x^2 + 1) in GF(2^8) + * + * @v poly Polynomial to be multiplied + * @ret result Result + */ +static __attribute__ (( const )) unsigned int aes_double ( unsigned int poly ) { + + /* Multiply polynomial by (x), placing the resulting x^8 + * coefficient in the LSB (i.e. rotate byte left by one). + */ + poly = rol8 ( poly, 1 ); + + /* If coefficient of x^8 (in LSB) is non-zero, then reduce by + * subtracting (x^8 + x^4 + x^3 + x^2 + 1) in GF(2^8). + */ + if ( poly & 0x01 ) { + poly ^= 0x01; /* Subtract x^8 (currently in LSB) */ + poly ^= 0x1b; /* Subtract (x^4 + x^3 + x^2 + 1) */ + } + + return poly; +} + +/** + * Fill in MixColumns lookup table entry + * + * @v entry AES lookup table entry for scalar multiplicand + * + * The MixColumns lookup table vector multiplier is {1,1,1,3,2,1,1,3}. + */ +static void aes_mixcolumns_entry ( union aes_table_entry *entry ) { + unsigned int scalar_x_1; + unsigned int scalar_x; + unsigned int scalar; + + /* Retrieve scalar multiplicand */ + scalar = entry->byte[0]; + entry->byte[1] = scalar; + entry->byte[2] = scalar; + entry->byte[5] = scalar; + entry->byte[6] = scalar; + + /* Calculate scalar multiplied by (x) */ + scalar_x = aes_double ( scalar ); + entry->byte[4] = scalar_x; + + /* Calculate scalar multiplied by (x + 1) */ + scalar_x_1 = ( scalar_x ^ scalar ); + entry->byte[3] = scalar_x_1; + entry->byte[7] = scalar_x_1; +} + +/** + * Fill in InvMixColumns lookup table entry + * + * @v entry AES lookup table entry for scalar multiplicand + * + * The InvMixColumns lookup table vector multiplier is {1,9,13,11,14,9,13,11}. 
+ */ +static void aes_invmixcolumns_entry ( union aes_table_entry *entry ) { + unsigned int scalar_x3_x2_x; + unsigned int scalar_x3_x2_1; + unsigned int scalar_x3_x2; + unsigned int scalar_x3_x_1; + unsigned int scalar_x3_1; + unsigned int scalar_x3; + unsigned int scalar_x2; + unsigned int scalar_x; + unsigned int scalar; + + /* Retrieve scalar multiplicand */ + scalar = entry->byte[0]; + + /* Calculate scalar multiplied by (x) */ + scalar_x = aes_double ( scalar ); + + /* Calculate scalar multiplied by (x^2) */ + scalar_x2 = aes_double ( scalar_x ); + + /* Calculate scalar multiplied by (x^3) */ + scalar_x3 = aes_double ( scalar_x2 ); + + /* Calculate scalar multiplied by (x^3 + 1) */ + scalar_x3_1 = ( scalar_x3 ^ scalar ); + entry->byte[1] = scalar_x3_1; + entry->byte[5] = scalar_x3_1; + + /* Calculate scalar multiplied by (x^3 + x + 1) */ + scalar_x3_x_1 = ( scalar_x3_1 ^ scalar_x ); + entry->byte[3] = scalar_x3_x_1; + entry->byte[7] = scalar_x3_x_1; + + /* Calculate scalar multiplied by (x^3 + x^2) */ + scalar_x3_x2 = ( scalar_x3 ^ scalar_x2 ); + + /* Calculate scalar multiplied by (x^3 + x^2 + 1) */ + scalar_x3_x2_1 = ( scalar_x3_x2 ^ scalar ); + entry->byte[2] = scalar_x3_x2_1; + entry->byte[6] = scalar_x3_x2_1; + + /* Calculate scalar multiplied by (x^3 + x^2 + x) */ + scalar_x3_x2_x = ( scalar_x3_x2 ^ scalar_x ); + entry->byte[4] = scalar_x3_x2_x; +} + +/** + * Generate AES lookup tables + * + */ +static void aes_generate ( void ) { + union aes_table_entry *entry; + union aes_table_entry *inventry; + unsigned int poly = 0x01; + unsigned int invpoly = 0x01; + unsigned int transformed; + unsigned int i; + + /* Iterate over non-zero values of GF(2^8) using generator (x + 1) */ + do { + + /* Multiply polynomial by (x + 1) */ + poly ^= aes_double ( poly ); + + /* Divide inverse polynomial by (x + 1). This code + * fragment is taken directly from the Wikipedia page + * on the Rijndael S-box. An explanation of why it + * works would be greatly appreciated. 
+ */ + invpoly ^= ( invpoly << 1 ); + invpoly ^= ( invpoly << 2 ); + invpoly ^= ( invpoly << 4 ); + if ( invpoly & 0x80 ) + invpoly ^= 0x09; + invpoly &= 0xff; + + /* Apply affine transformation */ + transformed = ( 0x63 ^ invpoly ^ rol8 ( invpoly, 1 ) ^ + rol8 ( invpoly, 2 ) ^ rol8 ( invpoly, 3 ) ^ + rol8 ( invpoly, 4 ) ); + + /* Populate S-box (within MixColumns lookup table) */ + aes_mixcolumns.entry[poly].byte[0] = transformed; + + } while ( poly != 0x01 ); + + /* Populate zeroth S-box entry (which has no inverse) */ + aes_mixcolumns.entry[0].byte[0] = 0x63; + + /* Fill in MixColumns and InvMixColumns lookup tables */ + for ( i = 0 ; i < 256 ; i++ ) { + + /* Fill in MixColumns lookup table entry */ + entry = &aes_mixcolumns.entry[i]; + aes_mixcolumns_entry ( entry ); + + /* Populate inverse S-box (within InvMixColumns lookup table) */ + inventry = &aes_invmixcolumns.entry[ entry->byte[0] ]; + inventry->byte[0] = i; + + /* Fill in InvMixColumns lookup table entry */ + aes_invmixcolumns_entry ( inventry ); + } +} + +/** + * Rotate key column + * + * @v column Key column + * @ret column Updated key column + */ +static inline __attribute__ (( always_inline )) uint32_t +aes_key_rotate ( uint32_t column ) { + + return ( ( __BYTE_ORDER == __LITTLE_ENDIAN ) ? 
+ ror32 ( column, 8 ) : rol32 ( column, 8 ) ); +} + +/** + * Apply S-box to key column + * + * @v column Key column + * @ret column Updated key column + */ +static uint32_t aes_key_sbox ( uint32_t column ) { + unsigned int i; + uint8_t byte; + + for ( i = 0 ; i < 4 ; i++ ) { + byte = ( column & 0xff ); + byte = aes_mixcolumns.entry[byte].byte[0]; + column = ( ( column & ~0xff ) | byte ); + column = rol32 ( column, 8 ); + } + return column; +} + +/** + * Apply schedule round constant to key column + * + * @v column Key column + * @v rcon Round constant + * @ret column Updated key column + */ +static inline __attribute__ (( always_inline )) uint32_t +aes_key_rcon ( uint32_t column, unsigned int rcon ) { + + return ( ( __BYTE_ORDER == __LITTLE_ENDIAN ) ? + ( column ^ rcon ) : ( column ^ ( rcon << 24 ) ) ); +} + +/** + * Set key + * + * @v ctx Context + * @v key Key + * @v keylen Key length + * @ret rc Return status code + */ +static int aes_setkey ( void *ctx, const void *key, size_t keylen ) { + struct aes_context *aes = ctx; + union aes_matrix *enc; + union aes_matrix *dec; + union aes_matrix temp; + union aes_matrix zero; + unsigned int rcon = 0x01; + unsigned int rounds; + size_t offset = 0; + uint32_t *prev; + uint32_t *next; + uint32_t *end; + uint32_t tmp; + + /* Generate lookup tables, if not already done */ + if ( ! 
aes_mixcolumns.entry[0].byte[0] ) + aes_generate(); + + /* Validate key length and calculate number of intermediate rounds */ + switch ( keylen ) { + case ( 128 / 8 ) : + rounds = 11; + break; + case ( 192 / 8 ) : + rounds = 13; + break; + case ( 256 / 8 ) : + rounds = 15; + break; + default: + DBGC ( aes, "AES %p unsupported key length (%zd bits)\n", + aes, ( keylen * 8 ) ); + return -EINVAL; + } + aes->rounds = rounds; + enc = aes->encrypt.key; + end = enc[rounds].column; + + /* Copy raw key */ + memcpy ( enc, key, keylen ); + prev = enc->column; + next = ( ( ( void * ) prev ) + keylen ); + tmp = next[-1]; + + /* Construct expanded key */ + while ( next < end ) { + + /* If this is the first column of an expanded key + * block, or the middle column of an AES-256 key + * block, then apply the S-box. + */ + if ( ( offset == 0 ) || ( ( offset | keylen ) == 48 ) ) + tmp = aes_key_sbox ( tmp ); + + /* If this is the first column of an expanded key + * block then rotate and apply the round constant. + */ + if ( offset == 0 ) { + tmp = aes_key_rotate ( tmp ); + tmp = aes_key_rcon ( tmp, rcon ); + rcon = aes_double ( rcon ); + } + + /* XOR with previous key column */ + tmp ^= *prev; + + /* Store column */ + *next = tmp; + + /* Move to next column */ + offset += sizeof ( *next ); + if ( offset == keylen ) + offset = 0; + next++; + prev++; + } + DBGC2 ( aes, "AES %p expanded %zd-bit key:\n", aes, ( keylen * 8 ) ); + DBGC2_HDA ( aes, 0, &aes->encrypt, ( rounds * sizeof ( *enc ) ) ); + + /* Convert to decryption key */ + memset ( &zero, 0, sizeof ( zero ) ); + dec = &aes->decrypt.key[ rounds - 1 ]; + memcpy ( dec--, enc++, sizeof ( *dec ) ); + while ( dec > aes->decrypt.key ) { + /* Perform InvMixColumns (by reusing the encryption + * final-round code to perform ShiftRows+SubBytes and + * reusing the decryption intermediate-round code to + * perform InvShiftRows+InvSubBytes+InvMixColumns, all + * with a zero encryption key). 
+ */ + aes_final ( &aes_mixcolumns, AES_STRIDE_SHIFTROWS, + enc++, &temp, &zero ); + aes_decrypt_rounds ( &temp, dec--, &zero, 1 ); + } + memcpy ( dec--, enc++, sizeof ( *dec ) ); + DBGC2 ( aes, "AES %p inverted %zd-bit key:\n", aes, ( keylen * 8 ) ); + DBGC2_HDA ( aes, 0, &aes->decrypt, ( rounds * sizeof ( *dec ) ) ); + + return 0; +} + +/** + * Set initialisation vector + * + * @v ctx Context + * @v iv Initialisation vector + */ +static void aes_setiv ( void *ctx __unused, const void *iv __unused ) { + /* Nothing to do */ +} + +/** Basic AES algorithm */ +struct cipher_algorithm aes_algorithm = { + .name = "aes", + .ctxsize = sizeof ( struct aes_context ), + .blocksize = AES_BLOCKSIZE, + .setkey = aes_setkey, + .setiv = aes_setiv, + .encrypt = aes_encrypt, + .decrypt = aes_decrypt, +}; + +/* AES in Electronic Codebook mode */ +ECB_CIPHER ( aes_ecb, aes_ecb_algorithm, + aes_algorithm, struct aes_context, AES_BLOCKSIZE ); + +/* AES in Cipher Block Chaining mode */ +CBC_CIPHER ( aes_cbc, aes_cbc_algorithm, + aes_algorithm, struct aes_context, AES_BLOCKSIZE ); diff --git a/src/crypto/ecb.c b/src/crypto/ecb.c new file mode 100644 index 00000000..3c9cf340 --- /dev/null +++ b/src/crypto/ecb.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2009 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Electronic codebook (ECB) + * + */ + +/** + * Encrypt data + * + * @v ctx Context + * @v src Data to encrypt + * @v dst Buffer for encrypted data + * @v len Length of data + * @v raw_cipher Underlying cipher algorithm + */ +void ecb_encrypt ( void *ctx, const void *src, void *dst, size_t len, + struct cipher_algorithm *raw_cipher ) { + size_t blocksize = raw_cipher->blocksize; + + assert ( ( len % blocksize ) == 0 ); + + while ( len ) { + cipher_encrypt ( raw_cipher, ctx, src, dst, blocksize ); + dst += blocksize; + src += blocksize; + len -= blocksize; + } +} + +/** + * Decrypt data + * + * @v ctx Context + * @v src Data to decrypt + * @v dst Buffer for decrypted data + * @v len Length of data + * @v raw_cipher Underlying cipher algorithm + */ +void ecb_decrypt ( void *ctx, const void *src, void *dst, size_t len, + struct cipher_algorithm *raw_cipher ) { + size_t blocksize = raw_cipher->blocksize; + + assert ( ( len % blocksize ) == 0 ); + + while ( len ) { + cipher_decrypt ( raw_cipher, ctx, src, dst, blocksize ); + dst += blocksize; + src += blocksize; + len -= blocksize; + } +} diff --git a/src/crypto/md4.c b/src/crypto/md4.c new file mode 100644 index 00000000..ca5dcc21 --- /dev/null +++ b/src/crypto/md4.c @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * MD4 algorithm + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/** MD4 variables */ +struct md4_variables { + /* This layout matches that of struct md4_digest_data, + * allowing for efficient endianness-conversion, + */ + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; + uint32_t w[16]; +} __attribute__ (( packed )); + +/** MD4 shift amounts */ +static const uint8_t r[3][4] = { + { 3, 7, 11, 19 }, + { 3, 5, 9, 13 }, + { 3, 9, 11, 15 }, +}; + +/** + * f(b,c,d,w) for steps 0 to 15 + * + * @v v MD4 variables + * @v i Index within round + * @ret f f(b,c,d,w) + */ +static uint32_t md4_f_0_15 ( struct md4_variables *v, unsigned int i ) { + return ( ( ( v->b & v->c ) | ( ~v->b & v->d ) ) + v->w[i] ); +} + +/** + * f(b,c,d,w) for steps 16 to 31 + * + * @v v MD4 variables + * @v i Index within round + * @ret f f(b,c,d,w) + */ +static uint32_t md4_f_16_31 ( struct md4_variables *v, unsigned int i ) { + return ( ( ( v->b & v->c ) | ( v->b & v->d ) | ( v->c & v->d ) ) + + v->w[ ( ( i << 2 ) | ( i >> 2 ) ) % 16 ] ); +} + +/** + * f(b,c,d,w) for steps 32 to 47 + * + * @v v MD4 variables + * @v i Index within round + * @ret f f(b,c,d,w) + */ +static uint32_t md4_f_32_47 ( struct md4_variables 
*v, unsigned int i ) { + static const uint8_t reverse[16] = { + 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15 + }; + return ( ( v->b ^ v->c ^ v->d ) + v->w[reverse[i]] ); +} + +/** An MD4 step function */ +struct md4_step { + /** + * Calculate f(b,c,d,w) + * + * @v v MD4 variables + * @v i Index within round + * @ret f f(b,c,d,w) + */ + uint32_t ( * f ) ( struct md4_variables *v, unsigned int i ); + /** Constant */ + uint32_t constant; +}; + +/** MD4 steps */ +static struct md4_step md4_steps[4] = { + /** 0 to 15 */ + { .f = md4_f_0_15, .constant = 0x00000000UL }, + /** 16 to 31 */ + { .f = md4_f_16_31, .constant = 0x5a827999UL }, + /** 32 to 47 */ + { .f = md4_f_32_47, .constant = 0x6ed9eba1UL }, +}; + +/** + * Initialise MD4 algorithm + * + * @v ctx MD4 context + */ +static void md4_init ( void *ctx ) { + struct md4_context *context = ctx; + + context->ddd.dd.digest.h[0] = cpu_to_le32 ( 0x67452301 ); + context->ddd.dd.digest.h[1] = cpu_to_le32 ( 0xefcdab89 ); + context->ddd.dd.digest.h[2] = cpu_to_le32 ( 0x98badcfe ); + context->ddd.dd.digest.h[3] = cpu_to_le32 ( 0x10325476 ); + context->len = 0; +} + +/** + * Calculate MD4 digest of accumulated data + * + * @v context MD4 context + */ +static void md4_digest ( struct md4_context *context ) { + union { + union md4_digest_data_dwords ddd; + struct md4_variables v; + } u; + uint32_t *a = &u.v.a; + uint32_t *b = &u.v.b; + uint32_t *c = &u.v.c; + uint32_t *d = &u.v.d; + uint32_t *w = u.v.w; + uint32_t f; + uint32_t temp; + struct md4_step *step; + unsigned int round; + unsigned int i; + + /* Sanity checks */ + assert ( ( context->len % sizeof ( context->ddd.dd.data ) ) == 0 ); + linker_assert ( &u.ddd.dd.digest.h[0] == a, md4_bad_layout ); + linker_assert ( &u.ddd.dd.digest.h[1] == b, md4_bad_layout ); + linker_assert ( &u.ddd.dd.digest.h[2] == c, md4_bad_layout ); + linker_assert ( &u.ddd.dd.digest.h[3] == d, md4_bad_layout ); + linker_assert ( &u.ddd.dd.data.dword[0] == w, md4_bad_layout ); + + DBGC ( 
context, "MD4 digesting:\n" ); + DBGC_HDA ( context, 0, &context->ddd.dd.digest, + sizeof ( context->ddd.dd.digest ) ); + DBGC_HDA ( context, context->len, &context->ddd.dd.data, + sizeof ( context->ddd.dd.data ) ); + + /* Convert h[0..3] to host-endian, and initialise a, b, c, d, + * and x[0..15] + */ + for ( i = 0 ; i < ( sizeof ( u.ddd.dword ) / + sizeof ( u.ddd.dword[0] ) ) ; i++ ) { + le32_to_cpus ( &context->ddd.dword[i] ); + u.ddd.dword[i] = context->ddd.dword[i]; + } + + /* Main loop */ + for ( i = 0 ; i < 48 ; i++ ) { + round = ( i / 16 ); + step = &md4_steps[round]; + f = step->f ( &u.v, ( i % 16 ) ); + temp = *d; + *d = *c; + *c = *b; + *b = rol32 ( ( *a + f + step->constant ), r[round][ i % 4 ] ); + *a = temp; + DBGC2 ( context, "%2d : %08x %08x %08x %08x\n", + i, *a, *b, *c, *d ); + } + + /* Add chunk to hash and convert back to little-endian */ + for ( i = 0 ; i < 4 ; i++ ) { + context->ddd.dd.digest.h[i] = + cpu_to_le32 ( context->ddd.dd.digest.h[i] + + u.ddd.dd.digest.h[i] ); + } + + DBGC ( context, "MD4 digested:\n" ); + DBGC_HDA ( context, 0, &context->ddd.dd.digest, + sizeof ( context->ddd.dd.digest ) ); +} + +/** + * Accumulate data with MD4 algorithm + * + * @v ctx MD4 context + * @v data Data + * @v len Length of data + */ +static void md4_update ( void *ctx, const void *data, size_t len ) { + struct md4_context *context = ctx; + const uint8_t *byte = data; + size_t offset; + + /* Accumulate data a byte at a time, performing the digest + * whenever we fill the data buffer + */ + while ( len-- ) { + offset = ( context->len % sizeof ( context->ddd.dd.data ) ); + context->ddd.dd.data.byte[offset] = *(byte++); + context->len++; + if ( ( context->len % sizeof ( context->ddd.dd.data ) ) == 0 ) + md4_digest ( context ); + } +} + +/** + * Generate MD4 digest + * + * @v ctx MD4 context + * @v out Output buffer + */ +static void md4_final ( void *ctx, void *out ) { + struct md4_context *context = ctx; + uint64_t len_bits; + uint8_t pad; + + /* Record 
length before pre-processing */ + len_bits = cpu_to_le64 ( ( ( uint64_t ) context->len ) * 8 ); + + /* Pad with a single "1" bit followed by as many "0" bits as required */ + pad = 0x80; + do { + md4_update ( ctx, &pad, sizeof ( pad ) ); + pad = 0x00; + } while ( ( context->len % sizeof ( context->ddd.dd.data ) ) != + offsetof ( typeof ( context->ddd.dd.data ), final.len ) ); + + /* Append length (in bits) */ + md4_update ( ctx, &len_bits, sizeof ( len_bits ) ); + assert ( ( context->len % sizeof ( context->ddd.dd.data ) ) == 0 ); + + /* Copy out final digest */ + memcpy ( out, &context->ddd.dd.digest, + sizeof ( context->ddd.dd.digest ) ); +} + +/** MD4 algorithm */ +struct digest_algorithm md4_algorithm = { + .name = "md4", + .ctxsize = sizeof ( struct md4_context ), + .blocksize = sizeof ( union md4_block ), + .digestsize = sizeof ( struct md4_digest ), + .init = md4_init, + .update = md4_update, + .final = md4_final, +}; diff --git a/src/crypto/mishmash/oid_md4.c b/src/crypto/mishmash/oid_md4.c new file mode 100644 index 00000000..1054a79b --- /dev/null +++ b/src/crypto/mishmash/oid_md4.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "md4" object identifier */ +static uint8_t oid_md4[] = { ASN1_OID_MD4 }; + +/** "md4" OID-identified algorithm */ +struct asn1_algorithm oid_md4_algorithm __asn1_algorithm = { + .name = "md4", + .digest = &md4_algorithm, + .oid = ASN1_OID_CURSOR ( oid_md4 ), +}; diff --git a/src/crypto/mishmash/oid_md5.c b/src/crypto/mishmash/oid_md5.c new file mode 100644 index 00000000..96149d09 --- /dev/null +++ b/src/crypto/mishmash/oid_md5.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "md5" object identifier */ +static uint8_t oid_md5[] = { ASN1_OID_MD5 }; + +/** "md5" OID-identified algorithm */ +struct asn1_algorithm oid_md5_algorithm __asn1_algorithm = { + .name = "md5", + .digest = &md5_algorithm, + .oid = ASN1_OID_CURSOR ( oid_md5 ), +}; diff --git a/src/crypto/mishmash/oid_rsa.c b/src/crypto/mishmash/oid_rsa.c new file mode 100644 index 00000000..1360c311 --- /dev/null +++ b/src/crypto/mishmash/oid_rsa.c @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "rsaEncryption" object identifier */ +static uint8_t oid_rsa_encryption[] = { ASN1_OID_RSAENCRYPTION }; + +/** "rsaEncryption" OID-identified algorithm */ +struct asn1_algorithm rsa_encryption_algorithm __asn1_algorithm = { + .name = "rsaEncryption", + .pubkey = &rsa_algorithm, + .digest = NULL, + .oid = ASN1_OID_CURSOR ( oid_rsa_encryption ), +}; diff --git a/src/crypto/mishmash/oid_sha1.c b/src/crypto/mishmash/oid_sha1.c new file mode 100644 index 00000000..0ab3bac6 --- /dev/null +++ b/src/crypto/mishmash/oid_sha1.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha1" object identifier */ +static uint8_t oid_sha1[] = { ASN1_OID_SHA1 }; + +/** "sha1" OID-identified algorithm */ +struct asn1_algorithm oid_sha1_algorithm __asn1_algorithm = { + .name = "sha1", + .digest = &sha1_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha1 ), +}; diff --git a/src/crypto/mishmash/oid_sha224.c b/src/crypto/mishmash/oid_sha224.c new file mode 100644 index 00000000..1ff6884a --- /dev/null +++ b/src/crypto/mishmash/oid_sha224.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha224" object identifier */ +static uint8_t oid_sha224[] = { ASN1_OID_SHA224 }; + +/** "sha224" OID-identified algorithm */ +struct asn1_algorithm oid_sha224_algorithm __asn1_algorithm = { + .name = "sha224", + .digest = &sha224_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha224 ), +}; diff --git a/src/crypto/mishmash/oid_sha256.c b/src/crypto/mishmash/oid_sha256.c new file mode 100644 index 00000000..51ea585c --- /dev/null +++ b/src/crypto/mishmash/oid_sha256.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha256" object identifier */ +static uint8_t oid_sha256[] = { ASN1_OID_SHA256 }; + +/** "sha256" OID-identified algorithm */ +struct asn1_algorithm oid_sha256_algorithm __asn1_algorithm = { + .name = "sha256", + .digest = &sha256_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha256 ), +}; diff --git a/src/crypto/mishmash/oid_sha384.c b/src/crypto/mishmash/oid_sha384.c new file mode 100644 index 00000000..5ba4d60a --- /dev/null +++ b/src/crypto/mishmash/oid_sha384.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha384" object identifier */ +static uint8_t oid_sha384[] = { ASN1_OID_SHA384 }; + +/** "sha384" OID-identified algorithm */ +struct asn1_algorithm oid_sha384_algorithm __asn1_algorithm = { + .name = "sha384", + .digest = &sha384_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha384 ), +}; diff --git a/src/crypto/mishmash/oid_sha512.c b/src/crypto/mishmash/oid_sha512.c new file mode 100644 index 00000000..38e3c1a3 --- /dev/null +++ b/src/crypto/mishmash/oid_sha512.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha512" object identifier */ +static uint8_t oid_sha512[] = { ASN1_OID_SHA512 }; + +/** "sha512" OID-identified algorithm */ +struct asn1_algorithm oid_sha512_algorithm __asn1_algorithm = { + .name = "sha512", + .digest = &sha512_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha512 ), +}; diff --git a/src/crypto/mishmash/oid_sha512_224.c b/src/crypto/mishmash/oid_sha512_224.c new file mode 100644 index 00000000..2300dad6 --- /dev/null +++ b/src/crypto/mishmash/oid_sha512_224.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha512_224" object identifier */ +static uint8_t oid_sha512_224[] = { ASN1_OID_SHA512_224 }; + +/** "sha512_224" OID-identified algorithm */ +struct asn1_algorithm oid_sha512_224_algorithm __asn1_algorithm = { + .name = "sha512/224", + .digest = &sha512_224_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha512_224 ), +}; diff --git a/src/crypto/mishmash/oid_sha512_256.c b/src/crypto/mishmash/oid_sha512_256.c new file mode 100644 index 00000000..6af61fea --- /dev/null +++ b/src/crypto/mishmash/oid_sha512_256.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha512_256" object identifier */ +static uint8_t oid_sha512_256[] = { ASN1_OID_SHA512_256 }; + +/** "sha512_256" OID-identified algorithm */ +struct asn1_algorithm oid_sha512_256_algorithm __asn1_algorithm = { + .name = "sha512/256", + .digest = &sha512_256_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha512_256 ), +}; diff --git a/src/crypto/mishmash/rsa_aes_cbc_sha1.c b/src/crypto/mishmash/rsa_aes_cbc_sha1.c new file mode 100644 index 00000000..06722c0e --- /dev/null +++ b/src/crypto/mishmash/rsa_aes_cbc_sha1.c @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** TLS_RSA_WITH_AES_128_CBC_SHA cipher suite */ +struct tls_cipher_suite tls_rsa_with_aes_128_cbc_sha __tls_cipher_suite (03) = { + .code = htons ( TLS_RSA_WITH_AES_128_CBC_SHA ), + .key_len = ( 128 / 8 ), + .pubkey = &rsa_algorithm, + .cipher = &aes_cbc_algorithm, + .digest = &sha1_algorithm, +}; + +/** TLS_RSA_WITH_AES_256_CBC_SHA cipher suite */ +struct tls_cipher_suite tls_rsa_with_aes_256_cbc_sha __tls_cipher_suite (04) = { + .code = htons ( TLS_RSA_WITH_AES_256_CBC_SHA ), + .key_len = ( 256 / 8 ), + .pubkey = &rsa_algorithm, + .cipher = &aes_cbc_algorithm, + .digest = &sha1_algorithm, +}; diff --git a/src/crypto/mishmash/rsa_aes_cbc_sha256.c b/src/crypto/mishmash/rsa_aes_cbc_sha256.c new file mode 100644 index 00000000..c609eace --- /dev/null +++ b/src/crypto/mishmash/rsa_aes_cbc_sha256.c @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** TLS_RSA_WITH_AES_128_CBC_SHA256 cipher suite */ +struct tls_cipher_suite tls_rsa_with_aes_128_cbc_sha256 __tls_cipher_suite(01)={ + .code = htons ( TLS_RSA_WITH_AES_128_CBC_SHA256 ), + .key_len = ( 128 / 8 ), + .pubkey = &rsa_algorithm, + .cipher = &aes_cbc_algorithm, + .digest = &sha256_algorithm, +}; + +/** TLS_RSA_WITH_AES_256_CBC_SHA256 cipher suite */ +struct tls_cipher_suite tls_rsa_with_aes_256_cbc_sha256 __tls_cipher_suite(02)={ + .code = htons ( TLS_RSA_WITH_AES_256_CBC_SHA256 ), + .key_len = ( 256 / 8 ), + .pubkey = &rsa_algorithm, + .cipher = &aes_cbc_algorithm, + .digest = &sha256_algorithm, +}; diff --git a/src/crypto/mishmash/rsa_md5.c b/src/crypto/mishmash/rsa_md5.c new file mode 100644 index 00000000..ac828ac1 --- /dev/null +++ b/src/crypto/mishmash/rsa_md5.c @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** "md5WithRSAEncryption" object identifier */ +static uint8_t oid_md5_with_rsa_encryption[] = + { ASN1_OID_MD5WITHRSAENCRYPTION }; + +/** "md5WithRSAEncryption" OID-identified algorithm */ +struct asn1_algorithm md5_with_rsa_encryption_algorithm __asn1_algorithm = { + .name = "md5WithRSAEncryption", + .pubkey = &rsa_algorithm, + .digest = &md5_algorithm, + .oid = ASN1_OID_CURSOR ( oid_md5_with_rsa_encryption ), +}; + +/** MD5 digestInfo prefix */ +static const uint8_t rsa_md5_prefix_data[] = + { RSA_DIGESTINFO_PREFIX ( MD5_DIGEST_SIZE, ASN1_OID_MD5 ) }; + +/** MD5 digestInfo prefix */ +struct rsa_digestinfo_prefix rsa_md5_prefix __rsa_digestinfo_prefix = { + .digest = &md5_algorithm, + .data = rsa_md5_prefix_data, + .len = sizeof ( rsa_md5_prefix_data ), +}; diff --git a/src/crypto/mishmash/rsa_sha1.c b/src/crypto/mishmash/rsa_sha1.c new file mode 100644 index 00000000..39424bf2 --- /dev/null +++ b/src/crypto/mishmash/rsa_sha1.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** "sha1WithRSAEncryption" object identifier */ +static uint8_t oid_sha1_with_rsa_encryption[] = + { ASN1_OID_SHA1WITHRSAENCRYPTION }; + +/** "sha1WithRSAEncryption" OID-identified algorithm */ +struct asn1_algorithm sha1_with_rsa_encryption_algorithm __asn1_algorithm = { + .name = "sha1WithRSAEncryption", + .pubkey = &rsa_algorithm, + .digest = &sha1_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha1_with_rsa_encryption ), +}; + +/** SHA-1 digestInfo prefix */ +static const uint8_t rsa_sha1_prefix_data[] = + { RSA_DIGESTINFO_PREFIX ( SHA1_DIGEST_SIZE, ASN1_OID_SHA1 ) }; + +/** SHA-1 digestInfo prefix */ +struct rsa_digestinfo_prefix rsa_sha1_prefix __rsa_digestinfo_prefix = { + .digest = &sha1_algorithm, + .data = rsa_sha1_prefix_data, + .len = sizeof ( rsa_sha1_prefix_data ), +}; + +/** RSA with SHA-1 signature hash algorithm */ +struct tls_signature_hash_algorithm tls_rsa_sha1 __tls_sig_hash_algorithm = { + .code = { + .signature = TLS_RSA_ALGORITHM, + .hash = TLS_SHA1_ALGORITHM, + }, + .pubkey = &rsa_algorithm, + .digest = &sha1_algorithm, +}; diff --git a/src/crypto/mishmash/rsa_sha224.c b/src/crypto/mishmash/rsa_sha224.c new file mode 100644 index 00000000..5e8755aa --- /dev/null +++ b/src/crypto/mishmash/rsa_sha224.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** "sha224WithRSAEncryption" object identifier */ +static uint8_t oid_sha224_with_rsa_encryption[] = + { ASN1_OID_SHA224WITHRSAENCRYPTION }; + +/** "sha224WithRSAEncryption" OID-identified algorithm */ +struct asn1_algorithm sha224_with_rsa_encryption_algorithm __asn1_algorithm = { + .name = "sha224WithRSAEncryption", + .pubkey = &rsa_algorithm, + .digest = &sha224_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha224_with_rsa_encryption ), +}; + +/** SHA-224 digestInfo prefix */ +static const uint8_t rsa_sha224_prefix_data[] = + { RSA_DIGESTINFO_PREFIX ( SHA224_DIGEST_SIZE, ASN1_OID_SHA224 ) }; + +/** SHA-224 digestInfo prefix */ +struct rsa_digestinfo_prefix rsa_sha224_prefix __rsa_digestinfo_prefix = { + .digest = &sha224_algorithm, + .data = rsa_sha224_prefix_data, + .len = sizeof ( rsa_sha224_prefix_data ), +}; + +/** RSA with SHA-224 signature hash algorithm */ +struct tls_signature_hash_algorithm tls_rsa_sha224 __tls_sig_hash_algorithm = { + .code = { + .signature = TLS_RSA_ALGORITHM, + .hash = TLS_SHA224_ALGORITHM, + }, + .pubkey = &rsa_algorithm, + .digest = &sha224_algorithm, +}; diff --git a/src/crypto/mishmash/rsa_sha256.c b/src/crypto/mishmash/rsa_sha256.c new file mode 100644 index 
00000000..b44af5f1 --- /dev/null +++ b/src/crypto/mishmash/rsa_sha256.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** "sha256WithRSAEncryption" object identifier */ +static uint8_t oid_sha256_with_rsa_encryption[] = + { ASN1_OID_SHA256WITHRSAENCRYPTION }; + +/** "sha256WithRSAEncryption" OID-identified algorithm */ +struct asn1_algorithm sha256_with_rsa_encryption_algorithm __asn1_algorithm = { + .name = "sha256WithRSAEncryption", + .pubkey = &rsa_algorithm, + .digest = &sha256_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha256_with_rsa_encryption ), +}; + +/** SHA-256 digestInfo prefix */ +static const uint8_t rsa_sha256_prefix_data[] = + { RSA_DIGESTINFO_PREFIX ( SHA256_DIGEST_SIZE, ASN1_OID_SHA256 ) }; + +/** SHA-256 digestInfo prefix */ +struct rsa_digestinfo_prefix rsa_sha256_prefix __rsa_digestinfo_prefix = { + .digest = &sha256_algorithm, + .data = rsa_sha256_prefix_data, + .len = sizeof ( rsa_sha256_prefix_data ), +}; + +/** RSA with SHA-256 signature hash algorithm */ +struct tls_signature_hash_algorithm tls_rsa_sha256 __tls_sig_hash_algorithm = { + .code = { + .signature = TLS_RSA_ALGORITHM, + .hash = TLS_SHA256_ALGORITHM, + }, + .pubkey = &rsa_algorithm, + .digest = &sha256_algorithm, +}; diff --git a/src/crypto/mishmash/rsa_sha384.c b/src/crypto/mishmash/rsa_sha384.c new file mode 100644 index 00000000..af22a2bf --- /dev/null +++ b/src/crypto/mishmash/rsa_sha384.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** "sha384WithRSAEncryption" object identifier */ +static uint8_t oid_sha384_with_rsa_encryption[] = + { ASN1_OID_SHA384WITHRSAENCRYPTION }; + +/** "sha384WithRSAEncryption" OID-identified algorithm */ +struct asn1_algorithm sha384_with_rsa_encryption_algorithm __asn1_algorithm = { + .name = "sha384WithRSAEncryption", + .pubkey = &rsa_algorithm, + .digest = &sha384_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha384_with_rsa_encryption ), +}; + +/** SHA-384 digestInfo prefix */ +static const uint8_t rsa_sha384_prefix_data[] = + { RSA_DIGESTINFO_PREFIX ( SHA384_DIGEST_SIZE, ASN1_OID_SHA384 ) }; + +/** SHA-384 digestInfo prefix */ +struct rsa_digestinfo_prefix rsa_sha384_prefix __rsa_digestinfo_prefix = { + .digest = &sha384_algorithm, + .data = rsa_sha384_prefix_data, + .len = sizeof ( rsa_sha384_prefix_data ), +}; + +/** RSA with SHA-384 signature hash algorithm */ +struct tls_signature_hash_algorithm tls_rsa_sha384 __tls_sig_hash_algorithm = { + .code = { + .signature = TLS_RSA_ALGORITHM, + .hash = TLS_SHA384_ALGORITHM, + }, + .pubkey = &rsa_algorithm, + .digest = &sha384_algorithm, +}; diff --git a/src/crypto/mishmash/rsa_sha512.c b/src/crypto/mishmash/rsa_sha512.c new file mode 100644 index 00000000..29ee1549 --- /dev/null +++ b/src/crypto/mishmash/rsa_sha512.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** "sha512WithRSAEncryption" object identifier */ +static uint8_t oid_sha512_with_rsa_encryption[] = + { ASN1_OID_SHA512WITHRSAENCRYPTION }; + +/** "sha512WithRSAEncryption" OID-identified algorithm */ +struct asn1_algorithm sha512_with_rsa_encryption_algorithm __asn1_algorithm = { + .name = "sha512WithRSAEncryption", + .pubkey = &rsa_algorithm, + .digest = &sha512_algorithm, + .oid = ASN1_OID_CURSOR ( oid_sha512_with_rsa_encryption ), +}; + +/** SHA-512 digestInfo prefix */ +static const uint8_t rsa_sha512_prefix_data[] = + { RSA_DIGESTINFO_PREFIX ( SHA512_DIGEST_SIZE, ASN1_OID_SHA512 ) }; + +/** SHA-512 digestInfo prefix */ +struct rsa_digestinfo_prefix rsa_sha512_prefix __rsa_digestinfo_prefix = { + .digest = &sha512_algorithm, + .data = rsa_sha512_prefix_data, + .len = sizeof ( rsa_sha512_prefix_data ), +}; + +/** RSA with SHA-512 signature hash algorithm */ +struct tls_signature_hash_algorithm tls_rsa_sha512 __tls_sig_hash_algorithm = 
{ + .code = { + .signature = TLS_RSA_ALGORITHM, + .hash = TLS_SHA512_ALGORITHM, + }, + .pubkey = &rsa_algorithm, + .digest = &sha512_algorithm, +}; diff --git a/src/crypto/ntlm.c b/src/crypto/ntlm.c new file mode 100644 index 00000000..870af213 --- /dev/null +++ b/src/crypto/ntlm.c @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * NT LAN Manager (NTLM) authentication + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** Negotiate message + * + * This message content is fixed since there is no need to specify the + * calling workstation name or domain name, and the set of flags is + * mandated by the MS-NLMP specification. 
+ */ +const struct ntlm_negotiate ntlm_negotiate = { + .header = { + .magic = NTLM_MAGIC, + .type = cpu_to_le32 ( NTLM_NEGOTIATE ), + }, + .flags = cpu_to_le32 ( NTLM_NEGOTIATE_EXTENDED_SESSIONSECURITY | + NTLM_NEGOTIATE_ALWAYS_SIGN | + NTLM_NEGOTIATE_NTLM | + NTLM_REQUEST_TARGET | + NTLM_NEGOTIATE_UNICODE ), +}; + +/** + * Parse NTLM Challenge + * + * @v challenge Challenge message + * @v len Length of Challenge message + * @v info Challenge information to fill in + * @ret rc Return status code + */ +int ntlm_challenge ( struct ntlm_challenge *challenge, size_t len, + struct ntlm_challenge_info *info ) { + size_t offset; + + DBGC ( challenge, "NTLM challenge message:\n" ); + DBGC_HDA ( challenge, 0, challenge, len ); + + /* Sanity checks */ + if ( len < sizeof ( *challenge ) ) { + DBGC ( challenge, "NTLM underlength challenge (%zd bytes)\n", + len ); + return -EINVAL; + } + + /* Extract nonce */ + info->nonce = &challenge->nonce; + DBGC ( challenge, "NTLM challenge nonce:\n" ); + DBGC_HDA ( challenge, 0, info->nonce, sizeof ( *info->nonce ) ); + + /* Extract target information */ + info->len = le16_to_cpu ( challenge->info.len ); + offset = le32_to_cpu ( challenge->info.offset ); + if ( ( offset > len ) || + ( info->len > ( len - offset ) ) ) { + DBGC ( challenge, "NTLM target information outside " + "challenge\n" ); + DBGC_HDA ( challenge, 0, challenge, len ); + return -EINVAL; + } + info->target = ( ( ( void * ) challenge ) + offset ); + DBGC ( challenge, "NTLM challenge target information:\n" ); + DBGC_HDA ( challenge, 0, info->target, info->len ); + + return 0; +} + +/** + * Calculate NTLM verification key + * + * @v domain Domain name (or NULL) + * @v username User name (or NULL) + * @v password Password (or NULL) + * @v key Key to fill in + * + * This is the NTOWFv2() function as defined in MS-NLMP. 
+ */ +void ntlm_key ( const char *domain, const char *username, + const char *password, struct ntlm_key *key ) { + struct digest_algorithm *md4 = &md4_algorithm; + struct digest_algorithm *md5 = &md5_algorithm; + union { + uint8_t md4[MD4_CTX_SIZE]; + uint8_t md5[MD5_CTX_SIZE]; + } ctx; + uint8_t digest[MD4_DIGEST_SIZE]; + size_t digest_len; + uint8_t c; + uint16_t wc; + + /* Use empty usernames/passwords if not specified */ + if ( ! domain ) + domain = ""; + if ( ! username ) + username = ""; + if ( ! password ) + password = ""; + + /* Construct MD4 digest of (Unicode) password */ + digest_init ( md4, ctx.md4 ); + while ( ( c = *(password++) ) ) { + wc = cpu_to_le16 ( c ); + digest_update ( md4, ctx.md4, &wc, sizeof ( wc ) ); + } + digest_final ( md4, ctx.md4, digest ); + + /* Construct HMAC-MD5 of (Unicode) upper-case username */ + digest_len = sizeof ( digest ); + hmac_init ( md5, ctx.md5, digest, &digest_len ); + while ( ( c = *(username++) ) ) { + wc = cpu_to_le16 ( toupper ( c ) ); + hmac_update ( md5, ctx.md5, &wc, sizeof ( wc ) ); + } + while ( ( c = *(domain++) ) ) { + wc = cpu_to_le16 ( c ); + hmac_update ( md5, ctx.md5, &wc, sizeof ( wc ) ); + } + hmac_final ( md5, ctx.md5, digest, &digest_len, key->raw ); + DBGC ( key, "NTLM key:\n" ); + DBGC_HDA ( key, 0, key, sizeof ( *key ) ); +} + +/** + * Construct NTLM responses + * + * @v info Challenge information + * @v key Verification key + * @v nonce Nonce, or NULL to use a random nonce + * @v lm LAN Manager response to fill in + * @v nt NT response to fill in + */ +void ntlm_response ( struct ntlm_challenge_info *info, struct ntlm_key *key, + struct ntlm_nonce *nonce, struct ntlm_lm_response *lm, + struct ntlm_nt_response *nt ) { + struct digest_algorithm *md5 = &md5_algorithm; + struct ntlm_nonce tmp_nonce; + uint8_t ctx[MD5_CTX_SIZE]; + size_t key_len = sizeof ( *key ); + unsigned int i; + + /* Generate random nonce, if needed */ + if ( ! 
nonce ) { + for ( i = 0 ; i < sizeof ( tmp_nonce ) ; i++ ) + tmp_nonce.raw[i] = random(); + nonce = &tmp_nonce; + } + + /* Construct LAN Manager response */ + memcpy ( &lm->nonce, nonce, sizeof ( lm->nonce ) ); + hmac_init ( md5, ctx, key->raw, &key_len ); + hmac_update ( md5, ctx, info->nonce, sizeof ( *info->nonce ) ); + hmac_update ( md5, ctx, &lm->nonce, sizeof ( lm->nonce ) ); + hmac_final ( md5, ctx, key->raw, &key_len, lm->digest ); + DBGC ( key, "NTLM LAN Manager response:\n" ); + DBGC_HDA ( key, 0, lm, sizeof ( *lm ) ); + + /* Construct NT response */ + memset ( nt, 0, sizeof ( *nt ) ); + nt->version = NTLM_VERSION_NTLMV2; + nt->high = NTLM_VERSION_NTLMV2; + memcpy ( &nt->nonce, nonce, sizeof ( nt->nonce ) ); + hmac_init ( md5, ctx, key->raw, &key_len ); + hmac_update ( md5, ctx, info->nonce, sizeof ( *info->nonce ) ); + hmac_update ( md5, ctx, &nt->version, + ( sizeof ( *nt ) - + offsetof ( typeof ( *nt ), version ) ) ); + hmac_update ( md5, ctx, info->target, info->len ); + hmac_update ( md5, ctx, &nt->zero, sizeof ( nt->zero ) ); + hmac_final ( md5, ctx, key->raw, &key_len, nt->digest ); + DBGC ( key, "NTLM NT response prefix:\n" ); + DBGC_HDA ( key, 0, nt, sizeof ( *nt ) ); +} + +/** + * Append data to NTLM message + * + * @v header Message header, or NULL to only calculate next payload + * @v data Data descriptor + * @v payload Data payload + * @v len Length of data + * @ret payload Next data payload + */ +static void * ntlm_append ( struct ntlm_header *header, struct ntlm_data *data, + void *payload, size_t len ) { + + /* Populate data descriptor */ + if ( header ) { + data->offset = cpu_to_le32 ( payload - ( ( void * ) header ) ); + data->len = data->max_len = cpu_to_le16 ( len ); + } + + return ( payload + len ); +} + +/** + * Append Unicode string data to NTLM message + * + * @v header Message header, or NULL to only calculate next payload + * @v data Data descriptor + * @v payload Data payload + * @v string String to append, or NULL + * @ret 
payload Next data payload + */ +static void * ntlm_append_string ( struct ntlm_header *header, + struct ntlm_data *data, void *payload, + const char *string ) { + uint16_t *tmp = payload; + uint8_t c; + + /* Convert string to Unicode */ + for ( tmp = payload ; ( string && ( c = *(string++) ) ) ; tmp++ ) { + if ( header ) + *tmp = cpu_to_le16 ( c ); + } + + /* Append string data */ + return ntlm_append ( header, data, payload, + ( ( ( void * ) tmp ) - payload ) ); +} + +/** + * Construct NTLM Authenticate message + * + * @v info Challenge information + * @v domain Domain name, or NULL + * @v username User name, or NULL + * @v workstation Workstation name, or NULL + * @v lm LAN Manager response + * @v nt NT response + * @v auth Message to fill in, or NULL to only calculate length + * @ret len Length of message + */ +size_t ntlm_authenticate ( struct ntlm_challenge_info *info, const char *domain, + const char *username, const char *workstation, + struct ntlm_lm_response *lm, + struct ntlm_nt_response *nt, + struct ntlm_authenticate *auth ) { + void *tmp; + size_t nt_len; + size_t len; + + /* Construct response header */ + if ( auth ) { + memset ( auth, 0, sizeof ( *auth ) ); + memcpy ( auth->header.magic, ntlm_negotiate.header.magic, + sizeof ( auth->header.magic ) ); + auth->header.type = cpu_to_le32 ( NTLM_AUTHENTICATE ); + auth->flags = ntlm_negotiate.flags; + } + tmp = ( ( ( void * ) auth ) + sizeof ( *auth ) ); + + /* Construct LAN Manager response */ + if ( auth ) + memcpy ( tmp, lm, sizeof ( *lm ) ); + tmp = ntlm_append ( &auth->header, &auth->lm, tmp, sizeof ( *lm ) ); + + /* Construct NT response */ + nt_len = ( sizeof ( *nt ) + info->len + sizeof ( nt->zero ) ); + if ( auth ) { + memcpy ( tmp, nt, sizeof ( *nt ) ); + memcpy ( ( tmp + sizeof ( *nt ) ), info->target, info->len ); + memset ( ( tmp + sizeof ( *nt ) + info->len ), 0, + sizeof ( nt->zero ) ); + } + tmp = ntlm_append ( &auth->header, &auth->nt, tmp, nt_len ); + + /* Populate domain, user, and 
workstation names */ + tmp = ntlm_append_string ( &auth->header, &auth->domain, tmp, domain ); + tmp = ntlm_append_string ( &auth->header, &auth->user, tmp, username ); + tmp = ntlm_append_string ( &auth->header, &auth->workstation, tmp, + workstation ); + + /* Calculate length */ + len = ( tmp - ( ( void * ) auth ) ); + if ( auth ) { + DBGC ( auth, "NTLM authenticate message:\n" ); + DBGC_HDA ( auth, 0, auth, len ); + } + + return len; +} + +/** + * Calculate NTLM Authenticate message length + * + * @v info Challenge information + * @v domain Domain name, or NULL + * @v username User name, or NULL + * @v workstation Workstation name, or NULL + * @ret len Length of Authenticate message + */ +size_t ntlm_authenticate_len ( struct ntlm_challenge_info *info, + const char *domain, const char *username, + const char *workstation ) { + + return ntlm_authenticate ( info, domain, username, workstation, + NULL, NULL, NULL ); +} diff --git a/src/crypto/sha224.c b/src/crypto/sha224.c new file mode 100644 index 00000000..e54a0abb --- /dev/null +++ b/src/crypto/sha224.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * SHA-224 algorithm + * + */ + +#include +#include +#include +#include + +/** SHA-224 initial digest values */ +static const struct sha256_digest sha224_init_digest = { + .h = { + cpu_to_be32 ( 0xc1059ed8 ), + cpu_to_be32 ( 0x367cd507 ), + cpu_to_be32 ( 0x3070dd17 ), + cpu_to_be32 ( 0xf70e5939 ), + cpu_to_be32 ( 0xffc00b31 ), + cpu_to_be32 ( 0x68581511 ), + cpu_to_be32 ( 0x64f98fa7 ), + cpu_to_be32 ( 0xbefa4fa4 ), + }, +}; + +/** + * Initialise SHA-224 algorithm + * + * @v ctx SHA-224 context + */ +static void sha224_init ( void *ctx ) { + struct sha256_context *context = ctx; + + sha256_family_init ( context, &sha224_init_digest, SHA224_DIGEST_SIZE ); +} + +/** SHA-224 algorithm */ +struct digest_algorithm sha224_algorithm = { + .name = "sha224", + .ctxsize = sizeof ( struct sha256_context ), + .blocksize = sizeof ( union sha256_block ), + .digestsize = SHA224_DIGEST_SIZE, + .init = sha224_init, + .update = sha256_update, + .final = sha256_final, +}; diff --git a/src/crypto/sha384.c b/src/crypto/sha384.c new file mode 100644 index 00000000..f1af6fc6 --- /dev/null +++ b/src/crypto/sha384.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * SHA-384 algorithm + * + */ + +#include +#include +#include +#include + +/** SHA-384 initial digest values */ +static const struct sha512_digest sha384_init_digest = { + .h = { + cpu_to_be64 ( 0xcbbb9d5dc1059ed8ULL ), + cpu_to_be64 ( 0x629a292a367cd507ULL ), + cpu_to_be64 ( 0x9159015a3070dd17ULL ), + cpu_to_be64 ( 0x152fecd8f70e5939ULL ), + cpu_to_be64 ( 0x67332667ffc00b31ULL ), + cpu_to_be64 ( 0x8eb44a8768581511ULL ), + cpu_to_be64 ( 0xdb0c2e0d64f98fa7ULL ), + cpu_to_be64 ( 0x47b5481dbefa4fa4ULL ), + }, +}; + +/** + * Initialise SHA-384 algorithm + * + * @v ctx SHA-384 context + */ +static void sha384_init ( void *ctx ) { + struct sha512_context *context = ctx; + + sha512_family_init ( context, &sha384_init_digest, SHA384_DIGEST_SIZE ); +} + +/** SHA-384 algorithm */ +struct digest_algorithm sha384_algorithm = { + .name = "sha384", + .ctxsize = sizeof ( struct sha512_context ), + .blocksize = sizeof ( union sha512_block ), + .digestsize = SHA384_DIGEST_SIZE, + .init = sha384_init, + .update = sha512_update, + .final = sha512_final, +}; diff --git a/src/crypto/sha512.c b/src/crypto/sha512.c new file mode 100644 index 00000000..e8489501 --- /dev/null +++ b/src/crypto/sha512.c @@ -0,0 +1,292 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * SHA-512 algorithm + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/** SHA-512 variables */ +struct sha512_variables { + /* This layout matches that of struct sha512_digest_data, + * allowing for efficient endianness-conversion, + */ + uint64_t a; + uint64_t b; + uint64_t c; + uint64_t d; + uint64_t e; + uint64_t f; + uint64_t g; + uint64_t h; + uint64_t w[SHA512_ROUNDS]; +} __attribute__ (( packed )); + +/** SHA-512 constants */ +static const uint64_t k[SHA512_ROUNDS] = { + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, + 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, + 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, + 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, + 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, + 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, + 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 
0x2de92c6f592b0275ULL, + 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, + 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, + 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, + 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, + 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, + 0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, + 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, + 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, + 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, + 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, + 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, + 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, + 0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, + 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, + 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, + 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, + 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, + 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL +}; + +/** SHA-512 initial digest values */ +static const struct sha512_digest sha512_init_digest = { + .h = { + cpu_to_be64 ( 0x6a09e667f3bcc908ULL ), + cpu_to_be64 ( 0xbb67ae8584caa73bULL ), + cpu_to_be64 ( 0x3c6ef372fe94f82bULL ), + cpu_to_be64 ( 0xa54ff53a5f1d36f1ULL ), + cpu_to_be64 ( 0x510e527fade682d1ULL ), + cpu_to_be64 ( 0x9b05688c2b3e6c1fULL ), + cpu_to_be64 ( 0x1f83d9abfb41bd6bULL ), + cpu_to_be64 ( 0x5be0cd19137e2179ULL ), + }, +}; + +/** + * Initialise SHA-512 family algorithm + * + * @v context SHA-512 context + * @v init Initial digest values + * @v 
digestsize Digest size + */ +void sha512_family_init ( struct sha512_context *context, + const struct sha512_digest *init, + size_t digestsize ) { + + context->len = 0; + context->digestsize = digestsize; + memcpy ( &context->ddq.dd.digest, init, + sizeof ( context->ddq.dd.digest ) ); +} + +/** + * Initialise SHA-512 algorithm + * + * @v ctx SHA-512 context + */ +static void sha512_init ( void *ctx ) { + struct sha512_context *context = ctx; + + sha512_family_init ( context, &sha512_init_digest, + sizeof ( struct sha512_digest ) ); +} + +/** + * Calculate SHA-512 digest of accumulated data + * + * @v context SHA-512 context + */ +static void sha512_digest ( struct sha512_context *context ) { + union { + union sha512_digest_data_qwords ddq; + struct sha512_variables v; + } u; + uint64_t *a = &u.v.a; + uint64_t *b = &u.v.b; + uint64_t *c = &u.v.c; + uint64_t *d = &u.v.d; + uint64_t *e = &u.v.e; + uint64_t *f = &u.v.f; + uint64_t *g = &u.v.g; + uint64_t *h = &u.v.h; + uint64_t *w = u.v.w; + uint64_t s0; + uint64_t s1; + uint64_t maj; + uint64_t t1; + uint64_t t2; + uint64_t ch; + unsigned int i; + + /* Sanity checks */ + assert ( ( context->len % sizeof ( context->ddq.dd.data ) ) == 0 ); + linker_assert ( &u.ddq.dd.digest.h[0] == a, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[1] == b, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[2] == c, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[3] == d, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[4] == e, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[5] == f, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[6] == g, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.digest.h[7] == h, sha512_bad_layout ); + linker_assert ( &u.ddq.dd.data.qword[0] == w, sha512_bad_layout ); + + DBGC ( context, "SHA512 digesting:\n" ); + DBGC_HDA ( context, 0, &context->ddq.dd.digest, + sizeof ( context->ddq.dd.digest ) ); + DBGC_HDA ( context, context->len, 
&context->ddq.dd.data, + sizeof ( context->ddq.dd.data ) ); + + /* Convert h[0..7] to host-endian, and initialise a, b, c, d, + * e, f, g, h, and w[0..15] + */ + for ( i = 0 ; i < ( sizeof ( u.ddq.qword ) / + sizeof ( u.ddq.qword[0] ) ) ; i++ ) { + be64_to_cpus ( &context->ddq.qword[i] ); + u.ddq.qword[i] = context->ddq.qword[i]; + } + + /* Initialise w[16..79] */ + for ( i = 16 ; i < SHA512_ROUNDS ; i++ ) { + s0 = ( ror64 ( w[i-15], 1 ) ^ ror64 ( w[i-15], 8 ) ^ + ( w[i-15] >> 7 ) ); + s1 = ( ror64 ( w[i-2], 19 ) ^ ror64 ( w[i-2], 61 ) ^ + ( w[i-2] >> 6 ) ); + w[i] = ( w[i-16] + s0 + w[i-7] + s1 ); + } + + /* Main loop */ + for ( i = 0 ; i < SHA512_ROUNDS ; i++ ) { + s0 = ( ror64 ( *a, 28 ) ^ ror64 ( *a, 34 ) ^ ror64 ( *a, 39 ) ); + maj = ( ( *a & *b ) ^ ( *a & *c ) ^ ( *b & *c ) ); + t2 = ( s0 + maj ); + s1 = ( ror64 ( *e, 14 ) ^ ror64 ( *e, 18 ) ^ ror64 ( *e, 41 ) ); + ch = ( ( *e & *f ) ^ ( (~*e) & *g ) ); + t1 = ( *h + s1 + ch + k[i] + w[i] ); + *h = *g; + *g = *f; + *f = *e; + *e = ( *d + t1 ); + *d = *c; + *c = *b; + *b = *a; + *a = ( t1 + t2 ); + DBGC2 ( context, "%2d : %016llx %016llx %016llx %016llx " + "%016llx %016llx %016llx %016llx\n", + i, *a, *b, *c, *d, *e, *f, *g, *h ); + } + + /* Add chunk to hash and convert back to big-endian */ + for ( i = 0 ; i < 8 ; i++ ) { + context->ddq.dd.digest.h[i] = + cpu_to_be64 ( context->ddq.dd.digest.h[i] + + u.ddq.dd.digest.h[i] ); + } + + DBGC ( context, "SHA512 digested:\n" ); + DBGC_HDA ( context, 0, &context->ddq.dd.digest, + sizeof ( context->ddq.dd.digest ) ); +} + +/** + * Accumulate data with SHA-512 algorithm + * + * @v ctx SHA-512 context + * @v data Data + * @v len Length of data + */ +void sha512_update ( void *ctx, const void *data, size_t len ) { + struct sha512_context *context = ctx; + const uint8_t *byte = data; + size_t offset; + + /* Accumulate data a byte at a time, performing the digest + * whenever we fill the data buffer + */ + while ( len-- ) { + offset = ( context->len % sizeof ( 
context->ddq.dd.data ) ); + context->ddq.dd.data.byte[offset] = *(byte++); + context->len++; + if ( ( context->len % sizeof ( context->ddq.dd.data ) ) == 0 ) + sha512_digest ( context ); + } +} + +/** + * Generate SHA-512 digest + * + * @v ctx SHA-512 context + * @v out Output buffer + */ +void sha512_final ( void *ctx, void *out ) { + struct sha512_context *context = ctx; + uint64_t len_bits_hi; + uint64_t len_bits_lo; + uint8_t pad; + + /* Record length before pre-processing */ + len_bits_hi = 0; + len_bits_lo = cpu_to_be64 ( ( ( uint64_t ) context->len ) * 8 ); + + /* Pad with a single "1" bit followed by as many "0" bits as required */ + pad = 0x80; + do { + sha512_update ( ctx, &pad, sizeof ( pad ) ); + pad = 0x00; + } while ( ( context->len % sizeof ( context->ddq.dd.data ) ) != + offsetof ( typeof ( context->ddq.dd.data ), final.len_hi ) ); + + /* Append length (in bits) */ + sha512_update ( ctx, &len_bits_hi, sizeof ( len_bits_hi ) ); + sha512_update ( ctx, &len_bits_lo, sizeof ( len_bits_lo ) ); + assert ( ( context->len % sizeof ( context->ddq.dd.data ) ) == 0 ); + + /* Copy out final digest */ + memcpy ( out, &context->ddq.dd.digest, context->digestsize ); +} + +/** SHA-512 algorithm */ +struct digest_algorithm sha512_algorithm = { + .name = "sha512", + .ctxsize = sizeof ( struct sha512_context ), + .blocksize = sizeof ( union sha512_block ), + .digestsize = sizeof ( struct sha512_digest ), + .init = sha512_init, + .update = sha512_update, + .final = sha512_final, +}; diff --git a/src/crypto/sha512_224.c b/src/crypto/sha512_224.c new file mode 100644 index 00000000..b6728726 --- /dev/null +++ b/src/crypto/sha512_224.c @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * SHA-512/224 algorithm + * + */ + +#include +#include +#include +#include + +/** SHA-512/224 initial digest values */ +static const struct sha512_digest sha512_224_init_digest = { + .h = { + cpu_to_be64 ( 0x8c3d37c819544da2ULL ), + cpu_to_be64 ( 0x73e1996689dcd4d6ULL ), + cpu_to_be64 ( 0x1dfab7ae32ff9c82ULL ), + cpu_to_be64 ( 0x679dd514582f9fcfULL ), + cpu_to_be64 ( 0x0f6d2b697bd44da8ULL ), + cpu_to_be64 ( 0x77e36f7304c48942ULL ), + cpu_to_be64 ( 0x3f9d85a86a1d36c8ULL ), + cpu_to_be64 ( 0x1112e6ad91d692a1ULL ), + }, +}; + +/** + * Initialise SHA-512/224 algorithm + * + * @v ctx SHA-512/224 context + */ +static void sha512_224_init ( void *ctx ) { + struct sha512_context *context = ctx; + + sha512_family_init ( context, &sha512_224_init_digest, + SHA512_224_DIGEST_SIZE ); +} + +/** SHA-512/224 algorithm */ +struct digest_algorithm sha512_224_algorithm = { + .name = "sha512/224", + .ctxsize = sizeof ( struct sha512_context ), + .blocksize = sizeof ( union sha512_block ), + .digestsize = SHA512_224_DIGEST_SIZE, + .init = sha512_224_init, + .update = sha512_update, + .final = sha512_final, +}; diff --git a/src/crypto/sha512_256.c b/src/crypto/sha512_256.c new file mode 100644 index 00000000..8163631e --- /dev/null +++ 
b/src/crypto/sha512_256.c @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * SHA-512/256 algorithm + * + */ + +#include +#include +#include +#include + +/** SHA-512/256 initial digest values */ +static const struct sha512_digest sha512_256_init_digest = { + .h = { + cpu_to_be64 ( 0x22312194fc2bf72cULL ), + cpu_to_be64 ( 0x9f555fa3c84c64c2ULL ), + cpu_to_be64 ( 0x2393b86b6f53b151ULL ), + cpu_to_be64 ( 0x963877195940eabdULL ), + cpu_to_be64 ( 0x96283ee2a88effe3ULL ), + cpu_to_be64 ( 0xbe5e1e2553863992ULL ), + cpu_to_be64 ( 0x2b0199fc2c85b8aaULL ), + cpu_to_be64 ( 0x0eb72ddc81c52ca2ULL ), + }, +}; + +/** + * Initialise SHA-512/256 algorithm + * + * @v ctx SHA-512/256 context + */ +static void sha512_256_init ( void *ctx ) { + struct sha512_context *context = ctx; + + sha512_family_init ( context, &sha512_256_init_digest, + SHA512_256_DIGEST_SIZE ); +} + +/** SHA-512/256 algorithm */ +struct digest_algorithm sha512_256_algorithm = { + .name = "sha512/256", + .ctxsize = sizeof ( struct sha512_context ), + .blocksize = sizeof ( union sha512_block ), + .digestsize = SHA512_256_DIGEST_SIZE, + .init = sha512_256_init, + .update = sha512_update, + .final = sha512_final, +}; diff --git a/src/drivers/bitbash/mii_bit.c b/src/drivers/bitbash/mii_bit.c new file mode 100644 index 00000000..5f0ec04a --- /dev/null +++ b/src/drivers/bitbash/mii_bit.c @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2018 Sylvie Barlow . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** + * Transfer bits over MII bit-bashing interface + * + * @v basher Bit basher + * @v mask Mask + * @v write Data to write + * @ret read Data read + */ +static uint32_t mii_bit_xfer ( struct bit_basher *basher, + uint32_t mask, uint32_t write ) { + uint32_t read = 0; + int bit; + + for ( ; mask ; mask >>= 1 ) { + + /* Delay */ + udelay ( 1 ); + + /* Write bit to basher */ + write_bit ( basher, MII_BIT_MDIO, ( write & mask ) ); + + /* Read bit from basher */ + bit = read_bit ( basher, MII_BIT_MDIO ); + read <<= 1; + read |= ( bit & 1 ); + + /* Set clock high */ + write_bit ( basher, MII_BIT_MDC, 1 ); + + /* Delay */ + udelay ( 1 ); + + /* Set clock low */ + write_bit ( basher, MII_BIT_MDC, 0 ); + } + return read; +} + +/** + * Read or write via MII bit-bashing interface + * + * @v basher Bit basher + * @v phy PHY address + * @v reg Register address + * @v data Data to write + * @v cmd Command + * @ret data Data read + */ +static unsigned int mii_bit_rw ( struct bit_basher *basher, + unsigned int phy, unsigned int reg, + unsigned int data, unsigned int cmd ) { + + /* Initiate drive for write */ + write_bit ( basher, MII_BIT_DRIVE, 1 ); + + /* Write start */ + mii_bit_xfer ( basher, MII_BIT_START_MASK, MII_BIT_START ); + + /* Write command */ + mii_bit_xfer ( basher, MII_BIT_CMD_MASK, cmd ); + + /* Write PHY address */ + mii_bit_xfer ( basher, MII_BIT_PHY_MASK, phy ); + + /* Write register address */ + mii_bit_xfer ( basher, MII_BIT_REG_MASK, 
reg ); + + /* Switch drive to read if applicable */ + write_bit ( basher, MII_BIT_DRIVE, ( cmd & MII_BIT_CMD_RW ) ); + + /* Allow space for turnaround */ + mii_bit_xfer ( basher, MII_BIT_SWITCH_MASK, MII_BIT_SWITCH ); + + /* Read or write data */ + data = mii_bit_xfer (basher, MII_BIT_DATA_MASK, data ); + + /* Initiate drive for read */ + write_bit ( basher, MII_BIT_DRIVE, 0 ); + + return data; +} + +/** + * Read from MII register + * + * @v mdio MII interface + * @v phy PHY address + * @v reg Register address + * @ret data Data read, or negative error + */ +static int mii_bit_read ( struct mii_interface *mdio, unsigned int phy, + unsigned int reg ) { + struct mii_bit_basher *miibit = + container_of ( mdio, struct mii_bit_basher, mdio ); + struct bit_basher *basher = &miibit->basher; + + return mii_bit_rw ( basher, phy, reg, 0, MII_BIT_CMD_READ ); +} + +/** + * Write to MII register + * + * @v mdio MII interface + * @v phy PHY address + * @v reg Register address + * @v data Data to write + * @ret rc Return status code + */ +static int mii_bit_write ( struct mii_interface *mdio, unsigned int phy, + unsigned int reg, unsigned int data ) { + struct mii_bit_basher *miibit = + container_of ( mdio, struct mii_bit_basher, mdio ); + struct bit_basher *basher = &miibit->basher; + + mii_bit_rw ( basher, phy, reg, data, MII_BIT_CMD_WRITE ); + return 0; +} + +/** MII bit basher operations */ +static struct mii_operations mii_bit_op = { + .read = mii_bit_read, + .write = mii_bit_write, +}; + +/** + * Initialise bit-bashing interface + * + * @v miibit MII bit basher + */ +void init_mii_bit_basher ( struct mii_bit_basher *miibit ) { + mdio_init ( &miibit->mdio, &mii_bit_op ); +}; diff --git a/src/drivers/bus/cdc.c b/src/drivers/bus/cdc.c new file mode 100644 index 00000000..373a0307 --- /dev/null +++ b/src/drivers/bus/cdc.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * USB Communications Device Class (CDC) + * + */ + +/** + * Locate CDC union functional descriptor + * + * @v config Configuration descriptor + * @v interface Interface descriptor + * @ret desc Union functional descriptor, or NULL if not found + */ +struct cdc_union_descriptor * +cdc_union_descriptor ( struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface ) { + struct cdc_union_descriptor *desc; + + for_each_interface_descriptor ( desc, config, interface ) { + if ( ( desc->header.type == USB_CS_INTERFACE_DESCRIPTOR ) && + ( desc->subtype == CDC_SUBTYPE_UNION ) ) + return desc; + } + return NULL; +} diff --git a/src/drivers/bus/pciea.c b/src/drivers/bus/pciea.c new file mode 100644 index 00000000..aaa69cf4 --- /dev/null +++ b/src/drivers/bus/pciea.c @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2016 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * PCI Enhanced Allocation + * + */ + +/** + * Locate PCI Enhanced Allocation BAR equivalent entry + * + * @v pci PCI device + * @v bei BAR equivalent indicator + * @ret offset PCI Enhanced Allocation entry offset, or negative error + */ +static int pciea_offset ( struct pci_device *pci, unsigned int bei ) { + uint8_t entries; + uint32_t desc; + unsigned int i; + int offset; + + /* Locate Enhanced Allocation capability */ + offset = pci_find_capability ( pci, PCI_CAP_ID_EA ); + if ( offset < 0 ) + return offset; + + /* Get number of entries */ + pci_read_config_byte ( pci, ( offset + PCIEA_ENTRIES ), &entries ); + entries &= PCIEA_ENTRIES_MASK; + + /* Locate first entry */ + offset += PCIEA_FIRST; + + /* Search for a matching entry */ + for ( i = 0 ; i < entries ; i++ ) { + + /* Read entry descriptor */ + pci_read_config_dword ( pci, offset, &desc ); + + /* Check for a matching entry */ + if ( ( desc & PCIEA_DESC_ENABLED ) && 
+ ( bei == PCIEA_DESC_BEI ( desc ) ) ) + return offset; + + /* Move to next entry */ + offset += ( ( PCIEA_DESC_SIZE ( desc ) + 1 ) << 2 ); + } + + return -ENOENT; +} + +/** + * Read PCI Enhanced Allocation BAR equivalent value + * + * @v pci PCI device + * @v bei BAR equivalent indicator + * @v low_offset Offset to low dword of value + * @ret value BAR equivalent value + */ +static unsigned long pciea_bar_value ( struct pci_device *pci, unsigned int bei, + unsigned int low_offset ) { + uint32_t low; + uint32_t high; + int offset; + + /* Locate Enhanced Allocation offset for this BEI */ + offset = pciea_offset ( pci, bei ); + if ( offset < 0 ) + return 0; + + /* Read BAR equivalent */ + offset += low_offset; + pci_read_config_dword ( pci, offset, &low ); + if ( low & PCIEA_LOW_ATTR_64BIT ) { + offset += PCIEA_LOW_HIGH; + pci_read_config_dword ( pci, offset, &high ); + if ( high ) { + if ( sizeof ( unsigned long ) > sizeof ( uint32_t ) ) { + return ( ( ( uint64_t ) high << 32 ) | low ); + } else { + DBGC ( pci, PCI_FMT " unhandled 64-bit EA BAR " + "%08x%08x\n", + PCI_ARGS ( pci ), high, low ); + return 0; + } + } + } + return low; +} + +/** + * Find the start of a PCI Enhanced Allocation BAR equivalent + * + * @v pci PCI device + * @v bei BAR equivalent indicator + * @ret start BAR start address + * + * If the address exceeds the size of an unsigned long (i.e. if a + * 64-bit BAR has a non-zero high dword on a 32-bit machine), the + * return value will be zero. 
+ */ +unsigned long pciea_bar_start ( struct pci_device *pci, unsigned int bei ) { + unsigned long base; + + base = pciea_bar_value ( pci, bei, PCIEA_LOW_BASE ); + return ( base & ~PCIEA_LOW_ATTR_MASK ); +} + +/** + * Find the size of a PCI Enhanced Allocation BAR equivalent + * + * @v pci PCI device + * @v bei BAR equivalent indicator + * @ret size BAR size + */ +unsigned long pciea_bar_size ( struct pci_device *pci, unsigned int bei ) { + unsigned long limit; + + limit = pciea_bar_value ( pci, bei, PCIEA_LOW_LIMIT ); + return ( limit ? ( ( limit | PCIEA_LOW_ATTR_MASK ) + 1 ) : 0 ); +} diff --git a/src/drivers/bus/pcimsix.c b/src/drivers/bus/pcimsix.c new file mode 100644 index 00000000..eb0450d9 --- /dev/null +++ b/src/drivers/bus/pcimsix.c @@ -0,0 +1,251 @@ +/* + * Copyright (C) 2019 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** @file + * + * PCI MSI-X interrupts + * + */ + +/** + * Get MSI-X descriptor name (for debugging) + * + * @v cfg Configuration space offset + * @ret name Descriptor name + */ +static const char * pci_msix_name ( unsigned int cfg ) { + + switch ( cfg ) { + case PCI_MSIX_DESC_TABLE: return "table"; + case PCI_MSIX_DESC_PBA: return "PBA"; + default: return ""; + } +} + +/** + * Map MSI-X BAR portion + * + * @v pci PCI device + * @v msix MSI-X capability + * @v cfg Configuration space offset + * @ret io I/O address + */ +static void * pci_msix_ioremap ( struct pci_device *pci, struct pci_msix *msix, + unsigned int cfg ) { + uint32_t desc; + unsigned int bar; + unsigned long start; + unsigned long offset; + unsigned long base; + void *io; + + /* Read descriptor */ + pci_read_config_dword ( pci, ( msix->cap + cfg ), &desc ); + + /* Get BAR */ + bar = PCI_MSIX_DESC_BIR ( desc ); + offset = PCI_MSIX_DESC_OFFSET ( desc ); + start = pci_bar_start ( pci, PCI_BASE_ADDRESS ( bar ) ); + if ( ! start ) { + DBGC ( msix, "MSI-X %p %s could not find BAR%d\n", + msix, pci_msix_name ( cfg ), bar ); + return NULL; + } + base = ( start + offset ); + DBGC ( msix, "MSI-X %p %s at %#08lx (BAR%d+%#lx)\n", + msix, pci_msix_name ( cfg ), base, bar, offset ); + + /* Map BAR portion */ + io = pci_ioremap ( pci, ( start + offset ), PCI_MSIX_LEN ); + if ( ! io ) { + DBGC ( msix, "MSI-X %p %s could not map %#08lx\n", + msix, pci_msix_name ( cfg ), base ); + return NULL; + } + + return io; +} + +/** + * Enable MSI-X interrupts + * + * @v pci PCI device + * @v msix MSI-X capability + * @ret rc Return status code + */ +int pci_msix_enable ( struct pci_device *pci, struct pci_msix *msix ) { + uint16_t ctrl; + int rc; + + /* Locate capability */ + msix->cap = pci_find_capability ( pci, PCI_CAP_ID_MSIX ); + if ( ! 
msix->cap ) { + DBGC ( msix, "MSI-X %p found no MSI-X capability in " + PCI_FMT "\n", msix, PCI_ARGS ( pci ) ); + rc = -ENOENT; + goto err_cap; + } + + /* Extract interrupt count */ + pci_read_config_word ( pci, ( msix->cap + PCI_MSIX_CTRL ), &ctrl ); + msix->count = ( PCI_MSIX_CTRL_SIZE ( ctrl ) + 1 ); + DBGC ( msix, "MSI-X %p has %d vectors for " PCI_FMT "\n", + msix, msix->count, PCI_ARGS ( pci ) ); + + /* Map MSI-X table */ + msix->table = pci_msix_ioremap ( pci, msix, PCI_MSIX_DESC_TABLE ); + if ( ! msix->table ) { + rc = -ENOENT; + goto err_table; + } + + /* Map pending bit array */ + msix->pba = pci_msix_ioremap ( pci, msix, PCI_MSIX_DESC_PBA ); + if ( ! msix->pba ) { + rc = -ENOENT; + goto err_pba; + } + + /* Enable MSI-X */ + ctrl &= ~PCI_MSIX_CTRL_MASK; + ctrl |= PCI_MSIX_CTRL_ENABLE; + pci_write_config_word ( pci, ( msix->cap + PCI_MSIX_CTRL ), ctrl ); + + return 0; + + iounmap ( msix->pba ); + err_pba: + iounmap ( msix->table ); + err_table: + err_cap: + return rc; +} + +/** + * Disable MSI-X interrupts + * + * @v pci PCI device + * @v msix MSI-X capability + */ +void pci_msix_disable ( struct pci_device *pci, struct pci_msix *msix ) { + uint16_t ctrl; + + /* Disable MSI-X */ + pci_read_config_word ( pci, ( msix->cap + PCI_MSIX_CTRL ), &ctrl ); + ctrl &= ~PCI_MSIX_CTRL_ENABLE; + pci_write_config_word ( pci, ( msix->cap + PCI_MSIX_CTRL ), ctrl ); + + /* Unmap pending bit array */ + iounmap ( msix->pba ); + + /* Unmap MSI-X table */ + iounmap ( msix->table ); +} + +/** + * Map MSI-X interrupt vector + * + * @v msix MSI-X capability + * @v vector MSI-X vector + * @v address Message address + * @v data Message data + */ +void pci_msix_map ( struct pci_msix *msix, unsigned int vector, + physaddr_t address, uint32_t data ) { + void *base; + + /* Sanity check */ + assert ( vector < msix->count ); + + /* Map interrupt vector */ + base = ( msix->table + PCI_MSIX_VECTOR ( vector ) ); + writel ( ( address & 0xffffffffUL ), ( base + PCI_MSIX_ADDRESS_LO ) ); + if ( 
sizeof ( address ) > sizeof ( uint32_t ) ) { + writel ( ( ( ( uint64_t ) address ) >> 32 ), + ( base + PCI_MSIX_ADDRESS_HI ) ); + } else { + writel ( 0, ( base + PCI_MSIX_ADDRESS_HI ) ); + } + writel ( data, ( base + PCI_MSIX_DATA ) ); +} + +/** + * Control MSI-X interrupt vector + * + * @v msix MSI-X capability + * @v vector MSI-X vector + * @v mask Control mask + */ +void pci_msix_control ( struct pci_msix *msix, unsigned int vector, + uint32_t mask ) { + void *base; + uint32_t ctrl; + + /* Mask/unmask interrupt vector */ + base = ( msix->table + PCI_MSIX_VECTOR ( vector ) ); + ctrl = readl ( base + PCI_MSIX_CONTROL ); + ctrl &= ~PCI_MSIX_CONTROL_MASK; + ctrl |= mask; + writel ( ctrl, ( base + PCI_MSIX_CONTROL ) ); +} + +/** + * Dump MSI-X interrupt state (for debugging) + * + * @v msix MSI-X capability + * @v vector MSI-X vector + */ +void pci_msix_dump ( struct pci_msix *msix, unsigned int vector ) { + void *base; + uint32_t address_hi; + uint32_t address_lo; + physaddr_t address; + uint32_t data; + uint32_t ctrl; + uint32_t pba; + + /* Do nothing in non-debug builds */ + if ( ! DBG_LOG ) + return; + + /* Mask/unmask interrupt vector */ + base = ( msix->table + PCI_MSIX_VECTOR ( vector ) ); + address_hi = readl ( base + PCI_MSIX_ADDRESS_HI ); + address_lo = readl ( base + PCI_MSIX_ADDRESS_LO ); + data = readl ( base + PCI_MSIX_DATA ); + ctrl = readl ( base + PCI_MSIX_CONTROL ); + pba = readl ( msix->pba ); + address = ( ( ( ( uint64_t ) address_hi ) << 32 ) | address_lo ); + DBGC ( msix, "MSI-X %p vector %d %#08x => %#08lx%s%s\n", + msix, vector, data, address, + ( ( ctrl & PCI_MSIX_CONTROL_MASK ) ? " (masked)" : "" ), + ( ( pba & ( 1 << vector ) ) ? " (pending)" : "" ) ); +} diff --git a/src/drivers/bus/usb.c b/src/drivers/bus/usb.c new file mode 100644 index 00000000..70a86c91 --- /dev/null +++ b/src/drivers/bus/usb.c @@ -0,0 +1,2342 @@ +/* + * Copyright (C) 2014 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Universal Serial Bus (USB) + * + */ + +/** List of USB buses */ +struct list_head usb_buses = LIST_HEAD_INIT ( usb_buses ); + +/** List of changed ports */ +static struct list_head usb_changed = LIST_HEAD_INIT ( usb_changed ); + +/** List of halted endpoints */ +static struct list_head usb_halted = LIST_HEAD_INIT ( usb_halted ); + +/****************************************************************************** + * + * Utility functions + * + ****************************************************************************** + */ + +/** + * Get USB speed name (for debugging) + * + * @v speed Speed + * @ret name Speed name + */ +static inline const char * usb_speed_name ( unsigned int speed ) { + static const char *exponents[4] = { "", "k", "M", "G" }; + static char buf[ 10 /* "xxxxxXbps" + NUL */ ]; + unsigned int mantissa; + unsigned int exponent; + + /* Extract 
mantissa and exponent */ + mantissa = USB_SPEED_MANTISSA ( speed ); + exponent = USB_SPEED_EXPONENT ( speed ); + + /* Name speed */ + switch ( speed ) { + case USB_SPEED_NONE: return "DETACHED"; + case USB_SPEED_LOW: return "low"; + case USB_SPEED_FULL: return "full"; + case USB_SPEED_HIGH: return "high"; + case USB_SPEED_SUPER: return "super"; + default: + snprintf ( buf, sizeof ( buf ), "%d%sbps", + mantissa, exponents[exponent] ); + return buf; + } +} + +/** + * Transcribe USB BCD-coded value (for debugging) + * + * @v bcd BCD-coded value + * @ret string Transcribed value + */ +static inline const char * usb_bcd ( uint16_t bcd ) { + static char buf[ 6 /* "xx.xx" + NUL */ ]; + uint8_t high = ( bcd >> 8 ); + uint8_t low = ( bcd >> 0 ); + + snprintf ( buf, sizeof ( buf ), "%x.%02x", high, low ); + return buf; +} + +/****************************************************************************** + * + * USB descriptors + * + ****************************************************************************** + */ + +/** + * Locate USB interface association descriptor + * + * @v config Configuraton descriptor + * @v first First interface number + * @ret desc Interface association descriptor, or NULL if not found + */ +static struct usb_interface_association_descriptor * +usb_interface_association_descriptor ( struct usb_configuration_descriptor + *config, + unsigned int first ) { + struct usb_interface_association_descriptor *desc; + + /* Find a matching interface association descriptor */ + for_each_config_descriptor ( desc, config ) { + if ( ( desc->header.type == + USB_INTERFACE_ASSOCIATION_DESCRIPTOR ) && + ( desc->first == first ) ) + return desc; + } + return NULL; +} + +/** + * Locate USB interface descriptor + * + * @v config Configuraton descriptor + * @v interface Interface number + * @v alternate Alternate setting + * @ret desc Interface descriptor, or NULL if not found + */ +struct usb_interface_descriptor * +usb_interface_descriptor ( struct 
usb_configuration_descriptor *config, + unsigned int interface, unsigned int alternate ) { + struct usb_interface_descriptor *desc; + + /* Find a matching interface descriptor */ + for_each_config_descriptor ( desc, config ) { + if ( ( desc->header.type == USB_INTERFACE_DESCRIPTOR ) && + ( desc->interface == interface ) && + ( desc->alternate == alternate ) ) + return desc; + } + return NULL; +} + +/** + * Locate USB endpoint descriptor + * + * @v config Configuration descriptor + * @v interface Interface descriptor + * @v type Endpoint (internal) type + * @v index Endpoint index + * @ret desc Descriptor, or NULL if not found + */ +struct usb_endpoint_descriptor * +usb_endpoint_descriptor ( struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface, + unsigned int type, unsigned int index ) { + struct usb_endpoint_descriptor *desc; + unsigned int attributes = ( type & USB_ENDPOINT_ATTR_TYPE_MASK ); + unsigned int direction = ( type & USB_DIR_IN ); + + /* Find a matching endpoint descriptor */ + for_each_interface_descriptor ( desc, config, interface ) { + if ( ( desc->header.type == USB_ENDPOINT_DESCRIPTOR ) && + ( ( desc->attributes & + USB_ENDPOINT_ATTR_TYPE_MASK ) == attributes ) && + ( ( desc->endpoint & USB_DIR_IN ) == direction ) && + ( index-- == 0 ) ) + return desc; + } + return NULL; +} + +/** + * Locate USB endpoint companion descriptor + * + * @v config Configuration descriptor + * @v desc Endpoint descriptor + * @ret descx Companion descriptor, or NULL if not found + */ +struct usb_endpoint_companion_descriptor * +usb_endpoint_companion_descriptor ( struct usb_configuration_descriptor *config, + struct usb_endpoint_descriptor *desc ) { + struct usb_endpoint_companion_descriptor *descx; + + /* Get companion descriptor, if present */ + descx = container_of ( usb_next_descriptor ( &desc->header ), + struct usb_endpoint_companion_descriptor, + header ); + return ( ( usb_is_within_config ( config, &descx->header ) && + 
descx->header.type == USB_ENDPOINT_COMPANION_DESCRIPTOR ) + ? descx : NULL ); +} + +/****************************************************************************** + * + * USB endpoint + * + ****************************************************************************** + */ + +/** + * Get USB endpoint name (for debugging) + * + * @v ep USB endpoint + * @ret name Endpoint name + */ +const char * usb_endpoint_name ( struct usb_endpoint *ep ) { + static char buf[ 9 /* "EPxx OUT" + NUL */ ]; + unsigned int address = ep->address; + + snprintf ( buf, sizeof ( buf ), "EP%d%s", + ( address & USB_ENDPOINT_MAX ), + ( address ? + ( ( address & USB_ENDPOINT_IN ) ? " IN" : " OUT" ) : "" )); + return buf; +} + +/** + * Describe USB endpoint from device configuration + * + * @v ep USB endpoint + * @v config Configuration descriptor + * @v interface Interface descriptor + * @v type Endpoint (internal) type + * @v index Endpoint index + * @ret rc Return status code + */ +int usb_endpoint_described ( struct usb_endpoint *ep, + struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface, + unsigned int type, unsigned int index ) { + struct usb_device *usb = ep->usb; + struct usb_endpoint_descriptor *desc; + struct usb_endpoint_companion_descriptor *descx; + unsigned int sizes; + unsigned int burst; + unsigned int interval; + size_t mtu; + + /* Locate endpoint descriptor */ + desc = usb_endpoint_descriptor ( config, interface, type, index ); + if ( ! desc ) + return -ENOENT; + + /* Locate companion descriptor, if any */ + descx = usb_endpoint_companion_descriptor ( config, desc ); + + /* Calculate MTU and burst size */ + sizes = le16_to_cpu ( desc->sizes ); + mtu = USB_ENDPOINT_MTU ( sizes ); + burst = ( descx ? 
descx->burst : USB_ENDPOINT_BURST ( sizes ) ); + + /* Calculate interval */ + if ( ( type & USB_ENDPOINT_ATTR_TYPE_MASK ) == + USB_ENDPOINT_ATTR_INTERRUPT ) { + if ( usb->speed >= USB_SPEED_HIGH ) { + /* 2^(desc->interval-1) is a microframe count */ + interval = ( 1 << ( desc->interval - 1 ) ); + } else { + /* desc->interval is a (whole) frame count */ + interval = ( desc->interval << 3 ); + } + } else { + /* desc->interval is a microframe count */ + interval = desc->interval; + } + + /* Describe endpoint */ + usb_endpoint_describe ( ep, desc->endpoint, desc->attributes, + mtu, burst, interval ); + return 0; +} + +/** + * Open USB endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +int usb_endpoint_open ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + unsigned int idx = USB_ENDPOINT_IDX ( ep->address ); + int rc; + + /* Populate host controller operations */ + ep->host = &usb->port->hub->bus->op->endpoint; + + /* Add to endpoint list */ + if ( usb->ep[idx] != NULL ) { + DBGC ( usb, "USB %s %s is already open\n", + usb->name, usb_endpoint_name ( ep ) ); + rc = -EALREADY; + goto err_already; + } + usb->ep[idx] = ep; + INIT_LIST_HEAD ( &ep->halted ); + + /* Open endpoint */ + if ( ( rc = ep->host->open ( ep ) ) != 0 ) { + DBGC ( usb, "USB %s %s could not open: %s\n", usb->name, + usb_endpoint_name ( ep ), strerror ( rc ) ); + goto err_open; + } + ep->open = 1; + + DBGC2 ( usb, "USB %s %s opened with MTU %zd, burst %d, interval %d\n", + usb->name, usb_endpoint_name ( ep ), ep->mtu, ep->burst, + ep->interval ); + return 0; + + ep->open = 0; + ep->host->close ( ep ); + err_open: + usb->ep[idx] = NULL; + err_already: + if ( ep->max ) + usb_flush ( ep ); + return rc; +} + +/** + * Clear transaction translator (if applicable) + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int usb_endpoint_clear_tt ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + struct usb_port *tt; + int rc; + + /* Do 
nothing if this is a periodic endpoint */ + if ( ep->attributes & USB_ENDPOINT_ATTR_PERIODIC ) + return 0; + + /* Do nothing if this endpoint is not behind a transaction translator */ + tt = usb_transaction_translator ( usb ); + if ( ! tt ) + return 0; + + /* Clear transaction translator buffer */ + if ( ( rc = tt->hub->driver->clear_tt ( tt->hub, tt, ep ) ) != 0 ) { + DBGC ( usb, "USB %s %s could not clear transaction translator: " + "%s\n", usb->name, usb_endpoint_name ( ep ), + strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Clear endpoint halt (if applicable) + * + * @v ep USB endpoint + * @ret rc Return status code + */ +int usb_endpoint_clear_halt ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + unsigned int type; + int rc; + + /* Clear transaction translator, if applicable */ + if ( ( rc = usb_endpoint_clear_tt ( ep ) ) != 0 ) + return rc; + + /* Clear endpoint halt (if applicable) */ + type = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + if ( ( type != USB_ENDPOINT_ATTR_CONTROL ) && + ( ( rc = usb_clear_feature ( usb, USB_RECIP_ENDPOINT, + USB_ENDPOINT_HALT, + ep->address ) ) != 0 ) ) { + DBGC ( usb, "USB %s %s could not clear endpoint halt: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Close USB endpoint + * + * @v ep USB endpoint + */ +void usb_endpoint_close ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + unsigned int idx = USB_ENDPOINT_IDX ( ep->address ); + + /* Sanity checks */ + assert ( usb->ep[idx] == ep ); + + /* Close endpoint */ + ep->open = 0; + ep->host->close ( ep ); + assert ( ep->fill == 0 ); + + /* Remove from endpoint list */ + usb->ep[idx] = NULL; + list_del ( &ep->halted ); + + /* Discard any recycled buffers, if applicable */ + if ( ep->max ) + usb_flush ( ep ); + + /* Clear transaction translator, if applicable */ + usb_endpoint_clear_tt ( ep ); +} + +/** + * Reset USB endpoint + * + * @v ep USB endpoint + * 
@ret rc Return status code + */ +static int usb_endpoint_reset ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + int rc; + + /* Sanity check */ + assert ( ! list_empty ( &ep->halted ) ); + + /* Clear device halt, if applicable */ + if ( ( rc = usb_endpoint_clear_halt ( ep ) ) != 0 ) + return rc; + + /* Reset endpoint */ + if ( ( rc = ep->host->reset ( ep ) ) != 0 ) { + DBGC ( usb, "USB %s %s could not reset: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + return rc; + } + + /* Remove from list of halted endpoints */ + list_del ( &ep->halted ); + INIT_LIST_HEAD ( &ep->halted ); + + DBGC ( usb, "USB %s %s reset\n", + usb->name, usb_endpoint_name ( ep ) ); + return 0; +} + +/** + * Update endpoint MTU + * + * @v ep USB endpoint + * @v mtu New MTU + * @ret rc Return status code + */ +static int usb_endpoint_mtu ( struct usb_endpoint *ep, size_t mtu ) { + struct usb_device *usb = ep->usb; + int rc; + + /* Update MTU */ + ep->mtu = mtu; + if ( ( rc = ep->host->mtu ( ep ) ) != 0 ) { + DBGC ( usb, "USB %s %s could not update MTU: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Enqueue USB message transfer + * + * @v ep USB endpoint + * @v request Request + * @v value Value parameter + * @v index Index parameter + * @v iobuf I/O buffer + * @ret rc Return status code + * + * The I/O buffer must have sufficient headroom to contain a setup + * packet. + */ +int usb_message ( struct usb_endpoint *ep, unsigned int request, + unsigned int value, unsigned int index, + struct io_buffer *iobuf ) { + struct usb_device *usb = ep->usb; + struct usb_port *port = usb->port; + struct usb_setup_packet *packet; + size_t len = iob_len ( iobuf ); + int rc; + + /* Sanity check */ + assert ( iob_headroom ( iobuf ) >= sizeof ( *packet ) ); + + /* Fail immediately if device has been unplugged */ + if ( port->disconnected ) + return -ENODEV; + + /* Reset endpoint if required */ + if ( ( ! 
list_empty ( &ep->halted ) ) && + ( ( rc = usb_endpoint_reset ( ep ) ) != 0 ) ) + return rc; + + /* Zero input data buffer (if applicable) */ + if ( request & USB_DIR_IN ) + memset ( iobuf->data, 0, len ); + + /* Construct setup packet */ + packet = iob_push ( iobuf, sizeof ( *packet ) ); + packet->request = cpu_to_le16 ( request ); + packet->value = cpu_to_le16 ( value ); + packet->index = cpu_to_le16 ( index ); + packet->len = cpu_to_le16 ( len ); + + /* Enqueue message transfer */ + if ( ( rc = ep->host->message ( ep, iobuf ) ) != 0 ) { + DBGC ( usb, "USB %s %s could not enqueue message transfer: " + "%s\n", usb->name, usb_endpoint_name ( ep ), + strerror ( rc ) ); + return rc; + } + + /* Increment fill level */ + ep->fill++; + + return 0; +} + +/** + * Enqueue USB stream transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v terminate Terminate using a short packet + * @ret rc Return status code + */ +int usb_stream ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int terminate ) { + struct usb_device *usb = ep->usb; + struct usb_port *port = usb->port; + int zlp; + int rc; + + /* Fail immediately if device has been unplugged */ + if ( port->disconnected ) + return -ENODEV; + + /* Reset endpoint if required */ + if ( ( ! 
list_empty ( &ep->halted ) ) && + ( ( rc = usb_endpoint_reset ( ep ) ) != 0 ) ) + return rc; + + /* Append a zero-length packet if necessary */ + zlp = terminate; + if ( iob_len ( iobuf ) & ( ep->mtu - 1 ) ) + zlp = 0; + + /* Enqueue stream transfer */ + if ( ( rc = ep->host->stream ( ep, iobuf, zlp ) ) != 0 ) { + DBGC ( usb, "USB %s %s could not enqueue stream transfer: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + return rc; + } + + /* Increment fill level */ + ep->fill++; + + return 0; +} + +/** + * Complete transfer (possibly with error) + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +void usb_complete_err ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct usb_device *usb = ep->usb; + + /* Decrement fill level */ + assert ( ep->fill > 0 ); + ep->fill--; + + /* Schedule reset, if applicable */ + if ( ( rc != 0 ) && ep->open ) { + DBGC ( usb, "USB %s %s completion failed: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + list_del ( &ep->halted ); + list_add_tail ( &ep->halted, &usb_halted ); + } + + /* Report completion */ + ep->driver->complete ( ep, iobuf, rc ); +} + +/****************************************************************************** + * + * Endpoint refilling + * + ****************************************************************************** + */ + +/** + * Prefill endpoint recycled buffer list + * + * @v ep USB endpoint + * @ret rc Return status code + */ +int usb_prefill ( struct usb_endpoint *ep ) { + struct io_buffer *iobuf; + size_t reserve = ep->reserve; + size_t len = ( ep->len ? ep->len : ep->mtu ); + unsigned int fill; + int rc; + + /* Sanity checks */ + assert ( ep->fill == 0 ); + assert ( ep->max > 0 ); + assert ( list_empty ( &ep->recycled ) ); + + /* Fill recycled buffer list */ + for ( fill = 0 ; fill < ep->max ; fill++ ) { + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( reserve + len ); + if ( ! 
iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + iob_reserve ( iobuf, reserve ); + + /* Add to recycled buffer list */ + list_add_tail ( &iobuf->list, &ep->recycled ); + } + + return 0; + + err_alloc: + usb_flush ( ep ); + return rc; +} + +/** + * Refill endpoint up to specified limit + * + * @v ep USB endpoint + * @v max Fill limit + * @ret rc Return status code + */ +int usb_refill_limit ( struct usb_endpoint *ep, unsigned int max ) { + struct io_buffer *iobuf; + size_t reserve = ep->reserve; + size_t len = ( ep->len ? ep->len : ep->mtu ); + int rc; + + /* Sanity checks */ + assert ( ep->open ); + assert ( ep->max > 0 ); + + /* Refill endpoint */ + if ( max > ep->max ) + max = ep->max; + while ( ep->fill < max ) { + + /* Get or allocate buffer */ + if ( list_empty ( &ep->recycled ) ) { + /* Recycled buffer list is empty; allocate new buffer */ + iobuf = alloc_iob ( reserve + len ); + if ( ! iobuf ) + return -ENOMEM; + iob_reserve ( iobuf, reserve ); + } else { + /* Get buffer from recycled buffer list */ + iobuf = list_first_entry ( &ep->recycled, + struct io_buffer, list ); + assert ( iobuf != NULL ); + list_del ( &iobuf->list ); + } + + /* Reset buffer to maximum size */ + assert ( iob_len ( iobuf ) <= len ); + iob_put ( iobuf, ( len - iob_len ( iobuf ) ) ); + + /* Enqueue buffer */ + if ( ( rc = usb_stream ( ep, iobuf, 0 ) ) != 0 ) { + list_add ( &iobuf->list, &ep->recycled ); + return rc; + } + } + + return 0; +} + +/** + * Refill endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +int usb_refill ( struct usb_endpoint *ep ) { + return usb_refill_limit ( ep, ep->max ); +} + +/** + * Discard endpoint recycled buffer list + * + * @v ep USB endpoint + */ +void usb_flush ( struct usb_endpoint *ep ) { + struct io_buffer *iobuf; + struct io_buffer *tmp; + + /* Sanity checks */ + assert ( ! 
ep->open ); + assert ( ep->max > 0 ); + + /* Free all I/O buffers */ + list_for_each_entry_safe ( iobuf, tmp, &ep->recycled, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } +} + +/****************************************************************************** + * + * Control endpoint + * + ****************************************************************************** + */ + +/** USB control transfer pseudo-header */ +struct usb_control_pseudo_header { + /** Completion status */ + int rc; +}; + +/** + * Complete USB control transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void usb_control_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usb_device *usb = ep->usb; + struct usb_control_pseudo_header *pshdr; + + /* Record completion status in buffer */ + pshdr = iob_push ( iobuf, sizeof ( *pshdr ) ); + pshdr->rc = rc; + + /* Add to list of completed I/O buffers */ + list_add_tail ( &iobuf->list, &usb->complete ); +} + +/** USB control endpoint driver operations */ +static struct usb_endpoint_driver_operations usb_control_operations = { + .complete = usb_control_complete, +}; + +/** + * Issue USB control transaction + * + * @v usb USB device + * @v request Request + * @v value Value parameter + * @v index Index parameter + * @v data Data buffer (if any) + * @v len Length of data + * @ret rc Return status code + */ +int usb_control ( struct usb_device *usb, unsigned int request, + unsigned int value, unsigned int index, void *data, + size_t len ) { + struct usb_bus *bus = usb->port->hub->bus; + struct usb_endpoint *ep = &usb->control; + struct io_buffer *iobuf; + struct io_buffer *cmplt; + union { + struct usb_setup_packet setup; + struct usb_control_pseudo_header pshdr; + } *headroom; + struct usb_control_pseudo_header *pshdr; + unsigned int i; + int rc; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( sizeof ( *headroom ) + len ); + if ( ! 
iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + iob_reserve ( iobuf, sizeof ( *headroom ) ); + iob_put ( iobuf, len ); + if ( request & USB_DIR_IN ) { + memset ( data, 0, len ); + } else { + memcpy ( iobuf->data, data, len ); + } + + /* Enqueue message */ + if ( ( rc = usb_message ( ep, request, value, index, iobuf ) ) != 0 ) + goto err_message; + + /* Wait for completion */ + for ( i = 0 ; i < USB_CONTROL_MAX_WAIT_MS ; i++ ) { + + /* Poll bus */ + usb_poll ( bus ); + + /* Check for completion */ + while ( ( cmplt = list_first_entry ( &usb->complete, + struct io_buffer, + list ) ) ) { + + /* Remove from completion list */ + list_del ( &cmplt->list ); + + /* Extract and strip completion status */ + pshdr = cmplt->data; + iob_pull ( cmplt, sizeof ( *pshdr ) ); + rc = pshdr->rc; + + /* Discard stale completions */ + if ( cmplt != iobuf ) { + DBGC ( usb, "USB %s stale control completion: " + "%s\n", usb->name, strerror ( rc ) ); + DBGC_HDA ( usb, 0, cmplt->data, + iob_len ( cmplt ) ); + free_iob ( cmplt ); + continue; + } + + /* Fail immediately if completion was in error */ + if ( rc != 0 ) { + DBGC ( usb, "USB %s control %04x:%04x:%04x " + "failed: %s\n", usb->name, request, + value, index, strerror ( rc ) ); + free_iob ( cmplt ); + usb_endpoint_reset ( ep ); + return rc; + } + + /* Copy completion to data buffer, if applicable */ + assert ( iob_len ( cmplt ) <= len ); + if ( request & USB_DIR_IN ) + memcpy ( data, cmplt->data, iob_len ( cmplt ) ); + free_iob ( cmplt ); + return 0; + } + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( usb, "USB %s timed out waiting for control %04x:%04x:%04x\n", + usb->name, request, value, index ); + return -ETIMEDOUT; + + err_message: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Get default language ID + * + * @v usb USB device + * @ret language Language ID + */ +static unsigned int usb_get_default_language ( struct usb_device *usb ) { + struct { + struct usb_descriptor_header header; + uint16_t language[1]; + } 
__attribute__ (( packed )) desc; + unsigned int language; + int rc; + + /* Get descriptor */ + if ( ( rc = usb_get_descriptor ( usb, 0, USB_STRING_DESCRIPTOR, 0, 0, + &desc.header, sizeof ( desc ) ) ) !=0){ + DBGC ( usb, "USB %s has no default language: %s\n", + usb->name, strerror ( rc ) ); + return USB_LANG_ENGLISH; + } + + /* Use first language ID */ + language = le16_to_cpu ( desc.language[0] ); + DBGC2 ( usb, "USB %s default language %#04x\n", usb->name, language ); + return language; +} + +/** + * Get USB string descriptor + * + * @v usb USB device + * @v index String index + * @v language Language ID, or 0 to use default + * @v buf Data buffer + * @v len Length of buffer + * @ret len String length (excluding NUL), or negative error + */ +int usb_get_string_descriptor ( struct usb_device *usb, unsigned int index, + unsigned int language, char *buf, size_t len ) { + size_t max = ( len ? ( len - 1 /* NUL */ ) : 0 ); + struct { + struct usb_descriptor_header header; + uint16_t character[max]; + } __attribute__ (( packed )) *desc; + unsigned int actual; + unsigned int i; + int rc; + + /* Use default language ID, if applicable */ + if ( ( language == 0 ) && ( index != 0 ) ) { + if ( ! usb->language ) + usb->language = usb_get_default_language ( usb ); + language = usb->language; + } + + /* Allocate buffer for string */ + desc = malloc ( sizeof ( *desc ) ); + if ( ! 
desc ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Get descriptor */ + if ( ( rc = usb_get_descriptor ( usb, 0, USB_STRING_DESCRIPTOR, index, + language, &desc->header, + sizeof ( *desc ) ) ) != 0 ) + goto err_get_descriptor; + + /* Calculate string length */ + if ( desc->header.len < sizeof ( desc->header ) ) { + rc = -EINVAL; + goto err_len; + } + actual = ( ( desc->header.len - sizeof ( desc->header ) ) / + sizeof ( desc->character[0] ) ); + + /* Copy to buffer */ + for ( i = 0 ; ( ( i < actual ) && ( i < max ) ) ; i++ ) + buf[i] = le16_to_cpu ( desc->character[i] ); + if ( len ) + buf[i] = '\0'; + + /* Free buffer */ + free ( desc ); + + return actual; + + err_len: + err_get_descriptor: + free ( desc ); + err_alloc: + return rc; +} + +/****************************************************************************** + * + * USB device driver + * + ****************************************************************************** + */ + +/** + * Get USB configuration descriptor + * + * @v usb USB device + * @v index Configuration index + * @ret config Configuration descriptor + * @ret rc Return status code + * + * The configuration descriptor is dynamically allocated and must + * eventually be freed by the caller. 
+ */ +static int +usb_config_descriptor ( struct usb_device *usb, unsigned int index, + struct usb_configuration_descriptor **config ) { + struct usb_configuration_descriptor partial; + size_t len; + int rc; + + /* Read first part of configuration descriptor to get size */ + if ( ( rc = usb_get_config_descriptor ( usb, index, &partial, + sizeof ( partial ) ) ) != 0 ) { + DBGC ( usb, "USB %s could not get configuration descriptor %d: " + "%s\n", usb->name, index, strerror ( rc ) ); + goto err_get_partial; + } + len = le16_to_cpu ( partial.len ); + if ( len < sizeof ( partial ) ) { + DBGC ( usb, "USB %s underlength configuraton descriptor %d\n", + usb->name, index ); + rc = -EINVAL; + goto err_partial_len; + } + + /* Allocate buffer for whole configuration descriptor */ + *config = malloc ( len ); + if ( ! *config ) { + rc = -ENOMEM; + goto err_alloc_config; + } + + /* Read whole configuration descriptor */ + if ( ( rc = usb_get_config_descriptor ( usb, index, *config, + len ) ) != 0 ) { + DBGC ( usb, "USB %s could not get configuration descriptor %d: " + "%s\n", usb->name, index, strerror ( rc ) ); + goto err_get_config_descriptor; + } + if ( (*config)->len != partial.len ) { + DBGC ( usb, "USB %s bad configuration descriptor %d length\n", + usb->name, index ); + rc = -EINVAL; + goto err_config_len; + } + + return 0; + + err_config_len: + err_get_config_descriptor: + free ( *config ); + err_alloc_config: + err_partial_len: + err_get_partial: + return rc; +} + +/** + * Describe USB function + * + * @v usb USB device + * @v config Configuration descriptor + * @v first First interface number + * @v interfaces Interface list to fill in + * @v desc Function descriptor to fill in + * @ret rc Return status code + */ +static int usb_describe ( struct usb_device *usb, + struct usb_configuration_descriptor *config, + unsigned int first, uint8_t *interfaces, + struct usb_function_descriptor *desc ) { + struct usb_interface_association_descriptor *association; + struct 
usb_interface_descriptor *interface; + struct cdc_union_descriptor *cdc_union; + unsigned int i; + + /* Fill in vendor and product ID */ + memset ( desc, 0, sizeof ( *desc ) ); + desc->vendor = le16_to_cpu ( usb->device.vendor ); + desc->product = le16_to_cpu ( usb->device.product ); + + /* First, look for an interface association descriptor */ + association = usb_interface_association_descriptor ( config, first ); + if ( association ) { + + /* Sanity check */ + assert ( association->first == first ); + if ( ( first + association->count ) > config->interfaces ) { + DBGC ( usb, "USB %s has invalid association [%d-%d)\n", + usb->name, first, ( first + association->count)); + return -ERANGE; + } + + /* Describe function */ + memcpy ( &desc->class.class, &association->class, + sizeof ( desc->class.class ) ); + desc->count = association->count; + for ( i = 0 ; i < association->count ; i++ ) + interfaces[i] = ( first + i ); + return 0; + } + + /* Next, look for an interface descriptor */ + interface = usb_interface_descriptor ( config, first, 0 ); + if ( ! 
interface ) { + DBGC ( usb, "USB %s has no descriptor for interface %d\n", + usb->name, first ); + return -ENOENT; + } + + /* Describe function */ + memcpy ( &desc->class.class, &interface->class, + sizeof ( desc->class.class ) ); + desc->count = 1; + interfaces[0] = first; + + /* Look for a CDC union descriptor, if applicable */ + if ( ( desc->class.class.class == USB_CLASS_CDC ) && + ( cdc_union = cdc_union_descriptor ( config, interface ) ) ) { + + /* Determine interface count */ + desc->count = ( ( cdc_union->header.len - + offsetof ( typeof ( *cdc_union ), + interface[0] ) ) / + sizeof ( cdc_union->interface[0] ) ); + if ( desc->count > config->interfaces ) { + DBGC ( usb, "USB %s has invalid union functional " + "descriptor with %d interfaces\n", + usb->name, desc->count ); + return -ERANGE; + } + + /* Describe function */ + for ( i = 0 ; i < desc->count ; i++ ) { + if ( cdc_union->interface[i] >= config->interfaces ) { + DBGC ( usb, "USB %s has invalid union " + "functional descriptor covering " + "interface %d\n", usb->name, + cdc_union->interface[i] ); + return -ERANGE; + } + interfaces[i] = cdc_union->interface[i]; + } + + return 0; + } + + return 0; +} + +/** + * Update list of used interface + * + * @v usb USB device + * @v count Number of interfaces + * @v interface List of interfaces + * @v used List of already-used interfaces + * @ret rc Return status code + */ +static int usb_used ( struct usb_device *usb, unsigned int count, + uint8_t *interface, uint8_t *used ) { + unsigned int i; + + for ( i = 0 ; i < count ; i++ ) { + if ( used[interface[i]] ) { + DBGC ( usb, "USB %s interface %d already in use\n", + usb->name, interface[i] ); + return -EINVAL; + } + used[interface[i]] = 1; + } + return 0; +} + +/** + * Find USB device driver + * + * @v desc Function descriptor + * @ret id USB device ID, or NULL + * @ret driver USB device driver, or NULL + */ +struct usb_driver * usb_find_driver ( struct usb_function_descriptor *desc, + struct usb_device_id **id 
) { + struct usb_driver *driver; + unsigned int i; + + /* Look for a matching driver */ + for_each_table_entry ( driver, USB_DRIVERS ) { + for ( i = 0 ; i < driver->id_count ; i++ ) { + + /* Ignore non-matching driver class */ + if ( ( driver->class.class.scalar ^ desc->class.scalar ) + & driver->class.mask.scalar ) + continue; + + /* Look for a matching ID */ + *id = &driver->ids[i]; + if ( ( ( (*id)->vendor == desc->vendor ) || + ( (*id)->vendor == USB_ANY_ID ) ) && + ( ( (*id)->product == desc->product ) || + ( (*id)->product == USB_ANY_ID ) ) ) + return driver; + } + } + + /* Not found */ + *id = NULL; + return NULL; +} + +/** + * Get USB device configuration score + * + * @v usb USB device + * @v config Configuration descriptor + * @ret score Device configuration score, or negative error + */ +static int usb_score ( struct usb_device *usb, + struct usb_configuration_descriptor *config ) { + uint8_t used[config->interfaces]; + uint8_t interface[config->interfaces]; + struct usb_function_descriptor desc; + struct usb_driver *driver; + struct usb_device_id *id; + unsigned int first; + unsigned int score = 0; + int rc; + + /* Identify each function in turn */ + memset ( used, 0, sizeof ( used ) ); + for ( first = 0 ; first < config->interfaces ; first++ ) { + + /* Skip interfaces already used */ + if ( used[first] ) + continue; + + /* Describe function */ + if ( ( rc = usb_describe ( usb, config, first, interface, + &desc ) ) != 0 ) + return rc; + + /* Update used interfaces */ + if ( ( rc = usb_used ( usb, desc.count, interface, + used ) ) != 0 ) + return rc; + + /* Look for a driver for this function */ + driver = usb_find_driver ( &desc, &id ); + if ( driver ) + score += driver->score; + } + + return score; +} + +/** + * Probe USB device driver + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int usb_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct 
usb_device *usb = func->usb; + struct usb_driver *driver; + struct usb_device_id *id; + int rc; + + /* Identify driver */ + driver = usb_find_driver ( &func->desc, &id ); + if ( ! driver ) { + DBGC ( usb, "USB %s %04x:%04x class %d:%d:%d has no driver\n", + func->name, func->desc.vendor, func->desc.product, + func->desc.class.class.class, + func->desc.class.class.subclass, + func->desc.class.class.protocol ); + return -ENOENT; + } + + /* Record driver */ + func->driver = driver; + func->id = id; + func->dev.driver_name = id->name; + + /* Probe driver */ + if ( ( rc = driver->probe ( func, config ) ) != 0 ) { + DBGC ( usb, "USB %s failed to probe driver %s: %s\n", + func->name, id->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Remove USB device driver + * + * @v func USB function + */ +static void usb_remove ( struct usb_function *func ) { + + /* Remove driver */ + func->driver->remove ( func ); +} + +/** + * Probe all USB device drivers + * + * @v usb USB device + * @v config Configuration descriptor + */ +static void +usb_probe_all ( struct usb_device *usb, + struct usb_configuration_descriptor *config ) { + struct usb_bus *bus = usb->port->hub->bus; + struct usb_function *func; + uint8_t used[config->interfaces]; + unsigned int first; + unsigned int i; + int rc; + + /* Identify each function in turn */ + memset ( used, 0, sizeof ( used ) ); + for ( first = 0 ; first < config->interfaces ; first++ ) { + + /* Skip interfaces already used */ + if ( used[first] ) + continue; + + /* Allocate and initialise structure */ + func = zalloc ( sizeof ( *func ) + + ( config->interfaces * + sizeof ( func->interface[0] ) ) ); + if ( ! 
func ) + goto err_alloc; + func->name = func->dev.name; + func->usb = usb; + func->dev.desc.bus_type = BUS_TYPE_USB; + func->dev.desc.location = usb->address; + func->dev.desc.vendor = le16_to_cpu ( usb->device.vendor ); + func->dev.desc.device = le16_to_cpu ( usb->device.product ); + snprintf ( func->dev.name, sizeof ( func->dev.name ), + "%s-%d.%d", usb->name, config->config, first ); + INIT_LIST_HEAD ( &func->dev.children ); + func->dev.parent = bus->dev; + list_add_tail ( &func->list, &usb->functions ); + + /* Identify function */ + if ( ( rc = usb_describe ( usb, config, first, func->interface, + &func->desc ) ) != 0 ) + goto err_describe; + assert ( func->desc.count <= config->interfaces ); + + /* Mark interfaces as used */ + if ( ( rc = usb_used ( usb, func->desc.count, func->interface, + used ) ) != 0 ) + goto err_used; + + /* Probe device driver */ + if ( ( rc = usb_probe ( func, config ) ) != 0 ) + goto err_probe; + DBGC ( usb, "USB %s %04x:%04x class %d:%d:%d interfaces ", + func->name, func->desc.vendor, func->desc.product, + func->desc.class.class.class, + func->desc.class.class.subclass, + func->desc.class.class.protocol ); + for ( i = 0 ; i < func->desc.count ; i++ ) + DBGC ( usb, "%s%d", ( i ? 
"," : "" ), + func->interface[i] ); + DBGC ( usb, " using driver %s\n", func->dev.driver_name ); + + /* Add to device hierarchy */ + list_add_tail ( &func->dev.siblings, &bus->dev->children ); + + continue; + + list_del ( &func->dev.siblings ); + usb_remove ( func ); + err_probe: + err_used: + err_describe: + list_del ( &func->list ); + free ( func ); + err_alloc: + /* Continue registering other functions */ + continue; + } +} + +/** + * Remove all device drivers + * + * @v usb USB device + */ +static void usb_remove_all ( struct usb_device *usb ) { + struct usb_function *func; + struct usb_function *tmp; + + /* Remove all functions */ + list_for_each_entry_safe ( func, tmp, &usb->functions, list ) { + + /* Remove device driver */ + usb_remove ( func ); + + /* Remove from device hierarchy */ + assert ( list_empty ( &func->dev.children ) ); + list_del ( &func->dev.siblings ); + + /* Remove from list of functions */ + list_del ( &func->list ); + + /* Free function */ + free ( func ); + } +} + +/** + * Clear USB device configuration + * + * @v usb USB device + */ +static void usb_deconfigure ( struct usb_device *usb ) { + unsigned int i; + + /* Remove device drivers */ + usb_remove_all ( usb ); + + /* Sanity checks */ + for ( i = 0 ; i < ( sizeof ( usb->ep ) / sizeof ( usb->ep[0] ) ) ; i++){ + if ( i != USB_ENDPOINT_IDX ( USB_EP0_ADDRESS ) ) + assert ( usb->ep[i] == NULL ); + } + + /* Clear device configuration */ + usb_set_configuration ( usb, 0 ); +} + +/** + * Choose our preferred USB device configuration + * + * @v usb USB device + * @ret rc Return status code + */ +static int usb_autoconfigure ( struct usb_device *usb ) { + struct usb_configuration_descriptor *config; + unsigned int preferred = 0; + unsigned int index; + int score; + int best = 0; + int rc; + + /* Calculate driver score for each configuration index */ + for ( index = 0 ; index < usb->device.configurations ; index++ ) { + + /* Read configuration descriptor */ + if ( ( rc = usb_config_descriptor ( 
usb, index, + &config ) ) != 0 ) + goto err_config; + + /* Get score for this configuration */ + score = usb_score ( usb, config ); + if ( score < 0 ) { + rc = score; + goto err_score; + } + DBGC2 ( usb, "USB %s configuration %d score %d\n", + usb->name, config->config, score ); + + /* Record as preferred configuration, if applicable */ + if ( score > best ) { + best = score; + preferred = index; + } + + /* Free configuration descriptor */ + free ( config ); + config = NULL; + } + + /* Read preferred configuration descriptor */ + if ( ( rc = usb_config_descriptor ( usb, preferred, &config ) ) != 0 ) + goto err_preferred; + + /* Set configuration */ + if ( ( rc = usb_set_configuration ( usb, config->config ) ) != 0){ + DBGC ( usb, "USB %s could not set configuration %d: %s\n", + usb->name, config->config, strerror ( rc ) ); + goto err_set_configuration; + } + + /* Probe USB device drivers */ + usb_probe_all ( usb, config ); + + /* Free configuration descriptor */ + free ( config ); + + return 0; + + usb_remove_all ( usb ); + usb_set_configuration ( usb, 0 ); + err_set_configuration: + free ( config ); + err_preferred: + return rc; + + err_score: + free ( config ); + err_config: + return rc; +} + +/****************************************************************************** + * + * USB device + * + ****************************************************************************** + */ + +/** + * Allocate USB device + * + * @v port USB port + * @ret usb USB device, or NULL on allocation failure + */ +static struct usb_device * alloc_usb ( struct usb_port *port ) { + struct usb_hub *hub = port->hub; + struct usb_bus *bus = hub->bus; + struct usb_device *usb; + + /* Allocate and initialise structure */ + usb = zalloc ( sizeof ( *usb ) ); + if ( ! usb ) + return NULL; + snprintf ( usb->name, sizeof ( usb->name ), "%s%c%d", hub->name, + ( hub->usb ? '.' 
: '-' ), port->address ); + usb->port = port; + INIT_LIST_HEAD ( &usb->functions ); + usb->host = &bus->op->device; + usb_endpoint_init ( &usb->control, usb, &usb_control_operations ); + INIT_LIST_HEAD ( &usb->complete ); + + return usb; +} + +/** + * Register USB device + * + * @v usb USB device + * @ret rc Return status code + */ +static int register_usb ( struct usb_device *usb ) { + struct usb_port *port = usb->port; + struct usb_hub *hub = port->hub; + struct usb_bus *bus = hub->bus; + unsigned int protocol; + size_t mtu; + int rc; + + /* Add to port */ + if ( port->usb != NULL ) { + DBGC ( hub, "USB hub %s port %d is already registered to %s\n", + hub->name, port->address, port->usb->name ); + rc = -EALREADY; + goto err_already; + } + port->usb = usb; + + /* Add to bus device list */ + list_add_tail ( &usb->list, &bus->devices ); + + /* Enable device */ + if ( ( rc = hub->driver->enable ( hub, port ) ) != 0 ) { + DBGC ( hub, "USB hub %s port %d could not enable: %s\n", + hub->name, port->address, strerror ( rc ) ); + goto err_enable; + } + + /* Allow recovery interval since port may have been reset */ + mdelay ( USB_RESET_RECOVER_DELAY_MS ); + + /* Get device speed */ + if ( ( rc = hub->driver->speed ( hub, port ) ) != 0 ) { + DBGC ( hub, "USB hub %s port %d could not get speed: %s\n", + hub->name, port->address, strerror ( rc ) ); + goto err_speed; + } + usb->speed = port->speed; + DBGC2 ( usb, "USB %s attached as %s-speed device\n", + usb->name, usb_speed_name ( usb->speed ) ); + + /* Open device */ + if ( ( rc = usb->host->open ( usb ) ) != 0 ) { + DBGC ( usb, "USB %s could not open: %s\n", + usb->name, strerror ( rc ) ); + goto err_open; + } + + /* Describe control endpoint */ + mtu = USB_EP0_DEFAULT_MTU ( usb->speed ); + usb_endpoint_describe ( &usb->control, USB_EP0_ADDRESS, + USB_EP0_ATTRIBUTES, mtu, USB_EP0_BURST, + USB_EP0_INTERVAL ); + + /* Open control endpoint */ + if ( ( rc = usb_endpoint_open ( &usb->control ) ) != 0 ) + goto err_open_control; + 
assert ( usb_endpoint ( usb, USB_EP0_ADDRESS ) == &usb->control ); + + /* Assign device address */ + if ( ( rc = usb->host->address ( usb ) ) != 0 ) { + DBGC ( usb, "USB %s could not set address: %s\n", + usb->name, strerror ( rc ) ); + goto err_address; + } + DBGC2 ( usb, "USB %s assigned address %d\n", usb->name, usb->address ); + + /* Allow recovery interval after Set Address command */ + mdelay ( USB_SET_ADDRESS_RECOVER_DELAY_MS ); + + /* Read first part of device descriptor to get EP0 MTU */ + if ( ( rc = usb_get_mtu ( usb, &usb->device ) ) != 0 ) { + DBGC ( usb, "USB %s could not get MTU: %s\n", + usb->name, strerror ( rc ) ); + goto err_get_mtu; + } + + /* Calculate EP0 MTU */ + protocol = le16_to_cpu ( usb->device.protocol ); + mtu = ( ( protocol < USB_PROTO_3_0 ) ? + usb->device.mtu : ( 1 << usb->device.mtu ) ); + DBGC2 ( usb, "USB %s has control MTU %zd (guessed %zd)\n", + usb->name, mtu, usb->control.mtu ); + + /* Update MTU */ + if ( ( rc = usb_endpoint_mtu ( &usb->control, mtu ) ) != 0 ) + goto err_mtu; + + /* Read whole device descriptor */ + if ( ( rc = usb_get_device_descriptor ( usb, &usb->device ) ) != 0 ) { + DBGC ( usb, "USB %s could not get device descriptor: %s\n", + usb->name, strerror ( rc ) ); + goto err_get_device_descriptor; + } + DBGC ( usb, "USB %s addr %d %04x:%04x class %d:%d:%d (v%s, %s-speed, " + "MTU %zd)\n", usb->name, usb->address, + le16_to_cpu ( usb->device.vendor ), + le16_to_cpu ( usb->device.product ), usb->device.class.class, + usb->device.class.subclass, usb->device.class.protocol, + usb_bcd ( le16_to_cpu ( usb->device.protocol ) ), + usb_speed_name ( usb->speed ), usb->control.mtu ); + + /* Configure device */ + if ( ( rc = usb_autoconfigure ( usb ) ) != 0 ) + goto err_autoconfigure; + + return 0; + + usb_deconfigure ( usb ); + err_autoconfigure: + err_get_device_descriptor: + err_mtu: + err_get_mtu: + err_address: + usb_endpoint_close ( &usb->control ); + err_open_control: + usb->host->close ( usb ); + err_open: + 
err_speed: + /* Leave port enabled on failure, to avoid an endless loop of + * failed device registrations. + */ + err_enable: + list_del ( &usb->list ); + port->usb = NULL; + err_already: + return rc; +} + +/** + * Unregister USB device + * + * @v usb USB device + */ +static void unregister_usb ( struct usb_device *usb ) { + struct usb_port *port = usb->port; + struct usb_hub *hub = port->hub; + struct io_buffer *iobuf; + struct io_buffer *tmp; + + DBGC ( usb, "USB %s addr %d %04x:%04x class %d:%d:%d removed\n", + usb->name, usb->address, le16_to_cpu ( usb->device.vendor ), + le16_to_cpu ( usb->device.product ), usb->device.class.class, + usb->device.class.subclass, usb->device.class.protocol ); + + /* Sanity checks */ + assert ( port->usb == usb ); + + /* Clear device configuration */ + usb_deconfigure ( usb ); + + /* Close control endpoint */ + usb_endpoint_close ( &usb->control ); + + /* Discard any stale control completions */ + list_for_each_entry_safe ( iobuf, tmp, &usb->complete, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } + + /* Close device */ + usb->host->close ( usb ); + + /* Disable port */ + hub->driver->disable ( hub, port ); + + /* Remove from bus device list */ + list_del ( &usb->list ); + + /* Remove from port */ + port->usb = NULL; +} + +/** + * Free USB device + * + * @v usb USB device + */ +static void free_usb ( struct usb_device *usb ) { + unsigned int i; + + /* Sanity checks */ + for ( i = 0 ; i < ( sizeof ( usb->ep ) / sizeof ( usb->ep[0] ) ) ; i++ ) + assert ( usb->ep[i] == NULL ); + assert ( list_empty ( &usb->functions ) ); + assert ( list_empty ( &usb->complete ) ); + + /* Free device */ + free ( usb ); +} + +/****************************************************************************** + * + * USB device hotplug event handling + * + ****************************************************************************** + */ + +/** + * Handle newly attached USB device + * + * @v port USB port + * @ret rc Return status code + 
*/ +static int usb_attached ( struct usb_port *port ) { + struct usb_device *usb; + int rc; + + /* Mark port as attached */ + port->attached = 1; + + /* Sanity checks */ + assert ( port->usb == NULL ); + + /* Allocate USB device */ + usb = alloc_usb ( port ); + if ( ! usb ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Register USB device */ + if ( ( rc = register_usb ( usb ) ) != 0 ) + goto err_register; + + return 0; + + unregister_usb ( usb ); + err_register: + free_usb ( usb ); + err_alloc: + return rc; +} + +/** + * Handle newly detached USB device + * + * @v port USB port + */ +static void usb_detached ( struct usb_port *port ) { + struct usb_device *usb = port->usb; + + /* Mark port as detached */ + port->attached = 0; + + /* Do nothing if we have no USB device */ + if ( ! usb ) + return; + + /* Unregister USB device */ + unregister_usb ( usb ); + + /* Free USB device */ + free_usb ( usb ); +} + +/** + * Handle newly attached or detached USB device + * + * @v port USB port + * @ret rc Return status code + */ +static int usb_hotplugged ( struct usb_port *port ) { + struct usb_hub *hub = port->hub; + int rc; + + /* Get current port speed */ + if ( ( rc = hub->driver->speed ( hub, port ) ) != 0 ) { + DBGC ( hub, "USB hub %s port %d could not get speed: %s\n", + hub->name, port->address, strerror ( rc ) ); + /* Treat as a disconnection */ + port->disconnected = 1; + port->speed = USB_SPEED_NONE; + } + + /* Detach device, if applicable */ + if ( port->attached && ( port->disconnected || ! port->speed ) ) + usb_detached ( port ); + + /* Clear any recorded disconnections */ + port->disconnected = 0; + + /* Attach device, if applicable */ + if ( port->speed && ( ! 
port->attached ) && + ( ( rc = usb_attached ( port ) ) != 0 ) ) + return rc; + + return 0; +} + +/****************************************************************************** + * + * USB process + * + ****************************************************************************** + */ + +/** + * Report port status change + * + * @v port USB port + */ +void usb_port_changed ( struct usb_port *port ) { + + /* Record hub port status change */ + list_del ( &port->changed ); + list_add_tail ( &port->changed, &usb_changed ); +} + +/** + * Handle newly attached or detached USB device + * + */ +static void usb_hotplug ( void ) { + struct usb_port *port; + + /* Handle any changed ports, allowing for the fact that the + * port list may change as we perform hotplug actions. + */ + while ( ! list_empty ( &usb_changed ) ) { + + /* Get first changed port */ + port = list_first_entry ( &usb_changed, struct usb_port, + changed ); + assert ( port != NULL ); + + /* Remove from list of changed ports */ + list_del ( &port->changed ); + INIT_LIST_HEAD ( &port->changed ); + + /* Perform appropriate hotplug action */ + usb_hotplugged ( port ); + } +} + +/** + * USB process + * + * @v process USB process + */ +static void usb_step ( struct process *process __unused ) { + struct usb_bus *bus; + struct usb_endpoint *ep; + + /* Poll all buses */ + for_each_usb_bus ( bus ) + usb_poll ( bus ); + + /* Attempt to reset first halted endpoint in list, if any. We + * do not attempt to process the complete list, since this + * would require extra code to allow for the facts that the + * halted endpoint list may change as we do so, and that + * resetting an endpoint may fail. 
+ */ + if ( ( ep = list_first_entry ( &usb_halted, struct usb_endpoint, + halted ) ) != NULL ) + usb_endpoint_reset ( ep ); + + /* Handle any changed ports */ + usb_hotplug(); +} + +/** USB process */ +PERMANENT_PROCESS ( usb_process, usb_step ); + +/****************************************************************************** + * + * USB hub + * + ****************************************************************************** + */ + +/** + * Allocate USB hub + * + * @v bus USB bus + * @v usb Underlying USB device, if any + * @v ports Number of ports + * @v driver Hub driver operations + * @ret hub USB hub, or NULL on allocation failure + */ +struct usb_hub * alloc_usb_hub ( struct usb_bus *bus, struct usb_device *usb, + unsigned int ports, + struct usb_hub_driver_operations *driver ) { + struct usb_hub *hub; + struct usb_port *port; + unsigned int i; + + /* Allocate and initialise structure */ + hub = zalloc ( sizeof ( *hub ) + ( ports * sizeof ( hub->port[0] ) ) ); + if ( ! hub ) + return NULL; + hub->name = ( usb ? 
usb->name : bus->name ); + hub->bus = bus; + hub->usb = usb; + if ( usb ) + hub->protocol = usb->port->protocol; + hub->ports = ports; + hub->driver = driver; + hub->host = &bus->op->hub; + + /* Initialise port list */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + port = usb_port ( hub, i ); + port->hub = hub; + port->address = i; + if ( usb ) + port->protocol = usb->port->protocol; + INIT_LIST_HEAD ( &port->changed ); + } + + return hub; +} + +/** + * Register USB hub + * + * @v hub USB hub + * @ret rc Return status code + */ +int register_usb_hub ( struct usb_hub *hub ) { + struct usb_bus *bus = hub->bus; + struct usb_port *port; + unsigned int i; + int rc; + + /* Add to hub list */ + list_add_tail ( &hub->list, &bus->hubs ); + + /* Open hub (host controller) */ + if ( ( rc = hub->host->open ( hub ) ) != 0 ) { + DBGC ( hub, "USB hub %s could not open: %s\n", + hub->name, strerror ( rc ) ); + goto err_host_open; + } + + /* Open hub (driver) */ + if ( ( rc = hub->driver->open ( hub ) ) != 0 ) { + DBGC ( hub, "USB hub %s could not open: %s\n", + hub->name, strerror ( rc ) ); + goto err_driver_open; + } + + /* Delay to allow ports to stabilise */ + mdelay ( USB_PORT_DELAY_MS ); + + /* Mark all ports as changed */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + port = usb_port ( hub, i ); + usb_port_changed ( port ); + } + + /* Some hubs seem to defer reporting device connections until + * their interrupt endpoint is polled for the first time. + * Poll the bus once now in order to pick up any such + * connections. 
+ */ + usb_poll ( bus ); + + return 0; + + hub->driver->close ( hub ); + err_driver_open: + hub->host->close ( hub ); + err_host_open: + list_del ( &hub->list ); + return rc; +} + +/** + * Unregister USB hub + * + * @v hub USB hub + */ +void unregister_usb_hub ( struct usb_hub *hub ) { + struct usb_port *port; + unsigned int i; + + /* Detach all devices */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + port = usb_port ( hub, i ); + if ( port->attached ) + usb_detached ( port ); + } + + /* Close hub (driver) */ + hub->driver->close ( hub ); + + /* Close hub (host controller) */ + hub->host->close ( hub ); + + /* Cancel any pending port status changes */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + port = usb_port ( hub, i ); + list_del ( &port->changed ); + INIT_LIST_HEAD ( &port->changed ); + } + + /* Remove from hub list */ + list_del ( &hub->list ); +} + +/** + * Free USB hub + * + * @v hub USB hub + */ +void free_usb_hub ( struct usb_hub *hub ) { + struct usb_port *port; + unsigned int i; + + /* Sanity checks */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + port = usb_port ( hub, i ); + assert ( ! port->attached ); + assert ( port->usb == NULL ); + assert ( list_empty ( &port->changed ) ); + } + + /* Free hub */ + free ( hub ); +} + +/****************************************************************************** + * + * USB bus + * + ****************************************************************************** + */ + +/** + * Allocate USB bus + * + * @v dev Underlying hardware device + * @v ports Number of root hub ports + * @v mtu Largest transfer allowed on the bus + * @v op Host controller operations + * @ret bus USB bus, or NULL on allocation failure + */ +struct usb_bus * alloc_usb_bus ( struct device *dev, unsigned int ports, + size_t mtu, struct usb_host_operations *op ) { + struct usb_bus *bus; + + /* Allocate and initialise structure */ + bus = zalloc ( sizeof ( *bus ) ); + if ( ! 
bus ) + goto err_alloc_bus; + bus->name = dev->name; + bus->dev = dev; + bus->mtu = mtu; + bus->op = op; + INIT_LIST_HEAD ( &bus->devices ); + INIT_LIST_HEAD ( &bus->hubs ); + bus->host = &bus->op->bus; + + /* Allocate root hub */ + bus->hub = alloc_usb_hub ( bus, NULL, ports, &op->root ); + if ( ! bus->hub ) + goto err_alloc_hub; + + return bus; + + free_usb_hub ( bus->hub ); + err_alloc_hub: + free ( bus ); + err_alloc_bus: + return NULL; +} + +/** + * Register USB bus + * + * @v bus USB bus + * @ret rc Return status code + */ +int register_usb_bus ( struct usb_bus *bus ) { + int rc; + + /* Sanity checks */ + assert ( bus->hub != NULL ); + + /* Open bus */ + if ( ( rc = bus->host->open ( bus ) ) != 0 ) + goto err_open; + + /* Add to list of USB buses */ + list_add_tail ( &bus->list, &usb_buses ); + + /* Register root hub */ + if ( ( rc = register_usb_hub ( bus->hub ) ) != 0 ) + goto err_register_hub; + + /* Attach any devices already present */ + usb_hotplug(); + + return 0; + + unregister_usb_hub ( bus->hub ); + err_register_hub: + list_del ( &bus->list ); + bus->host->close ( bus ); + err_open: + return rc; +} + +/** + * Unregister USB bus + * + * @v bus USB bus + */ +void unregister_usb_bus ( struct usb_bus *bus ) { + + /* Sanity checks */ + assert ( bus->hub != NULL ); + + /* Unregister root hub */ + unregister_usb_hub ( bus->hub ); + + /* Remove from list of USB buses */ + list_del ( &bus->list ); + + /* Close bus */ + bus->host->close ( bus ); + + /* Sanity checks */ + assert ( list_empty ( &bus->devices ) ); + assert ( list_empty ( &bus->hubs ) ); +} + +/** + * Free USB bus + * + * @v bus USB bus + */ +void free_usb_bus ( struct usb_bus *bus ) { + struct usb_endpoint *ep; + struct usb_port *port; + + /* Sanity checks */ + assert ( list_empty ( &bus->devices ) ); + assert ( list_empty ( &bus->hubs ) ); + list_for_each_entry ( ep, &usb_halted, halted ) + assert ( ep->usb->port->hub->bus != bus ); + list_for_each_entry ( port, &usb_changed, changed ) + assert 
( port->hub->bus != bus ); + + /* Free root hub */ + free_usb_hub ( bus->hub ); + + /* Free bus */ + free ( bus ); +} + +/** + * Find USB bus by device location + * + * @v bus_type Bus type + * @v location Bus location + * @ret bus USB bus, or NULL + */ +struct usb_bus * find_usb_bus_by_location ( unsigned int bus_type, + unsigned int location ) { + struct usb_bus *bus; + + for_each_usb_bus ( bus ) { + if ( ( bus->dev->desc.bus_type == bus_type ) && + ( bus->dev->desc.location == location ) ) + return bus; + } + + return NULL; +} + +/****************************************************************************** + * + * USB address assignment + * + ****************************************************************************** + */ + +/** + * Allocate device address + * + * @v bus USB bus + * @ret address Device address, or negative error + */ +int usb_alloc_address ( struct usb_bus *bus ) { + unsigned int address; + + /* Find first free device address */ + address = ffsll ( ~bus->addresses ); + if ( ! 
address ) + return -ENOENT; + + /* Mark address as used */ + bus->addresses |= ( 1ULL << ( address - 1 ) ); + + return address; +} + +/** + * Free device address + * + * @v bus USB bus + * @v address Device address + */ +void usb_free_address ( struct usb_bus *bus, unsigned int address ) { + + /* Sanity check */ + assert ( address > 0 ); + assert ( bus->addresses & ( 1ULL << ( address - 1 ) ) ); + + /* Mark address as free */ + bus->addresses &= ~( 1ULL << ( address - 1 ) ); +} + +/****************************************************************************** + * + * USB bus topology + * + ****************************************************************************** + */ + +/** + * Get USB route string + * + * @v usb USB device + * @ret route USB route string + */ +unsigned int usb_route_string ( struct usb_device *usb ) { + struct usb_device *parent; + unsigned int route; + + /* Navigate up to root hub, constructing route string as we go */ + for ( route = 0 ; ( parent = usb->port->hub->usb ) ; usb = parent ) { + route <<= 4; + route |= ( ( usb->port->address > 0xf ) ? 
+ 0xf : usb->port->address ); + } + + return route; +} + +/** + * Get USB depth + * + * @v usb USB device + * @ret depth Hub depth + */ +unsigned int usb_depth ( struct usb_device *usb ) { + struct usb_device *parent; + unsigned int depth; + + /* Navigate up to root hub, constructing depth as we go */ + for ( depth = 0 ; ( parent = usb->port->hub->usb ) ; usb = parent ) + depth++; + + return depth; +} + +/** + * Get USB root hub port + * + * @v usb USB device + * @ret port Root hub port + */ +struct usb_port * usb_root_hub_port ( struct usb_device *usb ) { + struct usb_device *parent; + + /* Navigate up to root hub */ + while ( ( parent = usb->port->hub->usb ) ) + usb = parent; + + return usb->port; +} + +/** + * Get USB transaction translator + * + * @v usb USB device + * @ret port Transaction translator port, or NULL + */ +struct usb_port * usb_transaction_translator ( struct usb_device *usb ) { + struct usb_device *parent; + + /* Navigate up to root hub. If we find a low-speed or + * full-speed device with a higher-speed parent hub, then that + * device's port is the transaction translator. + */ + for ( ; ( parent = usb->port->hub->usb ) ; usb = parent ) { + if ( ( usb->speed <= USB_SPEED_FULL ) && + ( parent->speed > USB_SPEED_FULL ) ) + return usb->port; + } + + return NULL; +} + +/* Drag in objects via register_usb_bus() */ +REQUIRING_SYMBOL ( register_usb_bus ); + +/* Drag in USB configuration */ +REQUIRE_OBJECT ( config_usb ); + +/* Drag in hub driver */ +REQUIRE_OBJECT ( usbhub ); diff --git a/src/drivers/infiniband/CIB_PRM.h b/src/drivers/infiniband/CIB_PRM.h new file mode 100755 index 00000000..d578f9b0 --- /dev/null +++ b/src/drivers/infiniband/CIB_PRM.h @@ -0,0 +1,1167 @@ +/* + * Copyright (C) 2013-2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#ifndef __CIB_PRM__ +#define __CIB_PRM__ + +typedef unsigned long long __be64; +typedef uint32_t __be32; +typedef uint16_t __be16; + +#define GOLAN_CMD_DATA_BLOCK_SIZE (1 << 9) +#define GOLAN_CMD_PAS_CNT (GOLAN_CMD_DATA_BLOCK_SIZE / sizeof(__be64)) +#define MAILBOX_STRIDE (1 << 10) +#define MAILBOX_MASK (MAILBOX_STRIDE - 1) + +#define GOLAN_PCI_CMD_XPORT 7 +#define CMD_OWNER_HW 0x1 +#define GOLAN_LOG_MAX_QP 0x1 +#define IB_NUM_PKEYS 0x20 + +struct health_buffer { + __be32 assert_var[5]; + __be32 rsvd0[3]; + __be32 assert_exit_ptr; + __be32 assert_callra; + __be32 rsvd1[2]; + __be32 fw_ver; + __be32 hw_id; + __be32 rsvd2; + u8 irisc_index; + u8 synd; + __be16 ext_sync; +} __attribute ( ( packed ) ); + +struct golan_hca_init_seg { + __be32 fw_rev; + __be32 cmdif_rev_fw_sub; + __be32 rsvd0[2]; + __be32 cmdq_addr_h; + __be32 cmdq_addr_l_sz; + __be32 cmd_dbell; + __be32 rsvd1[121]; + struct health_buffer health; + __be32 rsvd2[884]; + __be32 health_counter; + __be32 rsvd3[1023]; + __be64 ieee1588_clk; + __be32 ieee1588_clk_type; + __be32 clr_intx; +} __attribute ( ( packed ) ); + +enum golan_manage_pages_mode { + GOLAN_PAGES_CANT_GIVE = 0, + GOLAN_PAGES_GIVE = 1, + GOLAN_PAGES_TAKE = 2 +}; + +enum golan_qry_pages_mode { 
+ GOLAN_BOOT_PAGES = 0x1, + GOLAN_INIT_PAGES = 0x2, + GOLAN_REG_PAGES = 0x3, +}; + +enum { + GOLAN_REG_PCAP = 0x5001, + GOLAN_REG_PMTU = 0x5003, + GOLAN_REG_PTYS = 0x5004, + GOLAN_REG_PAOS = 0x5006, + GOLAN_REG_PMAOS = 0x5012, + GOLAN_REG_PUDE = 0x5009, + GOLAN_REG_PMPE = 0x5010, + GOLAN_REG_PELC = 0x500e, + GOLAN_REG_PMLP = 0, /* TBD */ + GOLAN_REG_NODE_DESC = 0x6001, + GOLAN_REG_HOST_ENDIANESS = 0x7004, +}; + +enum { + GOLAN_CMD_OP_QUERY_HCA_CAP = 0x100, + GOLAN_CMD_OP_QUERY_ADAPTER = 0x101, + GOLAN_CMD_OP_INIT_HCA = 0x102, + GOLAN_CMD_OP_TEARDOWN_HCA = 0x103, + GOLAN_CMD_OP_ENABLE_HCA = 0x104, + GOLAN_CMD_OP_DISABLE_HCA = 0x105, + + GOLAN_CMD_OP_QUERY_PAGES = 0x107, + GOLAN_CMD_OP_MANAGE_PAGES = 0x108, + GOLAN_CMD_OP_SET_HCA_CAP = 0x109, + + GOLAN_CMD_OP_CREATE_MKEY = 0x200, + GOLAN_CMD_OP_QUERY_MKEY = 0x201, + GOLAN_CMD_OP_DESTROY_MKEY = 0x202, + GOLAN_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + + GOLAN_CMD_OP_CREATE_EQ = 0x301, + GOLAN_CMD_OP_DESTROY_EQ = 0x302, + GOLAN_CMD_OP_QUERY_EQ = 0x303, + + GOLAN_CMD_OP_CREATE_CQ = 0x400, + GOLAN_CMD_OP_DESTROY_CQ = 0x401, + GOLAN_CMD_OP_QUERY_CQ = 0x402, + GOLAN_CMD_OP_MODIFY_CQ = 0x403, + + GOLAN_CMD_OP_CREATE_QP = 0x500, + GOLAN_CMD_OP_DESTROY_QP = 0x501, + GOLAN_CMD_OP_RST2INIT_QP = 0x502, + GOLAN_CMD_OP_INIT2RTR_QP = 0x503, + GOLAN_CMD_OP_RTR2RTS_QP = 0x504, + GOLAN_CMD_OP_RTS2RTS_QP = 0x505, + GOLAN_CMD_OP_SQERR2RTS_QP = 0x506, + GOLAN_CMD_OP_2ERR_QP = 0x507, + GOLAN_CMD_OP_RTS2SQD_QP = 0x508, + GOLAN_CMD_OP_SQD2RTS_QP = 0x509, + GOLAN_CMD_OP_2RST_QP = 0x50a, + GOLAN_CMD_OP_QUERY_QP = 0x50b, + GOLAN_CMD_OP_CONF_SQP = 0x50c, + GOLAN_CMD_OP_MAD_IFC = 0x50d, + GOLAN_CMD_OP_INIT2INIT_QP = 0x50e, + GOLAN_CMD_OP_SUSPEND_QP = 0x50f, + GOLAN_CMD_OP_UNSUSPEND_QP = 0x510, + GOLAN_CMD_OP_SQD2SQD_QP = 0x511, + GOLAN_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, + GOLAN_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, + GOLAN_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, + + GOLAN_CMD_OP_CREATE_PSV = 0x600, + GOLAN_CMD_OP_DESTROY_PSV = 0x601, + 
GOLAN_CMD_OP_QUERY_PSV = 0x602, + GOLAN_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, + GOLAN_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, + + GOLAN_CMD_OP_CREATE_SRQ = 0x700, + GOLAN_CMD_OP_DESTROY_SRQ = 0x701, + GOLAN_CMD_OP_QUERY_SRQ = 0x702, + GOLAN_CMD_OP_ARM_RQ = 0x703, + GOLAN_CMD_OP_RESIZE_SRQ = 0x704, + + GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, + GOLAN_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, + GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, + + GOLAN_CMD_OP_ALLOC_PD = 0x800, + GOLAN_CMD_OP_DEALLOC_PD = 0x801, + GOLAN_CMD_OP_ALLOC_UAR = 0x802, + GOLAN_CMD_OP_DEALLOC_UAR = 0x803, + + GOLAN_CMD_OP_ATTACH_TO_MCG = 0x806, + GOLAN_CMD_OP_DETACH_FROM_MCG = 0x807, + + + GOLAN_CMD_OP_ALLOC_XRCD = 0x80e, + GOLAN_CMD_OP_DEALLOC_XRCD = 0x80f, + + GOLAN_CMD_OP_ACCESS_REG = 0x805, +}; + +struct golan_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 opmod; +} __attribute ( ( packed ) ); + +struct golan_cmd_layout { + u8 type; + u8 rsvd0[3]; + __be32 inlen; + union { + __be64 in_ptr; + __be32 in_ptr32[2]; + }; + __be32 in[4]; + __be32 out[4]; + union { + __be64 out_ptr; + __be32 out_ptr32[2]; + }; + __be32 outlen; + u8 token; + u8 sig; + u8 rsvd1; + volatile u8 status_own; +} __attribute ( ( packed ) ); + +struct golan_outbox_hdr { + u8 status; + u8 rsvd[3]; + __be32 syndrome; +} __attribute ( ( packed ) ); + +enum { + GOLAN_DEV_CAP_FLAG_RC = 1LL << 0, + GOLAN_DEV_CAP_FLAG_UC = 1LL << 1, + GOLAN_DEV_CAP_FLAG_UD = 1LL << 2, + GOLAN_DEV_CAP_FLAG_XRC = 1LL << 3, + GOLAN_DEV_CAP_FLAG_SRQ = 1LL << 6, + GOLAN_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + GOLAN_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + GOLAN_DEV_CAP_FLAG_APM = 1LL << 17, + GOLAN_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + GOLAN_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + GOLAN_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + GOLAN_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, + GOLAN_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, + GOLAN_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + GOLAN_DEV_CAP_FLAG_DCT = 1LL << 41, + GOLAN_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, +}; + + +struct 
golan_hca_cap { + u8 rsvd1[16]; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + __be16 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 rsvd5; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 rsvd7; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 log_max_ra_req_qp; + u8 rsvd10; + u8 log_max_ra_res_qp; + u8 rsvd11[4]; + __be16 max_qp_count; + __be16 pkey_table_size; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 rsvd14; + u8 num_ports; + u8 log_max_msg; + u8 rsvd15[3]; + __be16 stat_rate_support; + u8 rsvd16[2]; + __be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + u8 rsvd19[4]; + __be16 max_wqe_sz_sq; + u8 rsvd20[2]; + __be16 max_wqe_sz_rq; + u8 rsvd21[2]; + __be16 max_wqe_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[40]; + __be32 uar_page_sz; + u8 rsvd27[28]; + u8 log_msx_atomic_size_qp; + u8 rsvd28[2]; + u8 log_msx_atomic_size_dc; + u8 rsvd29[76]; +} __attribute ( ( packed ) ); + +struct golan_query_pages_inbox { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_query_pages_outbox { + struct golan_outbox_hdr hdr; + u8 rsvd[2]; + __be16 func_id; + __be32 num_pages; +} __attribute ( ( packed ) ); + +struct golan_cmd_query_hca_cap_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_cmd_query_hca_cap_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd0[8]; + struct golan_hca_cap hca_cap; +} __attribute ( ( packed ) ); + +struct golan_cmd_set_hca_cap_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; + struct golan_hca_cap hca_cap; +} __attribute ( ( packed ) ); + +struct golan_cmd_set_hca_cap_mbox_out { + 
struct golan_outbox_hdr hdr; + u8 rsvd0[8]; +} __attribute ( ( packed ) ); + +struct golan_cmd_init_hca_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 profile; + u8 rsvd1[4]; +} __attribute ( ( packed ) ); + +struct golan_cmd_init_hca_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +enum golan_teardown { + GOLAN_TEARDOWN_GRACEFUL = 0x0, + GOLAN_TEARDOWN_PANIC = 0x1 +}; + +struct golan_cmd_teardown_hca_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 profile; + u8 rsvd1[4]; +} __attribute ( ( packed ) ); + +struct golan_cmd_teardown_hca_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_enable_hca_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_enable_hca_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_disable_hca_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_disable_hca_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_manage_pages_inbox_data { + u8 rsvd2[16]; + __be64 pas[0]; +} __attribute ( ( packed ) ); + +struct golan_manage_pages_inbox { + struct golan_inbox_hdr hdr; + __be16 rsvd0; + __be16 func_id; + __be32 num_entries; + struct golan_manage_pages_inbox_data data; +} __attribute ( ( packed ) ); + +struct golan_manage_pages_outbox_data { + __be64 pas[0]; +} __attribute ( ( packed ) ); + +struct golan_manage_pages_outbox { + struct golan_outbox_hdr hdr; + __be32 num_entries; + __be32 rsrvd; + struct golan_manage_pages_outbox_data data; +} __attribute ( ( packed ) ); + +struct golan_reg_host_endianess { + u8 he; + u8 rsvd[15]; +} __attribute ( ( packed ) ); + +struct golan_cmd_prot_block { + union { + __be64 data[GOLAN_CMD_PAS_CNT]; + u8 bdata[GOLAN_CMD_DATA_BLOCK_SIZE]; + }; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; 
+ u8 rsvd1; + u8 token; + u8 ctrl_sig; + u8 sig; +} __attribute ( ( packed ) ); + +/* MAD IFC structures */ +#define GOLAN_MAD_SIZE 256 +#define GOLAN_MAD_IFC_NO_VALIDATION 0x3 +#define GOLAN_MAD_IFC_RLID_BIT 16 + +struct golan_mad_ifc_mbox_in { + struct golan_inbox_hdr hdr; + __be16 remote_lid; + u8 rsvd0; + u8 port; + u8 rsvd1[4]; + u8 mad[GOLAN_MAD_SIZE]; +} __attribute ( ( packed ) ); + +struct golan_mad_ifc_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; + u8 mad[GOLAN_MAD_SIZE]; +} __attribute ( ( packed ) ); + +/* UAR Structures */ +struct golan_alloc_uar_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_alloc_uar_mbox_out { + struct golan_outbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_free_uar_mbox_in { + struct golan_inbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_free_uar_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +/* Event Queue Structures */ +enum { + GOLAN_EQ_STATE_ARMED = 0x9, + GOLAN_EQ_STATE_FIRED = 0xa, + GOLAN_EQ_STATE_ALWAYS_ARMED = 0xb, +}; + + +struct golan_eq_context { + u8 status; + u8 ec_oi; + u8 st; + u8 rsvd2[7]; + __be16 page_pffset; + __be32 log_sz_usr_page; + u8 rsvd3[7]; + u8 intr; + u8 log_page_size; + u8 rsvd4[15]; + __be32 consumer_counter; + __be32 produser_counter; + u8 rsvd5[16]; +} __attribute ( ( packed ) ); + +struct golan_create_eq_mbox_in_data { + struct golan_eq_context ctx; + u8 rsvd2[8]; + __be64 events_mask; + u8 rsvd3[176]; + __be64 pas[0]; +} __attribute ( ( packed ) ); + +struct golan_create_eq_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd0[3]; + u8 input_eqn; + u8 rsvd1[4]; + struct golan_create_eq_mbox_in_data data; +} __attribute ( ( packed ) ); + +struct golan_create_eq_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd0[3]; + u8 eq_number; + u8 rsvd1[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_eq_mbox_in 
{ + struct golan_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_eq_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +/***********************************************/ +/************** Query Vport ****************/ +struct golan_query_hca_vport_context_inbox { + struct golan_inbox_hdr hdr; + __be16 other_vport : 1; + __be16 rsvd1 : 7; + __be16 port_num : 4; + __be16 rsvd2 : 4; + __be16 vport_number; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_query_hca_vport_context_data { + __be32 field_select; + __be32 rsvd1[7]; + //**** + __be16 sm_virt_aware : 1; + __be16 has_smi : 1; + __be16 has_raw : 1; + __be16 grh_required : 1; + __be16 rsvd2 : 12; + u8 port_physical_state : 4; + u8 vport_state_policy : 4; + u8 port_state : 4; + u8 vport_state : 4; + //**** + u8 rsvd3[4]; + //**** + __be32 system_image_guid[2]; + //**** + __be32 port_guid[2]; + //**** + __be32 node_guid[2]; + //**** + __be32 cap_mask1; + __be32 cap_mask1_field_select; + __be32 cap_mask2; + __be32 cap_mask2_field_select; + u8 rsvd4[16]; + __be16 lid; + u8 rsvd5 : 4; + u8 init_type_reply : 4; + u8 lmc : 3; + u8 subnet_timeout : 5; + __be16 sm_lid; + u8 sm_sl : 4; + u8 rsvd6 : 4; + u8 rsvd7; + __be16 qkey_violation_counter; + __be16 pkey_violation_counter; + u8 rsvd8[100]; +} __attribute ( ( packed ) ); + +struct golan_query_hca_vport_context_outbox { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; + struct golan_query_hca_vport_context_data context_data; +} __attribute ( ( packed ) ); + +struct golan_query_hca_vport_gid_inbox { + struct golan_inbox_hdr hdr; + u8 other_vport : 1; + u8 rsvd1 : 7; + u8 port_num : 4; + u8 rsvd2 : 4; + __be16 vport_number; + __be16 rsvd3; + __be16 gid_index; +} __attribute ( ( packed ) ); + +struct golan_query_hca_vport_gid_outbox { + struct golan_outbox_hdr hdr; + u8 rsvd0[4]; + __be16 gids_num; + u8 rsvd1[2]; + __be32 gid0[4]; +} __attribute ( ( packed ) ); + 
+struct golan_query_hca_vport_pkey_inbox { + struct golan_inbox_hdr hdr; + u8 other_vport : 1; + u8 rsvd1 : 7; + u8 port_num : 4; + u8 rsvd2 : 4; + __be16 vport_number; + __be16 rsvd3; + __be16 pkey_index; +} __attribute ( ( packed ) ); + +struct golan_query_hca_vport_pkey_data { + __be16 rsvd1; + __be16 pkey0; +} __attribute ( ( packed ) ); + +struct golan_query_hca_vport_pkey_outbox { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; + struct golan_query_hca_vport_pkey_data *pkey_data; +} __attribute ( ( packed ) ); + +struct golan_eqe_comp { + __be32 reserved[6]; + __be32 cqn; +} __attribute ( ( packed ) ); + +struct golan_eqe_qp_srq { + __be32 reserved[6]; + __be32 qp_srq_n; +} __attribute ( ( packed ) ); + +struct golan_eqe_cq_err { + __be32 cqn; + u8 reserved1[7]; + u8 syndrome; +} __attribute ( ( packed ) ); + +struct golan_eqe_dropped_packet { +}; + +struct golan_eqe_port_state { + u8 reserved0[8]; + u8 port; +} __attribute ( ( packed ) ); + +struct golan_eqe_gpio { + __be32 reserved0[2]; + __be64 gpio_event; +} __attribute ( ( packed ) ); + +struct golan_eqe_congestion { + u8 type; + u8 rsvd0; + u8 congestion_level; +} __attribute ( ( packed ) ); + +struct golan_eqe_stall_vl { + u8 rsvd0[3]; + u8 port_vl; +} __attribute ( ( packed ) ); + +struct golan_eqe_cmd { + __be32 vector; + __be32 rsvd[6]; +} __attribute ( ( packed ) ); + +struct golan_eqe_page_req { + u8 rsvd0[2]; + __be16 func_id; + u8 rsvd1[2]; + __be16 num_pages; + __be32 rsvd2[5]; +} __attribute ( ( packed ) ); + +union ev_data { + __be32 raw[7]; + struct golan_eqe_cmd cmd; + struct golan_eqe_comp comp; + struct golan_eqe_qp_srq qp_srq; + struct golan_eqe_cq_err cq_err; + struct golan_eqe_dropped_packet dp; + struct golan_eqe_port_state port; + struct golan_eqe_gpio gpio; + struct golan_eqe_congestion cong; + struct golan_eqe_stall_vl stall_vl; + struct golan_eqe_page_req req_pages; +} __attribute__ ((packed)); + +struct golan_eqe { + u8 rsvd0; + u8 type; + u8 rsvd1; + u8 sub_type; + __be32 rsvd2[7]; 
+ union ev_data data; + __be16 rsvd3; + u8 signature; + u8 owner; +} __attribute__ ((packed)); + +/* Protection Domain Structures */ +struct golan_alloc_pd_mbox_in { + struct golan_inbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +struct golan_alloc_pd_mbox_out { + struct golan_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_dealloc_pd_mbox_in { + struct golan_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_dealloc_pd_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +/* Memory key structures */ +#define GOLAN_IB_ACCESS_LOCAL_READ (1 << 2) +#define GOLAN_IB_ACCESS_LOCAL_WRITE (1 << 3) +#define GOLAN_MKEY_LEN64 (1 << 31) +#define GOLAN_CREATE_MKEY_SEG_QPN_BIT 8 + +struct golan_mkey_seg { + /* + * This is a two bit field occupying bits 31-30. + * bit 31 is always 0, + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + */ + u8 status; + u8 pcie_control; + u8 flags; + u8 version; + __be32 qpn_mkey7_0; + u8 rsvd1[4]; + __be32 flags_pd; + __be64 start_addr; + __be64 len; + __be32 bsfs_octo_size; + u8 rsvd2[16]; + __be32 xlt_oct_size; + u8 rsvd3[3]; + u8 log2_page_size; + u8 rsvd4[4]; +} __attribute ( ( packed ) ); + +struct golan_create_mkey_mbox_in_data { + struct golan_mkey_seg seg; + u8 rsvd1[16]; + __be32 xlat_oct_act_size; + __be32 bsf_coto_act_size; + u8 rsvd2[168]; + __be64 pas[0]; +} __attribute ( ( packed ) ); + +struct golan_create_mkey_mbox_in { + struct golan_inbox_hdr hdr; + __be32 input_mkey_index; + u8 rsvd0[4]; + struct golan_create_mkey_mbox_in_data data; +} __attribute ( ( packed ) ); + +struct golan_create_mkey_mbox_out { + struct golan_outbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_mkey_mbox_in { + struct golan_inbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_mkey_mbox_out { + 
struct golan_outbox_hdr hdr; + u8 rsvd[8]; +} __attribute ( ( packed ) ); + +/* Completion Queue Structures */ +enum { + GOLAN_CQ_STATE_ARMED = 9, + GOLAN_CQ_STATE_ALWAYS_ARMED = 0xb, + GOLAN_CQ_STATE_FIRED = 0xa +}; + +enum { + GOLAN_CQE_REQ = 0, + GOLAN_CQE_RESP_WR_IMM = 1, + GOLAN_CQE_RESP_SEND = 2, + GOLAN_CQE_RESP_SEND_IMM = 3, + GOLAN_CQE_RESP_SEND_INV = 4, + GOLAN_CQE_RESIZE_CQ = 0xff, /* TBD */ + GOLAN_CQE_REQ_ERR = 13, + GOLAN_CQE_RESP_ERR = 14 +}; + +struct golan_cq_context { + u8 status; + u8 cqe_sz_flags; + u8 st; + u8 rsvd3; + u8 rsvd4[6]; + __be16 page_offset; + __be32 log_sz_usr_page; + __be16 cq_period; + __be16 cq_max_count; + __be16 rsvd20; + __be16 c_eqn; + u8 log_pg_sz; + u8 rsvd25[7]; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_counter; + __be32 producer_counter; + u8 rsvd48[8]; + __be64 db_record_addr; +} __attribute ( ( packed ) ); + + +struct golan_create_cq_mbox_in_data { + struct golan_cq_context ctx; + u8 rsvd6[192]; + __be64 pas[0]; +} __attribute ( ( packed ) ); + +struct golan_create_cq_mbox_in { + struct golan_inbox_hdr hdr; + __be32 input_cqn; + u8 rsvdx[4]; + struct golan_create_cq_mbox_in_data data; +} __attribute ( ( packed ) ); + +struct golan_create_cq_mbox_out { + struct golan_outbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_cq_mbox_in { + struct golan_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_cq_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd0[8]; +} __attribute ( ( packed ) ); + +struct golan_err_cqe { + u8 rsvd0[32]; + __be32 srqn; + u8 rsvd1[16]; + u8 hw_syndrom; + u8 rsvd2; + u8 vendor_err_synd; + u8 syndrome; + __be32 s_wqe_opcode_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +} __attribute ( ( packed ) ); + +struct golan_cqe64 { + u8 rsvd0[17]; + u8 ml_path; + u8 rsvd20[4]; + __be16 slid; + __be32 flags_rqpn; + u8 rsvd28[4]; + __be32 srqn; + __be32 imm_inval_pkey; 
+ u8 rsvd40[4]; + __be32 byte_cnt; + __be64 timestamp; + __be32 sop_drop_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +} __attribute ( ( packed ) ); + +/* Queue Pair Structures */ +#define GOLAN_QP_CTX_ST_BIT 16 +#define GOLAN_QP_CTX_PM_STATE_BIT 11 +#define GOLAN_QP_CTX_FRE_BIT 11 +#define GOLAN_QP_CTX_RLKY_BIT 4 +#define GOLAN_QP_CTX_RQ_SIZE_BIT 3 +#define GOLAN_QP_CTX_SQ_SIZE_BIT 11 +#define GOLAN_QP_CTX_MTU_BIT 5 +#define GOLAN_QP_CTX_ACK_REQ_FREQ_BIT 28 + +enum { + GOLAN_QP_CTX_DONT_USE_RSRVD_LKEY = 0, + GOLAN_QP_CTX_USE_RSRVD_LKEY = 1 +}; + +enum { + GOLAN_IB_ACK_REQ_FREQ = 8, +}; + +enum golan_qp_optpar { + GOLAN_QP_PARAM_ALT_ADDR_PATH = 1 << 0, + GOLAN_QP_PARAM_RRE = 1 << 1, + GOLAN_QP_PARAM_RAE = 1 << 2, + GOLAN_QP_PARAM_RWE = 1 << 3, + GOLAN_QP_PARAM_PKEY_INDEX = 1 << 4, + GOLAN_QP_PARAM_Q_KEY = 1 << 5, + GOLAN_QP_PARAM_RNR_TIMEOUT = 1 << 6, + GOLAN_QP_PARAM_PRIMARY_ADDR_PATH = 1 << 7, + GOLAN_QP_PARAM_SRA_MAX = 1 << 8, + GOLAN_QP_PARAM_RRA_MAX = 1 << 9, + GOLAN_QP_PARAM_PM_STATE = 1 << 10, + GOLAN_QP_PARAM_RETRY_COUNT = 1 << 12, + GOLAN_QP_PARAM_RNR_RETRY = 1 << 13, + GOLAN_QP_PARAM_ACK_TIMEOUT = 1 << 14, + GOLAN_QP_PARAM_PRI_PORT = 1 << 16, + GOLAN_QP_PARAM_SRQN = 1 << 18, + GOLAN_QP_PARAM_CQN_RCV = 1 << 19, + GOLAN_QP_PARAM_DC_HS = 1 << 20, + GOLAN_QP_PARAM_DC_KEY = 1 << 21 +}; + +#define GOLAN_QP_PARAMS_INIT2RTR_MASK (GOLAN_QP_PARAM_PKEY_INDEX |\ + GOLAN_QP_PARAM_Q_KEY |\ + GOLAN_QP_PARAM_RWE |\ + GOLAN_QP_PARAM_RRE) + +#define GOLAN_QP_PARAMS_RTR2RTS_MASK (GOLAN_QP_PARAM_PM_STATE |\ + GOLAN_QP_PARAM_RNR_TIMEOUT |\ + GOLAN_QP_PARAM_Q_KEY |\ + GOLAN_QP_PARAM_RWE |\ + GOLAN_QP_PARAM_RRE) + + +enum { + GOLAN_QP_ST_RC = 0x0, + GOLAN_QP_ST_UC = 0x1, + GOLAN_QP_ST_UD = 0x2, + GOLAN_QP_ST_XRC = 0x3, + GOLAN_QP_ST_MLX = 0x4, + GOLAN_QP_ST_DC = 0x5, + GOLAN_QP_ST_QP0 = 0x7, + GOLAN_QP_ST_QP1 = 0x8, + GOLAN_QP_ST_RAW_ETHERTYPE = 0x9, + GOLAN_QP_ST_RAW_IPV6 = 0xa, + GOLAN_QP_ST_SNIFFER = 0xb, + GOLAN_QP_ST_SYNC_UMR = 0xe, + GOLAN_QP_ST_PTP_1588 = 0xd, 
+ GOLAN_QP_ST_REG_UMR = 0xc, + GOLAN_QP_ST_MAX +}; + +enum { + GOLAN_QP_PM_MIGRATED = 0x3, + GOLAN_QP_PM_ARMED = 0x0, + GOLAN_QP_PM_REARM = 0x1 +}; + +enum { + GOLAN_QP_LAT_SENSITIVE = 1 << 28, + GOLAN_QP_ENABLE_SIG = 1 << 31 +}; + + +struct golan_qp_db { + u8 rsvd0[2]; + __be16 recv_db; + u8 rsvd1[2]; + __be16 send_db; +} __attribute ( ( packed ) ); + +enum { + GOLAN_WQE_CTRL_CQ_UPDATE = 2 << 2, /*Wissam, wtf?*/ + GOLAN_WQE_CTRL_SOLICITED = 1 << 1 +}; + +struct golan_wqe_ctrl_seg { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +} __attribute ( ( packed ) ); + +struct golan_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + __be16 rlid; + u8 reserved0[10]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +} __attribute ( ( packed ) ); + +struct golan_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +} __attribute ( ( packed ) ); + +struct golan_wqe_signature_seg { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +} __attribute ( ( packed ) ); + +struct golan_wqe_inline_seg { + __be32 byte_count; +} __attribute ( ( packed ) ); + +struct golan_qp_path { + u8 fl; + u8 rsvd3; + u8 free_ar; + u8 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 rsvd1[4]; + u8 sl; + u8 port; + u8 rsvd2[6]; +} __attribute ( ( packed ) ); + +struct golan_qp_context { + __be32 flags; + __be32 flags_pd; + u8 mtu_msgmax; + u8 rq_size_stride; + __be16 sq_crq_size; + __be32 qp_counter_set_usr_page; + __be32 wire_qpn; + __be32 log_pg_sz_remote_qpn; + struct golan_qp_path pri_path; + struct golan_qp_path alt_path; + __be32 params1; + u8 reserved2[4]; + __be32 next_send_psn; + __be32 cqn_send; + u8 reserved3[8]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + 
__be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 rq_type_srqn; + __be32 rmsn; + __be16 hw_sq_wqe_counter; + __be16 sw_sq_wqe_counter; + __be16 hw_rcyclic_byte_counter; + __be16 hw_rq_counter; + __be16 sw_rcyclic_byte_counter; + __be16 sw_rq_counter; + u8 rsvd0[5]; + u8 cgs; + u8 cs_req; + u8 cs_res; + __be64 dc_access_key; + u8 rsvd1[24]; +} __attribute ( ( packed ) ); + +struct golan_create_qp_mbox_in_data { + __be32 opt_param_mask; + u8 rsvd1[4]; + struct golan_qp_context ctx; + u8 rsvd3[16]; + __be64 pas[0]; +} __attribute ( ( packed ) ); + +struct golan_create_qp_mbox_in { + struct golan_inbox_hdr hdr; + __be32 input_qpn; + u8 rsvd0[4]; + struct golan_create_qp_mbox_in_data data; +} __attribute ( ( packed ) ); + +struct golan_create_qp_mbox_out { + struct golan_outbox_hdr hdr; + __be32 qpn; + u8 rsvd0[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_qp_mbox_in { + struct golan_inbox_hdr hdr; + __be32 qpn; + u8 rsvd0[4]; +} __attribute ( ( packed ) ); + +struct golan_destroy_qp_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd0[8]; +} __attribute ( ( packed ) ); + +struct golan_modify_qp_mbox_in_data { + __be32 optparam; + u8 rsvd0[4]; + struct golan_qp_context ctx; +} __attribute ( ( packed ) ); + +struct golan_modify_qp_mbox_in { + struct golan_inbox_hdr hdr; + __be32 qpn; + u8 rsvd1[4]; + struct golan_modify_qp_mbox_in_data data; +} __attribute ( ( packed ) ); + +struct golan_modify_qp_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvd0[8]; +} __attribute ( ( packed ) ); + +struct golan_attach_mcg_mbox_in { + struct golan_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +} __attribute ( ( packed ) ); + +struct golan_attach_mcg_mbox_out { + struct golan_outbox_hdr hdr; + u8 rsvf[8]; +} __attribute ( ( packed ) ); + +struct golan_detach_mcg_mbox_in { + struct golan_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +} __attribute ( ( packed ) ); + +struct golan_detach_mcg_mbox_out { + struct 
golan_outbox_hdr hdr; + u8 rsvf[8]; +} __attribute ( ( packed ) ); + + +#define MAILBOX_SIZE sizeof(struct golan_cmd_prot_block) + +#endif /* __CIB_PRM__ */ diff --git a/src/drivers/infiniband/flexboot_nodnic.c b/src/drivers/infiniband/flexboot_nodnic.c new file mode 100644 index 00000000..54b85840 --- /dev/null +++ b/src/drivers/infiniband/flexboot_nodnic.c @@ -0,0 +1,1592 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "flexboot_nodnic.h" +#include "mlx_utils/include/public/mlx_types.h" +#include "mlx_utils/include/public/mlx_utils.h" +#include "mlx_utils/include/public/mlx_bail.h" +#include "mlx_nodnic/include/mlx_cmd.h" +#include "mlx_utils/include/public/mlx_memory.h" +#include "mlx_utils/include/public/mlx_pci.h" +#include "mlx_nodnic/include/mlx_device.h" +#include "mlx_nodnic/include/mlx_port.h" +#include +#include +#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h" +#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h" +#include "mlx_utils/include/public/mlx_pci_gw.h" +#include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h" +#include "mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h" + +/*************************************************************************** + * + * Completion queue operations + * + *************************************************************************** + */ +static int flexboot_nodnic_arm_cq ( struct flexboot_nodnic_port *port ) { +#ifndef DEVICE_CX3 + mlx_uint32 val32 = 0; + union arm_cq_uar cq_uar; + +#define ARM_CQ_UAR_CQ_CI_MASK 0xffffff +#define ARM_CQ_UAR_CMDSN_MASK 3 +#define ARM_CQ_UAR_CMDSN_OFFSET 28 +#define ARM_CQ_UAR_CQ_CI_OFFSET 0x20 + if ( port->port_priv.device->device_cap.support_bar_cq_ctrl ) { + cq_uar.dword[0] = cpu_to_be32((port->eth_cq->next_idx & ARM_CQ_UAR_CQ_CI_MASK) | + ((port->cmdsn++ & ARM_CQ_UAR_CMDSN_MASK) << ARM_CQ_UAR_CMDSN_OFFSET)); + cq_uar.dword[1] = cpu_to_be32(port->eth_cq->cqn); + wmb(); + writeq(cq_uar.qword, port->port_priv.device->uar.virt + ARM_CQ_UAR_CQ_CI_OFFSET); + port->port_priv.arm_cq_doorbell_record->dword[0] = cq_uar.dword[1]; + port->port_priv.arm_cq_doorbell_record->dword[1] = cq_uar.dword[0]; + } else { + val32 = ( port->eth_cq->next_idx & 0xffffff ); + if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val32 ) ) { + 
MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" ); + return MLX_FAILED; + } + } +#else + mlx_utils *utils = port->port_priv.device->utils; + nodnic_port_data_flow_gw *ptr = port->port_priv.data_flow_gw; + mlx_uint32 data = 0; + mlx_uint32 val = 0; + + if ( port->port_priv.device->device_cap.crspace_doorbells == 0 ) { + val = ( port->eth_cq->next_idx & 0xffff ); + if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val ) ) { + MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" ); + return MLX_FAILED; + } + } else { + /* Arming the CQ with CQ CI should be with this format - + * 16 bit - CQ CI - same endianness as the FW (don't swap bytes) + * 15 bit - reserved + * 1 bit - arm CQ - must correct the endianness with the reserved above */ + data = ( ( ( port->eth_cq->next_idx & 0xffff ) << 16 ) | 0x0080 ); + /* Write the new index and update FW that new data was submitted */ + mlx_pci_mem_write ( utils, MlxPciWidthUint32, 0, + ( mlx_uintn ) & ( ptr->armcq_cq_ci_dword ), 1, &data ); + } +#endif + return 0; +} + +/** + * Create completion queue + * + * @v ibdev Infiniband device + * @v cq Completion queue + * @ret rc Return status code + */ +static int flexboot_nodnic_create_cq ( struct ib_device *ibdev , + struct ib_completion_queue *cq ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq; + mlx_status status = MLX_SUCCESS; + mlx_uint32 cqn; + + flexboot_nodnic_cq = (struct flexboot_nodnic_completion_queue *) + zalloc(sizeof(*flexboot_nodnic_cq)); + if ( flexboot_nodnic_cq == NULL ) { + status = MLX_OUT_OF_RESOURCES; + goto qp_alloc_err; + } + + status = nodnic_port_create_cq(&port->port_priv, + cq->num_cqes * + flexboot_nodnic->callbacks->get_cqe_size(), + &flexboot_nodnic_cq->nodnic_completion_queue + ); + MLX_FATAL_CHECK_STATUS(status, create_err, + 
"nodnic_port_create_cq failed"); + flexboot_nodnic->callbacks->cqe_set_owner( + flexboot_nodnic_cq->nodnic_completion_queue->cq_virt, + cq->num_cqes); + if ( flexboot_nodnic->device_priv.device_cap.support_bar_cq_ctrl ) { + status = nodnic_port_query(&port->port_priv, + nodnic_port_option_cq_n_index, + (mlx_uint32 *)&cqn ); + MLX_FATAL_CHECK_STATUS(status, read_cqn_err, + "failed to query cqn"); + cq->cqn = cqn; + } + + ib_cq_set_drvdata ( cq, flexboot_nodnic_cq ); + return status; +read_cqn_err: +create_err: + free(flexboot_nodnic_cq); +qp_alloc_err: + return status; +} + +/** + * Destroy completion queue + * + * @v ibdev Infiniband device + * @v cq Completion queue + */ +static void flexboot_nodnic_destroy_cq ( struct ib_device *ibdev , + struct ib_completion_queue *cq ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq ); + + nodnic_port_destroy_cq(&port->port_priv, + flexboot_nodnic_cq->nodnic_completion_queue); + + free(flexboot_nodnic_cq); +} + +static +struct ib_work_queue * flexboot_nodnic_find_wq ( struct ib_device *ibdev , + struct ib_completion_queue *cq, + unsigned long qpn, int is_send ) { + struct ib_work_queue *wq; + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp; + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct nodnic_ring *ring; + mlx_uint32 out_qpn; + list_for_each_entry ( wq, &cq->work_queues, list ) { + flexboot_nodnic_qp = ib_qp_get_drvdata ( wq->qp ); + if( wq->is_send == is_send && wq->is_send == TRUE ) { + ring = &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring; + } else if( wq->is_send == is_send && wq->is_send == FALSE ) { + ring = &flexboot_nodnic_qp->nodnic_queue_pair->receive.nodnic_ring; + } else { + continue; + } + 
nodnic_port_get_qpn(&port->port_priv, ring, &out_qpn); + if ( out_qpn == qpn ) + return wq; + } + return NULL; +} + +/** + * Handle completion + * + * @v ibdev Infiniband device + * @v cq Completion queue + * @v cqe Hardware completion queue entry + * @ret rc Return status code + */ +static int flexboot_nodnic_complete ( struct ib_device *ibdev, + struct ib_completion_queue *cq, + struct cqe_data *cqe_data ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct ib_work_queue *wq; + struct ib_queue_pair *qp; + struct io_buffer *iobuf; + struct ib_address_vector recv_dest; + struct ib_address_vector recv_source; + unsigned long qpn; + unsigned long wqe_idx; + unsigned long wqe_idx_mask; + size_t len; + int rc = 0; + + /* Parse completion */ + qpn = cqe_data->qpn; + + if ( cqe_data->is_error == TRUE ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx syndrome %x vendor %x\n", + flexboot_nodnic, cq->cqn, cqe_data->syndrome, + cqe_data->vendor_err_syndrome ); + rc = -EIO; + /* Don't return immediately; propagate error to completer */ + } + + /* Identify work queue */ + wq = flexboot_nodnic_find_wq( ibdev, cq, qpn, cqe_data->is_send ); + if ( wq == NULL ) { + DBGC ( flexboot_nodnic, + "flexboot_nodnic %p CQN %#lx unknown %s QPN %#lx\n", + flexboot_nodnic, cq->cqn, + ( cqe_data->is_send ? "send" : "recv" ), qpn ); + return -EIO; + } + qp = wq->qp; + + /* Identify work queue entry */ + wqe_idx = cqe_data->wqe_counter; + wqe_idx_mask = ( wq->num_wqes - 1 ); + DBGCP ( flexboot_nodnic, + "NODNIC %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n", + flexboot_nodnic, cq->cqn, qp->qpn, + ( cqe_data->is_send ? "send" : "recv" ), + wqe_idx ); + + /* Identify I/O buffer */ + iobuf = wq->iobufs[wqe_idx & wqe_idx_mask]; + if ( iobuf == NULL ) { + DBGC ( flexboot_nodnic, + "NODNIC %p CQN %#lx QPN %#lx empty %s WQE %#lx\n", + flexboot_nodnic, cq->cqn, qp->qpn, + ( cqe_data->is_send ? 
"send" : "recv" ), wqe_idx ); + return -EIO; + } + wq->iobufs[wqe_idx & wqe_idx_mask] = NULL; + + if ( cqe_data->is_send == TRUE ) { + /* Hand off to completion handler */ + ib_complete_send ( ibdev, qp, iobuf, rc ); + } else if ( rc != 0 ) { + /* Propagate error to receive completion handler */ + ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc ); + } else { + /* Set received length */ + len = cqe_data->byte_cnt; + assert ( len <= iob_tailroom ( iobuf ) ); + iob_put ( iobuf, len ); + memset ( &recv_dest, 0, sizeof ( recv_dest ) ); + recv_dest.qpn = qpn; + memset ( &recv_source, 0, sizeof ( recv_source ) ); + switch ( qp->type ) { + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_UD: + case IB_QPT_RC: + break; + case IB_QPT_ETH: + break; + default: + assert ( 0 ); + return -EINVAL; + } + /* Hand off to completion handler */ + ib_complete_recv ( ibdev, qp, &recv_dest, + &recv_source, iobuf, rc ); + } + + return rc; +} +/** + * Poll completion queue + * + * @v ibdev Infiniband device + * @v cq Completion queues + */ +static void flexboot_nodnic_poll_cq ( struct ib_device *ibdev, + struct ib_completion_queue *cq) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq ); + void *cqe; + mlx_size cqe_size; + struct cqe_data cqe_data; + unsigned int cqe_idx_mask; + int rc; + + cqe_size = flexboot_nodnic->callbacks->get_cqe_size(); + while ( TRUE ) { + /* Look for completion entry */ + cqe_idx_mask = ( cq->num_cqes - 1 ); + cqe = ((uint8_t *)flexboot_nodnic_cq->nodnic_completion_queue->cq_virt) + + cqe_size * (cq->next_idx & cqe_idx_mask); + + /* TODO: check fill_completion */ + flexboot_nodnic->callbacks->fill_completion(cqe, &cqe_data); + if ( cqe_data.owner ^ + ( ( cq->next_idx & cq->num_cqes ) ? 
1 : 0 ) ) { + /* Entry still owned by hardware; end of poll */ + break; + } + /* Handle completion */ + rc = flexboot_nodnic_complete ( ibdev, cq, &cqe_data ); + if ( rc != 0 ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx failed to complete: %s\n", + flexboot_nodnic, cq->cqn, strerror ( rc ) ); + DBGC_HDA ( flexboot_nodnic, virt_to_phys ( cqe ), + cqe, sizeof ( *cqe ) ); + } + + /* Update completion queue's index */ + cq->next_idx++; + } +} +/*************************************************************************** + * + * Queue pair operations + * + *************************************************************************** + */ + + +/** + * Create queue pair + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @ret rc Return status code + */ +static int flexboot_nodnic_create_qp ( struct ib_device *ibdev, + struct ib_queue_pair *qp ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp; + mlx_status status = MLX_SUCCESS; + + flexboot_nodnic_qp = (struct flexboot_nodnic_queue_pair *)zalloc(sizeof(*flexboot_nodnic_qp)); + if ( flexboot_nodnic_qp == NULL ) { + status = MLX_OUT_OF_RESOURCES; + goto qp_alloc_err; + } + + status = nodnic_port_create_qp(&port->port_priv, + (nodnic_queue_pair_type) qp->type, + qp->send.num_wqes * sizeof(struct nodnic_send_wqbb), + qp->send.num_wqes, + qp->recv.num_wqes * sizeof(struct nodnic_recv_wqe), + qp->recv.num_wqes, + &flexboot_nodnic_qp->nodnic_queue_pair); + MLX_FATAL_CHECK_STATUS(status, create_err, + "nodnic_port_create_qp failed"); + ib_qp_set_drvdata ( qp, flexboot_nodnic_qp ); + return status; +create_err: + free(flexboot_nodnic_qp); +qp_alloc_err: + return status; +} + +/** + * Modify queue pair + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @ret rc Return status code + */ +static int flexboot_nodnic_modify_qp ( struct ib_device *ibdev 
__unused, + struct ib_queue_pair *qp __unused) { + /*not needed*/ + return 0; +} + +/** + * Destroy queue pair + * + * @v ibdev Infiniband device + * @v qp Queue pair + */ +static void flexboot_nodnic_destroy_qp ( struct ib_device *ibdev, + struct ib_queue_pair *qp ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp ); + + nodnic_port_destroy_qp(&port->port_priv, + (nodnic_queue_pair_type) qp->type, + flexboot_nodnic_qp->nodnic_queue_pair); + + free(flexboot_nodnic_qp); +} + +/*************************************************************************** + * + * Work request operations + * + *************************************************************************** + */ + +/** + * Post send work queue entry + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v av Address vector + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int flexboot_nodnic_post_send ( struct ib_device *ibdev, + struct ib_queue_pair *qp, + struct ib_address_vector *av, + struct io_buffer *iobuf) { + + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct ib_work_queue *wq = &qp->send; + struct nodnic_send_wqbb *wqbb; + nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair; + struct nodnic_send_ring *send_ring = &nodnic_qp->send; + mlx_status status = MLX_SUCCESS; + unsigned int wqe_idx_mask; + unsigned long wqe_idx; + + if ( ( port->port_priv.dma_state == FALSE ) || + ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic DMA disabled\n"); + status = -ENETDOWN; + goto post_send_done; + } + + /* Allocate work queue entry */ + wqe_idx = wq->next_idx; + 
wqe_idx_mask = ( wq->num_wqes - 1 ); + if ( wq->iobufs[wqe_idx & wqe_idx_mask] ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx send queue full\n", + flexboot_nodnic, qp->qpn ); + status = -ENOBUFS; + goto post_send_done; + } + wqbb = &send_ring->wqe_virt[wqe_idx & wqe_idx_mask]; + wq->iobufs[wqe_idx & wqe_idx_mask] = iobuf; + + assert ( flexboot_nodnic->callbacks-> + fill_send_wqe[qp->type] != NULL ); + status = flexboot_nodnic->callbacks-> + fill_send_wqe[qp->type] ( ibdev, qp, av, iobuf, + wqbb, wqe_idx ); + if ( status != 0 ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx fill send wqe failed\n", + flexboot_nodnic, qp->qpn ); + goto post_send_done; + } + + wq->next_idx++; + + status = port->port_priv.send_doorbell ( &port->port_priv, + &send_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx ); + if ( flexboot_nodnic->callbacks->tx_uar_send_doorbell_fn ) { + flexboot_nodnic->callbacks->tx_uar_send_doorbell_fn ( ibdev, wqbb ); + } + if ( status != 0 ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring send doorbell failed\n", flexboot_nodnic ); + } + +post_send_done: + return status; +} + +/** + * Post receive work queue entry + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int flexboot_nodnic_post_recv ( struct ib_device *ibdev, + struct ib_queue_pair *qp, + struct io_buffer *iobuf ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct ib_work_queue *wq = &qp->recv; + nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair; + struct nodnic_recv_ring *recv_ring = &nodnic_qp->receive; + struct nodnic_recv_wqe *wqe; + unsigned int wqe_idx_mask; + mlx_status status = MLX_SUCCESS; + + /* Allocate work queue entry */ + wqe_idx_mask = ( wq->num_wqes - 1 ); + if ( 
wq->iobufs[wq->next_idx & wqe_idx_mask] ) { + DBGC ( flexboot_nodnic, + "flexboot_nodnic %p QPN %#lx receive queue full\n", + flexboot_nodnic, qp->qpn ); + status = -ENOBUFS; + goto post_recv_done; + } + wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf; + wqe = &((struct nodnic_recv_wqe*)recv_ring->wqe_virt)[wq->next_idx & wqe_idx_mask]; + + MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) ); + MLX_FILL_1 ( &wqe->data[0], 1, l_key, flexboot_nodnic->device_priv.lkey ); + MLX_FILL_H ( &wqe->data[0], 2, + local_address_h, virt_to_bus ( iobuf->data ) ); + MLX_FILL_1 ( &wqe->data[0], 3, + local_address_l, virt_to_bus ( iobuf->data ) ); + + wq->next_idx++; + + status = port->port_priv.recv_doorbell ( &port->port_priv, + &recv_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx ); + if ( status != 0 ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring receive doorbell failed\n", flexboot_nodnic ); + } +post_recv_done: + return status; +} + +/*************************************************************************** + * + * Event queues + * + *************************************************************************** + */ + +static void flexboot_nodnic_poll_eq ( struct ib_device *ibdev ) { + struct flexboot_nodnic *flexboot_nodnic; + struct flexboot_nodnic_port *port; + struct net_device *netdev; + nodnic_port_state state = 0; + mlx_status status; + + if ( ! ibdev ) { + DBG ( "%s: ibdev = NULL!!!\n", __FUNCTION__ ); + return; + } + + flexboot_nodnic = ib_get_drvdata ( ibdev ); + port = &flexboot_nodnic->port[ibdev->port - 1]; + netdev = port->netdev; + + if ( ! netdev_is_open ( netdev ) ) { + DBG2( "%s: port %d is closed\n", __FUNCTION__, port->ibdev->port ); + return; + } + + /* we don't poll EQ. Just poll link status if it's not active */ + if ( ! 
netdev_link_ok ( netdev ) ) { + status = nodnic_port_get_state ( &port->port_priv, &state ); + MLX_FATAL_CHECK_STATUS(status, state_err, "nodnic_port_get_state failed"); + + if ( state == nodnic_port_state_active ) { + DBG( "%s: port %d physical link is up\n", __FUNCTION__, + port->ibdev->port ); + port->type->state_change ( flexboot_nodnic, port, 1 ); + } + } +state_err: + return; +} + +/*************************************************************************** + * + * Multicast group operations + * + *************************************************************************** + */ +static int flexboot_nodnic_mcast_attach ( struct ib_device *ibdev, + struct ib_queue_pair *qp, + union ib_gid *gid) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + mlx_mac_address mac; + mlx_status status = MLX_SUCCESS; + + switch (qp->type) { + case IB_QPT_ETH: + memcpy(&mac, gid, sizeof(mac)); + status = nodnic_port_add_mac_filter(&port->port_priv, mac); + MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err, + "nodnic_port_add_mac_filter failed"); + break; + default: + break; + } +mac_err: + return status; +} +static void flexboot_nodnic_mcast_detach ( struct ib_device *ibdev, + struct ib_queue_pair *qp, + union ib_gid *gid ) { + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + mlx_mac_address mac; + mlx_status status = MLX_SUCCESS; + + switch (qp->type) { + case IB_QPT_ETH: + memcpy(&mac, gid, sizeof(mac)); + status = nodnic_port_remove_mac_filter(&port->port_priv, mac); + MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err, + "nodnic_port_remove_mac_filter failed"); + break; + default: + break; + } +mac_err: + return; +} +/*************************************************************************** + * + * Infiniband link-layer operations + * + 
*************************************************************************** + */ + +/** + * Initialise Infiniband link + * + * @v ibdev Infiniband device + * @ret rc Return status code + */ +static int flexboot_nodnic_ib_open ( struct ib_device *ibdev __unused) { + int rc = 0; + + /*TODO: add implementation*/ + return rc; +} + +/** + * Close Infiniband link + * + * @v ibdev Infiniband device + */ +static void flexboot_nodnic_ib_close ( struct ib_device *ibdev __unused) { + /*TODO: add implementation*/ +} + +/** + * Inform embedded subnet management agent of a received MAD + * + * @v ibdev Infiniband device + * @v mad MAD + * @ret rc Return status code + */ +static int flexboot_nodnic_inform_sma ( struct ib_device *ibdev __unused, + union ib_mad *mad __unused) { + /*TODO: add implementation*/ + return 0; +} + +/** flexboot_nodnic Infiniband operations */ +static struct ib_device_operations flexboot_nodnic_ib_operations = { + .create_cq = flexboot_nodnic_create_cq, + .destroy_cq = flexboot_nodnic_destroy_cq, + .create_qp = flexboot_nodnic_create_qp, + .modify_qp = flexboot_nodnic_modify_qp, + .destroy_qp = flexboot_nodnic_destroy_qp, + .post_send = flexboot_nodnic_post_send, + .post_recv = flexboot_nodnic_post_recv, + .poll_cq = flexboot_nodnic_poll_cq, + .poll_eq = flexboot_nodnic_poll_eq, + .open = flexboot_nodnic_ib_open, + .close = flexboot_nodnic_ib_close, + .mcast_attach = flexboot_nodnic_mcast_attach, + .mcast_detach = flexboot_nodnic_mcast_detach, + .set_port_info = flexboot_nodnic_inform_sma, + .set_pkey_table = flexboot_nodnic_inform_sma, +}; +/*************************************************************************** + * + * + * + *************************************************************************** + */ + +#define FLEX_NODNIC_TX_POLL_TOUT 500000 +#define FLEX_NODNIC_TX_POLL_USLEEP 10 + +static void flexboot_nodnic_complete_all_tx ( struct flexboot_nodnic_port *port ) { + struct ib_device *ibdev = port->ibdev; + struct ib_completion_queue *cq; + 
struct ib_work_queue *wq; + int keep_polling = 0; + int timeout = FLEX_NODNIC_TX_POLL_TOUT; + + list_for_each_entry ( cq, &ibdev->cqs, list ) { + do { + ib_poll_cq ( ibdev, cq ); + keep_polling = 0; + list_for_each_entry ( wq, &cq->work_queues, list ) { + if ( wq->is_send ) + keep_polling += ( wq->fill > 0 ); + } + udelay ( FLEX_NODNIC_TX_POLL_USLEEP ); + } while ( keep_polling && ( timeout-- > 0 ) ); + } +} + +static void flexboot_nodnic_port_disable_dma ( struct flexboot_nodnic_port *port ) { + nodnic_port_priv *port_priv = & ( port->port_priv ); + mlx_status status; + + if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) ) + return; + + port_priv->port_state |= NODNIC_PORT_DISABLING_DMA; + flexboot_nodnic_complete_all_tx ( port ); + if ( ( status = nodnic_port_disable_dma ( port_priv ) ) ) { + MLX_DEBUG_WARN ( port, "Failed to disable DMA %d\n", status ); + } + + port_priv->port_state &= ~NODNIC_PORT_DISABLING_DMA; +} + +/*************************************************************************** + * + * Ethernet operation + * + *************************************************************************** + */ + +/** Number of flexboot_nodnic Ethernet send work queue entries */ +#define FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES 64 + +/** Number of flexboot_nodnic Ethernet receive work queue entries */ +#define FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES 64 +/** flexboot nodnic Ethernet queue pair operations */ +static struct ib_queue_pair_operations flexboot_nodnic_eth_qp_op = { + .alloc_iob = alloc_iob, +}; + +/** + * Transmit packet via flexboot_nodnic Ethernet device + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int flexboot_nodnic_eth_transmit ( struct net_device *netdev, + struct io_buffer *iobuf) { + struct flexboot_nodnic_port *port = netdev->priv; + struct ib_device *ibdev = port->ibdev; + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + int rc; + + rc = ib_post_send ( ibdev, port->eth_qp, NULL, 
iobuf); + /* Transmit packet */ + if ( rc != 0) { + DBGC ( flexboot_nodnic, "NODNIC %p port %d could not transmit: %s\n", + flexboot_nodnic, ibdev->port, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Handle flexboot_nodnic Ethernet device send completion + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void flexboot_nodnic_eth_complete_send ( struct ib_device *ibdev __unused, + struct ib_queue_pair *qp, + struct io_buffer *iobuf, + int rc) { + struct net_device *netdev = ib_qp_get_ownerdata ( qp ); + + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** + * Handle flexboot_nodnic Ethernet device receive completion + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v av Address vector, or NULL + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void flexboot_nodnic_eth_complete_recv ( struct ib_device *ibdev __unused, + struct ib_queue_pair *qp, + struct ib_address_vector *dest __unused, + struct ib_address_vector *source, + struct io_buffer *iobuf, + int rc) { + struct net_device *netdev = ib_qp_get_ownerdata ( qp ); + + if ( rc != 0 ) { + DBG ( "Received packet with error\n" ); + netdev_rx_err ( netdev, iobuf, rc ); + return; + } + + if ( source == NULL ) { + DBG ( "Received packet without address vector\n" ); + netdev_rx_err ( netdev, iobuf, -ENOTTY ); + return; + } + + netdev_rx ( netdev, iobuf ); +} + +/** flexboot_nodnic Ethernet device completion operations */ +static struct ib_completion_queue_operations flexboot_nodnic_eth_cq_op = { + .complete_send = flexboot_nodnic_eth_complete_send, + .complete_recv = flexboot_nodnic_eth_complete_recv, +}; + +/** + * Poll flexboot_nodnic Ethernet device + * + * @v netdev Network device + */ +static void flexboot_nodnic_eth_poll ( struct net_device *netdev) { + struct flexboot_nodnic_port *port = netdev->priv; + struct ib_device *ibdev = port->ibdev; + + ib_poll_eq ( ibdev ); +} + +/** + * Open 
flexboot_nodnic Ethernet device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int flexboot_nodnic_eth_open ( struct net_device *netdev ) { + struct flexboot_nodnic_port *port = netdev->priv; + struct ib_device *ibdev = port->ibdev; + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + mlx_status status = MLX_SUCCESS; + struct ib_completion_queue *dummy_cq = NULL; + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = NULL; + mlx_uint64 cq_size = 0; + mlx_uint32 qpn = 0; + nodnic_port_state state = nodnic_port_state_down; + int rc; + + if ( port->port_priv.port_state & NODNIC_PORT_OPENED ) { + DBGC ( flexboot_nodnic, "%s: port %d is already opened\n", + __FUNCTION__, port->ibdev->port ); + return 0; + } + + port->port_priv.port_state |= NODNIC_PORT_OPENED; + + dummy_cq = zalloc ( sizeof ( struct ib_completion_queue ) ); + if ( dummy_cq == NULL ) { + DBGC ( flexboot_nodnic, "%s: Failed to allocate dummy CQ\n", __FUNCTION__ ); + status = MLX_OUT_OF_RESOURCES; + goto err_create_dummy_cq; + } + INIT_LIST_HEAD ( &dummy_cq->work_queues ); + + if ( ( rc = ib_create_qp ( ibdev, IB_QPT_ETH, + FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES, dummy_cq, + FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES, dummy_cq, + &flexboot_nodnic_eth_qp_op, netdev->name, + &port->eth_qp ) ) != 0 ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not create queue pair\n", + flexboot_nodnic, ibdev->port ); + status = MLX_OUT_OF_RESOURCES; + goto err_create_qp; + } + + ib_qp_set_ownerdata ( port->eth_qp, netdev ); + + status = nodnic_port_get_cq_size(&port->port_priv, &cq_size); + MLX_FATAL_CHECK_STATUS(status, get_cq_size_err, + "nodnic_port_get_cq_size failed"); + + if ( ( rc = ib_create_cq ( ibdev, cq_size, &flexboot_nodnic_eth_cq_op, + &port->eth_cq ) ) != 0 ) { + DBGC ( flexboot_nodnic, + "flexboot_nodnic %p port %d could not create completion queue\n", + flexboot_nodnic, ibdev->port ); + status = MLX_OUT_OF_RESOURCES; + goto err_create_cq; + } + 
port->eth_qp->send.cq = port->eth_cq; + list_del(&port->eth_qp->send.list); + list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues ); + port->eth_qp->recv.cq = port->eth_cq; + port->cmdsn = 0; + list_del(&port->eth_qp->recv.list); + list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues ); + + status = nodnic_port_allocate_eq(&port->port_priv, + flexboot_nodnic->device_priv.device_cap.log_working_buffer_size); + MLX_FATAL_CHECK_STATUS(status, eq_alloc_err, + "nodnic_port_allocate_eq failed"); + + status = nodnic_port_init(&port->port_priv); + MLX_FATAL_CHECK_STATUS(status, init_err, + "nodnic_port_init failed"); + + /* update qp - qpn */ + flexboot_nodnic_qp = ib_qp_get_drvdata ( port->eth_qp ); + status = nodnic_port_get_qpn(&port->port_priv, + &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring, + &qpn); + MLX_FATAL_CHECK_STATUS(status, qpn_err, + "nodnic_port_get_qpn failed"); + port->eth_qp->qpn = qpn; + + /* Fill receive rings */ + ib_refill_recv ( ibdev, port->eth_qp ); + + status = nodnic_port_enable_dma(&port->port_priv); + MLX_FATAL_CHECK_STATUS(status, dma_err, + "nodnic_port_enable_dma failed"); + + if (flexboot_nodnic->device_priv.device_cap.support_promisc_filter) { + status = nodnic_port_set_promisc(&port->port_priv, TRUE); + MLX_FATAL_CHECK_STATUS(status, promisc_err, + "nodnic_port_set_promisc failed"); + } + + status = nodnic_port_get_state(&port->port_priv, &state); + MLX_FATAL_CHECK_STATUS(status, state_err, + "nodnic_port_get_state failed"); + + port->type->state_change ( + flexboot_nodnic, port, state == nodnic_port_state_active ); + + DBGC ( flexboot_nodnic, "%s: port %d opened (link is %s)\n", + __FUNCTION__, port->ibdev->port, + ( ( state == nodnic_port_state_active ) ? 
"Up" : "Down" ) ); + + free(dummy_cq); + return 0; +state_err: +promisc_err: +dma_err: +qpn_err: + nodnic_port_close(&port->port_priv); +init_err: + nodnic_port_free_eq(&port->port_priv); +eq_alloc_err: +err_create_cq: +get_cq_size_err: + ib_destroy_qp(ibdev, port->eth_qp ); +err_create_qp: + free(dummy_cq); +err_create_dummy_cq: + port->port_priv.port_state &= ~NODNIC_PORT_OPENED; + return status; +} + +/** + * Close flexboot_nodnic Ethernet device + * + * @v netdev Network device + */ +static void flexboot_nodnic_eth_close ( struct net_device *netdev) { + struct flexboot_nodnic_port *port = netdev->priv; + struct ib_device *ibdev = port->ibdev; + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + mlx_status status = MLX_SUCCESS; + + if ( ! ( port->port_priv.port_state & NODNIC_PORT_OPENED ) ) { + DBGC ( flexboot_nodnic, "%s: port %d is already closed\n", + __FUNCTION__, port->ibdev->port ); + return; + } + + if (flexboot_nodnic->device_priv.device_cap.support_promisc_filter) { + if ( ( status = nodnic_port_set_promisc( &port->port_priv, FALSE ) ) ) { + DBGC ( flexboot_nodnic, + "nodnic_port_set_promisc failed (status = %d)\n", status ); + } + } + + flexboot_nodnic_port_disable_dma ( port ); + + port->port_priv.port_state &= ~NODNIC_PORT_OPENED; + + port->type->state_change ( flexboot_nodnic, port, FALSE ); + + /* Close port */ + status = nodnic_port_close(&port->port_priv); + if ( status != MLX_SUCCESS ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not close port: %s\n", + flexboot_nodnic, ibdev->port, strerror ( status ) ); + /* Nothing we can do about this */ + } + + ib_destroy_qp ( ibdev, port->eth_qp ); + port->eth_qp = NULL; + ib_destroy_cq ( ibdev, port->eth_cq ); + port->eth_cq = NULL; + + nodnic_port_free_eq(&port->port_priv); + + DBGC ( flexboot_nodnic, "%s: port %d closed\n", __FUNCTION__, port->ibdev->port ); +} + +void flexboot_nodnic_eth_irq ( struct net_device *netdev, int enable ) { + struct flexboot_nodnic_port 
*port = netdev->priv; + + if ( enable ) { + if ( ( port->port_priv.port_state & NODNIC_PORT_OPENED ) && + ! ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) { + flexboot_nodnic_arm_cq ( port ); + } else { + /* do nothing */ + } + } else { + nodnic_device_clear_int( port->port_priv.device ); + } +} + +/** flexboot_nodnic Ethernet network device operations */ +static struct net_device_operations flexboot_nodnic_eth_operations = { + .open = flexboot_nodnic_eth_open, + .close = flexboot_nodnic_eth_close, + .transmit = flexboot_nodnic_eth_transmit, + .poll = flexboot_nodnic_eth_poll, +}; + +/** + * Register flexboot_nodnic Ethernet device + */ +static int flexboot_nodnic_register_netdev ( struct flexboot_nodnic *flexboot_nodnic, + struct flexboot_nodnic_port *port) { + mlx_status status = MLX_SUCCESS; + struct net_device *netdev; + struct ib_device *ibdev = port->ibdev; + union { + uint8_t bytes[8]; + uint32_t dwords[2]; + } mac; + + /* Allocate network devices */ + netdev = alloc_etherdev ( 0 ); + if ( netdev == NULL ) { + DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not allocate net device\n", + flexboot_nodnic, ibdev->port ); + status = MLX_OUT_OF_RESOURCES; + goto alloc_err; + } + port->netdev = netdev; + netdev_init ( netdev, &flexboot_nodnic_eth_operations ); + netdev->dev = ibdev->dev; + netdev->priv = port; + + status = nodnic_port_query(&port->port_priv, + nodnic_port_option_mac_high, + &mac.dwords[0]); + MLX_FATAL_CHECK_STATUS(status, mac_err, + "failed to query mac high"); + status = nodnic_port_query(&port->port_priv, + nodnic_port_option_mac_low, + &mac.dwords[1]); + MLX_FATAL_CHECK_STATUS(status, mac_err, + "failed to query mac low"); + mac.dwords[0] = htonl(mac.dwords[0]); + mac.dwords[1] = htonl(mac.dwords[1]); + memcpy ( netdev->hw_addr, + &mac.bytes[2], ETH_ALEN); + /* Register network device */ + status = register_netdev ( netdev ); + if ( status != MLX_SUCCESS ) { + DBGC ( flexboot_nodnic, + "flexboot_nodnic %p port %d could 
not register network device: %s\n", + flexboot_nodnic, ibdev->port, strerror ( status ) ); + goto reg_err; + } + return status; +reg_err: +mac_err: + netdev_put ( netdev ); +alloc_err: + return status; +} + +/** + * Handle flexboot_nodnic Ethernet device port state change + */ +static void flexboot_nodnic_state_change_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused, + struct flexboot_nodnic_port *port, + int link_up ) { + struct net_device *netdev = port->netdev; + + if ( link_up ) + netdev_link_up ( netdev ); + else + netdev_link_down ( netdev ); + +} + +/** + * Unregister flexboot_nodnic Ethernet device + */ +static void flexboot_nodnic_unregister_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused, + struct flexboot_nodnic_port *port ) { + struct net_device *netdev = port->netdev; + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** flexboot_nodnic Ethernet port type */ +static struct flexboot_nodnic_port_type flexboot_nodnic_port_type_eth = { + .register_dev = flexboot_nodnic_register_netdev, + .state_change = flexboot_nodnic_state_change_netdev, + .unregister_dev = flexboot_nodnic_unregister_netdev, +}; + +/*************************************************************************** + * + * PCI interface helper functions + * + *************************************************************************** + */ +static +mlx_status +flexboot_nodnic_allocate_infiniband_devices( struct flexboot_nodnic *flexboot_nodnic_priv ) { + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv; + struct pci_device *pci = flexboot_nodnic_priv->pci; + struct ib_device *ibdev = NULL; + unsigned int i = 0; + + /* Allocate Infiniband devices */ + for (; i < device_priv->device_cap.num_ports; i++) { + if ( ! 
( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) ) + continue; + ibdev = alloc_ibdev(0); + if (ibdev == NULL) { + status = MLX_OUT_OF_RESOURCES; + goto err_alloc_ibdev; + } + flexboot_nodnic_priv->port[i].ibdev = ibdev; + ibdev->op = &flexboot_nodnic_ib_operations; + ibdev->dev = &pci->dev; + ibdev->port = ( FLEXBOOT_NODNIC_PORT_BASE + i); + ib_set_drvdata(ibdev, flexboot_nodnic_priv); + } + return status; +err_alloc_ibdev: + for ( i-- ; ( signed int ) i >= 0 ; i-- ) + ibdev_put ( flexboot_nodnic_priv->port[i].ibdev ); + return status; +} + +static +mlx_status +flexboot_nodnic_thin_init_ports( struct flexboot_nodnic *flexboot_nodnic_priv ) { + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv; + nodnic_port_priv *port_priv = NULL; + unsigned int i = 0; + + for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) { + if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) ) + continue; + port_priv = &flexboot_nodnic_priv->port[i].port_priv; + status = nodnic_port_thin_init( device_priv, port_priv, i ); + MLX_FATAL_CHECK_STATUS(status, thin_init_err, + "flexboot_nodnic_thin_init_ports failed"); + } +thin_init_err: + return status; +} + + +static +mlx_status +flexboot_nodnic_set_ports_type ( struct flexboot_nodnic *flexboot_nodnic_priv ) { + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv; + nodnic_port_priv *port_priv = NULL; + nodnic_port_type type = NODNIC_PORT_TYPE_UNKNOWN; + unsigned int i = 0; + + for ( i = 0 ; i < device_priv->device_cap.num_ports ; i++ ) { + if ( ! 
( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) ) + continue; + port_priv = &flexboot_nodnic_priv->port[i].port_priv; + status = nodnic_port_get_type(port_priv, &type); + MLX_FATAL_CHECK_STATUS(status, type_err, + "nodnic_port_get_type failed"); + switch ( type ) { + case NODNIC_PORT_TYPE_ETH: + DBGC ( flexboot_nodnic_priv, "Port %d type is Ethernet\n", i ); + flexboot_nodnic_priv->port[i].type = &flexboot_nodnic_port_type_eth; + break; + case NODNIC_PORT_TYPE_IB: + DBGC ( flexboot_nodnic_priv, "Port %d type is Infiniband\n", i ); + status = MLX_UNSUPPORTED; + goto type_err; + default: + DBGC ( flexboot_nodnic_priv, "Port %d type is unknown\n", i ); + status = MLX_UNSUPPORTED; + goto type_err; + } + } +type_err: + return status; +} + +static +mlx_status +flexboot_nodnic_ports_register_dev( struct flexboot_nodnic *flexboot_nodnic_priv ) { + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv; + struct flexboot_nodnic_port *port = NULL; + unsigned int i = 0; + + for (; i < device_priv->device_cap.num_ports; i++) { + if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) ) + continue; + port = &flexboot_nodnic_priv->port[i]; + status = port->type->register_dev ( flexboot_nodnic_priv, port ); + MLX_FATAL_CHECK_STATUS(status, reg_err, + "port register_dev failed"); + } +reg_err: + return status; +} + +static +mlx_status +flexboot_nodnic_ports_unregister_dev ( struct flexboot_nodnic *flexboot_nodnic_priv ) { + struct flexboot_nodnic_port *port; + nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv; + int i = (device_priv->device_cap.num_ports - 1); + + for (; i >= 0; i--) { + if ( ! 
( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) ) + continue; + port = &flexboot_nodnic_priv->port[i]; + port->type->unregister_dev(flexboot_nodnic_priv, port); + ibdev_put(flexboot_nodnic_priv->port[i].ibdev); + } + return MLX_SUCCESS; +} + +/*************************************************************************** + * + * flexboot nodnic interface + * + *************************************************************************** + */ +__unused static void flexboot_nodnic_enable_dma ( struct flexboot_nodnic *nodnic ) { + nodnic_port_priv *port_priv; + mlx_status status; + int i; + + for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) { + if ( ! ( nodnic->port_mask & ( i + 1 ) ) ) + continue; + port_priv = & ( nodnic->port[i].port_priv ); + if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) ) + continue; + + if ( ( status = nodnic_port_enable_dma ( port_priv ) ) ) { + MLX_DEBUG_WARN ( nodnic, "Failed to enable DMA %d\n", status ); + } + } +} + +__unused static void flexboot_nodnic_disable_dma ( struct flexboot_nodnic *nodnic ) { + int i; + + for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) { + if ( ! 
( nodnic->port_mask & ( i + 1 ) ) ) + continue; + flexboot_nodnic_port_disable_dma ( & ( nodnic->port[i] ) ); + } +} + +int flexboot_nodnic_is_supported ( struct pci_device *pci ) { + mlx_utils utils; + mlx_pci_gw_buffer buffer; + mlx_status status; + int is_supported = 0; + + DBG ( "%s: start\n", __FUNCTION__ ); + + memset ( &utils, 0, sizeof ( utils ) ); + + status = mlx_utils_init ( &utils, pci ); + MLX_CHECK_STATUS ( pci, status, utils_init_err, "mlx_utils_init failed" ); + + status = mlx_pci_gw_init ( &utils ); + MLX_CHECK_STATUS ( pci, status, pci_gw_init_err, "mlx_pci_gw_init failed" ); + + status = mlx_pci_gw_read ( &utils, PCI_GW_SPACE_NODNIC, + NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET, &buffer ); + + if ( status == MLX_SUCCESS ) { + buffer >>= NODNIC_NIC_INTERFACE_SUPPORTED_BIT; + is_supported = ( buffer & 0x1 ); + } + + mlx_pci_gw_teardown( &utils ); + +pci_gw_init_err: + mlx_utils_teardown(&utils); +utils_init_err: + DBG ( "%s: NODNIC is %s supported (status = %d)\n", + __FUNCTION__, ( is_supported ? "": "not" ), status ); + return is_supported; +} + + +void flexboot_nodnic_copy_mac ( uint8_t mac_addr[], uint32_t low_byte, + uint16_t high_byte ) { + union mac_addr { + struct { + uint32_t low_byte; + uint16_t high_byte; + }; + uint8_t mac_addr[ETH_ALEN]; + } mac_addr_aux; + + mac_addr_aux.high_byte = high_byte; + mac_addr_aux.low_byte = low_byte; + + mac_addr[0] = mac_addr_aux.mac_addr[5]; + mac_addr[1] = mac_addr_aux.mac_addr[4]; + mac_addr[2] = mac_addr_aux.mac_addr[3]; + mac_addr[3] = mac_addr_aux.mac_addr[2]; + mac_addr[4] = mac_addr_aux.mac_addr[1]; + mac_addr[5] = mac_addr_aux.mac_addr[0]; +} + +static mlx_status flexboot_nodnic_get_factory_mac ( + struct flexboot_nodnic *flexboot_nodnic_priv, uint8_t port __unused ) { + struct mlx_vmac_query_virt_mac virt_mac; + mlx_status status; + + memset ( & virt_mac, 0, sizeof ( virt_mac ) ); + status = mlx_vmac_query_virt_mac ( flexboot_nodnic_priv->device_priv.utils, + &virt_mac ); + if ( ! 
status ) { + DBGC ( flexboot_nodnic_priv, "NODNIC %p Failed to set the virtual MAC\n" + ,flexboot_nodnic_priv ); + } + + return status; +} + + +/** + * Set port masking + * + * @v flexboot_nodnic nodnic device + * @ret rc Return status code + */ +static int flexboot_nodnic_set_port_masking ( struct flexboot_nodnic *flexboot_nodnic ) { + unsigned int i; + nodnic_device_priv *device_priv = &flexboot_nodnic->device_priv; + + flexboot_nodnic->port_mask = 0; + for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) { + flexboot_nodnic->port_mask |= (i + 1); + } + + if ( ! flexboot_nodnic->port_mask ) { + /* No port was enabled */ + DBGC ( flexboot_nodnic, "NODNIC %p No port was enabled for " + "booting\n", flexboot_nodnic ); + return -ENETUNREACH; + } + + return 0; +} + +int init_mlx_utils ( mlx_utils **utils, struct pci_device *pci ) { + int rc = 0; + + *utils = ( mlx_utils * ) zalloc ( sizeof ( mlx_utils ) ); + if ( *utils == NULL ) { + DBGC ( utils, "%s: Failed to allocate utils\n", __FUNCTION__ ); + rc = -1; + goto err_utils_alloc; + } + if ( mlx_utils_init ( *utils, pci ) ) { + DBGC ( utils, "%s: mlx_utils_init failed\n", __FUNCTION__ ); + rc = -1; + goto err_utils_init; + } + if ( mlx_pci_gw_init ( *utils ) ){ + DBGC ( utils, "%s: mlx_pci_gw_init failed\n", __FUNCTION__ ); + rc = -1; + goto err_cmd_init; + } + + return 0; + + mlx_pci_gw_teardown ( *utils ); +err_cmd_init: + mlx_utils_teardown ( *utils ); +err_utils_init: + free ( *utils ); +err_utils_alloc: + *utils = NULL; + + return rc; +} + +void free_mlx_utils ( mlx_utils **utils ) { + + mlx_pci_gw_teardown ( *utils ); + mlx_utils_teardown ( *utils ); + free ( *utils ); + *utils = NULL; +} + +/** + * Initialise Nodnic PCI parameters + * + * @v hermon Nodnic device + */ +static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) { + mlx_status status = MLX_SUCCESS; + struct pci_device *pci = flexboot_nodnic->pci; + nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar; + + if 
( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) { + DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported \n", __FUNCTION__ ); + return -ENOTSUP; + } + /* read uar offset then allocate */ + if ( ( status = nodnic_port_set_send_uar_offset ( &flexboot_nodnic->port[0].port_priv ) ) ) { + DBGC ( flexboot_nodnic, "%s: nodnic_port_set_send_uar_offset failed," + "status = %d\n", __FUNCTION__, status ); + return -EINVAL; + } + uar->phys = ( pci_bar_start ( pci, FLEXBOOT_NODNIC_HCA_BAR ) + (mlx_uint32)uar->offset ); + uar->virt = ( void * )( pci_ioremap ( pci, uar->phys, FLEXBOOT_NODNIC_PAGE_SIZE ) ); + + return status; +} + +static int flexboot_nodnic_dealloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) { + nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar; + + if ( uar->virt ) { + iounmap( uar->virt ); + uar->virt = NULL; + } + + return MLX_SUCCESS; +} + + +int flexboot_nodnic_probe ( struct pci_device *pci, + struct flexboot_nodnic_callbacks *callbacks, + void *drv_priv __unused ) { + mlx_status status = MLX_SUCCESS; + struct flexboot_nodnic *flexboot_nodnic_priv = NULL; + nodnic_device_priv *device_priv = NULL; + int i = 0; + + if ( ( pci == NULL ) || ( callbacks == NULL ) ) { + DBGC ( flexboot_nodnic_priv, "%s: Bad Parameter\n", __FUNCTION__ ); + return -EINVAL; + } + + flexboot_nodnic_priv = zalloc( sizeof ( *flexboot_nodnic_priv ) ); + if ( flexboot_nodnic_priv == NULL ) { + DBGC ( flexboot_nodnic_priv, "%s: Failed to allocate priv data\n", __FUNCTION__ ); + status = MLX_OUT_OF_RESOURCES; + goto device_err_alloc; + } + + /* Register settings + * Note that pci->priv will be the device private data */ + flexboot_nodnic_priv->pci = pci; + flexboot_nodnic_priv->callbacks = callbacks; + pci_set_drvdata ( pci, flexboot_nodnic_priv ); + + device_priv = &flexboot_nodnic_priv->device_priv; + /* init mlx utils */ + status = init_mlx_utils ( & device_priv->utils, pci ); + MLX_FATAL_CHECK_STATUS(status, err_utils_init, + "init_mlx_utils 
failed"); + + /* init device */ + status = nodnic_device_init( device_priv ); + MLX_FATAL_CHECK_STATUS(status, device_init_err, + "nodnic_device_init failed"); + + status = nodnic_device_get_cap( device_priv ); + MLX_FATAL_CHECK_STATUS(status, get_cap_err, + "nodnic_device_get_cap failed"); + + if ( mlx_set_admin_mtu ( device_priv->utils, 1, EN_DEFAULT_ADMIN_MTU ) ) { + MLX_DEBUG_ERROR( device_priv->utils, "Failed to set admin mtu\n" ); + } + + status = flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv ); + MLX_FATAL_CHECK_STATUS(status, err_set_masking, + "flexboot_nodnic_set_port_masking failed"); + + status = flexboot_nodnic_allocate_infiniband_devices( flexboot_nodnic_priv ); + MLX_FATAL_CHECK_STATUS(status, err_alloc_ibdev, + "flexboot_nodnic_allocate_infiniband_devices failed"); + + /* port init */ + status = flexboot_nodnic_thin_init_ports( flexboot_nodnic_priv ); + MLX_FATAL_CHECK_STATUS(status, err_thin_init_ports, + "flexboot_nodnic_thin_init_ports failed"); + + if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) { + DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_alloc_uar failed" + " ( status = %d )\n",__FUNCTION__, status ); + } + + /* device reg */ + status = flexboot_nodnic_set_ports_type( flexboot_nodnic_priv ); + MLX_CHECK_STATUS( flexboot_nodnic_priv, status, err_set_ports_types, + "flexboot_nodnic_set_ports_type failed"); + + status = flexboot_nodnic_ports_register_dev( flexboot_nodnic_priv ); + MLX_FATAL_CHECK_STATUS(status, reg_err, + "flexboot_nodnic_ports_register_dev failed"); + + for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) { + if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) ) + continue; + flexboot_nodnic_get_factory_mac ( flexboot_nodnic_priv, i ); + } + + /* Update ETH operations with IRQ function if supported */ + DBGC ( flexboot_nodnic_priv, "%s: %s IRQ function\n", + __FUNCTION__, ( callbacks->irq ? 
"Valid" : "No" ) ); + flexboot_nodnic_eth_operations.irq = callbacks->irq; + return 0; + + flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv ); +reg_err: +err_set_ports_types: + flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv ); +err_thin_init_ports: +err_alloc_ibdev: +err_set_masking: +get_cap_err: + nodnic_device_teardown ( device_priv ); +device_init_err: + free_mlx_utils ( & device_priv->utils ); +err_utils_init: + free ( flexboot_nodnic_priv ); +device_err_alloc: + return status; +} + +void flexboot_nodnic_remove ( struct pci_device *pci ) +{ + struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci ); + nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv ); + + flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv ); + flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv ); + nodnic_device_teardown( device_priv ); + free_mlx_utils ( & device_priv->utils ); + free( flexboot_nodnic_priv ); +} diff --git a/src/drivers/infiniband/flexboot_nodnic.h b/src/drivers/infiniband/flexboot_nodnic.h new file mode 100644 index 00000000..84a6768c --- /dev/null +++ b/src/drivers/infiniband/flexboot_nodnic.h @@ -0,0 +1,190 @@ +#ifndef SRC_DRIVERS_INFINIBAND_FLEXBOOT_NODNIC_FLEXBOOT_NODNIC_H_ +#define SRC_DRIVERS_INFINIBAND_FLEXBOOT_NODNIC_FLEXBOOT_NODNIC_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_nodnic/include/mlx_nodnic_data_structures.h" +#include "nodnic_prm.h" +#include +#include +#include +#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h" + +/* + * If defined, use interrupts in NODNIC driver + */ +#define NODNIC_IRQ_ENABLED + +#define FLEXBOOT_NODNIC_MAX_PORTS 2 +#define FLEXBOOT_NODNIC_PORT_BASE 1 + +#define FLEXBOOT_NODNIC_OPCODE_SEND 0xa +#define FLEXBOOT_NODNIC_HCA_BAR PCI_BASE_ADDRESS_0 //BAR 0 +#define FLEXBOOT_NODNIC_PAGE_SHIFT 12 +#define FLEXBOOT_NODNIC_PAGE_SIZE (1 << FLEXBOOT_NODNIC_PAGE_SHIFT) +#define FLEXBOOT_NODNIC_PAGE_MASK (FLEXBOOT_NODNIC_PAGE_SIZE - 1) +#define EN_DEFAULT_ADMIN_MTU 1522 + +/* Port protocol */ +enum flexboot_nodnic_protocol { + FLEXBOOT_NODNIC_PROT_IB_IPV6 = 0, + FLEXBOOT_NODNIC_PROT_ETH, + FLEXBOOT_NODNIC_PROT_IB_IPV4, + FLEXBOOT_NODNIC_PROT_FCOE +}; + +/** A flexboot nodnic port */ +struct flexboot_nodnic_port { + /** Infiniband device */ + struct ib_device *ibdev; + /** Network device */ + struct net_device *netdev; + /** nodic port */ + nodnic_port_priv port_priv; + /** Port type */ + struct flexboot_nodnic_port_type *type; + /** Ethernet completion queue */ + struct ib_completion_queue *eth_cq; + /** Ethernet queue pair */ + struct ib_queue_pair *eth_qp; + mlx_uint8 cmdsn; +}; + + +/** A flexboot nodnic queue pair */ +struct flexboot_nodnic_queue_pair { + nodnic_qp *nodnic_queue_pair; +}; + +/** A flexboot nodnic cq */ +struct flexboot_nodnic_completion_queue { + nodnic_cq *nodnic_completion_queue; +}; + +/** A flexboot_nodnic device */ +struct flexboot_nodnic { + /** PCI device */ + struct pci_device *pci; + /** nic specific data*/ + struct flexboot_nodnic_callbacks *callbacks; + /**nodnic device*/ + nodnic_device_priv 
device_priv; + /**flexboot_nodnic ports*/ + struct flexboot_nodnic_port port[FLEXBOOT_NODNIC_MAX_PORTS]; + /** Device open request counter */ + unsigned int open_count; + /** Port masking */ + u16 port_mask; + /** device private data */ + void *priv_data; +}; + +/** A flexboot_nodnic port type */ +struct flexboot_nodnic_port_type { + /** Register port + * + * @v flexboot_nodnic flexboot_nodnic device + * @v port flexboot_nodnic port + * @ret mlx_status Return status code + */ + mlx_status ( * register_dev ) ( + struct flexboot_nodnic *flexboot_nodnic, + struct flexboot_nodnic_port *port + ); + /** Port state changed + * + * @v flexboot_nodnic flexboot_nodnic device + * @v port flexboot_nodnic port + * @v link_up Link is up + */ + void ( * state_change ) ( + struct flexboot_nodnic *flexboot_nodnic, + struct flexboot_nodnic_port *port, + int link_up + ); + /** Unregister port + * + * @v flexboot_nodnic flexboot_nodnic device + * @v port flexboot_nodnic port + */ + void ( * unregister_dev ) ( + struct flexboot_nodnic *flexboot_nodnic, + struct flexboot_nodnic_port *port + ); +}; + +struct cqe_data{ + mlx_boolean owner; + mlx_uint32 qpn; + mlx_uint32 is_send; + mlx_uint32 is_error; + mlx_uint32 syndrome; + mlx_uint32 vendor_err_syndrome; + mlx_uint32 wqe_counter; + mlx_uint32 byte_cnt; +}; + +union arm_cq_uar { + struct { + //big endian + mlx_uint32 reserved0 :2; + mlx_uint32 cmdn :2; + mlx_uint32 reserved1 :3; + mlx_uint32 cmd :1; + mlx_uint32 cq_ci :24; + mlx_uint32 reserved2 :8; + mlx_uint32 cq_n :24; + }; + mlx_uint32 dword[2]; + mlx_uint64 qword; +}; + +struct flexboot_nodnic_callbacks { + mlx_status ( * fill_completion ) ( void *cqe, struct cqe_data *cqe_data ); + mlx_status ( * cqe_set_owner ) ( void *cq, unsigned int num_cqes ); + mlx_size ( * get_cqe_size ) (); + mlx_status ( * fill_send_wqe[5] ) ( + struct ib_device *ibdev, + struct ib_queue_pair *qp, + struct ib_address_vector *av, + struct io_buffer *iobuf, + struct nodnic_send_wqbb *wqbb, + unsigned long 
wqe_idx + ); + void ( * irq ) ( struct net_device *netdev, int enable ); + mlx_status ( * tx_uar_send_doorbell_fn ) ( + struct ib_device *ibdev, + struct nodnic_send_wqbb *wqbb + ); +}; + +int flexboot_nodnic_probe ( struct pci_device *pci, + struct flexboot_nodnic_callbacks *callbacks, + void *drv_priv ); +void flexboot_nodnic_remove ( struct pci_device *pci ); +void flexboot_nodnic_eth_irq ( struct net_device *netdev, int enable ); +int flexboot_nodnic_is_supported ( struct pci_device *pci ); +void flexboot_nodnic_copy_mac ( uint8_t mac_addr[], uint32_t low_byte, + uint16_t high_byte ); +int init_mlx_utils ( mlx_utils **utils, struct pci_device *pci ); +void free_mlx_utils ( mlx_utils **utils ); +#endif /* SRC_DRIVERS_INFINIBAND_FLEXBOOT_NODNIC_FLEXBOOT_NODNIC_H_ */ diff --git a/src/drivers/infiniband/golan.c b/src/drivers/infiniband/golan.c new file mode 100755 index 00000000..7ab4a4ee --- /dev/null +++ b/src/drivers/infiniband/golan.c @@ -0,0 +1,2653 @@ +/* + * Copyright (C) 2013-2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include "flexboot_nodnic.h" +#include +#include +#include +#include +#include +#include "mlx_utils/include/public/mlx_pci_gw.h" +#include +#include +#include "mlx_nodnic/include/mlx_port.h" +#include "nodnic_shomron_prm.h" +#include "golan.h" +#include "mlx_utils/include/public/mlx_bail.h" +#include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h" + + +#define DEVICE_IS_CIB( device ) ( device == 0x1011 ) + +/******************************************************************************/ +/************* Very simple memory management for umalloced pages **************/ +/******* Temporary solution until full memory management is implemented *******/ +/******************************************************************************/ + +struct golan_page { + struct list_head list; + userptr_t addr; +}; + +static void golan_free_fw_areas ( struct golan *golan ) { + int i; + + for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) { + if ( golan->fw_areas[i].area ) { + ufree ( golan->fw_areas[i].area ); + golan->fw_areas[i].area = UNULL; + } + } +} + +static int golan_init_fw_areas ( struct golan *golan ) { + int rc = 0, i = 0; + + if ( ! 
golan ) { + rc = -EINVAL; + goto err_golan_init_fw_areas_bad_param; + } + + for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) + golan->fw_areas[i].area = UNULL; + + return rc; + + err_golan_init_fw_areas_bad_param: + return rc; +} + +/******************************************************************************/ + +const char *golan_qp_state_as_string[] = { + "RESET", + "INIT", + "RTR", + "RTS", + "SQD", + "SQE", + "ERR" +}; + +static inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) { + struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out ); + if ( rc == -EBUSY ) { + DBG ( "HCA is busy (rc = -EBUSY)\n" ); + return rc; + } else if ( out_hdr->status ) { + DBG("%s status = 0x%x - syndrom = 0x%x\n", __FUNCTION__, + out_hdr->status, be32_to_cpu(out_hdr->syndrome)); + return out_hdr->status; + } + return 0; +} + +#define GOLAN_CHECK_RC_AND_CMD_STATUS(_lable) \ + do { \ + if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) ) \ + goto _lable; \ + } while (0) + +#define GOLAN_PRINT_RC_AND_CMD_STATUS golan_check_rc_and_cmd_status ( cmd, rc ) + + +struct mbox { + union { + struct golan_cmd_prot_block mblock; + u8 data[MAILBOX_STRIDE]; + __be64 qdata[MAILBOX_STRIDE >> 3]; + }; +}; + +static inline uint32_t ilog2(uint32_t mem) +{ + return ( fls ( mem ) - 1 ); +} + +#define CTRL_SIG_SZ (sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2) + +static inline u8 xor8_buf(void *buf, int len) +{ + u8 sum = 0; + int i; + u8 *ptr = buf; + + for (i = 0; i < len; ++i) + sum ^= ptr[i]; + + return sum; +} + +static inline const char *cmd_status_str(u8 status) +{ + switch (status) { + case 0x0: return "OK"; + case 0x1: return "internal error"; + case 0x2: return "bad operation"; + case 0x3: return "bad parameter"; + case 0x4: return "bad system state"; + case 0x5: return "bad resource"; + case 0x6: return "resource busy"; + case 0x8: return "limits exceeded"; + case 0x9: return "bad resource state"; + case 0xa: return "bad index"; + 
case 0xf: return "no resources"; + case 0x50: return "bad input length"; + case 0x51: return "bad output length"; + case 0x10: return "bad QP state"; + case 0x30: return "bad packet (discarded)"; + case 0x40: return "bad size too many outstanding CQEs"; + case 0xff: return "Command Timed Out"; + default: return "unknown status"; + } +} + +static inline uint16_t fw_rev_maj(struct golan *golan) +{ + return be32_to_cpu(readl(&golan->iseg->fw_rev)) & 0xffff; +} + +static inline u16 fw_rev_min(struct golan *golan) +{ + return be32_to_cpu(readl(&golan->iseg->fw_rev)) >> 16; +} + +static inline u16 fw_rev_sub(struct golan *golan) +{ + return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) & 0xffff; +} + +static inline u16 cmdif_rev(struct golan *golan) +{ + return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) >> 16; +} + + +static inline struct golan_cmd_layout *get_cmd( struct golan *golan, int idx ) +{ + return golan->cmd.addr + (idx << golan->cmd.log_stride); +} + +static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx, + uint32_t inbox_idx, uint32_t outbox_idx) +{ + struct golan_cmd_layout *cmd = get_cmd(golan, cmd_idx); + struct mbox *mailbox = NULL; + + if (inbox_idx != NO_MBOX) { + mailbox = GET_INBOX(golan, inbox_idx); + mailbox->mblock.token = cmd->token; + mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0, + CTRL_SIG_SZ); + } + if (outbox_idx != NO_MBOX) { + mailbox = GET_OUTBOX(golan, outbox_idx); + mailbox->mblock.token = cmd->token; + mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0, + CTRL_SIG_SZ); + } + cmd->sig = ~xor8_buf(cmd, sizeof(*cmd)); +} + +static inline void show_out_status(uint32_t *out) +{ + DBG("%x\n", be32_to_cpu(out[0])); + DBG("%x\n", be32_to_cpu(out[1])); + DBG("%x\n", be32_to_cpu(out[2])); + DBG("%x\n", be32_to_cpu(out[3])); +} +/** + * Check if CMD has finished. 
+ */ +static inline uint32_t is_command_finished( struct golan *golan, int idx) +{ + wmb(); + return !(get_cmd( golan , idx )->status_own & CMD_OWNER_HW); +} + +/** + * Wait for Golan command completion + * + * @v golan Golan device + * @ret rc Return status code + */ +static inline int golan_cmd_wait(struct golan *golan, int idx, const char *command) +{ + unsigned int wait; + int rc = -EBUSY; + + for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) { + if (is_command_finished(golan, idx)) { + rc = CMD_STATUS(golan, idx); + rmb(); + break; + } else { + mdelay ( 1 ); + } + } + if (rc) { + DBGC (golan ,"[%s]RC is %s[%x]\n", command, cmd_status_str(rc), rc); + } + + golan->cmd_bm &= ~(1 << idx); + return rc; +} + +/** + * Notify the HW that commands are ready + */ +static inline void send_command(struct golan *golan) +{ + wmb(); //Make sure the command is visible in "memory". + writel(cpu_to_be32(golan->cmd_bm) , &golan->iseg->cmd_dbell); +} + +static inline int send_command_and_wait(struct golan *golan, uint32_t cmd_idx, + uint32_t inbox_idx, uint32_t outbox_idx, const char *command) +{ + golan_calc_sig(golan, cmd_idx, inbox_idx, outbox_idx); + send_command(golan); + return golan_cmd_wait(golan, cmd_idx, command); +} + +/** + * Prepare a FW command, + * In - comamnd idx (Must be valid) + * writes the command parameters. 
+ */ +static inline struct golan_cmd_layout *write_cmd(struct golan *golan, int idx, + uint16_t opcode, uint16_t opmod, + uint16_t inbox_idx, + uint16_t outbox_idx, uint16_t inlen, + uint16_t outlen) +{ + struct golan_cmd_layout *cmd = get_cmd(golan , idx); + struct golan_inbox_hdr *hdr = (struct golan_inbox_hdr *)cmd->in; + static uint8_t token; + + memset(cmd, 0, sizeof(*cmd)); + + cmd->type = GOLAN_PCI_CMD_XPORT; + cmd->status_own = CMD_OWNER_HW; + cmd->outlen = cpu_to_be32(outlen); + cmd->inlen = cpu_to_be32(inlen); + hdr->opcode = cpu_to_be16(opcode); + hdr->opmod = cpu_to_be16(opmod); + + if (inbox_idx != NO_MBOX) { + memset(GET_INBOX(golan, inbox_idx), 0, MAILBOX_SIZE); + cmd->in_ptr = VIRT_2_BE64_BUS(GET_INBOX(golan, inbox_idx)); + cmd->token = ++token; + } + if (outbox_idx != NO_MBOX) { + memset(GET_OUTBOX(golan, outbox_idx), 0, MAILBOX_SIZE); + cmd->out_ptr = VIRT_2_BE64_BUS(GET_OUTBOX(golan, outbox_idx)); + } + + golan->cmd_bm |= 1 << idx; + + assert ( cmd != NULL ); + return cmd; +} + +static inline int golan_core_enable_hca(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + int rc = 0; + + DBGC(golan, "%s\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_enable_hca_mbox_in), + sizeof(struct golan_enable_hca_mbox_out)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + return rc; +} + +static inline void golan_disable_hca(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DISABLE_HCA, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_disable_hca_mbox_in), + sizeof(struct golan_disable_hca_mbox_out)); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; +} + +static inline int golan_set_hca_cap(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + int rc; + + DBGC(golan, 
"%s\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_SET_HCA_CAP, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_cmd_set_hca_cap_mbox_in), + sizeof(struct golan_cmd_set_hca_cap_mbox_out)); + + golan->caps.flags &= ~GOLAN_DEV_CAP_FLAG_CMDIF_CSUM; + DBGC( golan , "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz); + DBGC( golan , "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz); + DBGC( golan , "%s caps.log_uar_sz = %d\n", __FUNCTION__, be32_to_cpu(golan->caps.uar_page_sz)); + golan->caps.uar_page_sz = 0; + golan->caps.log_max_qp = GOLAN_LOG_MAX_QP; + + memcpy(((struct golan_hca_cap *)GET_INBOX(golan, GEN_MBOX)), + &(golan->caps), + sizeof(struct golan_hca_cap)); + + //if command failed we should reset the caps in golan->caps + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + return rc; +} + +static inline int golan_qry_hca_cap(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + int rc = 0; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_CAP, 0x1, + NO_MBOX, GEN_MBOX, + sizeof(struct golan_cmd_query_hca_cap_mbox_in), + sizeof(struct golan_cmd_query_hca_cap_mbox_out)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, GEN_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_hca_cap ); + + memcpy(&(golan->caps), + ((struct golan_hca_cap *)GET_OUTBOX(golan, GEN_MBOX)), + sizeof(struct golan_hca_cap)); +err_query_hca_cap: + return rc; +} + +static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16 func_id ) { + uint32_t out_num_entries = 0; + int size_ibox = 0; + int size_obox = 0; + int rc = 0; + + DBGC(golan, "%s\n", __FUNCTION__); + + while ( pages > 0 ) { + uint32_t pas_num = min(pages, MAX_PASE_MBOX); + struct golan_cmd_layout *cmd; + struct golan_manage_pages_inbox *in; + + size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE); + size_obox = sizeof(struct 
golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE); + + cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_TAKE, + MEM_MBOX, MEM_MBOX, + size_ibox, + size_obox); + + in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CANT USE THE LAST 2 FIELDS) */ + + in->func_id = func_id; /* Already BE */ + in->num_entries = cpu_to_be32(pas_num); + + if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) { + out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries); + } else { + if ( rc == -EBUSY ) { + DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" ); + } else { + DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n", + __FUNCTION__, rc, cmd_status_str(rc), + CMD_SYND(golan, MEM_CMD_IDX), + get_cmd( golan , MEM_CMD_IDX )->status_own, + be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num); + } + return rc; + } + + pages -= out_num_entries; + } + DBGC( golan , "%s Pages handled\n", __FUNCTION__); + return rc; +} + +static inline int golan_provide_pages ( struct golan *golan , uint32_t pages + , __be16 func_id,struct golan_firmware_area *fw_area) { + struct mbox *mailbox; + int size_ibox = 0; + int size_obox = 0; + int rc = 0; + userptr_t next_page_addr = UNULL; + + DBGC(golan, "%s\n", __FUNCTION__); + if ( ! 
fw_area->area ) { + fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages ); + if ( fw_area->area == UNULL ) { + rc = -ENOMEM; + DBGC (golan ,"Failed to allocated %d pages \n",pages); + goto err_golan_alloc_fw_area; + } + fw_area->npages = pages; + } + assert ( fw_area->npages == pages ); + next_page_addr = fw_area->area; + while ( pages > 0 ) { + uint32_t pas_num = min(pages, MAX_PASE_MBOX); + unsigned i, j; + struct golan_cmd_layout *cmd; + struct golan_manage_pages_inbox *in; + userptr_t addr = 0; + + mailbox = GET_INBOX(golan, MEM_MBOX); + size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE); + size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE); + + cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_GIVE, + MEM_MBOX, MEM_MBOX, + size_ibox, + size_obox); + + in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CANT USE THE LAST 2 FIELDS) */ + + in->func_id = func_id; /* Already BE */ + in->num_entries = cpu_to_be32(pas_num); + + for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j, + next_page_addr += GOLAN_PAGE_SIZE ) { + addr = next_page_addr; + if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) { + DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr); + } + mailbox->mblock.data[j] = USR_2_BE64_BUS(addr); + } + + if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) { + pages -= pas_num; + golan->total_dma_pages += pas_num; + } else { + if ( rc == -EBUSY ) { + DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" ); + } else { + DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n", + __FUNCTION__, rc, cmd_status_str(rc), + CMD_SYND(golan, MEM_CMD_IDX), + get_cmd( golan , MEM_CMD_IDX )->status_own, + be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num); + } + goto err_send_command; + } + } + DBGC( golan , "%s Pages handled\n", __FUNCTION__); + return 0; + +err_send_command: 
+err_golan_alloc_fw_area: + /* Go over In box and free pages */ + /* Send Error to FW */ + /* What is next - Disable HCA? */ + DBGC (golan ,"%s Failed (rc = 0x%x)\n", __FUNCTION__, rc); + return rc; +} + +static inline int golan_handle_pages(struct golan *golan, + enum golan_qry_pages_mode qry, + enum golan_manage_pages_mode mode) +{ + struct golan_cmd_layout *cmd; + + int rc = 0; + int32_t pages; + uint16_t total_pages; + __be16 func_id; + + DBGC(golan, "%s\n", __FUNCTION__); + + cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, qry, + NO_MBOX, NO_MBOX, + sizeof(struct golan_query_pages_inbox), + sizeof(struct golan_query_pages_outbox)); + + rc = send_command_and_wait(golan, MEM_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_handle_pages_query ); + + pages = be32_to_cpu(QRY_PAGES_OUT(golan, MEM_CMD_IDX)->num_pages); + + DBGC( golan , "%s pages needed: %d\n", __FUNCTION__, pages); + + func_id = QRY_PAGES_OUT(golan, MEM_CMD_IDX)->func_id; + + total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) )); + + if ( mode == GOLAN_PAGES_GIVE ) { + rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] )); + } else { + rc = golan_take_pages(golan, golan->total_dma_pages, func_id); + golan->total_dma_pages = 0; + } + + if ( rc ) { + DBGC (golan , "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n", + ( ( mode == GOLAN_PAGES_GIVE ) ? 
"give" : "take" ), rc , golan->total_dma_pages ); + return rc; + } + + return 0; + +err_handle_pages_query: + DBGC (golan ,"%s Qyery pages failed (rc = 0x%x)\n", __FUNCTION__, rc); + return rc; +} + +static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( unused )), uint32_t reg __attribute__ (( unused ))) +{ +#if 0 + write_cmd(golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_reg_host_endianess), + sizeof(struct golan_reg_host_endianess)); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); +#endif + DBGC (golan ," %s Not implemented yet\n", __FUNCTION__); + return 0; +} + +static inline void golan_cmd_uninit ( struct golan *golan ) +{ + free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE); + free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE); + free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE); +} + +/** + * Initialise Golan Command Q parameters + * -- Alocate a 4kb page for the Command Q + * -- Read the stride and log num commands available + * -- Write the address to cmdq_phy_addr in iseg + * @v golan Golan device + */ +static inline int golan_cmd_init ( struct golan *golan ) +{ + int rc = 0; + uint32_t addr_l_sz; + + if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { + rc = -ENOMEM; + goto malloc_dma_failed; + } + if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { + rc = -ENOMEM; + goto malloc_dma_inbox_failed; + } + if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { + rc = -ENOMEM; + goto malloc_dma_outbox_failed; + } + addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz)); + + golan->cmd.log_stride = addr_l_sz & 0xf; + golan->cmd.size = 1 << (( addr_l_sz >> 4 ) & 0xf); + + addr_l_sz = virt_to_bus(golan->cmd.addr); + writel(0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h); + writel(cpu_to_be32(addr_l_sz), &golan->iseg->cmdq_addr_l_sz); + wmb(); //Make sure the addr is visible in "memory". 
+ + addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz)); + + DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__); + return 0; + +malloc_dma_outbox_failed: + free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE); +malloc_dma_inbox_failed: + free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE); +malloc_dma_failed: + DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n", + __FUNCTION__, rc); + return rc; +} + +static inline int golan_hca_init(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + int rc = 0; + + DBGC(golan, "%s\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_INIT_HCA, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_cmd_init_hca_mbox_in), + sizeof(struct golan_cmd_init_hca_mbox_out)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + return rc; +} + +static inline void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod) +{ + struct golan_cmd_layout *cmd; + int rc; + + DBGC (golan, "%s in\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_TEARDOWN_HCA, op_mod, + NO_MBOX, NO_MBOX, + sizeof(struct golan_cmd_teardown_hca_mbox_in), + sizeof(struct golan_cmd_teardown_hca_mbox_out)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + + DBGC (golan, "%s HCA teardown compleated\n", __FUNCTION__); +} + +static inline int golan_alloc_uar(struct golan *golan) +{ + struct golan_uar *uar = &golan->uar; + struct golan_cmd_layout *cmd; + struct golan_alloc_uar_mbox_out *out; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_UAR, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_alloc_uar_mbox_in), + sizeof(struct golan_alloc_uar_mbox_out)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_uar_cmd ); + out = (struct golan_alloc_uar_mbox_out *) ( cmd->out 
); + + uar->index = be32_to_cpu(out->uarn) & 0xffffff; + + uar->phys = (pci_bar_start(golan->pci, GOLAN_HCA_BAR) + (uar->index << GOLAN_PAGE_SHIFT)); + uar->virt = (void *)(pci_ioremap(golan->pci, uar->phys, GOLAN_PAGE_SIZE)); + + DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index); + return 0; + +err_alloc_uar_cmd: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +static void golan_dealloc_uar(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + uint32_t uar_index = golan->uar.index; + int rc; + + DBGC (golan, "%s in\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_UAR, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_free_uar_mbox_in), + sizeof(struct golan_free_uar_mbox_out)); + + ((struct golan_free_uar_mbox_in *)(cmd->in))->uarn = cpu_to_be32(uar_index); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + golan->uar.index = 0; + + DBGC (golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index); +} + +static void golan_eq_update_ci(struct golan_event_queue *eq, int arm) +{ + __be32 *addr = eq->doorbell + (arm ? 
0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + writel(cpu_to_be32(val) , addr); + /* We still want ordering, just not swabbing, so add a barrier */ + wmb(); +} + +static int golan_create_eq(struct golan *golan) +{ + struct golan_event_queue *eq = &golan->eq; + struct golan_create_eq_mbox_in_data *in; + struct golan_cmd_layout *cmd; + struct golan_create_eq_mbox_out *out; + int rc, i; + + eq->cons_index = 0; + eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]); + eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); + if (!eq->eqes) { + rc = -ENOMEM; + goto err_create_eq_eqe_alloc; + } + + /* Set EQEs ownership bit to HW ownership */ + for (i = 0; i < GOLAN_NUM_EQES; ++i) { + eq->eqes[i].owner = GOLAN_EQE_HW_OWNERSHIP; + } + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_EQ, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_create_eq_mbox_in) + GOLAN_PAS_SIZE, + sizeof(struct golan_create_eq_mbox_out)); + + in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX); + + /* Fill the physical address of the page */ + in->pas[0] = VIRT_2_BE64_BUS( eq->eqes ); + in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index); + DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page); + in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_eq_cmd ); + out = (struct golan_create_eq_mbox_out *)cmd->out; + + eq->eqn = out->eq_number; + eq->doorbell = ((void *)golan->uar.virt) + GOLAN_EQ_DOORBELL_OFFSET; + + /* EQs are created in ARMED state */ + golan_eq_update_ci(eq, GOLAN_EQ_UNARMED); + + DBGC( golan , "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn); + return 0; + +err_create_eq_cmd: + free_dma ( eq->eqes , GOLAN_PAGE_SIZE ); +err_create_eq_eqe_alloc: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + 
+static void golan_destory_eq(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + struct golan_destroy_eq_mbox_in *in; + uint8_t eqn = golan->eq.eqn; + int rc; + + DBGC (golan, "%s in\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_EQ, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_destroy_eq_mbox_in), + sizeof(struct golan_destroy_eq_mbox_out)); + + in = GOLAN_MBOX_IN ( cmd, in ); + in->eqn = eqn; + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + + free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE ); + golan->eq.eqn = 0; + + DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn); +} + +static int golan_alloc_pd(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + struct golan_alloc_pd_mbox_out *out; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_PD, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_alloc_pd_mbox_in), + sizeof(struct golan_alloc_pd_mbox_out)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_pd_cmd ); + out = (struct golan_alloc_pd_mbox_out *) ( cmd->out ); + + golan->pdn = (be32_to_cpu(out->pdn) & 0xffffff); + DBGC( golan , "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__, + golan->pdn); + return 0; + +err_alloc_pd_cmd: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +static void golan_dealloc_pd(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + uint32_t pdn = golan->pdn; + int rc; + + DBGC (golan,"%s in\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_PD, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_alloc_pd_mbox_in), + sizeof(struct golan_alloc_pd_mbox_out)); + + ((struct golan_dealloc_pd_mbox_in *)(cmd->in))->pdn = cpu_to_be32(pdn); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + 
golan->pdn = 0; + + DBGC (golan ,"%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn); +} + +static int golan_create_mkey(struct golan *golan) +{ + struct golan_create_mkey_mbox_in_data *in; + struct golan_cmd_layout *cmd; + struct golan_create_mkey_mbox_out *out; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_MKEY, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_create_mkey_mbox_in), + sizeof(struct golan_create_mkey_mbox_out)); + + in = (struct golan_create_mkey_mbox_in_data *)GET_INBOX(golan, GEN_MBOX); + + in->seg.flags = GOLAN_IB_ACCESS_LOCAL_WRITE | GOLAN_IB_ACCESS_LOCAL_READ; + in->seg.flags_pd = cpu_to_be32(golan->pdn | GOLAN_MKEY_LEN64); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_mkey_cmd ); + out = (struct golan_create_mkey_mbox_out *) ( cmd->out ); + + golan->mkey = ((be32_to_cpu(out->mkey) & 0xffffff) << 8); + DBGC( golan , "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n", + __FUNCTION__, golan->mkey); + return 0; +err_create_mkey_cmd: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +static void golan_destroy_mkey(struct golan *golan) +{ + struct golan_cmd_layout *cmd; + u32 mkey = golan->mkey; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_MKEY, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_destroy_mkey_mbox_in), + sizeof(struct golan_destroy_mkey_mbox_out)); + ((struct golan_destroy_mkey_mbox_in *)(cmd->in))->mkey = cpu_to_be32(mkey >> 8); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + golan->mkey = 0; + + DBGC( golan , "%s DMA Key (0x%x) for local access write was destroyed\n" + , __FUNCTION__, mkey); +} + + +/** + * Initialise Golan PCI parameters + * + * @v golan Golan device + */ +static inline void golan_pci_init(struct 
golan *golan) +{ + struct pci_device *pci = golan->pci; + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Get HCA BAR */ + golan->iseg = pci_ioremap ( pci, pci_bar_start ( pci, GOLAN_HCA_BAR), + GOLAN_PCI_CONFIG_BAR_SIZE ); +} + +static inline struct golan *golan_alloc() +{ + void *golan = zalloc(sizeof(struct golan)); + if ( !golan ) + goto err_zalloc; + + return golan; + +err_zalloc: + return NULL; +} + +/** + * Create completion queue + * + * @v ibdev Infiniband device + * @v cq Completion queue + * @ret rc Return status code + */ +static int golan_create_cq(struct ib_device *ibdev, + struct ib_completion_queue *cq) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_completion_queue *golan_cq; + struct golan_cmd_layout *cmd; + struct golan_create_cq_mbox_in_data *in; + struct golan_create_cq_mbox_out *out; + int rc; + unsigned int i; + + golan_cq = zalloc(sizeof(*golan_cq)); + if (!golan_cq) { + rc = -ENOMEM; + goto err_create_cq; + } + golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes; + golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE, + GOLAN_CQ_DB_RECORD_SIZE); + if (!golan_cq->doorbell_record) { + rc = -ENOMEM; + goto err_create_cq_db_alloc; + } + + golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); + if (!golan_cq->cqes) { + rc = -ENOMEM; + goto err_create_cq_cqe_alloc; + } + + /* Set CQEs ownership bit to HW ownership */ + for (i = 0; i < cq->num_cqes; ++i) { + golan_cq->cqes[i].op_own = ((GOLAN_CQE_OPCODE_NOT_VALID << + GOLAN_CQE_OPCODE_BIT) | + GOLAN_CQE_HW_OWNERSHIP); + } + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_CQ, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_create_cq_mbox_in) + GOLAN_PAS_SIZE, + sizeof(struct golan_create_cq_mbox_out)); + + in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX); + + /* Fill the physical address of the page */ + in->pas[0] = VIRT_2_BE64_BUS( golan_cq->cqes ); + in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5; + 
in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index); + in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn); + in->ctx.db_record_addr = VIRT_2_BE64_BUS(golan_cq->doorbell_record); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_cq_cmd ); + out = (struct golan_create_cq_mbox_out *) ( cmd->out ); + + cq->cqn = (be32_to_cpu(out->cqn) & 0xffffff); + + ib_cq_set_drvdata(cq, golan_cq); + + DBGC( golan , "%s CQ created successfully (CQN = 0x%lx)\n", __FUNCTION__, cq->cqn); + return 0; + +err_create_cq_cmd: + free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE ); +err_create_cq_cqe_alloc: + free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE); +err_create_cq_db_alloc: + free ( golan_cq ); +err_create_cq: + DBGC (golan ,"%s out rc = 0x%x\n", __FUNCTION__, rc); + return rc; +} + +/** + * Destroy completion queue + * + * @v ibdev Infiniband device + * @v cq Completion queue + */ +static void golan_destroy_cq(struct ib_device *ibdev, + struct ib_completion_queue *cq) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq); + struct golan_cmd_layout *cmd; + uint32_t cqn = cq->cqn; + int rc; + + DBGC (golan, "%s in\n", __FUNCTION__); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_CQ, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_destroy_cq_mbox_in), + sizeof(struct golan_destroy_cq_mbox_out)); + ((struct golan_destroy_cq_mbox_in *)(cmd->in))->cqn = cpu_to_be32(cqn); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + cq->cqn = 0; + + ib_cq_set_drvdata(cq, NULL); + free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE ); + free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE); + free(golan_cq); + + DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn); +} + +static void golan_cq_clean(struct ib_completion_queue *cq) +{ + 
ib_poll_cq(cq->ibdev, cq); +} + +static int golan_qp_type_to_st(enum ib_queue_pair_type type) +{ + int qpt = type; + + switch (qpt) { + case IB_QPT_RC: + return GOLAN_QP_ST_RC; + case IB_QPT_UD: + return GOLAN_QP_ST_UD; + case IB_QPT_SMI: + return GOLAN_QP_ST_QP0; + case IB_QPT_GSI: + return GOLAN_QP_ST_QP1; + case IB_QPT_ETH: + default: + return -EINVAL; + } +} +#if 0 +static int golan_is_special_qp(enum ib_queue_pair_type type) +{ + return (type == IB_QPT_GSI || type == IB_QPT_SMI); +} +#endif +static int golan_create_qp_aux(struct ib_device *ibdev, + struct ib_queue_pair *qp, + int *qpn) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_queue_pair *golan_qp; + struct golan_create_qp_mbox_in_data *in; + struct golan_cmd_layout *cmd; + struct golan_wqe_data_seg *data; + struct golan_create_qp_mbox_out *out; + uint32_t wqe_size_in_bytes; + uint32_t max_qp_size_in_wqes; + unsigned int i; + int rc; + + golan_qp = zalloc(sizeof(*golan_qp)); + if (!golan_qp) { + rc = -ENOMEM; + goto err_create_qp; + } + + if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) || + ( qp->type == IB_QPT_UD ) ) { + golan_qp->rq.grh_size = ( qp->recv.num_wqes * + sizeof ( golan_qp->rq.grh[0] )); + } + + /* Calculate receive queue size */ + golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE; + if (GOLAN_RECV_WQE_SIZE > be16_to_cpu(golan->caps.max_wqe_sz_rq)) { + DBGC (golan ,"%s receive wqe size [%zd] > max wqe size [%d]\n", __FUNCTION__, + GOLAN_RECV_WQE_SIZE, be16_to_cpu(golan->caps.max_wqe_sz_rq)); + rc = -EINVAL; + goto err_create_qp_rq_size; + } + + wqe_size_in_bytes = sizeof(golan_qp->sq.wqes[0]); + /* Calculate send queue size */ + if (wqe_size_in_bytes > be16_to_cpu(golan->caps.max_wqe_sz_sq)) { + DBGC (golan ,"%s send WQE size [%d] > max WQE size [%d]\n", __FUNCTION__, + wqe_size_in_bytes, + be16_to_cpu(golan->caps.max_wqe_sz_sq)); + rc = -EINVAL; + goto err_create_qp_sq_wqe_size; + } + golan_qp->sq.size = (qp->send.num_wqes * wqe_size_in_bytes); + 
max_qp_size_in_wqes = (1 << ((uint32_t)(golan->caps.log_max_qp_sz))); + if (qp->send.num_wqes > max_qp_size_in_wqes) { + DBGC (golan ,"%s send wq size [%d] > max wq size [%d]\n", __FUNCTION__, + golan_qp->sq.size, max_qp_size_in_wqes); + rc = -EINVAL; + goto err_create_qp_sq_size; + } + + golan_qp->size = golan_qp->sq.size + golan_qp->rq.size; + + /* allocate dma memory for WQEs (1 page is enough) - should change it */ + golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); + if (!golan_qp->wqes) { + rc = -ENOMEM; + goto err_create_qp_wqe_alloc; + } + golan_qp->rq.wqes = golan_qp->wqes; + golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)& + //(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]); + + if ( golan_qp->rq.grh_size ) { + golan_qp->rq.grh = ( golan_qp->wqes + + golan_qp->sq.size + + golan_qp->rq.size ); + } + + /* Invalidate all WQEs */ + data = &golan_qp->rq.wqes[0].data[0]; + for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ); i++ ){ + data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY ); + data++; + } + + golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db), + sizeof(struct golan_qp_db)); + if (!golan_qp->doorbell_record) { + rc = -ENOMEM; + goto err_create_qp_db_alloc; + } + memset(golan_qp->doorbell_record, 0, sizeof(struct golan_qp_db)); + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_QP, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_create_qp_mbox_in) + GOLAN_PAS_SIZE, + sizeof(struct golan_create_qp_mbox_out)); + + in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX); + + /* Fill the physical address of the page */ + in->pas[0] = VIRT_2_BE64_BUS(golan_qp->wqes); + in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index); + + in->ctx.flags_pd = cpu_to_be32(golan->pdn); + in->ctx.flags = cpu_to_be32((golan_qp_type_to_st(qp->type) + << GOLAN_QP_CTX_ST_BIT) | + (GOLAN_QP_PM_MIGRATED << + GOLAN_QP_CTX_PM_STATE_BIT)); +// cgs set to 0, 
initialy. +// atomic mode + in->ctx.rq_size_stride = ((ilog2(qp->recv.num_wqes) << + GOLAN_QP_CTX_RQ_SIZE_BIT) | + (sizeof(golan_qp->rq.wqes[0]) / GOLAN_RECV_WQE_SIZE)); + in->ctx.sq_crq_size = cpu_to_be16(ilog2(golan_qp->sq.size / GOLAN_SEND_WQE_BB_SIZE) + << GOLAN_QP_CTX_SQ_SIZE_BIT); + in->ctx.cqn_send = cpu_to_be32(qp->send.cq->cqn); + in->ctx.cqn_recv = cpu_to_be32(qp->recv.cq->cqn); + in->ctx.db_rec_addr = VIRT_2_BE64_BUS(golan_qp->doorbell_record); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_qp_cmd ); + out = (struct golan_create_qp_mbox_out *)cmd->out; + + *qpn = (be32_to_cpu(out->qpn) & 0xffffff); + /* + * Hardware wants QPN written in big-endian order (after + * shifting) for send doorbell. Precompute this value to save + * a little bit when posting sends. + */ + golan_qp->doorbell_qpn = cpu_to_be32(*qpn << 8); + golan_qp->state = GOLAN_IB_QPS_RESET; + + ib_qp_set_drvdata(qp, golan_qp); + + return 0; + +err_create_qp_cmd: + free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db)); +err_create_qp_db_alloc: + free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE ); +err_create_qp_wqe_alloc: +err_create_qp_sq_size: +err_create_qp_sq_wqe_size: +err_create_qp_rq_size: + free ( golan_qp ); +err_create_qp: + return rc; +} + +/** + * Create queue pair + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @ret rc Return status code + */ +static int golan_create_qp(struct ib_device *ibdev, + struct ib_queue_pair *qp) +{ + int rc, qpn = -1; + + switch (qp->type) { + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + rc = golan_create_qp_aux(ibdev, qp, &qpn); + if (rc) { + DBG ( "%s Failed to create QP (rc = 0x%x)\n", __FUNCTION__, rc); + return rc; + } + qp->qpn = qpn; + + break; + case IB_QPT_ETH: + case IB_QPT_RC: + default: + DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type); + return -EINVAL; + } + + return 0; +} + +static int 
golan_modify_qp_rst_to_init(struct ib_device *ibdev, + struct ib_queue_pair *qp __unused, + struct golan_modify_qp_mbox_in_data *in) +{ + int rc = 0; + + in->ctx.qkey = cpu_to_be32((uint32_t)(qp->qkey)); + + in->ctx.pri_path.port = ibdev->port; + in->ctx.flags |= cpu_to_be32(GOLAN_QP_PM_MIGRATED << GOLAN_QP_CTX_PM_STATE_BIT); + in->ctx.pri_path.pkey_index = 0; + /* QK is 0 */ + /* QP cntr set 0 */ + return rc; +} + +static int golan_modify_qp_init_to_rtr(struct ib_device *ibdev __unused, + struct ib_queue_pair *qp __unused, + struct golan_modify_qp_mbox_in_data *in) +{ + int rc = 0; + + in->optparam = 0; + return rc; +} + +static int golan_modify_qp_rtr_to_rts(struct ib_device *ibdev __unused, + struct ib_queue_pair *qp __unused, + struct golan_modify_qp_mbox_in_data *in __unused) +{ + int rc = 0; + + in->optparam = 0; + /* In good flow psn in 0 */ + return rc; +} + +static int golan_modify_qp_to_rst(struct ib_device *ibdev, + struct ib_queue_pair *qp) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp); + struct golan_cmd_layout *cmd; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_2RST_QP, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_modify_qp_mbox_in), + sizeof(struct golan_modify_qp_mbox_out)); + ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_2rst_cmd ); + + golan_qp->state = GOLAN_IB_QPS_RESET; + DBGC( golan , "%s QP number 0x%lx was modified to RESET\n", + __FUNCTION__, qp->qpn); + + return 0; + +err_modify_qp_2rst_cmd: + DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n", + __FUNCTION__, qp->qpn, rc); + return rc; +} + +static int (*golan_modify_qp_methods[])(struct ib_device *ibdev, + struct ib_queue_pair *qp, + struct golan_modify_qp_mbox_in_data *in) = { + + [GOLAN_IB_QPS_RESET] = golan_modify_qp_rst_to_init, + 
[GOLAN_IB_QPS_INIT] = golan_modify_qp_init_to_rtr, + [GOLAN_IB_QPS_RTR] = golan_modify_qp_rtr_to_rts +}; + +static int golan_modify_qp(struct ib_device *ibdev, + struct ib_queue_pair *qp) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp); + struct golan_modify_qp_mbox_in_data *in; + struct golan_cmd_layout *cmd; + enum golan_ib_qp_state prev_state; + int rc; + int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP, + GOLAN_CMD_OP_INIT2RTR_QP, + GOLAN_CMD_OP_RTR2RTS_QP}; + + while (golan_qp->state < GOLAN_IB_QPS_RTS) { + prev_state = golan_qp->state; + cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_modify_qp_mbox_in), + sizeof(struct golan_modify_qp_mbox_out)); + + in = (struct golan_modify_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX); + ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn); + rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in); + if (rc) { + goto err_modify_qp_fill_inbox; + } +// in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index); + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd ); + + ++(golan_qp->state); + + DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n", + __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state], + golan_qp_state_as_string[golan_qp->state]); + } + + DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n", + __FUNCTION__, qp->qpn); + return 0; + +err_modify_qp_cmd: +err_modify_qp_fill_inbox: + DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n", + __FUNCTION__, qp->qpn, rc); + return rc; +} + +/** + * Destroy queue pair + * + * @v ibdev Infiniband device + * @v qp Queue pair + */ +static void golan_destroy_qp(struct ib_device *ibdev, + struct ib_queue_pair *qp) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_queue_pair 
*golan_qp = ib_qp_get_drvdata(qp); + struct golan_cmd_layout *cmd; + unsigned long qpn = qp->qpn; + int rc; + + DBGC (golan, "%s in\n", __FUNCTION__); + + if (golan_qp->state != GOLAN_IB_QPS_RESET) { + if (golan_modify_qp_to_rst(ibdev, qp)) { + DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__, + qp->qpn); + } + } + + if (qp->recv.cq) { + golan_cq_clean(qp->recv.cq); + } + if (qp->send.cq && (qp->send.cq != qp->recv.cq)) { + golan_cq_clean(qp->send.cq); + } + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_QP, 0x0, + NO_MBOX, NO_MBOX, + sizeof(struct golan_destroy_qp_mbox_in), + sizeof(struct golan_destroy_qp_mbox_out)); + ((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn); + rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + qp->qpn = 0; + + ib_qp_set_drvdata(qp, NULL); + free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db)); + free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE ); + free(golan_qp); + + DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn); +} + +/** + * Calculate transmission rate + * + * @v av Address vector + * @ret golan_rate Golan rate + */ +static unsigned int golan_rate(enum ib_rate rate) { + return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? 
(rate + 5) : 0); +} + +/** + * Post send work queue entry + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v av Address vector + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int golan_post_send(struct ib_device *ibdev, + struct ib_queue_pair *qp, + struct ib_address_vector *av, + struct io_buffer *iobuf) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp); + struct golan_send_wqe_ud *wqe = NULL; + struct golan_av *datagram = NULL; + unsigned long wqe_idx_mask; + unsigned long wqe_idx; + struct golan_wqe_data_seg *data = NULL; + struct golan_wqe_ctrl_seg *ctrl = NULL; + + + wqe_idx_mask = (qp->send.num_wqes - 1); + wqe_idx = (qp->send.next_idx & wqe_idx_mask); + if (qp->send.iobufs[wqe_idx]) { + DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn); + return -ENOMEM; + } + + qp->send.iobufs[wqe_idx] = iobuf; + + // change to this + //wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4; + + wqe = &golan_qp->sq.wqes[wqe_idx].ud; + + //CHECK HW OWNERSHIP BIT ??? 
+ + memset(wqe, 0, sizeof(*wqe)); + + ctrl = &wqe->ctrl; + ctrl->opmod_idx_opcode = cpu_to_be32(GOLAN_SEND_OPCODE | + ((u32)(golan_qp->sq.next_idx) << + GOLAN_WQE_CTRL_WQE_IDX_BIT)); + ctrl->qpn_ds = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) | + golan_qp->doorbell_qpn; + ctrl->fm_ce_se = 0x8;//10 - 0 - 0 + data = &wqe->data; + data->byte_count = cpu_to_be32(iob_len(iobuf)); + data->lkey = cpu_to_be32(golan->mkey); + data->addr = VIRT_2_BE64_BUS(iobuf->data); + + datagram = &wqe->datagram; + datagram->key.qkey.qkey = cpu_to_be32(av->qkey); + datagram->dqp_dct = cpu_to_be32((1 << 31) | av->qpn); + datagram->stat_rate_sl = ((golan_rate(av->rate) << 4) | av->sl); + datagram->fl_mlid = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */ + datagram->rlid = cpu_to_be16(av->lid); + datagram->grh_gid_fl = cpu_to_be32(av->gid_present << 30); + memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */); + + /* + * Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + ++(qp->send.next_idx); + golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE); + golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx); + wmb(); + writeq(*((__be64 *)ctrl), golan->uar.virt + + ( ( golan_qp->sq.next_idx & 0x1 ) ? 
DB_BUFFER0_EVEN_OFFSET + : DB_BUFFER0_ODD_OFFSET ) ); + return 0; +} + +/** + * Post receive work queue entry + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int golan_post_recv(struct ib_device *ibdev, + struct ib_queue_pair *qp, + struct io_buffer *iobuf) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp); + struct ib_work_queue *wq = &qp->recv; + struct golan_recv_wqe_ud *wqe; + struct ib_global_route_header *grh; + struct golan_wqe_data_seg *data; + unsigned int wqe_idx_mask; + + /* Allocate work queue entry */ + wqe_idx_mask = (wq->num_wqes - 1); + if (wq->iobufs[wq->next_idx & wqe_idx_mask]) { + DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn); + return -ENOMEM; + } + + wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf; + wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask]; + + memset(wqe, 0, sizeof(*wqe)); + data = &wqe->data[0]; + if ( golan_qp->rq.grh ) { + grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask]; + data->byte_count = cpu_to_be32 ( sizeof ( *grh ) ); + data->lkey = cpu_to_be32 ( golan->mkey ); + data->addr = VIRT_2_BE64_BUS ( grh ); + data++; + } + + data->byte_count = cpu_to_be32(iob_tailroom(iobuf)); + data->lkey = cpu_to_be32(golan->mkey); + data->addr = VIRT_2_BE64_BUS(iobuf->data); + + ++wq->next_idx; + + /* + * Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff); + + return 0; +} + +static int golan_query_vport_context ( struct ib_device *ibdev ) { + struct golan *golan = ib_get_drvdata ( ibdev ); + struct golan_cmd_layout *cmd; + struct golan_query_hca_vport_context_inbox *in; + struct golan_query_hca_vport_context_data *context_data; + int rc; + + cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT, + 0x0, 
GEN_MBOX, GEN_MBOX, + sizeof(struct golan_query_hca_vport_context_inbox), + sizeof(struct golan_query_hca_vport_context_outbox) ); + + in = GOLAN_MBOX_IN ( cmd, in ); + in->port_num = (u8)ibdev->port; + + rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ ); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd ); + + context_data = (struct golan_query_hca_vport_context_data *)( GET_OUTBOX ( golan, GEN_MBOX ) ); + + ibdev->node_guid.dwords[0] = context_data->node_guid[0]; + ibdev->node_guid.dwords[1] = context_data->node_guid[1]; + ibdev->lid = be16_to_cpu( context_data->lid ); + ibdev->sm_lid = be16_to_cpu( context_data->sm_lid ); + ibdev->sm_sl = context_data->sm_sl; + ibdev->port_state = context_data->port_state; + + return 0; +err_query_vport_context_cmd: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + + +static int golan_query_vport_gid ( struct ib_device *ibdev ) { + struct golan *golan = ib_get_drvdata( ibdev ); + struct golan_cmd_layout *cmd; + struct golan_query_hca_vport_gid_inbox *in; + union ib_gid *ib_gid; + int rc; + + cmd = write_cmd( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_GID, + 0x0, GEN_MBOX, GEN_MBOX, + sizeof(struct golan_query_hca_vport_gid_inbox), + sizeof(struct golan_query_hca_vport_gid_outbox) ); + + in = GOLAN_MBOX_IN ( cmd, in ); + in->port_num = (u8)ibdev->port; + in->gid_index = 0; + rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ ); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd ); + + ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) ); + + memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) ); + + return 0; +err_query_vport_gid_cmd: + DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +static int golan_query_vport_pkey ( struct ib_device *ibdev ) { + struct golan *golan = ib_get_drvdata ( ibdev ); + struct golan_cmd_layout *cmd; + struct golan_query_hca_vport_pkey_inbox *in; + int 
pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size)); + int rc; + + cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY, + 0x0, GEN_MBOX, GEN_MBOX, + sizeof(struct golan_query_hca_vport_pkey_inbox), + sizeof(struct golan_outbox_hdr) + 8 + + sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries ); + + in = GOLAN_MBOX_IN ( cmd, in ); + in->port_num = (u8)ibdev->port; + in->pkey_index = 0xffff; + rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ ); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd ); + + return 0; +err_query_vport_pkey_cmd: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +static int golan_get_ib_info ( struct ib_device *ibdev ) { + int rc; + + rc = golan_query_vport_context ( ibdev ); + if ( rc != 0 ) { + DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc ); + goto err_query_vport_context; + } + + rc = golan_query_vport_gid ( ibdev ); + if ( rc != 0 ) { + DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc ); + goto err_query_vport_gid; + } + + rc = golan_query_vport_pkey ( ibdev ); + if ( rc != 0 ) { + DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc ); + goto err_query_vport_pkey; + } + return rc; +err_query_vport_pkey: +err_query_vport_gid: +err_query_vport_context: + DBG ( "%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +static int golan_complete(struct ib_device *ibdev, + struct ib_completion_queue *cq, + struct golan_cqe64 *cqe64) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct ib_work_queue *wq; + struct golan_queue_pair *golan_qp; + struct ib_queue_pair *qp; + struct io_buffer *iobuf = NULL; + struct ib_address_vector recv_dest; + struct ib_address_vector recv_source; + struct ib_global_route_header *grh; + struct golan_err_cqe *err_cqe64; + int gid_present, idx; + u16 wqe_ctr; + uint8_t opcode; + static int error_state; + uint32_t qpn = 
be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff; + int is_send = 0; + size_t len; + + opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT; + DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode); + + if (opcode == GOLAN_CQE_REQ || opcode == GOLAN_CQE_REQ_ERR) { + is_send = 1; + } else { + is_send = 0; + } + if (opcode == GOLAN_CQE_REQ_ERR || opcode == GOLAN_CQE_RESP_ERR) { + err_cqe64 = (struct golan_err_cqe *)cqe64; + int i = 0; + if (!error_state++) { + DBGC (golan ,"\n"); + for ( i = 0 ; i < 16 ; i += 2 ) { + DBGC (golan ,"%x %x\n", + be32_to_cpu(((uint32_t *)(err_cqe64))[i]), + be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1])); + } + DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n", + err_cqe64->syndrome, err_cqe64->vendor_err_synd, + err_cqe64->hw_syndrom); + } + } + /* Identify work queue */ + wq = ib_find_wq(cq, qpn, is_send); + if (!wq) { + DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n", + __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn); + return -EINVAL; + } + + qp = wq->qp; + golan_qp = ib_qp_get_drvdata ( qp ); + + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + if (is_send) { + wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1); + idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE; + } else { + idx = wqe_ctr & (wq->num_wqes - 1); + } + + iobuf = wq->iobufs[idx]; + if (!iobuf) { + DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n", + __FUNCTION__, idx, qpn); + return -EINVAL; + } + wq->iobufs[idx] = NULL; + + if (is_send) { + ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR)); + } else { + len = be32_to_cpu(cqe64->byte_cnt); + memset(&recv_dest, 0, sizeof(recv_dest)); + recv_dest.qpn = qpn; + /* Construct address vector */ + memset(&recv_source, 0, sizeof(recv_source)); + switch (qp->type) { + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_UD: + /* Locate corresponding GRH */ + assert ( golan_qp->rq.grh != NULL ); + grh = &golan_qp->rq.grh[ idx ]; + + recv_source.qpn = 
be32_to_cpu(cqe64->flags_rqpn) & 0xffffff; + recv_source.lid = be16_to_cpu(cqe64->slid); + recv_source.sl = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf; + gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3; + if (!gid_present) { + recv_dest.gid_present = recv_source.gid_present = 0; + } else { + recv_dest.gid_present = recv_source.gid_present = 1; + //if (recv_source.gid_present == 0x1) { + memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid)); + memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid)); + //} else { // recv_source.gid_present = 0x3 + /* GRH is located in the upper 64 byte of the CQE128 + * currently not supported */ + //; + //} + } + len -= sizeof ( *grh ); + break; + case IB_QPT_RC: + case IB_QPT_ETH: + default: + DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type); + return -EINVAL; + } + assert(len <= iob_tailroom(iobuf)); + iob_put(iobuf, len); + ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR)); + } + return 0; +} + +static int golan_is_hw_ownership(struct ib_completion_queue *cq, + struct golan_cqe64 *cqe64) +{ + return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) != + ((cq->next_idx >> ilog2(cq->num_cqes)) & 1)); +} +static void golan_poll_cq(struct ib_device *ibdev, + struct ib_completion_queue *cq) +{ + unsigned int i; + int rc = 0; + unsigned int cqe_idx_mask; + struct golan_cqe64 *cqe64; + struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq); + struct golan *golan = ib_get_drvdata(ibdev); + + for (i = 0; i < cq->num_cqes; ++i) { + /* Look for completion entry */ + cqe_idx_mask = (cq->num_cqes - 1); + cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask]; + /* temporary valid only for 64 byte CQE */ + if (golan_is_hw_ownership(cq, cqe64) || + ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) == + GOLAN_CQE_OPCODE_NOT_VALID)) { + break; /* HW ownership */ + } + + DBGC2( golan , "%s CQN 0x%lx [%ld] \n", __FUNCTION__, cq->cqn, cq->next_idx); + /* + * Make sure we read 
CQ entry contents after we've checked the + * ownership bit. (PRM - 6.5.3.2) + */ + rmb(); + rc = golan_complete(ibdev, cq, cqe64); + if (rc != 0) { + DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn); + } + + /* Update completion queue's index */ + cq->next_idx++; + + /* Update doorbell record */ + *(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff); + } +} + +static const char *golan_eqe_type_str(u8 type) +{ + switch (type) { + case GOLAN_EVENT_TYPE_COMP: + return "GOLAN_EVENT_TYPE_COMP"; + case GOLAN_EVENT_TYPE_PATH_MIG: + return "GOLAN_EVENT_TYPE_PATH_MIG"; + case GOLAN_EVENT_TYPE_COMM_EST: + return "GOLAN_EVENT_TYPE_COMM_EST"; + case GOLAN_EVENT_TYPE_SQ_DRAINED: + return "GOLAN_EVENT_TYPE_SQ_DRAINED"; + case GOLAN_EVENT_TYPE_SRQ_LAST_WQE: + return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE"; + case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT: + return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT"; + case GOLAN_EVENT_TYPE_CQ_ERROR: + return "GOLAN_EVENT_TYPE_CQ_ERROR"; + case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR: + return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR"; + case GOLAN_EVENT_TYPE_PATH_MIG_FAILED: + return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED"; + case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; + case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR: + return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR"; + case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR: + return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR"; + case GOLAN_EVENT_TYPE_INTERNAL_ERROR: + return "GOLAN_EVENT_TYPE_INTERNAL_ERROR"; + case GOLAN_EVENT_TYPE_PORT_CHANGE: + return "GOLAN_EVENT_TYPE_PORT_CHANGE"; + case GOLAN_EVENT_TYPE_GPIO_EVENT: + return "GOLAN_EVENT_TYPE_GPIO_EVENT"; + case GOLAN_EVENT_TYPE_REMOTE_CONFIG: + return "GOLAN_EVENT_TYPE_REMOTE_CONFIG"; + case GOLAN_EVENT_TYPE_DB_BF_CONGESTION: + return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION"; + case GOLAN_EVENT_TYPE_STALL_EVENT: + return "GOLAN_EVENT_TYPE_STALL_EVENT"; + case GOLAN_EVENT_TYPE_CMD: + return "GOLAN_EVENT_TYPE_CMD"; + case 
GOLAN_EVENT_TYPE_PAGE_REQUEST: + return "GOLAN_EVENT_TYPE_PAGE_REQUEST"; + default: + return "Unrecognized event"; + } +} + +static const char *golan_eqe_port_subtype_str(u8 subtype) +{ + switch (subtype) { + case GOLAN_PORT_CHANGE_SUBTYPE_DOWN: + return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN"; + case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE: + return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE"; + case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED: + return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED"; + case GOLAN_PORT_CHANGE_SUBTYPE_LID: + return "GOLAN_PORT_CHANGE_SUBTYPE_LID"; + case GOLAN_PORT_CHANGE_SUBTYPE_PKEY: + return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY"; + case GOLAN_PORT_CHANGE_SUBTYPE_GUID: + return "GOLAN_PORT_CHANGE_SUBTYPE_GUID"; + case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG"; + default: + return "Unrecognized event"; + } +} + +/** + * Update Infiniband parameters using Commands + * + * @v ibdev Infiniband device + * @ret rc Return status code + */ +static int golan_ib_update ( struct ib_device *ibdev ) { + int rc; + + /* Get IB parameters */ + if ( ( rc = golan_get_ib_info ( ibdev ) ) != 0 ) + return rc; + + /* Notify Infiniband core of potential link state change */ + ib_link_state_changed ( ibdev ); + + return 0; +} + +static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe) +{ + struct ib_device *ibdev; + u8 port; + + port = (eqe->data.port.port >> 4) & 0xf; + ibdev = golan->ports[port - 1].ibdev; + + if ( ! 
ib_is_open ( ibdev ) ) + return; + + switch (eqe->sub_type) { + case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE: + golan_ib_update ( ibdev ); + /* Fall through */ + case GOLAN_PORT_CHANGE_SUBTYPE_DOWN: + case GOLAN_PORT_CHANGE_SUBTYPE_LID: + case GOLAN_PORT_CHANGE_SUBTYPE_PKEY: + case GOLAN_PORT_CHANGE_SUBTYPE_GUID: + case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED: + DBGC( golan , "%s event %s(%d) (sub event %s(%d))arrived on port %d\n", + __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type, + golan_eqe_port_subtype_str(eqe->sub_type), + eqe->sub_type, port); + break; + default: + DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n", + __FUNCTION__, port, eqe->sub_type); + } +} + +static struct golan_eqe *golan_next_eqe_sw(struct golan_event_queue *eq) +{ + uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1)); + struct golan_eqe *eqe = &(eq->eqes[entry]); + return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe); +} + + +/** + * Poll event queue + * + * @v ibdev Infiniband device + */ +static void golan_poll_eq(struct ib_device *ibdev) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_event_queue *eq = &(golan->eq); + struct golan_eqe *eqe; + u32 cqn; + int counter = 0; + + while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
+ */
+ rmb();
+
+ DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
+ golan_eqe_type_str(eqe->type));
+ switch (eqe->type) {
+ case GOLAN_EVENT_TYPE_COMP:
+ /* We don't need to handle completion events since we
+ * poll all the CQs after polling the EQ */
+ break;
+ case GOLAN_EVENT_TYPE_PATH_MIG:
+ case GOLAN_EVENT_TYPE_COMM_EST:
+ case GOLAN_EVENT_TYPE_SQ_DRAINED:
+ case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
+ case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
+ case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
+ case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
+ DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
+ golan_eqe_type_str(eqe->type), eqe->type);
+ break;
+ case GOLAN_EVENT_TYPE_CMD:
+// golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
+ break;
+ case GOLAN_EVENT_TYPE_PORT_CHANGE:
+ golan_handle_port_event(golan, eqe);
+ break;
+ case GOLAN_EVENT_TYPE_CQ_ERROR:
+ cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+ DBGC (golan ,"CQ error on CQN 0x%x, syndrom 0x%x\n",
+ cqn, eqe->data.cq_err.syndrome);
+// mlx5_cq_event(dev, cqn, eqe->type);
+ break;
+ /*
+ * currently the driver does not support dynamic memory request
+ * during FW run, a follow-up change will allocate FW pages once and
+ * never release them till driver shutdown, this change will not support
+ * this request as currently this request is not issued anyway. 
+ case GOLAN_EVENT_TYPE_PAGE_REQUEST: + { + // we should check if we get this event while we + // waiting for a command + u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); + s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); + + DBGC (golan ,"%s page request for func 0x%x, napges %d\n", + __FUNCTION__, func_id, npages); + golan_provide_pages(golan, npages, func_id); + } + break; + */ + default: + DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__, + eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + golan_eq_update_ci(eq, GOLAN_EQ_UNARMED); + ++counter; + } +} + +/** + * Attach to multicast group + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v gid Multicast GID + * @ret rc Return status code + */ +static int golan_mcast_attach(struct ib_device *ibdev, + struct ib_queue_pair *qp, + union ib_gid *gid) +{ + struct golan *golan = ib_get_drvdata(ibdev); + struct golan_cmd_layout *cmd; + int rc; + + if ( qp == NULL ) { + DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n", + __FUNCTION__ ); + return -EFAULT; + } + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ATTACH_TO_MCG, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_attach_mcg_mbox_in), + sizeof(struct golan_attach_mcg_mbox_out)); + ((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn); + + memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd ); + + DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn); + return 0; +err_attach_to_mcg_cmd: + DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); + return rc; +} + +/** + * Detach from multicast group + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v gid Multicast GID + * @ret rc Return status code + */ +static void golan_mcast_detach(struct ib_device *ibdev, + struct ib_queue_pair *qp, + union ib_gid *gid) +{ + 
struct golan *golan = ib_get_drvdata(ibdev); + struct golan_cmd_layout *cmd; + int rc; + + cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DETACH_FROM_MCG, 0x0, + GEN_MBOX, NO_MBOX, + sizeof(struct golan_detach_mcg_mbox_in), + sizeof(struct golan_detach_mcg_mbox_out)); + ((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn); + + memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid)); + + rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__); + GOLAN_PRINT_RC_AND_CMD_STATUS; + + DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn); +} + +/** + * Inform embedded subnet management agent of a received MAD + * + * @v ibdev Infiniband device + * @v mad MAD + * @ret rc Return status code + */ +static int golan_inform_sma(struct ib_device *ibdev, + union ib_mad *mad) +{ + if (!ibdev || !mad) { + return 1; + } + + return 0; +} + +static int golan_register_ibdev(struct golan_port *port) +{ + struct ib_device *ibdev = port->ibdev; + int rc; + + golan_get_ib_info ( ibdev ); + /* Register Infiniband device */ + if ((rc = register_ibdev(ibdev)) != 0) { + DBG ( "%s port %d could not register IB device: (rc = %d)\n", + __FUNCTION__, ibdev->port, rc); + return rc; + } + + port->netdev = ipoib_netdev( ibdev ); + + return 0; +} + +static inline void golan_bring_down(struct golan *golan) +{ + DBGC(golan, "%s: start\n", __FUNCTION__); + + if (~golan->flags & GOLAN_OPEN) { + DBGC(golan, "%s: end (already closed)\n", __FUNCTION__); + return; + } + + golan_destroy_mkey(golan); + golan_dealloc_pd(golan); + golan_destory_eq(golan); + golan_dealloc_uar(golan); + golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL); + golan_handle_pages(golan, GOLAN_REG_PAGES , GOLAN_PAGES_TAKE); + golan_disable_hca(golan); + golan_cmd_uninit(golan); + golan->flags &= ~GOLAN_OPEN; + DBGC(golan, "%s: end\n", __FUNCTION__); +} + +static int golan_set_link_speed ( struct golan *golan ){ + mlx_status status; + int i = 0; + int utils_inited = 0; 
+ + if ( ! golan->utils ) { + utils_inited = 1; + status = init_mlx_utils ( & golan->utils, golan->pci ); + MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" ); + } + + for ( i = 0; i < golan->caps.num_ports; ++i ) { + status = mlx_set_link_speed ( golan->utils, i + 1, LINK_SPEED_IB, LINK_SPEED_SDR ); + MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" ); + } + +set_link_speed_err: +if ( utils_inited ) + free_mlx_utils ( & golan->utils ); +utils_init_err: + return status; +} + +static inline int golan_bring_up(struct golan *golan) +{ + int rc = 0; + DBGC(golan, "%s\n", __FUNCTION__); + + if (golan->flags & GOLAN_OPEN) + return 0; + + if (( rc = golan_cmd_init(golan) )) + goto out; + + if (( rc = golan_core_enable_hca(golan) )) + goto cmd_uninit; + + /* Query for need for boot pages */ + if (( rc = golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_GIVE) )) + goto disable; + + if (( rc = golan_qry_hca_cap(golan) )) + goto pages; + + if (( rc = golan_set_hca_cap(golan) )) + goto pages; + + if (( rc = golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_GIVE) )) + goto pages; + + if (( rc = golan_set_link_speed ( golan ) )) + goto pages_teardown; + + //Reg Init? 
+ if (( rc = golan_hca_init(golan) )) + goto pages_2; + + if (( rc = golan_alloc_uar(golan) )) + goto teardown; + + if (( rc = golan_create_eq(golan) )) + goto de_uar; + + if (( rc = golan_alloc_pd(golan) )) + goto de_eq; + + if (( rc = golan_create_mkey(golan) )) + goto de_pd; + + golan->flags |= GOLAN_OPEN; + return 0; + + golan_destroy_mkey(golan); +de_pd: + golan_dealloc_pd(golan); +de_eq: + golan_destory_eq(golan); +de_uar: + golan_dealloc_uar(golan); +teardown: + golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL); +pages_2: +pages_teardown: + golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_TAKE); +pages: + golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_TAKE); +disable: + golan_disable_hca(golan); +cmd_uninit: + golan_cmd_uninit(golan); +out: + return rc; +} + +/** + * Close Infiniband link + * + * @v ibdev Infiniband device + */ +static void golan_ib_close ( struct ib_device *ibdev ) { + struct golan *golan = NULL; + + DBG ( "%s start\n", __FUNCTION__ ); + if ( ! ibdev ) + return; + golan = ib_get_drvdata ( ibdev ); + golan_bring_down ( golan ); + DBG ( "%s end\n", __FUNCTION__ ); +} + +/** + * Initialise Infiniband link + * + * @v ibdev Infiniband device + * @ret rc Return status code + */ +static int golan_ib_open ( struct ib_device *ibdev ) { + struct golan *golan = NULL; + DBG ( "%s start\n", __FUNCTION__ ); + + if ( ! 
ibdev ) + return -EINVAL; + golan = ib_get_drvdata ( ibdev ); + golan_bring_up ( golan ); + golan_ib_update ( ibdev ); + + DBG ( "%s end\n", __FUNCTION__ ); + return 0; +} + +/** Golan Infiniband operations */ +static struct ib_device_operations golan_ib_operations = { + .create_cq = golan_create_cq, + .destroy_cq = golan_destroy_cq, + .create_qp = golan_create_qp, + .modify_qp = golan_modify_qp, + .destroy_qp = golan_destroy_qp, + .post_send = golan_post_send, + .post_recv = golan_post_recv, + .poll_cq = golan_poll_cq, + .poll_eq = golan_poll_eq, + .open = golan_ib_open, + .close = golan_ib_close, + .mcast_attach = golan_mcast_attach, + .mcast_detach = golan_mcast_detach, + .set_port_info = golan_inform_sma, + .set_pkey_table = golan_inform_sma, +}; + +static int golan_probe_normal ( struct pci_device *pci ) { + struct golan *golan; + struct ib_device *ibdev; + struct golan_port *port; + int i; + int rc = 0; + + golan = golan_alloc(); + if ( !golan ) { + rc = -ENOMEM; + goto err_golan_alloc; + } + + /* at POST stage some BIOSes have limited available dynamic memory */ + if ( golan_init_fw_areas ( golan ) ) { + rc = -ENOMEM; + goto err_golan_golan_init_pages; + } + + /* Setup PCI bus and HCA BAR */ + pci_set_drvdata( pci, golan ); + golan->pci = pci; + golan_pci_init( golan ); + /* config command queues */ + if ( golan_bring_up( golan ) ) { + DBGC (golan ,"golan bringup failed\n"); + rc = -1; + goto err_golan_bringup; + } + + if ( ! 
DEVICE_IS_CIB ( pci->device ) ) { + if ( init_mlx_utils ( & golan->utils, pci ) ) { + rc = -1; + goto err_utils_init; + } + } + /* Allocate Infiniband devices */ + for (i = 0; i < golan->caps.num_ports; ++i) { + ibdev = alloc_ibdev( 0 ); + if ( !ibdev ) { + rc = -ENOMEM; + goto err_golan_probe_alloc_ibdev; + } + golan->ports[i].ibdev = ibdev; + golan->ports[i].vep_number = 0; + ibdev->op = &golan_ib_operations; + ibdev->dev = &pci->dev; + ibdev->port = (GOLAN_PORT_BASE + i); + ib_set_drvdata( ibdev, golan ); + } + + /* Register devices */ + for ( i = 0; i < golan->caps.num_ports; ++i ) { + port = &golan->ports[i]; + if ((rc = golan_register_ibdev ( port ) ) != 0 ) { + goto err_golan_probe_register_ibdev; + } + } + + golan_bring_down ( golan ); + + return 0; + + i = golan->caps.num_ports; +err_golan_probe_register_ibdev: + for ( i-- ; ( signed int ) i >= 0 ; i-- ) + unregister_ibdev ( golan->ports[i].ibdev ); + + i = golan->caps.num_ports; +err_golan_probe_alloc_ibdev: + for ( i-- ; ( signed int ) i >= 0 ; i-- ) + ibdev_put ( golan->ports[i].ibdev ); + if ( ! DEVICE_IS_CIB ( pci->device ) ) { + free_mlx_utils ( & golan->utils ); + } +err_utils_init: + golan_bring_down ( golan ); +err_golan_bringup: + iounmap( golan->iseg ); + golan_free_fw_areas ( golan ); +err_golan_golan_init_pages: + free ( golan ); +err_golan_alloc: + DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc); + return rc; +} + +static void golan_remove_normal ( struct pci_device *pci ) { + struct golan *golan = pci_get_drvdata(pci); + struct golan_port *port; + int i; + + DBGC(golan, "%s\n", __FUNCTION__); + + for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) { + port = &golan->ports[i]; + unregister_ibdev ( port->ibdev ); + } + for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) { + netdev_nullify ( golan->ports[i].netdev ); + } + for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) { + ibdev_put ( golan->ports[i].ibdev ); + } + if ( ! 
DEVICE_IS_CIB ( pci->device ) ) { + free_mlx_utils ( & golan->utils ); + } + iounmap( golan->iseg ); + golan_free_fw_areas ( golan ); + free(golan); +} + +/*************************************************************************** + * NODNIC operations + **************************************************************************/ +static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev, + struct nodnic_send_wqbb *wqbb ) { + mlx_status status = MLX_SUCCESS; + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct shomron_nodnic_eth_send_wqe *eth_wqe = + ( struct shomron_nodnic_eth_send_wqe * )wqbb; + struct shomronprm_wqe_segment_ctrl_send *ctrl; + + if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) { + DBG("%s: Invalid parameters\n",__FUNCTION__); + status = MLX_FAILED; + goto err; + } + wmb(); + ctrl = & eth_wqe->ctrl; + writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt + + ( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET + : DB_BUFFER0_EVEN_OFFSET ) ); +err: + return status; +} + +static mlx_status shomron_fill_eth_send_wqe ( struct ib_device *ibdev, + struct ib_queue_pair *qp, struct ib_address_vector *av __unused, + struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb, + unsigned long wqe_index ) { + mlx_status status = MLX_SUCCESS; + struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev ); + struct shomron_nodnic_eth_send_wqe *eth_wqe = NULL; + struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; + struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = + ib_qp_get_drvdata ( qp ); + nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair; + struct nodnic_send_ring *send_ring = &nodnic_qp->send; + mlx_uint32 qpn = 0; + + eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb; + memset ( ( ( ( void * ) eth_wqe ) ), 0, + ( sizeof ( *eth_wqe ) ) ); + + status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring, + &qpn); + if ( status != 
MLX_SUCCESS ) { + DBG("nodnic_port_get_qpn failed\n"); + goto err; + } + +#define SHOMRON_GENERATE_CQE 0x3 +#define SHOMRON_INLINE_HEADERS_SIZE 18 +#define SHOMRON_INLINE_HEADERS_OFFSET 32 + MLX_FILL_2 ( ð_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND, + wqe_index, wqe_index & 0xFFFF); + MLX_FILL_2 ( ð_wqe->ctrl, 1, ds, 0x4 , qpn, qpn ); + MLX_FILL_1 ( ð_wqe->ctrl, 2, + ce, SHOMRON_GENERATE_CQE /* generate completion */ + ); + MLX_FILL_2 ( ð_wqe->ctrl, 7, + inline_headers1, + cpu_to_be16(*(mlx_uint16 *)iobuf->data), + inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE + ); + memcpy((void *)ð_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET, + iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2); + iob_pull(iobuf, SHOMRON_INLINE_HEADERS_SIZE); + MLX_FILL_1 ( ð_wqe->data[0], 0, + byte_count, iob_len ( iobuf ) ); + MLX_FILL_1 ( ð_wqe->data[0], 1, l_key, + flexboot_nodnic->device_priv.lkey ); + MLX_FILL_H ( ð_wqe->data[0], 2, + local_address_h, virt_to_bus ( iobuf->data ) ); + MLX_FILL_1 ( ð_wqe->data[0], 3, + local_address_l, virt_to_bus ( iobuf->data ) ); +err: + return status; +} + +static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) { + union shomronprm_completion_entry *cq_entry; + uint32_t opcode; + + cq_entry = (union shomronprm_completion_entry *)cqe; + cqe_data->owner = MLX_GET ( &cq_entry->normal, owner ); + opcode = MLX_GET ( &cq_entry->normal, opcode ); +#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0 +#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2 +#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13 +#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14 + cqe_data->is_error = + ( opcode >= FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR); + if ( cqe_data->is_error ) { + cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome ); + cqe_data->vendor_err_syndrome = + MLX_GET ( &cq_entry->error, vendor_error_syndrome ); + cqe_data->is_send = + (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR); + } else { + cqe_data->is_send = + (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND); + 
cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter ); + cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt ); + + } + if ( cqe_data->is_send == TRUE ) + cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn ); + else + cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn ); + + return 0; +} + +static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) { + unsigned int i = 0; + union shomronprm_completion_entry *cq_list; + + cq_list = (union shomronprm_completion_entry *)cq; + for ( ; i < num_cqes ; i++ ) + MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 ); + return 0; +} + +static mlx_size shomron_get_cqe_size () { + return sizeof ( union shomronprm_completion_entry ); +} + +struct flexboot_nodnic_callbacks shomron_nodnic_callbacks = { + .get_cqe_size = shomron_get_cqe_size, + .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe, + .fill_completion = shomron_fill_completion, + .cqe_set_owner = shomron_cqe_set_owner, + .irq = flexboot_nodnic_eth_irq, + .tx_uar_send_doorbell_fn = shomron_tx_uar_send_db, +}; + +static int shomron_nodnic_is_supported ( struct pci_device *pci ) { + if ( DEVICE_IS_CIB ( pci->device ) ) + return 0; + + return flexboot_nodnic_is_supported ( pci ); +} +/**************************************************************************/ + +static int golan_probe ( struct pci_device *pci ) { + int rc = -ENOTSUP; + + DBG ( "%s: start\n", __FUNCTION__ ); + + if ( ! pci ) { + DBG ( "%s: PCI is NULL\n", __FUNCTION__ ); + rc = -EINVAL; + goto probe_done; + } + + if ( shomron_nodnic_is_supported ( pci ) ) { + DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ ); + rc = flexboot_nodnic_probe ( pci, &shomron_nodnic_callbacks, NULL ); + } else { + DBG ( "%s: Using normal driver\n", __FUNCTION__ ); + rc = golan_probe_normal ( pci ); + } + +probe_done: + DBG ( "%s: rc = %d\n", __FUNCTION__, rc ); + return rc; +} + +static void golan_remove ( struct pci_device *pci ) { + DBG ( "%s: start\n", __FUNCTION__ ); + + if ( ! 
shomron_nodnic_is_supported ( pci ) ) { + DBG ( "%s: Using normal driver remove\n", __FUNCTION__ ); + golan_remove_normal ( pci ); + return; + } + + DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ ); + + flexboot_nodnic_remove ( pci ); + + DBG ( "%s: end\n", __FUNCTION__ ); +} + +static struct pci_device_id golan_nics[] = { + PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver: DevID 4113", 0 ), + PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ), + PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ), + PCI_ROM ( 0x15b3, 0x1017, "ConnectX-5", "ConnectX-5 HCA driver, DevID 4119", 0 ), + PCI_ROM ( 0x15b3, 0x1019, "ConnectX-5EX", "ConnectX-5EX HCA driver, DevID 4121", 0 ), + PCI_ROM ( 0x15b3, 0x101b, "ConnectX-6", "ConnectX-6 HCA driver, DevID 4123", 0 ), + PCI_ROM ( 0x15b3, 0x101d, "ConnectX-6DX", "ConnectX-6DX HCA driver, DevID 4125", 0 ), + PCI_ROM ( 0x15b3, 0xa2d2, "BlueField", "BlueField integrated ConnectX-5 network controller HCA driver, DevID 41682", 0 ), +}; + +struct pci_driver golan_driver __pci_driver = { + .ids = golan_nics, + .id_count = (sizeof(golan_nics) / sizeof(golan_nics[0])), + .probe = golan_probe, + .remove = golan_remove, +}; diff --git a/src/drivers/infiniband/golan.h b/src/drivers/infiniband/golan.h new file mode 100755 index 00000000..2fd06ecf --- /dev/null +++ b/src/drivers/infiniband/golan.h @@ -0,0 +1,344 @@ +#ifndef _GOLAN_H_ +#define _GOLAN_H_ + +/* + * Copyright (C) 2013-2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include "CIB_PRM.h" +#include "mlx_utils/include/public/mlx_utils.h" + +#define GOLAN_PCI_CONFIG_BAR_SIZE 0x100000//HERMON_PCI_CONFIG_BAR_SIZE //TODO: What is the BAR size? + +#define GOLAN_PAS_SIZE sizeof(uint64_t) + +#define GOLAN_INVALID_LKEY 0x00000100UL + +#define GOLAN_MAX_PORTS 2 +#define GOLAN_PORT_BASE 1 + +#define MELLANOX_VID 0x15b3 +#define GOLAN_HCA_BAR PCI_BASE_ADDRESS_0 //BAR 0 + +#define GOLAN_HCR_MAX_WAIT_MS 10000 + +#define min(a,b) ((a)<(b)?(a):(b)) + +#define GOLAN_PAGE_SHIFT 12 +#define GOLAN_PAGE_SIZE (1 << GOLAN_PAGE_SHIFT) +#define GOLAN_PAGE_MASK (GOLAN_PAGE_SIZE - 1) + +#define MAX_MBOX ( GOLAN_PAGE_SIZE / MAILBOX_STRIDE ) +#define DEF_CMD_IDX 1 +#define MEM_CMD_IDX 0 +#define NO_MBOX 0xffff +#define MEM_MBOX MEM_CMD_IDX +#define GEN_MBOX DEF_CMD_IDX + +#define CMD_IF_REV 4 + +#define MAX_PASE_MBOX ((GOLAN_CMD_PAS_CNT) - 2) + +#define CMD_STATUS( golan , idx ) ((struct golan_outbox_hdr *)(get_cmd( (golan) , (idx) )->out))->status +#define CMD_SYND( golan , idx ) ((struct golan_outbox_hdr *)(get_cmd( (golan) , (idx) )->out))->syndrome +#define QRY_PAGES_OUT( golan, idx ) ((struct golan_query_pages_outbox *)(get_cmd( (golan) , (idx) )->out)) + +#define VIRT_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )virt_to_bus(addr))) +#define BE64_BUS_2_VIRT( addr ) bus_to_virt(be64_to_cpu(addr)) +#define USR_2_BE64_BUS( addr ) cpu_to_be64(((unsigned long long )user_to_phys(addr, 0))) +#define BE64_BUS_2_USR( addr ) be64_to_cpu(phys_to_user(addr)) + +#define GET_INBOX(golan, idx) (&(((struct mbox *)(golan->mboxes.inbox))[idx])) +#define GET_OUTBOX(golan, 
idx) (&(((struct mbox *)(golan->mboxes.outbox))[idx])) + +#define GOLAN_MBOX_IN( cmd_ptr, in_ptr ) ( { \ + union { \ + __be32 raw[4]; \ + typeof ( *(in_ptr) ) cooked; \ + } *u = container_of ( &(cmd_ptr)->in[0], typeof ( *u ), raw[0] ); \ + &u->cooked; } ) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +/* Fw status fields */ +typedef enum { + NO_ERRORS = 0x0, + SIGNATURE_ERROR = 0x1, + TOKEN_ERROR = 0x2, + BAD_BLOCK_NUMBER = 0x3, + BAD_OUTPUT_POINTER = 0x4, // pointer not align to mailbox size + BAD_INPUT_POINTER = 0x5, // pointer not align to mailbox size + INTERNAL_ERROR = 0x6, + INPUT_LEN_ERROR = 0x7, // input length less than 0x8. + OUTPUT_LEN_ERROR = 0x8, // output length less than 0x8. + RESERVE_NOT_ZERO = 0x9, + BAD_CMD_TYPE = 0x10, +} return_hdr_t; + +struct golan_cmdq_md { + void *addr; + u16 log_stride; + u16 size; +}; + +struct golan_uar { + uint32_t index; + void *virt; + unsigned long phys; +}; + + +struct golan_firmware_area { + /* length of area in pages */ + uint32_t npages; + /** Firmware area in external memory + * + * This is allocated when first needed, and freed only on + * final teardown, in order to avoid memory map changes at + * runtime. 
+ */ + userptr_t area; +}; +/* Queue Pair */ +#define GOLAN_SEND_WQE_BB_SIZE 64 +#define GOLAN_SEND_UD_WQE_SIZE sizeof(struct golan_send_wqe_ud) +#define GOLAN_RECV_WQE_SIZE sizeof(struct golan_recv_wqe_ud) +#define GOLAN_WQEBBS_PER_SEND_UD_WQE DIV_ROUND_UP(GOLAN_SEND_UD_WQE_SIZE, GOLAN_SEND_WQE_BB_SIZE) +#define GOLAN_SEND_OPCODE 0x0a +#define GOLAN_WQE_CTRL_WQE_IDX_BIT 8 + +enum golan_ib_qp_state { + GOLAN_IB_QPS_RESET, + GOLAN_IB_QPS_INIT, + GOLAN_IB_QPS_RTR, + GOLAN_IB_QPS_RTS, + GOLAN_IB_QPS_SQD, + GOLAN_IB_QPS_SQE, + GOLAN_IB_QPS_ERR +}; + +struct golan_send_wqe_ud { + struct golan_wqe_ctrl_seg ctrl; + struct golan_av datagram; + struct golan_wqe_data_seg data; +}; + +union golan_send_wqe { + struct golan_send_wqe_ud ud; + uint8_t pad[GOLAN_WQEBBS_PER_SEND_UD_WQE * GOLAN_SEND_WQE_BB_SIZE]; +}; + +struct golan_recv_wqe_ud { + struct golan_wqe_data_seg data[2]; +}; + +struct golan_recv_wq { + struct golan_recv_wqe_ud *wqes; + /* WQ size in bytes */ + int size; + /* In SQ, it will be increased in wqe_size (number of WQEBBs per WQE) */ + u16 next_idx; + /** GRH buffers (if applicable) */ + struct ib_global_route_header *grh; + /** Size of GRH buffers */ + size_t grh_size; +}; + +struct golan_send_wq { + union golan_send_wqe *wqes; + /* WQ size in bytes */ + int size; + /* In SQ, it will be increased in wqe_size (number of WQEBBs per WQE) */ + u16 next_idx; +}; + +struct golan_queue_pair { + void *wqes; + int size; + struct golan_recv_wq rq; + struct golan_send_wq sq; + struct golan_qp_db *doorbell_record; + u32 doorbell_qpn; + enum golan_ib_qp_state state; +}; + +/* Completion Queue */ +#define GOLAN_CQE_OPCODE_NOT_VALID 0x0f +#define GOLAN_CQE_OPCODE_BIT 4 +#define GOLAN_CQ_DB_RECORD_SIZE sizeof(uint64_t) +#define GOLAN_CQE_OWNER_MASK 1 + +#define MANAGE_PAGES_PSA_OFFSET 0 +#define PXE_CMDIF_REF 5 + +enum { + GOLAN_CQE_SW_OWNERSHIP = 0x0, + GOLAN_CQE_HW_OWNERSHIP = 0x1 +}; + +enum { + GOLAN_CQE_SIZE_64 = 0, + GOLAN_CQE_SIZE_128 = 1 +}; + +struct 
golan_completion_queue { + struct golan_cqe64 *cqes; + int size; + __be64 *doorbell_record; +}; + + +/* Event Queue */ +#define GOLAN_EQE_SIZE sizeof(struct golan_eqe) +#define GOLAN_NUM_EQES 8 +#define GOLAN_EQ_DOORBELL_OFFSET 0x40 +#define DB_BUFFER0_EVEN_OFFSET 0x800 +#define DB_BUFFER0_ODD_OFFSET 0x900 + +#define GOLAN_EQ_MAP_ALL_EVENTS \ + ((1 << GOLAN_EVENT_TYPE_PATH_MIG )| \ + (1 << GOLAN_EVENT_TYPE_COMM_EST )| \ + (1 << GOLAN_EVENT_TYPE_SQ_DRAINED )| \ + (1 << GOLAN_EVENT_TYPE_SRQ_LAST_WQE )| \ + (1 << GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT )| \ + (1 << GOLAN_EVENT_TYPE_CQ_ERROR )| \ + (1 << GOLAN_EVENT_TYPE_WQ_CATAS_ERROR )| \ + (1 << GOLAN_EVENT_TYPE_PATH_MIG_FAILED )| \ + (1 << GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR )| \ + (1 << GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR )| \ + (1 << GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR )| \ + (1 << GOLAN_EVENT_TYPE_INTERNAL_ERROR )| \ + (1 << GOLAN_EVENT_TYPE_PORT_CHANGE )| \ + (1 << GOLAN_EVENT_TYPE_GPIO_EVENT )| \ + (1 << GOLAN_EVENT_TYPE_CLIENT_RE_REGISTER )| \ + (1 << GOLAN_EVENT_TYPE_REMOTE_CONFIG )| \ + (1 << GOLAN_EVENT_TYPE_DB_BF_CONGESTION )| \ + (1 << GOLAN_EVENT_TYPE_STALL_EVENT )| \ + (1 << GOLAN_EVENT_TYPE_PACKET_DROPPED )| \ + (1 << GOLAN_EVENT_TYPE_CMD )| \ + (1 << GOLAN_EVENT_TYPE_PAGE_REQUEST )) + +enum golan_event { + GOLAN_EVENT_TYPE_COMP = 0x0, + + GOLAN_EVENT_TYPE_PATH_MIG = 0x01, + GOLAN_EVENT_TYPE_COMM_EST = 0x02, + GOLAN_EVENT_TYPE_SQ_DRAINED = 0x03, + GOLAN_EVENT_TYPE_SRQ_LAST_WQE = 0x13, + GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, + + GOLAN_EVENT_TYPE_CQ_ERROR = 0x04, + GOLAN_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + GOLAN_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + + GOLAN_EVENT_TYPE_INTERNAL_ERROR = 0x08, + GOLAN_EVENT_TYPE_PORT_CHANGE = 0x09, + GOLAN_EVENT_TYPE_GPIO_EVENT = 0x15, +// GOLAN_EVENT_TYPE_CLIENT_RE_REGISTER = 0x16, + GOLAN_EVENT_TYPE_REMOTE_CONFIG = 0x19, + + GOLAN_EVENT_TYPE_DB_BF_CONGESTION 
= 0x1a, + GOLAN_EVENT_TYPE_STALL_EVENT = 0x1b, + + GOLAN_EVENT_TYPE_PACKET_DROPPED = 0x1f, + + GOLAN_EVENT_TYPE_CMD = 0x0a, + GOLAN_EVENT_TYPE_PAGE_REQUEST = 0x0b, + GOLAN_EVENT_TYPE_PAGE_FAULT = 0x0C, +}; + +enum golan_port_sub_event { + GOLAN_PORT_CHANGE_SUBTYPE_DOWN = 1, + GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE = 4, + GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, + GOLAN_PORT_CHANGE_SUBTYPE_LID = 6, + GOLAN_PORT_CHANGE_SUBTYPE_PKEY = 7, + GOLAN_PORT_CHANGE_SUBTYPE_GUID = 8, + GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9 +}; + + +enum { + GOLAN_EQE_SW_OWNERSHIP = 0x0, + GOLAN_EQE_HW_OWNERSHIP = 0x1 +}; + +enum { + GOLAN_EQ_UNARMED = 0, + GOLAN_EQ_ARMED = 1, +}; + +struct golan_event_queue { + uint8_t eqn; + uint64_t mask; + struct golan_eqe *eqes; + int size; + __be32 *doorbell; + uint32_t cons_index; +}; + +struct golan_port { + /** Infiniband device */ + struct ib_device *ibdev; + /** Network device */ + struct net_device *netdev; + /** VEP number */ + u8 vep_number; +}; + +struct golan_mboxes { + void *inbox; + void *outbox; +}; + +#define GOLAN_OPEN 0x1 + +struct golan { + struct pci_device *pci; + struct golan_hca_init_seg *iseg; + struct golan_cmdq_md cmd; + struct golan_hca_cap caps; /* stored as big indian*/ + struct golan_mboxes mboxes; + struct list_head pages; + uint32_t cmd_bm; + uint32_t total_dma_pages; + struct golan_uar uar; + struct golan_event_queue eq; + uint32_t pdn; + u32 mkey; + u32 flags; + mlx_utils *utils; + + struct golan_port ports[GOLAN_MAX_PORTS]; +#define GOLAN_FW_AREAS_NUM 2 + struct golan_firmware_area fw_areas[GOLAN_FW_AREAS_NUM]; +}; + +#endif /* _GOLAN_H_*/ diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h new file mode 100644 index 00000000..e1e89b4c --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_cmd.h @@ -0,0 +1,43 @@ +#ifndef NODNIC_CMD_H_ +#define NODNIC_CMD_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_nodnic_data_structures.h" +#include "../../mlx_utils/include/public/mlx_utils.h" +#include "../../mlx_utils/include/public/mlx_pci_gw.h" + +mlx_status +nodnic_cmd_read( + IN nodnic_device_priv *device_priv, + IN mlx_uint32 address, + OUT mlx_pci_gw_buffer *buffer + ); + +mlx_status +nodnic_cmd_write( + IN nodnic_device_priv *device_priv, + IN mlx_uint32 address, + IN mlx_pci_gw_buffer buffer + ); + +#endif /* STUB_NODNIC_CMD_H_ */ diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_device.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_device.h new file mode 100644 index 00000000..b0cc7f72 --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_device.h @@ -0,0 +1,80 @@ +#ifndef NODNIC_DEVICE_H_ +#define NODNIC_DEVICE_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_nodnic_data_structures.h" + +#define NODIC_SUPPORTED_REVISION 1 +//Initialization segment +#define NODNIC_CMDQ_PHY_ADDR_HIGH_OFFSET 0x10 +#define NODNIC_CMDQ_PHY_ADDR_LOW_OFFSET 0x14 +#define NODNIC_NIC_INTERFACE_OFFSET 0x14 +#define NODNIC_INITIALIZING_OFFSET 0x1fc +#define NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET 0x1fc +#define NODNIC_LOCATION_OFFSET 0x240 + +#define NODNIC_CMDQ_PHY_ADDR_LOW_MASK 0xFFFFE000 +#define NODNIC_NIC_INTERFACE_SUPPORTED_MASK 0x4000000 + +#define NODNIC_NIC_INTERFACE_BIT 9 +#define NODNIC_DISABLE_INTERFACE_BIT 8 +#define NODNIC_NIC_INTERFACE_SUPPORTED_BIT 26 +#define NODNIC_INITIALIZING_BIT 31 + +#define NODNIC_NIC_DISABLE_INT_OFFSET 0x100c + +//nodnic segment +#define NODNIC_REVISION_OFFSET 0x0 +#define NODNIC_HARDWARE_FORMAT_OFFSET 0x0 + + + +mlx_status +nodnic_device_init( + IN nodnic_device_priv *device_priv + ); + +mlx_status +nodnic_device_teardown( + IN nodnic_device_priv *device_priv + ); + + +mlx_status +nodnic_device_get_cap( + IN nodnic_device_priv *device_priv + ); + +mlx_status +nodnic_device_clear_int ( + IN nodnic_device_priv *device_priv + ); + +mlx_status +nodnic_device_get_fw_version( + IN nodnic_device_priv *device_priv, + OUT mlx_uint16 *fw_ver_minor, + OUT mlx_uint16 *fw_ver_sub_minor, + OUT mlx_uint16 *fw_ver_major + ); +#endif /* STUB_NODNIC_DEVICE_H_ */ diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h 
b/src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h new file mode 100644 index 00000000..61f2c573 --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_nodnic_data_structures.h @@ -0,0 +1,231 @@ +#ifndef NODNIC_NODNICDATASTRUCTURES_H_ +#define NODNIC_NODNICDATASTRUCTURES_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_utils/include/public/mlx_utils.h" + +/* todo: fix coding convention */ +#define NODNIC_MEMORY_ALIGN 0x1000 + +#define NODNIC_MAX_MAC_FILTERS 5 +#define NODNIC_MAX_MGID_FILTERS 4 + +typedef struct _nodnic_device_priv nodnic_device_priv; +typedef struct _nodnic_port_priv nodnic_port_priv; +typedef struct _nodnic_device_capabilites nodnic_device_capabilites; +typedef struct _nodnic_qp nodnic_qp; +typedef struct _nodnic_cq nodnic_cq; +typedef struct _nodnic_eq nodnic_eq; +typedef struct _nodnic_qp_db nodnic_qp_db; +typedef struct _nodnic_arm_cq_db nodnic_arm_cq_db; + +/* NODNIC Port states + * Bit 0 - port open/close + * Bit 1 - port is [not] in disabling DMA + * 0 - closed and not disabling DMA + * 1 - opened and not disabling DMA + * 3 - opened and disabling DMA + */ +#define NODNIC_PORT_OPENED 0b00000001 +#define NODNIC_PORT_DISABLING_DMA 0b00000010 + +typedef enum { + ConnectX3 = 0, + Connectx4 +}nodnic_hardware_format; + + +typedef enum { + NODNIC_QPT_SMI, + NODNIC_QPT_GSI, + NODNIC_QPT_UD, + NODNIC_QPT_RC, + NODNIC_QPT_ETH, +}nodnic_queue_pair_type; +typedef enum { + NODNIC_PORT_TYPE_IB = 0, + NODNIC_PORT_TYPE_ETH, + NODNIC_PORT_TYPE_UNKNOWN, +}nodnic_port_type; + + +#define RECV_WQE_SIZE 16 +#define NODNIC_WQBB_SIZE 64 +/** A nodnic send wqbb */ +struct nodnic_send_wqbb { + mlx_uint8 force_align[NODNIC_WQBB_SIZE]; +}; + +struct nodnic_doorbell { + mlx_physical_address doorbell_physical; + mlx_void *map; + nodnic_qp_db *qp_doorbell_record; +}; +struct nodnic_ring { + mlx_uint32 offset; + /** Work queue entries */ + /* TODO: add to memory entity */ + mlx_physical_address wqe_physical; + mlx_void *map; + /** Size of work queue */ + mlx_size wq_size; + /** Next work queue entry index + * + * This is the index of the next entry to be filled (i.e. the + * first empty entry). This value is not bounded by num_wqes; + * users must logical-AND with (num_wqes-1) to generate an + * array index. 
+ */ + mlx_uint32 num_wqes; + mlx_uint32 qpn; + mlx_uint32 next_idx; + struct nodnic_doorbell recv_doorbell; + struct nodnic_doorbell send_doorbell; +}; + +struct nodnic_send_ring{ + struct nodnic_ring nodnic_ring; + struct nodnic_send_wqbb *wqe_virt; +}; + + +struct nodnic_recv_ring{ + struct nodnic_ring nodnic_ring; + void *wqe_virt; +}; +struct _nodnic_qp{ + nodnic_queue_pair_type type; + struct nodnic_send_ring send; + struct nodnic_recv_ring receive; +}; + +struct _nodnic_cq{ + /** cq entries */ + mlx_void *cq_virt; + mlx_physical_address cq_physical; + mlx_void *map; + /** cq */ + mlx_size cq_size; + struct nodnic_doorbell arm_cq_doorbell; +}; + +struct _nodnic_eq{ + mlx_void *eq_virt; + mlx_physical_address eq_physical; + mlx_void *map; + mlx_size eq_size; +}; +struct _nodnic_device_capabilites{ + mlx_boolean support_mac_filters; + mlx_boolean support_promisc_filter; + mlx_boolean support_promisc_multicast_filter; + mlx_uint8 log_working_buffer_size; + mlx_uint8 log_pkey_table_size; + mlx_boolean num_ports; // 0 - single port, 1 - dual port + mlx_uint8 log_max_ring_size; +#ifdef DEVICE_CX3 + mlx_uint8 crspace_doorbells; +#endif + mlx_uint8 support_rx_pi_dma; + mlx_uint8 support_uar_tx_db; + mlx_uint8 support_bar_cq_ctrl; + mlx_uint8 log_uar_page_size; +}; + +#ifdef DEVICE_CX3 +/* This is the structure of the data in the scratchpad + * Read/Write data from/to its field using PCI accesses only */ +typedef struct _nodnic_port_data_flow_gw nodnic_port_data_flow_gw; +struct _nodnic_port_data_flow_gw { + mlx_uint32 send_doorbell; + mlx_uint32 recv_doorbell; + mlx_uint32 reserved2[2]; + mlx_uint32 armcq_cq_ci_dword; + mlx_uint32 dma_en; +} __attribute__ ((packed)); +#endif + +typedef struct _nodnic_uar_priv{ + mlx_uint8 inited; + mlx_uint64 offset; + void *virt; + unsigned long phys; +} nodnic_uar; + +struct _nodnic_device_priv{ + mlx_boolean is_initiailzied; + mlx_utils *utils; + + //nodnic structure offset in init segment + mlx_uint32 device_offset; + + 
nodnic_device_capabilites device_cap; + + mlx_uint8 nodnic_revision; + nodnic_hardware_format hardware_format; + mlx_uint32 pd; + mlx_uint32 lkey; + mlx_uint64 device_guid; + nodnic_port_priv *ports; +#ifdef DEVICE_CX3 + mlx_void *crspace_clear_int; +#endif + nodnic_uar uar; +}; + +struct _nodnic_port_priv{ + nodnic_device_priv *device; + mlx_uint32 port_offset; + mlx_uint8 port_state; + mlx_boolean network_state; + mlx_boolean dma_state; + nodnic_port_type port_type; + mlx_uint8 port_num; + nodnic_eq eq; + mlx_mac_address mac_filters[5]; + nodnic_arm_cq_db *arm_cq_doorbell_record; + mlx_status (*send_doorbell)( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + IN mlx_uint16 index); + mlx_status (*recv_doorbell)( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + IN mlx_uint16 index); + mlx_status (*set_dma)( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value); +#ifdef DEVICE_CX3 + nodnic_port_data_flow_gw *data_flow_gw; +#endif +}; + +struct _nodnic_qp_db { + mlx_uint32 recv_db; + mlx_uint32 send_db; +} __attribute ( ( packed ) ); + +struct _nodnic_arm_cq_db { + mlx_uint32 dword[2]; +} __attribute ( ( packed ) ); +#endif /* STUB_NODNIC_NODNICDATASTRUCTURES_H_ */ diff --git a/src/drivers/infiniband/mlx_nodnic/include/mlx_port.h b/src/drivers/infiniband/mlx_nodnic/include/mlx_port.h new file mode 100644 index 00000000..bb302672 --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/include/mlx_port.h @@ -0,0 +1,242 @@ +#ifndef NODNIC_PORT_H_ +#define NODNIC_PORT_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_nodnic_data_structures.h" + +#define NODNIC_PORT_MAC_FILTERS_OFFSET 0x10 + +typedef enum { + nodnic_port_option_link_type = 0, + nodnic_port_option_mac_low, + nodnic_port_option_mac_high, + nodnic_port_option_log_cq_size, + nodnic_port_option_reset_needed, + nodnic_port_option_mac_filters_en, + nodnic_port_option_port_state, + nodnic_port_option_network_en, + nodnic_port_option_dma_en, + nodnic_port_option_eq_addr_low, + nodnic_port_option_eq_addr_high, + nodnic_port_option_cq_addr_low, + nodnic_port_option_cq_addr_high, + nodnic_port_option_port_management_change_event, + nodnic_port_option_port_promisc_en, + nodnic_port_option_arm_cq, + nodnic_port_option_port_promisc_multicast_en, +#ifdef DEVICE_CX3 + nodnic_port_option_crspace_en, +#endif + nodnic_port_option_send_ring0_uar_index, + nodnic_port_option_send_ring1_uar_index, + nodnic_port_option_cq_n_index, +}nodnic_port_option; + +struct nodnic_port_data_entry{ + nodnic_port_option option; + mlx_uint32 offset; + mlx_uint8 align; + mlx_uint32 mask; +}; + +struct nodnic_qp_data_entry{ + nodnic_queue_pair_type type; + mlx_uint32 send_offset; + mlx_uint32 recv_offset; +}; + + +typedef enum { + nodnic_port_state_down = 0, + nodnic_port_state_initialize, + nodnic_port_state_armed, + nodnic_port_state_active, +}nodnic_port_state; + +mlx_status +nodnic_port_get_state( + IN nodnic_port_priv *port_priv, + OUT nodnic_port_state *state + ); + +mlx_status +nodnic_port_get_type( + IN nodnic_port_priv 
*port_priv, + OUT nodnic_port_type *type + ); + +mlx_status +nodnic_port_query( + IN nodnic_port_priv *port_priv, + IN nodnic_port_option option, + OUT mlx_uint32 *out + ); + +mlx_status +nodnic_port_set( + IN nodnic_port_priv *port_priv, + IN nodnic_port_option option, + IN mlx_uint32 in + ); + +mlx_status +nodnic_port_create_cq( + IN nodnic_port_priv *port_priv, + IN mlx_size cq_size, + OUT nodnic_cq **cq + ); + +mlx_status +nodnic_port_destroy_cq( + IN nodnic_port_priv *port_priv, + IN nodnic_cq *cq + ); + +mlx_status +nodnic_port_create_qp( + IN nodnic_port_priv *port_priv, + IN nodnic_queue_pair_type type, + IN mlx_size send_wq_size, + IN mlx_uint32 send_wqe_num, + IN mlx_size receive_wq_size, + IN mlx_uint32 recv_wqe_num, + OUT nodnic_qp **qp + ); + +mlx_status +nodnic_port_destroy_qp( + IN nodnic_port_priv *port_priv, + IN nodnic_queue_pair_type type, + IN nodnic_qp *qp + ); +mlx_status +nodnic_port_get_qpn( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + OUT mlx_uint32 *qpn + ); +mlx_status +nodnic_port_update_ring_doorbell( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + IN mlx_uint16 index + ); +mlx_status +nodnic_port_get_cq_size( + IN nodnic_port_priv *port_priv, + OUT mlx_uint64 *cq_size + ); + +mlx_status +nodnic_port_allocate_eq( + IN nodnic_port_priv *port_priv, + IN mlx_uint8 log_eq_size + ); +mlx_status +nodnic_port_free_eq( + IN nodnic_port_priv *port_priv + ); + +mlx_status +nodnic_port_add_mac_filter( + IN nodnic_port_priv *port_priv, + IN mlx_mac_address mac + ); + +mlx_status +nodnic_port_remove_mac_filter( + IN nodnic_port_priv *port_priv, + IN mlx_mac_address mac + ); +mlx_status +nodnic_port_add_mgid_filter( + IN nodnic_port_priv *port_priv, + IN mlx_mac_address mac + ); + +mlx_status +nodnic_port_remove_mgid_filter( + IN nodnic_port_priv *port_priv, + IN mlx_mac_address mac + ); +mlx_status +nodnic_port_thin_init( + IN nodnic_device_priv *device_priv, + IN nodnic_port_priv *port_priv, + IN mlx_uint8 
port_index + ); + +mlx_status +nodnic_port_set_promisc( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ); + +mlx_status +nodnic_port_set_promisc_multicast( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ); + +mlx_status +nodnic_port_init( + IN nodnic_port_priv *port_priv + ); + +mlx_status +nodnic_port_close( + IN nodnic_port_priv *port_priv + ); + +mlx_status +nodnic_port_enable_dma( + IN nodnic_port_priv *port_priv + ); + +mlx_status +nodnic_port_disable_dma( + IN nodnic_port_priv *port_priv + ); + +mlx_status +nodnic_port_read_reset_needed( + IN nodnic_port_priv *port_priv, + OUT mlx_boolean *reset_needed + ); + +mlx_status +nodnic_port_read_port_management_change_event( + IN nodnic_port_priv *port_priv, + OUT mlx_boolean *change_event + ); +mlx_status +nodnic_port_set_send_uar_offset( + IN nodnic_port_priv *port_priv + ); + +mlx_status +nodnic_port_update_tx_db_func( + IN nodnic_device_priv *device_priv, + IN nodnic_port_priv *port_priv + ); +#endif /* STUB_NODNIC_PORT_H_ */ diff --git a/src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c b/src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c new file mode 100644 index 00000000..69f85358 --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/src/mlx_cmd.c @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../include/mlx_cmd.h" +#include "../../mlx_utils/include/public/mlx_pci_gw.h" +#include "../../mlx_utils/include/public/mlx_bail.h" +#include "../../mlx_utils/include/public/mlx_pci.h" +#include "../../mlx_utils/include/public/mlx_logging.h" + +mlx_status +nodnic_cmd_read( + IN nodnic_device_priv *device_priv, + IN mlx_uint32 address, + OUT mlx_pci_gw_buffer *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_utils *utils = NULL; + + if ( device_priv == NULL || buffer == NULL ) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + utils = device_priv->utils; + + status = mlx_pci_gw_read(utils, PCI_GW_SPACE_NODNIC, address, buffer); + MLX_CHECK_STATUS(device_priv, status, read_error,"mlx_pci_gw_read failed"); + +read_error: +bad_param: + return status; +} + +mlx_status +nodnic_cmd_write( + IN nodnic_device_priv *device_priv, + IN mlx_uint32 address, + IN mlx_pci_gw_buffer buffer + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_utils *utils = NULL; + + + if ( device_priv == NULL ) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + utils = device_priv->utils; + + + status = mlx_pci_gw_write(utils, PCI_GW_SPACE_NODNIC, address, buffer); + MLX_CHECK_STATUS(device_priv, status, write_error,"mlx_pci_gw_write failed"); +write_error: +bad_param: + return status; +} diff --git a/src/drivers/infiniband/mlx_nodnic/src/mlx_device.c b/src/drivers/infiniband/mlx_nodnic/src/mlx_device.c new file mode 100644 index 00000000..65655457 --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/src/mlx_device.c @@ -0,0 +1,363 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../include/mlx_device.h" +#include "../include/mlx_cmd.h" +#include "../../mlx_utils/include/public/mlx_bail.h" +#include "../../mlx_utils/include/public/mlx_pci.h" +#include "../../mlx_utils/include/public/mlx_memory.h" +#include "../../mlx_utils/include/public/mlx_logging.h" + +#define CHECK_BIT(field, offset) (((field) & ((mlx_uint32)1 << (offset))) != 0) + +static +mlx_status +check_nodnic_interface_supported( + IN nodnic_device_priv* device_priv, + OUT mlx_boolean *out + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 output = 0; + status = nodnic_cmd_read(device_priv, NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET, + &output); + MLX_FATAL_CHECK_STATUS(status, read_error, "failed to read nic_interface_supported"); + *out = CHECK_BIT(output, NODNIC_NIC_INTERFACE_SUPPORTED_BIT); +read_error: + return status; +} + +static +mlx_status +wait_for_device_initialization( + IN nodnic_device_priv* device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint8 try = 0; + mlx_uint32 buffer = 0; + +#define CHECK_DEVICE_INIT_TRIES 10 + for( ; try < CHECK_DEVICE_INIT_TRIES ; try++){ + status = nodnic_cmd_read(device_priv, NODNIC_INITIALIZING_OFFSET, &buffer); + MLX_CHECK_STATUS(device_priv, status, read_error, 
"failed to read initializing"); + if( !CHECK_BIT(buffer, NODNIC_INITIALIZING_BIT)){ + goto init_done; + } + mlx_utils_delay_in_ms(100); + } + status = MLX_FAILED; +read_error: +init_done: + return status; +} + +static +mlx_status +disable_nodnic_inteface( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = 0; + + buffer = (1 << NODNIC_DISABLE_INTERFACE_BIT); + status = nodnic_cmd_write(device_priv, NODNIC_CMDQ_PHY_ADDR_LOW_OFFSET, buffer); + MLX_FATAL_CHECK_STATUS(status, write_err, "failed to write cmdq_phy_addr + nic_interface"); + + status = wait_for_device_initialization(device_priv); + MLX_FATAL_CHECK_STATUS(status, init_err, "failed to initialize device"); +init_err: +write_err: + return status; +} +static +mlx_status +nodnic_device_start_nodnic( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = 0; + mlx_boolean nodnic_supported = 0; + + status = wait_for_device_initialization(device_priv); + MLX_FATAL_CHECK_STATUS(status, wait_for_fw_err, "failed to initialize device"); + + status = check_nodnic_interface_supported(device_priv, &nodnic_supported); + MLX_FATAL_CHECK_STATUS(status, read_err,"failed to check nic_interface_supported"); + + if( nodnic_supported == 0 ){ + status = MLX_UNSUPPORTED; + goto nodnic_unsupported; + } + buffer = (1 << NODNIC_NIC_INTERFACE_BIT); + status = nodnic_cmd_write(device_priv, NODNIC_NIC_INTERFACE_OFFSET, buffer); + MLX_FATAL_CHECK_STATUS(status, write_err, "failed to write cmdq_phy_addr + nic_interface"); + + status = wait_for_device_initialization(device_priv); + MLX_FATAL_CHECK_STATUS(status, init_err, "failed to initialize device"); +init_err: +read_err: +write_err: +nodnic_unsupported: +wait_for_fw_err: + return status; +} + +static +mlx_status +nodnic_device_get_nodnic_data( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = 0; + + status = nodnic_cmd_read(device_priv, 
NODNIC_LOCATION_OFFSET, &device_priv->device_offset); + MLX_FATAL_CHECK_STATUS(status, nodnic_offset_read_err, "failed to read nodnic offset"); + + status = nodnic_cmd_read(device_priv, + device_priv->device_offset + NODNIC_REVISION_OFFSET, &buffer); + MLX_FATAL_CHECK_STATUS(status, nodnic_revision_read_err, "failed to read nodnic revision"); + + device_priv->nodnic_revision = (buffer >> 24) & 0xFF; + if( device_priv->nodnic_revision != NODIC_SUPPORTED_REVISION ){ + MLX_DEBUG_ERROR(device_priv, "nodnic revision not supported\n"); + status = MLX_UNSUPPORTED; + goto unsupported_revision; + } + + status = nodnic_cmd_read(device_priv, + device_priv->device_offset + NODNIC_HARDWARE_FORMAT_OFFSET, &buffer); + MLX_FATAL_CHECK_STATUS(status, nodnic_hardware_format_read_err, "failed to read nodnic revision"); + device_priv->hardware_format = (buffer >> 16) & 0xFF; + + return status; + +unsupported_revision: +nodnic_hardware_format_read_err: +nodnic_offset_read_err: +nodnic_revision_read_err: + disable_nodnic_inteface(device_priv); + return status; +} + +mlx_status +nodnic_device_clear_int ( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 disable = 1; +#ifndef DEVICE_CX3 + status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable); + MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit"); +#else + mlx_utils *utils = device_priv->utils; + mlx_uint64 clear_int = (mlx_uintn)(device_priv->crspace_clear_int); + mlx_uint32 swapped = 0; + + if (device_priv->device_cap.crspace_doorbells == 0) { + status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable); + MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit"); + } else { + /* Write the new index and update FW that new data was submitted */ + disable = 0x80000000; + mlx_memory_cpu_to_be32(utils, disable, &swapped); + mlx_pci_mem_write (utils, MlxPciWidthUint32, 0, clear_int, 1, 
&swapped); + mlx_pci_mem_read (utils, MlxPciWidthUint32, 0, clear_int, 1, &swapped); + } +#endif +clear_int_done: + return status; +} + +mlx_status +nodnic_device_init( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + + if( device_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto parm_err; + } + status = nodnic_device_start_nodnic(device_priv); + MLX_FATAL_CHECK_STATUS(status, start_nodnic_err, "nodnic_device_start_nodnic failed"); + + status = nodnic_device_get_nodnic_data(device_priv); + MLX_FATAL_CHECK_STATUS(status, data_err, "nodnic_device_get_nodnic_data failed"); + return status; +data_err: +start_nodnic_err: +parm_err: + return status; +} + +mlx_status +nodnic_device_teardown( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + status = disable_nodnic_inteface(device_priv); + MLX_FATAL_CHECK_STATUS(status, disable_failed, "failed to disable nodnic interface"); +disable_failed: + return status; +} + +mlx_status +nodnic_device_get_cap( + IN nodnic_device_priv *device_priv + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_capabilites *device_cap = NULL; + mlx_uint32 buffer = 0; + mlx_uint64 guid_l = 0; + mlx_uint64 guid_h = 0; + if( device_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto parm_err; + } + + device_cap = &device_priv->device_cap; + + //get device capabilities + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x0, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic first dword"); + +#define NODNIC_DEVICE_SUPPORT_MAC_FILTERS_OFFSET 15 +#define NODNIC_DEVICE_SUPPORT_PROMISC_FILTER_OFFSET 14 +#define NODNIC_DEVICE_SUPPORT_PROMISC_MULT_FILTER_OFFSET 13 +#define NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_OFFSET 8 +#define NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_MASK 0x7 +#define NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_OFFSET 4 +#define NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_MASK 0xF +#define NODNIC_DEVICE_NUM_PORTS_OFFSET 0 + 
device_cap->support_mac_filters = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_MAC_FILTERS_OFFSET); + + device_cap->support_promisc_filter = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_PROMISC_FILTER_OFFSET); + + device_cap->support_promisc_multicast_filter = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_PROMISC_MULT_FILTER_OFFSET); + + device_cap->log_working_buffer_size = + (buffer >> NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_OFFSET) & NODNIC_DEVICE_LOG_WORKING_BUFFER_SIZE_MASK; + + device_cap->log_pkey_table_size = + (buffer >> NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_OFFSET) & NODNIC_DEVICE_LOG_PKEY_TABLE_SIZE_MASK; + + device_cap->num_ports = CHECK_BIT(buffer, NODNIC_DEVICE_NUM_PORTS_OFFSET) + 1; + +#ifdef DEVICE_CX3 +#define NODNIC_DEVICE_CRSPACE_DB_OFFSET 12 + device_cap->crspace_doorbells = CHECK_BIT(buffer, NODNIC_DEVICE_CRSPACE_DB_OFFSET); +#endif + + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x4, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic second dword"); + +#define NODNIC_DEVICE_LOG_MAX_RING_SIZE_OFFSET 24 +#define NODNIC_DEVICE_LOG_MAX_RING_SIZE_MASK 0x3F +#define NODNIC_DEVICE_PD_MASK 0xFFFFFF + device_cap->log_max_ring_size = + (buffer >> NODNIC_DEVICE_LOG_MAX_RING_SIZE_OFFSET) & NODNIC_DEVICE_LOG_MAX_RING_SIZE_MASK; + + //get device magic numbers + device_priv->pd = buffer & NODNIC_DEVICE_PD_MASK; + + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x8, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic third dword"); + device_priv->lkey = buffer; + +#ifdef DEVICE_CX3 + if ( device_cap->crspace_doorbells ) { + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x18, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic_crspace_clear_int address"); + device_priv->crspace_clear_int = device_priv->utils->config + buffer; + } +#endif + + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x10, (mlx_uint32*)&guid_h); + 
MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic guid_h"); + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x14, (mlx_uint32*)&guid_l); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic guid_l"); + device_priv->device_guid = guid_l | (guid_h << 32); + +#define NODNIC_DEVICE_SUPPORT_RX_PI_DMA_OFFSET 31 +#define NODNIC_DEVICE_SUPPORT_RX_PI_DMA_MASK 0x1 +#define NODNIC_DEVICE_SUPPORT_UAR_TRX_DB_OFFSET 29 +#define NODNIC_DEVICE_SUPPORT_UAR_TRX_DB_MASK 0x1 +#define NODNIC_DEVICE_SUPPORT_BAR_CQ_CONTROL_OFFSET 27 +#define NODNIC_DEVICE_SUPPORT_BAR_CQ_CONTROL_MASK 0x1 + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x1c, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic support_rx_pi_dma"); + if ( sizeof ( mlx_uintn ) == sizeof ( mlx_uint32 ) ) { + device_cap->support_rx_pi_dma = FALSE; + device_cap->support_uar_tx_db = FALSE; + device_cap->support_bar_cq_ctrl = FALSE; + } else { + device_cap->support_rx_pi_dma = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_RX_PI_DMA_OFFSET); + device_cap->support_uar_tx_db = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_UAR_TRX_DB_OFFSET); + device_cap->support_bar_cq_ctrl = CHECK_BIT(buffer, NODNIC_DEVICE_SUPPORT_BAR_CQ_CONTROL_OFFSET); + } + +#define NODNIC_DEVICE_LOG_UAR_PAGE_SIZE_OFFSET 0 +#define NODNIC_DEVICE_LOG_UAR_PAGE_SIZE_MASK 0xFF + status = nodnic_cmd_read(device_priv, device_priv->device_offset + 0x20, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, "failed to read nodnic log_uar_page_size"); + device_cap->log_uar_page_size = ( buffer >> NODNIC_DEVICE_LOG_UAR_PAGE_SIZE_OFFSET) & NODNIC_DEVICE_LOG_UAR_PAGE_SIZE_MASK; +read_err: +parm_err: + return status; +} + +mlx_status +nodnic_device_get_fw_version( + IN nodnic_device_priv *device_priv, + OUT mlx_uint16 *fw_ver_minor, + OUT mlx_uint16 *fw_ver_sub_minor, + OUT mlx_uint16 *fw_ver_major + ){ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = 0; + + if( device_priv == NULL ){ + 
status = MLX_INVALID_PARAMETER; + goto parm_err; + } + + status = nodnic_cmd_read(device_priv, 0x0, &buffer); + MLX_CHECK_STATUS(device_priv, status, read_err, "failed to read fw revision major and minor"); + + *fw_ver_minor = (mlx_uint16)(buffer >> 16); + *fw_ver_major = (mlx_uint16)buffer; + + status = nodnic_cmd_read(device_priv, 0x4, &buffer); + MLX_CHECK_STATUS(device_priv, status, read_err, "failed to read fw revision sub minor"); + + *fw_ver_sub_minor = (mlx_uint16)buffer; +read_err: +parm_err: + return status; +} diff --git a/src/drivers/infiniband/mlx_nodnic/src/mlx_port.c b/src/drivers/infiniband/mlx_nodnic/src/mlx_port.c new file mode 100644 index 00000000..efbd8ddf --- /dev/null +++ b/src/drivers/infiniband/mlx_nodnic/src/mlx_port.c @@ -0,0 +1,1370 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../include/mlx_port.h" +#include "../include/mlx_cmd.h" +#include "../../mlx_utils/include/public/mlx_memory.h" +#include "../../mlx_utils/include/public/mlx_pci.h" +#include "../../mlx_utils/include/public/mlx_bail.h" + +#define PortDataEntry( _option, _offset, _align, _mask) { \ + .option = _option, \ + .offset = _offset, \ + .align = _align, \ + .mask = _mask, \ + } + +#define QpDataEntry( _type, _send_offset, _recv_offset) { \ + .type = _type, \ + .send_offset = _send_offset, \ + .recv_offset = _recv_offset, \ + } + + +struct nodnic_port_data_entry nodnic_port_data_table[] = { + PortDataEntry(nodnic_port_option_link_type, 0x0, 4, 0x1), + PortDataEntry(nodnic_port_option_mac_low, 0xc, 0, 0xFFFFFFFF), + PortDataEntry(nodnic_port_option_mac_high, 0x8, 0, 0xFFFF), + PortDataEntry(nodnic_port_option_log_cq_size, 0x6c, 0, 0x3F), + PortDataEntry(nodnic_port_option_reset_needed, 0x0, 31, 0x1), + PortDataEntry(nodnic_port_option_mac_filters_en, 0x4, 0, 0x1F), + PortDataEntry(nodnic_port_option_port_state, 0x0, 0, 0xF), + PortDataEntry(nodnic_port_option_network_en, 0x4, 31, 0x1), + PortDataEntry(nodnic_port_option_dma_en, 0x4, 30, 0x1), + PortDataEntry(nodnic_port_option_eq_addr_low, 0x74, 0, 0xFFFFFFFF), + PortDataEntry(nodnic_port_option_eq_addr_high, 0x70, 0, 0xFFFFFFFF), + PortDataEntry(nodnic_port_option_cq_addr_low, 0x6c, 12, 0xFFFFF), + PortDataEntry(nodnic_port_option_cq_addr_high, 0x68, 0, 0xFFFFFFFF), + PortDataEntry(nodnic_port_option_port_management_change_event, 0x0, 30, 0x1), + PortDataEntry(nodnic_port_option_port_promisc_en, 0x4, 29, 0x1), +#ifndef DEVICE_CX3 + PortDataEntry(nodnic_port_option_arm_cq, 0x78, 8, 0xffffff), +#else + PortDataEntry(nodnic_port_option_arm_cq, 0x78, 8, 0xffff), +#endif + PortDataEntry(nodnic_port_option_port_promisc_multicast_en, 0x4, 28, 0x1), +#ifdef DEVICE_CX3 + PortDataEntry(nodnic_port_option_crspace_en, 0x4, 27, 0x1), +#endif + 
PortDataEntry(nodnic_port_option_send_ring0_uar_index, 0x108, 0, 0xFFFFFFFF), + PortDataEntry(nodnic_port_option_send_ring1_uar_index, 0x10c, 0, 0xFFFFFFFF), + PortDataEntry(nodnic_port_option_cq_n_index, 0x118, 0, 0xFFFFFF), +}; + +#define MAX_QP_DATA_ENTRIES 5 +struct nodnic_qp_data_entry nodnic_qp_data_teable[MAX_QP_DATA_ENTRIES] = { + QpDataEntry(NODNIC_QPT_SMI, 0, 0), + QpDataEntry(NODNIC_QPT_GSI, 0, 0), + QpDataEntry(NODNIC_QPT_UD, 0, 0), + QpDataEntry(NODNIC_QPT_RC, 0, 0), + QpDataEntry(NODNIC_QPT_ETH, 0x80, 0xC0), +}; + +#define MAX_NODNIC_PORTS 2 +int nodnic_port_offset_table[MAX_NODNIC_PORTS] = { + 0x100, //port 1 offset + 0x280, //port 1 offset +}; + +mlx_status +nodnic_port_get_state( + IN nodnic_port_priv *port_priv, + OUT nodnic_port_state *state + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 out = 0; + + status = nodnic_port_query(port_priv, + nodnic_port_option_port_state, &out); + MLX_CHECK_STATUS(port_priv->device, status, query_err, + "nodnic_port_query failed"); + *state = (nodnic_port_state)out; +query_err: + return status; +} +mlx_status +nodnic_port_get_type( + IN nodnic_port_priv *port_priv, + OUT nodnic_port_type *type + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 out = 0; + + if ( port_priv->port_type == NODNIC_PORT_TYPE_UNKNOWN){ + status = nodnic_port_query(port_priv, + nodnic_port_option_link_type, &out); + MLX_FATAL_CHECK_STATUS(status, query_err, + "nodnic_port_query failed"); + port_priv->port_type = (nodnic_port_type)out; + } + *type = port_priv->port_type; +query_err: + return status; +} + +mlx_status +nodnic_port_query( + IN nodnic_port_priv *port_priv, + IN nodnic_port_option option, + OUT mlx_uint32 *out + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + struct nodnic_port_data_entry *data_entry; + mlx_uint32 buffer = 0; + if( port_priv == NULL || out == NULL){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + device_priv = port_priv->device; + + data_entry = 
&nodnic_port_data_table[option]; + + status = nodnic_cmd_read(device_priv, + port_priv->port_offset + data_entry->offset , &buffer); + MLX_CHECK_STATUS(device_priv, status, read_err, + "nodnic_cmd_read failed"); + *out = (buffer >> data_entry->align) & data_entry->mask; +read_err: +invalid_parm: + return status; +} + +mlx_status +nodnic_port_set( + IN nodnic_port_priv *port_priv, + IN nodnic_port_option option, + IN mlx_uint32 in + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + struct nodnic_port_data_entry *data_entry; + mlx_uint32 buffer = 0; + + if( port_priv == NULL ){ + MLX_DEBUG_FATAL_ERROR("port_priv is NULL\n"); + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + device_priv = port_priv->device; + data_entry = &nodnic_port_data_table[option]; + + if( in > data_entry->mask ){ + MLX_DEBUG_FATAL_ERROR("in > data_entry->mask (%d > %d)\n", + in, data_entry->mask); + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + status = nodnic_cmd_read(device_priv, + port_priv->port_offset + data_entry->offset, &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, + "nodnic_cmd_read failed"); + buffer = buffer & ~(data_entry->mask << data_entry->align); + buffer = buffer | (in << data_entry->align); + status = nodnic_cmd_write(device_priv, + port_priv->port_offset + data_entry->offset, buffer); + MLX_FATAL_CHECK_STATUS(status, write_err, + "nodnic_cmd_write failed"); +write_err: +read_err: +invalid_parm: + return status; +} + +mlx_status +nodnic_port_set_send_uar_offset( + IN nodnic_port_priv *port_priv + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 out = 0; + + if ( ! 
port_priv->device->device_cap.support_uar_tx_db ) { + MLX_DEBUG_INFO1 ( port_priv, "nodnic_port_set_send_uar_offset: tx db using uar is not supported \n"); + status = MLX_UNSUPPORTED; + goto uar_not_supported; + } + + status = nodnic_port_query(port_priv, + nodnic_port_option_send_ring0_uar_index, &out); + MLX_CHECK_STATUS(port_priv->device, status, query_err, + "nodnic_port_query failed"); + port_priv->device->uar.offset = out << port_priv->device->device_cap.log_uar_page_size; +uar_not_supported: +query_err: + return status; +} + +mlx_status +nodnic_port_read_reset_needed( + IN nodnic_port_priv *port_priv, + OUT mlx_boolean *reset_needed + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 out = 0; + status = nodnic_port_query(port_priv, + nodnic_port_option_reset_needed, &out); + MLX_CHECK_STATUS(port_priv->device, status, query_err, + "nodnic_port_query failed"); + *reset_needed = (mlx_boolean)out; +query_err: + return status; +} + +mlx_status +nodnic_port_read_port_management_change_event( + IN nodnic_port_priv *port_priv, + OUT mlx_boolean *change_event + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 out = 0; + status = nodnic_port_query(port_priv, + nodnic_port_option_port_management_change_event, &out); + MLX_CHECK_STATUS(port_priv->device, status, query_err, + "nodnic_port_query failed"); + *change_event = (mlx_boolean)out; +query_err: + return status; +} + +static +mlx_status +nodnic_port_allocate_dbr_dma ( + IN nodnic_port_priv *port_priv, + IN struct nodnic_doorbell *nodnic_db, + IN mlx_uint32 dbr_addr_low_ofst, + IN mlx_uint32 dbr_addr_high_ofst, + IN void **dbr_addr, + IN mlx_size size, + IN void **map + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint64 address = 0; + nodnic_device_priv *device_priv = NULL; + + if( port_priv == NULL || nodnic_db == NULL ){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + device_priv = port_priv->device; + status = mlx_memory_alloc_dma(device_priv->utils, + size, + NODNIC_MEMORY_ALIGN, + (void 
**)dbr_addr + ); + MLX_FATAL_CHECK_STATUS(status, alloc_db_record_err, + "doorbell record dma allocation error"); + + status = mlx_memory_map_dma(device_priv->utils, + (void *)(*dbr_addr), + size, + &nodnic_db->doorbell_physical, + map//nodnic_ring->map + ); + MLX_FATAL_CHECK_STATUS(status, map_db_record_err, + "doorbell record map dma error"); + + address = (mlx_uint64)nodnic_db->doorbell_physical; + status = nodnic_cmd_write(device_priv, + dbr_addr_low_ofst, + (mlx_uint32)address); + MLX_FATAL_CHECK_STATUS(status, set_err, + "failed to set doorbell addr low"); + + address = address >> 32; + status = nodnic_cmd_write(device_priv, + dbr_addr_high_ofst, + (mlx_uint32)address); + MLX_FATAL_CHECK_STATUS(status, set_err, + "failed to set doorbell addr high"); + + return status; + +set_err: + mlx_memory_ummap_dma(device_priv->utils, *map); +map_db_record_err: + mlx_memory_free_dma(device_priv->utils, size, + (void **)dbr_addr); +alloc_db_record_err: +invalid_parm: + return status; +} + +static +mlx_status +nodnic_port_cq_dbr_dma_init( + IN nodnic_port_priv *port_priv, + OUT nodnic_cq **cq + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + device_priv = port_priv->device; + if ( ! 
device_priv->device_cap.support_bar_cq_ctrl ) { + status = MLX_UNSUPPORTED; + goto uar_arm_cq_db_unsupported; + } + +#define NODNIC_PORT_ARM_CQ_DBR_ADDR_LOW_OFFSET 0x114 +#define NODNIC_PORT_ARM_CQ_DBR_ADDR_HIGH_OFFSET 0x110 + + status = nodnic_port_allocate_dbr_dma ( port_priv,&(*cq)->arm_cq_doorbell, + port_priv->port_offset + NODNIC_PORT_ARM_CQ_DBR_ADDR_LOW_OFFSET, + port_priv->port_offset + NODNIC_PORT_ARM_CQ_DBR_ADDR_HIGH_OFFSET, + (void **)&port_priv->arm_cq_doorbell_record , + sizeof(nodnic_arm_cq_db), + (void **)&((*cq)->arm_cq_doorbell.map)); + MLX_FATAL_CHECK_STATUS(status, alloc_dbr_dma_err, + "failed to allocate doorbell record dma"); + return status; + +alloc_dbr_dma_err: +uar_arm_cq_db_unsupported: +invalid_parm: + return status; +} + +mlx_status +nodnic_port_create_cq( + IN nodnic_port_priv *port_priv, + IN mlx_size cq_size, + OUT nodnic_cq **cq + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + mlx_uint64 address = 0; + if( port_priv == NULL || cq == NULL){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + device_priv = port_priv->device; + + status = mlx_memory_zalloc(device_priv->utils, + sizeof(nodnic_cq),(mlx_void **)cq); + MLX_FATAL_CHECK_STATUS(status, alloc_err, + "cq priv allocation error"); + + (*cq)->cq_size = cq_size; + status = mlx_memory_alloc_dma(device_priv->utils, + (*cq)->cq_size, NODNIC_MEMORY_ALIGN, + &(*cq)->cq_virt); + MLX_FATAL_CHECK_STATUS(status, dma_alloc_err, + "cq allocation error"); + + status = mlx_memory_map_dma(device_priv->utils, + (*cq)->cq_virt, + (*cq)->cq_size, + &(*cq)->cq_physical, + &(*cq)->map); + MLX_FATAL_CHECK_STATUS(status, cq_map_err, + "cq map error"); + + status = nodnic_port_cq_dbr_dma_init(port_priv,cq); + + /* update cq address */ +#define NODIC_CQ_ADDR_HIGH 0x68 +#define NODIC_CQ_ADDR_LOW 0x6c + address = (mlx_uint64)(*cq)->cq_physical; + status = nodnic_port_set(port_priv, nodnic_port_option_cq_addr_low, + (mlx_uint32)(address) >> 12); + 
MLX_FATAL_CHECK_STATUS(status, dma_set_addr_low_err, + "cq set addr low error"); + address = address >> 32; + status = nodnic_port_set(port_priv, nodnic_port_option_cq_addr_high, + (mlx_uint32)address); + MLX_FATAL_CHECK_STATUS(status, dma_set_addr_high_err, + "cq set addr high error"); + return status; +dma_set_addr_high_err: +dma_set_addr_low_err: + mlx_memory_ummap_dma(device_priv->utils, (*cq)->map); +cq_map_err: + mlx_memory_free_dma(device_priv->utils, (*cq)->cq_size, + (void **)&((*cq)->cq_virt)); +dma_alloc_err: + mlx_memory_free(device_priv->utils, (void **)cq); +alloc_err: +invalid_parm: + return status; +} + +mlx_status +nodnic_port_destroy_cq( + IN nodnic_port_priv *port_priv, + IN nodnic_cq *cq + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + + if( port_priv == NULL || cq == NULL){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + device_priv = port_priv->device; + + if ( device_priv->device_cap.support_bar_cq_ctrl ){ + status = mlx_memory_ummap_dma(device_priv->utils, + cq->arm_cq_doorbell.map); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status); + } + + status = mlx_memory_free_dma(device_priv->utils, + sizeof(nodnic_arm_cq_db), + (void **)&(port_priv->arm_cq_doorbell_record)); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status); + } + } + + mlx_memory_ummap_dma(device_priv->utils, cq->map); + + mlx_memory_free_dma(device_priv->utils, cq->cq_size, + (void **)&(cq->cq_virt)); + + mlx_memory_free(device_priv->utils, (void **)&cq); +invalid_parm: + return status; +} + +static +mlx_status +nodnic_port_allocate_ring_db_dma ( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *nodnic_ring, + IN struct nodnic_doorbell *nodnic_db + ) +{ + mlx_status status = MLX_SUCCESS; + + if( port_priv == NULL || nodnic_ring == NULL || nodnic_db == NULL ){ + status = MLX_INVALID_PARAMETER; + 
goto invalid_parm; + } +#define NODNIC_RING_DBR_ADDR_LOW_OFFSET 0x1C +#define NODNIC_RING_DBR_ADDR_HIGH_OFFSET 0x18 + status = nodnic_port_allocate_dbr_dma ( port_priv,nodnic_db, + nodnic_ring->offset + NODNIC_RING_DBR_ADDR_LOW_OFFSET, + nodnic_ring->offset + NODNIC_RING_DBR_ADDR_HIGH_OFFSET, + (void **)&nodnic_db->qp_doorbell_record, + sizeof(nodnic_qp_db), + (void **)&nodnic_ring->map ); + MLX_FATAL_CHECK_STATUS(status, alloc_dbr_dma_err, + "failed to allocate doorbell record dma"); + + return status; +alloc_dbr_dma_err: +invalid_parm: + return status; +} + +static +mlx_status +nodnic_port_rx_pi_dma_alloc( + IN nodnic_port_priv *port_priv, + OUT nodnic_qp **qp + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + + if( port_priv == NULL || qp == NULL){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + device_priv = port_priv->device; + + if ( ! device_priv->device_cap.support_rx_pi_dma ) { + goto rx_pi_dma_unsupported; + } + + if ( device_priv->device_cap.support_rx_pi_dma ) { + status = nodnic_port_allocate_ring_db_dma(port_priv, + &(*qp)->receive.nodnic_ring,&(*qp)->receive.nodnic_ring.recv_doorbell); + MLX_FATAL_CHECK_STATUS(status, dma_alloc_err, + "rx doorbell dma allocation error"); + } + + return status; + +dma_alloc_err: +rx_pi_dma_unsupported: +invalid_parm: + return status; +} + +static +mlx_status +nodnic_port_send_db_dma( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + IN mlx_uint16 index + ) +{ + mlx_uint32 swapped = 0; + mlx_uint32 index32 = index; + mlx_memory_cpu_to_be32(port_priv->device->utils, index32, &swapped); + ring->send_doorbell.qp_doorbell_record->send_db = swapped; + + return MLX_SUCCESS; +} + +static +mlx_status +nodnic_port_tx_dbr_dma_init( + IN nodnic_port_priv *port_priv, + OUT nodnic_qp **qp + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + + if( port_priv == NULL || qp == NULL){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; 
+ } + + device_priv = port_priv->device; + + if ( ! device_priv->device_cap.support_uar_tx_db || ! device_priv->uar.offset ) { + status = MLX_UNSUPPORTED; + goto uar_tx_db_unsupported; + } + status = nodnic_port_allocate_ring_db_dma(port_priv, + &(*qp)->send.nodnic_ring,&(*qp)->send.nodnic_ring.send_doorbell); + MLX_FATAL_CHECK_STATUS(status, dma_alloc_err, + "tx doorbell dma allocation error"); + port_priv->send_doorbell = nodnic_port_send_db_dma; + + return status; + +dma_alloc_err: +uar_tx_db_unsupported: +invalid_parm: + + return status; +} + +mlx_status +nodnic_port_create_qp( + IN nodnic_port_priv *port_priv, + IN nodnic_queue_pair_type type, + IN mlx_size send_wq_size, + IN mlx_uint32 send_wqe_num, + IN mlx_size receive_wq_size, + IN mlx_uint32 recv_wqe_num, + OUT nodnic_qp **qp + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + mlx_uint32 max_ring_size = 0; + mlx_uint64 address = 0; + mlx_uint32 log_size = 0; + if( port_priv == NULL || qp == NULL){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + device_priv = port_priv->device; + max_ring_size = (1 << device_priv->device_cap.log_max_ring_size); + if( send_wq_size > max_ring_size || + receive_wq_size > max_ring_size ){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + status = mlx_memory_zalloc(device_priv->utils, + sizeof(nodnic_qp),(mlx_void **)qp); + MLX_FATAL_CHECK_STATUS(status, alloc_err, + "qp allocation error"); + + if( nodnic_qp_data_teable[type].send_offset == 0 || + nodnic_qp_data_teable[type].recv_offset == 0){ + status = MLX_INVALID_PARAMETER; + goto invalid_type; + } + + (*qp)->send.nodnic_ring.offset = port_priv->port_offset + + nodnic_qp_data_teable[type].send_offset; + (*qp)->receive.nodnic_ring.offset = port_priv->port_offset + + nodnic_qp_data_teable[type].recv_offset; + + status = mlx_memory_alloc_dma(device_priv->utils, + send_wq_size, NODNIC_MEMORY_ALIGN, + (void*)&(*qp)->send.wqe_virt); + MLX_FATAL_CHECK_STATUS(status, 
send_alloc_err, + "send wq allocation error"); + + status = mlx_memory_alloc_dma(device_priv->utils, + receive_wq_size, NODNIC_MEMORY_ALIGN, + &(*qp)->receive.wqe_virt); + MLX_FATAL_CHECK_STATUS(status, receive_alloc_err, + "receive wq allocation error"); + + status = mlx_memory_map_dma(device_priv->utils, + (*qp)->send.wqe_virt, + send_wq_size, + &(*qp)->send.nodnic_ring.wqe_physical, + &(*qp)->send.nodnic_ring.map); + MLX_FATAL_CHECK_STATUS(status, send_map_err, + "send wq map error"); + + status = mlx_memory_map_dma(device_priv->utils, + (*qp)->receive.wqe_virt, + receive_wq_size, + &(*qp)->receive.nodnic_ring.wqe_physical, + &(*qp)->receive.nodnic_ring.map); + MLX_FATAL_CHECK_STATUS(status, receive_map_err, + "receive wq map error"); + + status = nodnic_port_rx_pi_dma_alloc(port_priv,qp); + MLX_FATAL_CHECK_STATUS(status, rx_pi_dma_alloc_err, + "receive db dma error"); + + status = nodnic_port_tx_dbr_dma_init(port_priv,qp); + + + (*qp)->send.nodnic_ring.wq_size = send_wq_size; + (*qp)->send.nodnic_ring.num_wqes = send_wqe_num; + (*qp)->receive.nodnic_ring.wq_size = receive_wq_size; + (*qp)->receive.nodnic_ring.num_wqes = recv_wqe_num; + + /* Set Ownership bit in Send/receive queue (0 - recv ; 1 - send) */ + mlx_memory_set(device_priv->utils, (*qp)->send.wqe_virt, 0xff, send_wq_size ); + mlx_memory_set(device_priv->utils, (*qp)->receive.wqe_virt, 0, recv_wqe_num ); + + /* update send ring */ +#define NODIC_RING_QP_ADDR_HIGH 0x0 +#define NODIC_RING_QP_ADDR_LOW 0x4 + address = (mlx_uint64)(*qp)->send.nodnic_ring.wqe_physical; + status = nodnic_cmd_write(device_priv, (*qp)->send.nodnic_ring.offset + + NODIC_RING_QP_ADDR_HIGH, + (mlx_uint32)(address >> 32)); + MLX_FATAL_CHECK_STATUS(status, write_send_addr_err, + "send address write error 1"); + mlx_utils_ilog2((*qp)->send.nodnic_ring.wq_size, &log_size); + address = address | log_size; + status = nodnic_cmd_write(device_priv, (*qp)->send.nodnic_ring.offset + + NODIC_RING_QP_ADDR_LOW, + (mlx_uint32)address); + 
MLX_FATAL_CHECK_STATUS(status, write_send_addr_err, + "send address write error 2"); + /* update receive ring */ + address = (mlx_uint64)(*qp)->receive.nodnic_ring.wqe_physical; + status = nodnic_cmd_write(device_priv, (*qp)->receive.nodnic_ring.offset + + NODIC_RING_QP_ADDR_HIGH, + (mlx_uint32)(address >> 32)); + MLX_FATAL_CHECK_STATUS(status, write_recv_addr_err, + "receive address write error 1"); + mlx_utils_ilog2((*qp)->receive.nodnic_ring.wq_size, &log_size); + address = address | log_size; + status = nodnic_cmd_write(device_priv, (*qp)->receive.nodnic_ring.offset + + NODIC_RING_QP_ADDR_LOW, + (mlx_uint32)address); + MLX_FATAL_CHECK_STATUS(status, write_recv_addr_err, + "receive address write error 2"); + + return status; +write_recv_addr_err: +write_send_addr_err: + mlx_memory_ummap_dma(device_priv->utils, (*qp)->receive.nodnic_ring.map); +rx_pi_dma_alloc_err: +receive_map_err: + mlx_memory_ummap_dma(device_priv->utils, (*qp)->send.nodnic_ring.map); +send_map_err: + mlx_memory_free_dma(device_priv->utils, receive_wq_size, + &((*qp)->receive.wqe_virt)); +receive_alloc_err: + mlx_memory_free_dma(device_priv->utils, send_wq_size, + (void **)&((*qp)->send.wqe_virt)); +send_alloc_err: +invalid_type: + mlx_memory_free(device_priv->utils, (void **)qp); +alloc_err: +invalid_parm: + return status; +} + +mlx_status +nodnic_port_destroy_qp( + IN nodnic_port_priv *port_priv, + IN nodnic_queue_pair_type type __attribute__((unused)), + IN nodnic_qp *qp + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = port_priv->device; + + status = mlx_memory_ummap_dma(device_priv->utils, + qp->receive.nodnic_ring.map); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status); + } + + status = mlx_memory_ummap_dma(device_priv->utils, qp->send.nodnic_ring.map); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status); + } + + if ( 
device_priv->device_cap.support_rx_pi_dma ){ + status = mlx_memory_ummap_dma(device_priv->utils, + qp->receive.nodnic_ring.recv_doorbell.map); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status); + } + + status = mlx_memory_free_dma(device_priv->utils, + sizeof(nodnic_qp_db), + (void **)&(qp->receive.nodnic_ring.recv_doorbell.qp_doorbell_record)); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status); + } + } + + if ( device_priv->device_cap.support_uar_tx_db || ! device_priv->uar.offset){ + status = mlx_memory_ummap_dma(device_priv->utils, + qp->send.nodnic_ring.send_doorbell.map); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_ummap_dma failed (Status = %d)\n", status); + } + + status = mlx_memory_free_dma(device_priv->utils, + sizeof(nodnic_qp_db), + (void **)&(qp->send.nodnic_ring.send_doorbell.qp_doorbell_record)); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status); + } + } + + status = mlx_memory_free_dma(device_priv->utils, + qp->receive.nodnic_ring.wq_size, + (void **)&(qp->receive.wqe_virt)); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status); + } + status = mlx_memory_free_dma(device_priv->utils, + qp->send.nodnic_ring.wq_size, + (void **)&(qp->send.wqe_virt)); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_free_dma failed (Status = %d)\n", status); + } + status = mlx_memory_free(device_priv->utils, (void **)&qp); + if( status != MLX_SUCCESS){ + MLX_DEBUG_ERROR(device_priv, "mlx_memory_free failed (Status = %d)\n", status); + } + return status; +} + +mlx_status +nodnic_port_get_qpn( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + OUT mlx_uint32 *qpn + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = 0; + if( ring == NULL || 
qpn == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + if( ring->qpn != 0 ){ + *qpn = ring->qpn; + goto success; + } +#define NODNIC_RING_QPN_OFFSET 0xc +#define NODNIC_RING_QPN_MASK 0xFFFFFF + status = nodnic_cmd_read(port_priv->device, + ring->offset + NODNIC_RING_QPN_OFFSET, + &buffer); + MLX_FATAL_CHECK_STATUS(status, read_err, + "nodnic_cmd_read failed"); + ring->qpn = buffer & NODNIC_RING_QPN_MASK; + *qpn = ring->qpn; +read_err: +success: +bad_param: + return status; +} + +#ifdef DEVICE_CX3 +static +mlx_status +nodnic_port_send_db_connectx3( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring __attribute__((unused)), + IN mlx_uint16 index + ) +{ + nodnic_port_data_flow_gw *ptr = port_priv->data_flow_gw; + mlx_uint32 index32 = index; + mlx_pci_mem_write(port_priv->device->utils, MlxPciWidthUint32, 0, + (mlx_uintn)&(ptr->send_doorbell), 1, &index32); + return MLX_SUCCESS; +} + +static +mlx_status +nodnic_port_recv_db_connectx3( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring __attribute__((unused)), + IN mlx_uint16 index + ) +{ + nodnic_port_data_flow_gw *ptr = port_priv->data_flow_gw; + mlx_uint32 index32 = index; + mlx_pci_mem_write(port_priv->device->utils, MlxPciWidthUint32, 0, + (mlx_uintn)&(ptr->recv_doorbell), 1, &index32); + return MLX_SUCCESS; +} +#endif +static +mlx_status +nodnic_port_recv_db_dma( + IN nodnic_port_priv *port_priv __attribute__((unused)), + IN struct nodnic_ring *ring, + IN mlx_uint16 index + ) +{ + mlx_uint32 swapped = 0; + mlx_uint32 index32 = index; + mlx_memory_cpu_to_be32(port_priv->device->utils, index32, &swapped); + ring->recv_doorbell.qp_doorbell_record->recv_db = swapped; + return MLX_SUCCESS; +} + +mlx_status +nodnic_port_update_ring_doorbell( + IN nodnic_port_priv *port_priv, + IN struct nodnic_ring *ring, + IN mlx_uint16 index + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = 0; + if( ring == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } +#define 
NODNIC_RING_RING_OFFSET 0x8 + buffer = (mlx_uint32)((index & 0xFFFF)<< 8); + status = nodnic_cmd_write(port_priv->device, + ring->offset + NODNIC_RING_RING_OFFSET, + buffer); + MLX_CHECK_STATUS(port_priv->device, status, write_err, + "nodnic_cmd_write failed"); +write_err: +bad_param: + return status; +} + +mlx_status +nodnic_port_get_cq_size( + IN nodnic_port_priv *port_priv, + OUT mlx_uint64 *cq_size + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 out = 0; + status = nodnic_port_query(port_priv, nodnic_port_option_log_cq_size, &out); + MLX_FATAL_CHECK_STATUS(status, query_err, + "nodnic_port_query failed"); + *cq_size = 1 << out; +query_err: + return status; +} + +mlx_status +nodnic_port_allocate_eq( + IN nodnic_port_priv *port_priv, + IN mlx_uint8 log_eq_size + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + mlx_uint64 address = 0; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + device_priv = port_priv->device; + port_priv->eq.eq_size = ( ( 1 << log_eq_size ) * 1024 ); /* Size is in KB */ + status = mlx_memory_alloc_dma(device_priv->utils, + port_priv->eq.eq_size, + NODNIC_MEMORY_ALIGN, + &port_priv->eq.eq_virt); + MLX_FATAL_CHECK_STATUS(status, alloc_err, + "eq allocation error"); + + status = mlx_memory_map_dma(device_priv->utils, + port_priv->eq.eq_virt, + port_priv->eq.eq_size, + &port_priv->eq.eq_physical, + &port_priv->eq.map); + MLX_FATAL_CHECK_STATUS(status, map_err, + "eq map error"); + + address = port_priv->eq.eq_physical; + status = nodnic_port_set(port_priv, nodnic_port_option_eq_addr_low, + (mlx_uint32)address); + MLX_FATAL_CHECK_STATUS(status, set_err, + "failed to set eq addr low"); + address = (address >> 32); + status = nodnic_port_set(port_priv, nodnic_port_option_eq_addr_high, + (mlx_uint32)address); + MLX_FATAL_CHECK_STATUS(status, set_err, + "failed to set eq addr high"); + return status; +set_err: + mlx_memory_ummap_dma(device_priv->utils, 
port_priv->eq.map); +map_err: + mlx_memory_free_dma(device_priv->utils, + port_priv->eq.eq_size, + (void **)&(port_priv->eq.eq_virt)); +alloc_err: +bad_param: + return status; +} +mlx_status +nodnic_port_free_eq( + IN nodnic_port_priv *port_priv + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device_priv = NULL; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + device_priv = port_priv->device; + mlx_memory_ummap_dma(device_priv->utils, port_priv->eq.map); + + mlx_memory_free_dma(device_priv->utils, + port_priv->eq.eq_size, + (void **)&(port_priv->eq.eq_virt)); + +bad_param: + return status; +} + +mlx_status +nodnic_port_add_mac_filter( + IN nodnic_port_priv *port_priv, + IN mlx_mac_address mac + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device= NULL;; + mlx_uint8 index = 0; + mlx_uint32 out = 0; + mlx_uint32 mac_filters_en = 0; + mlx_uint32 address = 0; + mlx_mac_address zero_mac; + mlx_utils *utils = NULL; + + if( port_priv == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + device = port_priv->device; + utils = device->utils; + + mlx_memory_set(utils, &zero_mac, 0, sizeof(zero_mac)); + /* check if mac already exists */ + for( ; index < NODNIC_MAX_MAC_FILTERS ; index ++) { + mlx_memory_cmp(utils, &port_priv->mac_filters[index], &mac, + sizeof(mac), &out); + if ( out == 0 ){ + status = MLX_FAILED; + goto already_exists; + } + } + + /* serch for available mac filter slot */ + for (index = 0 ; index < NODNIC_MAX_MAC_FILTERS ; index ++) { + mlx_memory_cmp(utils, &port_priv->mac_filters[index], &zero_mac, + sizeof(zero_mac), &out); + if ( out == 0 ){ + break; + } + } + if ( index >= NODNIC_MAX_MAC_FILTERS ){ + status = MLX_FAILED; + goto mac_list_full; + } + + status = nodnic_port_query(port_priv, nodnic_port_option_mac_filters_en, + &mac_filters_en); + MLX_CHECK_STATUS(device, status , query_err, + "nodnic_port_query failed"); + if(mac_filters_en & (1 << index)){ + status = 
MLX_FAILED; + goto mac_list_full; + } + port_priv->mac_filters[index] = mac; + + // set mac filter + address = port_priv->port_offset + NODNIC_PORT_MAC_FILTERS_OFFSET + + (0x8 * index); + + status = nodnic_cmd_write(device, address, mac.high ); + MLX_CHECK_STATUS(device, status, write_err, "set mac high failed"); + status = nodnic_cmd_write(device, address + 0x4, mac.low ); + MLX_CHECK_STATUS(device, status, write_err, "set mac low failed"); + + // enable mac filter + mac_filters_en = mac_filters_en | (1 << index); + status = nodnic_port_set(port_priv, nodnic_port_option_mac_filters_en, + mac_filters_en); + MLX_CHECK_STATUS(device, status , set_err, + "nodnic_port_set failed"); +set_err: +write_err: +query_err: +mac_list_full: +already_exists: +bad_param: + return status; +} + +mlx_status +nodnic_port_remove_mac_filter( + IN nodnic_port_priv *port_priv, + IN mlx_mac_address mac + ) +{ + mlx_status status = MLX_SUCCESS; + nodnic_device_priv *device= NULL;; + mlx_uint8 index = 0; + mlx_uint32 out = 0; + mlx_uint32 mac_filters_en = 0; + mlx_mac_address zero_mac; + mlx_utils *utils = NULL; + + if( port_priv == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + device = port_priv->device; + utils = device->utils; + + mlx_memory_set(utils, &zero_mac, 0, sizeof(zero_mac)); + /* serch for mac filter */ + for( ; index < NODNIC_MAX_MAC_FILTERS ; index ++) { + mlx_memory_cmp(utils, &port_priv->mac_filters[index], &mac, + sizeof(mac), &out); + if ( out == 0 ){ + break; + } + } + if ( index == NODNIC_MAX_MAC_FILTERS ){ + status = MLX_FAILED; + goto mac_not_found; + } + + status = nodnic_port_query(port_priv, nodnic_port_option_mac_filters_en, + &mac_filters_en); + MLX_CHECK_STATUS(device, status , query_err, + "nodnic_port_query failed"); + if((mac_filters_en & (1 << index)) == 0){ + status = MLX_FAILED; + goto mac_not_en; + } + port_priv->mac_filters[index] = zero_mac; + + // disable mac filter + mac_filters_en = mac_filters_en & ~(1 << index); + status = 
nodnic_port_set(port_priv, nodnic_port_option_mac_filters_en, + mac_filters_en); + MLX_CHECK_STATUS(device, status , set_err, + "nodnic_port_set failed"); +set_err: +query_err: +mac_not_en: +mac_not_found: +bad_param: + return status; +} + +static +mlx_status +nodnic_port_set_network( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ) +{ + mlx_status status = MLX_SUCCESS; + /*mlx_uint32 network_valid = 0; + mlx_uint8 try = 0;*/ + + status = nodnic_port_set(port_priv, nodnic_port_option_network_en, value); + MLX_CHECK_STATUS(port_priv->device, status, set_err, + "nodnic_port_set failed"); + port_priv->network_state = value; +set_err: + return status; +} + +#ifdef DEVICE_CX3 +static +mlx_status +nodnic_port_set_dma_connectx3( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ) +{ + mlx_utils *utils = port_priv->device->utils; + nodnic_port_data_flow_gw *ptr = port_priv->data_flow_gw; + mlx_uint32 data = (value ? 0xffffffff : 0x0); + mlx_pci_mem_write(utils, MlxPciWidthUint32, 0, + (mlx_uintn)&(ptr->dma_en), 1, &data); + return MLX_SUCCESS; +} +#endif + +static +mlx_status +nodnic_port_set_dma( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ) +{ + return nodnic_port_set(port_priv, nodnic_port_option_dma_en, value); +} + +static +mlx_status +nodnic_port_check_and_set_dma( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ) +{ + mlx_status status = MLX_SUCCESS; + if ( port_priv->dma_state == value ) { + MLX_DEBUG_WARN(port_priv->device, + "nodnic_port_check_and_set_dma: already %s\n", + (value ? 
"enabled" : "disabled")); + status = MLX_SUCCESS; + goto set_out; + } + + status = port_priv->set_dma(port_priv, value); + MLX_CHECK_STATUS(port_priv->device, status, set_err, + "nodnic_port_set failed"); + port_priv->dma_state = value; +set_err: +set_out: + return status; +} + + +mlx_status +nodnic_port_set_promisc( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ){ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = value; + + status = nodnic_port_set(port_priv, nodnic_port_option_port_promisc_en, buffer); + MLX_CHECK_STATUS(port_priv->device, status, set_err, + "nodnic_port_set failed"); +set_err: + return status; +} + +mlx_status +nodnic_port_set_promisc_multicast( + IN nodnic_port_priv *port_priv, + IN mlx_boolean value + ){ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer = value; + + status = nodnic_port_set(port_priv, nodnic_port_option_port_promisc_multicast_en, buffer); + MLX_CHECK_STATUS(port_priv->device, status, set_err, + "nodnic_port_set failed"); +set_err: + return status; +} + +mlx_status +nodnic_port_init( + IN nodnic_port_priv *port_priv + ) +{ + mlx_status status = MLX_SUCCESS; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = nodnic_port_set_network(port_priv, TRUE); + MLX_FATAL_CHECK_STATUS(status, set_err, + "nodnic_port_set_network failed"); +set_err: +bad_param: + return status; +} + +mlx_status +nodnic_port_close( + IN nodnic_port_priv *port_priv + ) +{ + mlx_status status = MLX_SUCCESS; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = nodnic_port_set_network(port_priv, FALSE); + MLX_FATAL_CHECK_STATUS(status, set_err, + "nodnic_port_set_network failed"); +set_err: +bad_param: + return status; +} + +mlx_status +nodnic_port_enable_dma( + IN nodnic_port_priv *port_priv + ) +{ + mlx_status status = MLX_SUCCESS; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = 
nodnic_port_check_and_set_dma(port_priv, TRUE); + MLX_CHECK_STATUS(port_priv->device, status, set_err, + "nodnic_port_check_and_set_dma failed"); +set_err: +bad_param: + return status; +} + +mlx_status +nodnic_port_disable_dma( + IN nodnic_port_priv *port_priv + ) +{ + mlx_status status = MLX_SUCCESS; + + if( port_priv == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = nodnic_port_check_and_set_dma(port_priv, FALSE); + MLX_CHECK_STATUS(port_priv->device, status, set_err, + "nodnic_port_check_and_set_dma failed"); +set_err: +bad_param: + return status; +} + +mlx_status +nodnic_port_thin_init( + IN nodnic_device_priv *device_priv, + IN nodnic_port_priv *port_priv, + IN mlx_uint8 port_index + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_boolean reset_needed = 0; +#ifdef DEVICE_CX3 + mlx_uint32 offset; +#endif + + if( device_priv == NULL || port_priv == NULL || port_index > 1){ + status = MLX_INVALID_PARAMETER; + goto invalid_parm; + } + + port_priv->device = device_priv; + + port_priv->port_offset = device_priv->device_offset + + nodnic_port_offset_table[port_index]; + + port_priv->port_num = port_index + 1; + + port_priv->send_doorbell = nodnic_port_update_ring_doorbell; + port_priv->recv_doorbell = nodnic_port_update_ring_doorbell; + port_priv->set_dma = nodnic_port_set_dma; +#ifdef DEVICE_CX3 + if (device_priv->device_cap.crspace_doorbells) { + status = nodnic_cmd_read(device_priv, (port_priv->port_offset + 0x100), + &offset); + if (status != MLX_SUCCESS) { + return status; + } else { + port_priv->data_flow_gw = (nodnic_port_data_flow_gw *) + (device_priv->utils->config + offset); + } + if ( nodnic_port_set ( port_priv, nodnic_port_option_crspace_en, 1 ) ) { + return MLX_FAILED; + } + port_priv->send_doorbell = nodnic_port_send_db_connectx3; + port_priv->recv_doorbell = nodnic_port_recv_db_connectx3; + port_priv->set_dma = nodnic_port_set_dma_connectx3; + } +#endif + if ( device_priv->device_cap.support_rx_pi_dma ) { + 
port_priv->recv_doorbell = nodnic_port_recv_db_dma; + } + + /* clear reset_needed */ + nodnic_port_read_reset_needed(port_priv, &reset_needed); + + port_priv->port_type = NODNIC_PORT_TYPE_UNKNOWN; +invalid_parm: + return status; +} diff --git a/src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h b/src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h new file mode 100644 index 00000000..1f8ba89e --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/private/mlx_memory_priv.h @@ -0,0 +1,113 @@ +#ifndef MLXUTILS_INCLUDE_PRIVATE_MEMORYPRIV_H_ +#define MLXUTILS_INCLUDE_PRIVATE_MEMORYPRIV_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../../mlx_utils/include/public/mlx_utils.h" + +mlx_status +mlx_memory_alloc_priv( + IN mlx_utils *utils, + IN mlx_size size, + OUT mlx_void **ptr + ); + +mlx_status +mlx_memory_zalloc_priv( + IN mlx_utils *utils, + IN mlx_size size, + OUT mlx_void **ptr + ); + +mlx_status +mlx_memory_free_priv( + IN mlx_utils *utils, + IN mlx_void *ptr + ); +mlx_status +mlx_memory_alloc_dma_priv( + IN mlx_utils *utils, + IN mlx_size size , + IN mlx_size align, + OUT mlx_void **ptr + ); + +mlx_status +mlx_memory_free_dma_priv( + IN mlx_utils *utils, + IN mlx_size size , + IN mlx_void *ptr + ); +mlx_status +mlx_memory_map_dma_priv( + IN mlx_utils *utils, + IN mlx_void *addr , + IN mlx_size number_of_bytes, + OUT mlx_physical_address *phys_addr, + OUT mlx_void **mapping + ); + +mlx_status +mlx_memory_ummap_dma_priv( + IN mlx_utils *utils, + IN mlx_void *mapping + ); + +mlx_status +mlx_memory_cmp_priv( + IN mlx_utils *utils, + IN mlx_void *first_block, + IN mlx_void *second_block, + IN mlx_size size, + OUT mlx_uint32 *out + ); + +mlx_status +mlx_memory_set_priv( + IN mlx_utils *utils, + IN mlx_void *block, + IN mlx_int32 value, + IN mlx_size size + ); + +mlx_status +mlx_memory_cpy_priv( + IN mlx_utils *utils, + OUT mlx_void *destination_buffer, + IN mlx_void *source_buffer, + IN mlx_size length + ); + +mlx_status +mlx_memory_cpu_to_be32_priv( + IN mlx_utils *utils, + IN mlx_uint32 source, + IN mlx_uint32 *destination + ); + +mlx_status +mlx_memory_be32_to_cpu_priv( + IN mlx_utils *utils, + IN mlx_uint32 source, + IN mlx_uint32 *destination + ); +#endif /* STUB_MLXUTILS_INCLUDE_PRIVATE_MEMORYPRIV_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h b/src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h new file mode 100644 index 00000000..cf35e5b7 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/private/mlx_pci_priv.h @@ -0,0 +1,77 @@ +#ifndef 
STUB_MLXUTILS_INCLUDE_PRIVATE_PCIPRIV_H_ +#define STUB_MLXUTILS_INCLUDE_PRIVATE_PCIPRIV_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_pci.h" +#include "../../include/public/mlx_utils.h" + +mlx_status +mlx_pci_init_priv( + IN mlx_utils *utils + ); + +mlx_status +mlx_pci_teardown_priv( + IN mlx_utils *utils + ); + +mlx_status +mlx_pci_read_priv( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ); + +mlx_status +mlx_pci_write_priv( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ); + +mlx_status +mlx_pci_mem_read_priv( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint8 bar_index, + IN mlx_uint64 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ); + +mlx_status +mlx_pci_mem_write_priv( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint8 bar_index, + IN mlx_uint64 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ); + + +#endif /* STUB_MLXUTILS_INCLUDE_PRIVATE_PCIPRIV_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h 
b/src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h new file mode 100644 index 00000000..268b76fa --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/private/mlx_utils_priv.h @@ -0,0 +1,68 @@ +#ifndef SRC_DRIVERS_INFINIBAND_MLX_UTILS_INCLUDE_PRIVATE_MLX_UTILS_PRIV_H_ +#define SRC_DRIVERS_INFINIBAND_MLX_UTILS_INCLUDE_PRIVATE_MLX_UTILS_PRIV_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_utils.h" + +mlx_status +mlx_utils_delay_in_ms_priv( + IN mlx_uint32 msecs + ); + +mlx_status +mlx_utils_delay_in_us_priv( + IN mlx_uint32 usecs + ); + +mlx_status +mlx_utils_ilog2_priv( + IN mlx_uint32 i, + OUT mlx_uint32 *log + ); + +mlx_status +mlx_utils_init_lock_priv( + OUT void **lock + ); + +mlx_status +mlx_utils_free_lock_priv( + IN void *lock + ); + +mlx_status +mlx_utils_acquire_lock_priv ( + IN void *lock + ); + +mlx_status +mlx_utils_release_lock_priv ( + IN void *lock + ); + +mlx_status +mlx_utils_rand_priv ( + IN mlx_utils *utils, + OUT mlx_uint32 *rand_num + ); +#endif /* SRC_DRIVERS_INFINIBAND_MLX_UTILS_INCLUDE_PRIVATE_MLX_UTILS_PRIV_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h new file mode 100644 index 00000000..a4f4b37b --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_bail.h @@ -0,0 +1,47 @@ +#ifndef INCLUDE_PUBLIC_MLXBAIL_H_ +#define INCLUDE_PUBLIC_MLXBAIL_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_types.h" + +#define MLX_BAIL_ERROR(id, status,message) MLX_CHECK_STATUS(id, status, bail, message) + +#define MLX_FATAL_CHECK_STATUS(status, label, message) \ + do { \ + if (status != MLX_SUCCESS) { \ + MLX_DEBUG_FATAL_ERROR(message " (Status = %d)\n", status); \ + goto label; \ + } \ + } while (0) + +#define MLX_CHECK_STATUS(id, status, label, message) \ + do { \ + if (status != MLX_SUCCESS) { \ + MLX_DEBUG_ERROR(id, message " (Status = %d)\n", status);\ + goto label; \ + } \ + } while (0) + + + +#endif /* INCLUDE_PUBLIC_MLXBAIL_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h new file mode 100644 index 00000000..1ed423da --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_icmd.h @@ -0,0 +1,63 @@ +#ifndef MLXUTILS_INCLUDE_PUBLIC_MLX_ICMD_H_ +#define MLXUTILS_INCLUDE_PUBLIC_MLX_ICMD_H_ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_utils.h" + +#define MLX_ICMD_MB_ADDR 0x100000 +#define MLX_ICMD_MB_SIZE_ADDR 0x1000 +#define MLX_ICMD_CTRL_ADDR 0x0 + +#define MLX_ICMD_SEMAPHORE_ADDR 0x0 + +#define MLX_ICMD_SEMAPHORE_ID 1234 + +enum { + FLASH_REG_ACCESS = 0x9001, + GET_FW_INFO = 0x8007, + QUERY_VIRTUAL_MAC = 0x9003, + SET_VIRTUAL_MAC = 0x9004, + QUERY_WOL_ROL = 0x9005, + SET_WOL_ROL = 0x9006, + OCBB_INIT = 0x9007, + OCBB_QUERY_HEADER_STATUS = 0x9008, + OCBB_QUERY_ETOC_STATUS = 0x9009, + OCBB_QUERY_SET_EVENT = 0x900A, + OCSD_INIT = 0xf004, +}; + +struct mlx_icmd_ocsd { + mlx_uint32 reserved; + mlx_uint64 address; +}; + +mlx_status +mlx_icmd_send_command( + IN mlx_utils *utils, + IN mlx_uint16 opcode, + IN OUT mlx_void* data, + IN mlx_uint32 write_data_size, + IN mlx_uint32 read_data_size + ); + +#endif /* MLXUTILS_INCLUDE_PUBLIC_MLX_ICMD_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h new file mode 100644 index 00000000..7ff06bbf --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_logging.h @@ -0,0 +1,47 @@ +#ifndef PUBLIC_INCLUDE_MLX_LOGGER_H_ +#define PUBLIC_INCLUDE_MLX_LOGGER_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../../mlx_utils_flexboot/include/mlx_logging_priv.h" + +#define MLX_PRINT(...) MLX_PRINT_PRIVATE(__VA_ARGS__) +#define MLX_DEBUG_FATAL_ERROR(...) MLX_DEBUG_FATAL_ERROR_PRIVATE(__VA_ARGS__) +#define MLX_DEBUG_ERROR(...) MLX_DEBUG_ERROR_PRIVATE(__VA_ARGS__) +#define MLX_DEBUG_WARN(...) MLX_DEBUG_WARN_PRIVATE(__VA_ARGS__) +#define MLX_DEBUG_INFO1(...) MLX_DEBUG_INFO1_PRIVATE(__VA_ARGS__) +#define MLX_DEBUG_INFO2(...) MLX_DEBUG_INFO2_PRIVATE(__VA_ARGS__) +#define MLX_DBG_ERROR(...) MLX_DBG_ERROR_PRIVATE(__VA_ARGS__) +#define MLX_DBG_WARN(...) MLX_DBG_WARN_PRIVATE(__VA_ARGS__) +#define MLX_DBG_INFO1(...) MLX_DBG_INFO1_PRIVATE(__VA_ARGS__) +#define MLX_DBG_INFO2(...) MLX_DBG_INFO2_PRIVATE(__VA_ARGS__) + +#define MLX_TRACE_1_START() MLX_DBG_INFO1_PRIVATE("Start\n") +#define MLX_TRACE_1_END() MLX_DBG_INFO1_PRIVATE("End\n") +#define MLX_TRACE_1_END_STATUS(status) MLX_DBG_INFO1_PRIVATE("End (%s=%d)\n", #status,status) +#define MLX_TRACE_2_START() MLX_DBG_INFO2_PRIVATE("Start\n") +#define MLX_TRACE_2_END() MLX_DBG_INFO2_PRIVATE("End\n") +#define MLX_TRACE_2_END_STATUS(status) MLX_DBG_INFO2_PRIVATE("End (%s=%d)\n", #status,status) + + + +#endif /* PUBLIC_INCLUDE_MLX_LOGGER_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h new file mode 100644 index 00000000..05675666 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_memory.h @@ -0,0 +1,115 @@ +#ifndef MLXUTILS_INCLUDE_PUBLIC_MEMORY_H_ +#define MLXUTILS_INCLUDE_PUBLIC_MEMORY_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_utils.h" + + +mlx_status +mlx_memory_alloc( + IN mlx_utils *utils, + IN mlx_size size, + OUT mlx_void **ptr + ); + +mlx_status +mlx_memory_zalloc( + IN mlx_utils *utils, + IN mlx_size size, + OUT mlx_void **ptr + ); + +mlx_status +mlx_memory_free( + IN mlx_utils *utils, + IN mlx_void **ptr + ); +mlx_status +mlx_memory_alloc_dma( + IN mlx_utils *utils, + IN mlx_size size , + IN mlx_size align, + OUT mlx_void **ptr + ); + +mlx_status +mlx_memory_free_dma( + IN mlx_utils *utils, + IN mlx_size size , + IN mlx_void **ptr + ); +mlx_status +mlx_memory_map_dma( + IN mlx_utils *utils, + IN mlx_void *Addr , + IN mlx_size NumberOfBytes, + OUT mlx_physical_address *PhysAddr, + OUT mlx_void **Mapping + ); + +mlx_status +mlx_memory_ummap_dma( + IN mlx_utils *utils, + IN mlx_void *Mapping + ); + +mlx_status +mlx_memory_cmp( + IN mlx_utils *utils, + IN mlx_void *first_block, + IN mlx_void *second_block, + IN mlx_size size, + OUT mlx_uint32 *out + ); + +mlx_status +mlx_memory_set( + IN mlx_utils *utils, + IN mlx_void *block, + IN mlx_int32 value, + IN mlx_size size + ); + +mlx_status +mlx_memory_cpy( + IN mlx_utils *utils, + OUT mlx_void *destination_buffer, + IN mlx_void *source_buffer, + IN mlx_size length + ); + 
+mlx_status
+mlx_memory_cpu_to_be32(
+		IN mlx_utils *utils,
+		IN mlx_uint32 source,
+		IN mlx_uint32 *destination
+		);
+
+mlx_status
+mlx_memory_be32_to_cpu(
+		IN mlx_utils *utils,
+		IN mlx_uint32 source,
+		IN mlx_uint32 *destination
+		);
+
+#endif /* MLXUTILS_INCLUDE_PUBLIC_MEMORY_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h
new file mode 100644
index 00000000..60eb55d5
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci.h
@@ -0,0 +1,83 @@
+#ifndef STUB_MLXUTILS_INCLUDE_PUBLIC_PCI_H_
+#define STUB_MLXUTILS_INCLUDE_PUBLIC_PCI_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_utils.h" + +typedef enum { + MlxPciWidthUint8 = 0, + MlxPciWidthUint16, + MlxPciWidthUint32, + MlxPciWidthUint64, +} mlx_pci_width; + +mlx_status +mlx_pci_init( + IN mlx_utils *utils + ); + +mlx_status +mlx_pci_teardown( + IN mlx_utils *utils + ); + +mlx_status +mlx_pci_read( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ); + +mlx_status +mlx_pci_write( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ); + +mlx_status +mlx_pci_mem_read( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint8 bar_index, + IN mlx_uint64 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ); + +mlx_status +mlx_pci_mem_write( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint8 bar_index, + IN mlx_uint64 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ); + + +#endif /* STUB_MLXUTILS_INCLUDE_PUBLIC_PCI_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h new file mode 100644 index 00000000..c074a22e --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_pci_gw.h @@ -0,0 +1,81 @@ +#ifndef INCLUDE_PUBLIC_MLX_PCI_GW_H_ +#define INCLUDE_PUBLIC_MLX_PCI_GW_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_utils.h" + +#define PCI_GW_FIRST_CAPABILITY_POINTER_OFFSET 0x34 + +#define PCI_GW_CAPABILITY_ID 0x9 + +#define PCI_GW_CAPABILITY_ID_OFFSET 0x0 +#define PCI_GW_CAPABILITY_NEXT_POINTER_OFFSET 0x1 +#define PCI_GW_CAPABILITY_SPACE_OFFSET 0x4 +#define PCI_GW_CAPABILITY_STATUS_OFFSET 0x7 +#define PCI_GW_CAPABILITY_COUNTER_OFFSET 0x8 +#define PCI_GW_CAPABILITY_SEMAPHORE_OFFSET 0xC +#define PCI_GW_CAPABILITY_ADDRESS_OFFSET 0x10 +#define PCI_GW_CAPABILITY_FLAG_OFFSET 0x10 +#define PCI_GW_CAPABILITY_DATA_OFFSET 0x14 + +#define PCI_GW_SEMPHORE_TRIES 3000000 +#define PCI_GW_GET_OWNERSHIP_TRIES 5000 +#define PCI_GW_READ_FLAG_TRIES 3000000 + +#define PCI_GW_WRITE_FLAG 0x80000000 + +#define PCI_GW_SPACE_NODNIC 0x4 +#define PCI_GW_SPACE_ALL_ICMD 0x3 +#define PCI_GW_SPACE_SEMAPHORE 0xa +#define PCI_GW_SPACE_CR0 0x2 + +typedef mlx_uint32 mlx_pci_gw_buffer; + + +mlx_status +mlx_pci_gw_init( + IN mlx_utils *utils + ); +mlx_status +mlx_pci_gw_teardown( + IN mlx_utils *utils + ); +mlx_status +mlx_pci_gw_read( + IN mlx_utils *utils, + IN mlx_pci_gw_space space, + IN mlx_uint32 address, + OUT mlx_pci_gw_buffer *buffer + ); + +mlx_status +mlx_pci_gw_write( + IN mlx_utils *utils, + IN mlx_pci_gw_space space, + IN mlx_uint32 address, + IN mlx_pci_gw_buffer buffer + ); + + + +#endif /* INCLUDE_PUBLIC_MLX_PCI_GW_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_types.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_types.h new file mode 100644 index 00000000..9c66567a --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_types.h @@ -0,0 +1,27 @@ +#ifndef INCLUDE_PUBLIC_MLXTYPES_H_ +#define INCLUDE_PUBLIC_MLXTYPES_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../../mlx_utils_flexboot/include/mlx_types_priv.h"
+
+#endif /* INCLUDE_PUBLIC_MLXTYPES_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h b/src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h
new file mode 100644
index 00000000..46ad97c3
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/include/public/mlx_utils.h
@@ -0,0 +1,106 @@
+#ifndef MLXUTILS_INCLUDE_PUBLIC_MLXUTILS_H_
+#define MLXUTILS_INCLUDE_PUBLIC_MLXUTILS_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_logging.h" +#include "mlx_types.h" + +#define IN +#define OUT + +typedef mlx_uint16 mlx_pci_gw_space; + +typedef struct{ + mlx_uint32 pci_cmd_offset; + mlx_pci_gw_space space; +} __attribute__ (( packed )) mlx_pci_gw; + +typedef struct { + mlx_boolean icmd_opened; + mlx_boolean took_semaphore; + mlx_uint32 max_cmd_size; +} __attribute__ (( packed )) mlx_icmd ; + +typedef struct{ + mlx_pci *pci; + mlx_pci_gw pci_gw; + mlx_icmd icmd; + void *lock; +#ifdef DEVICE_CX3 + /* ACCESS to BAR0 */ + void *config; +#endif +} __attribute__ (( packed )) mlx_utils; + +mlx_status +mlx_utils_init( + IN mlx_utils *utils, + IN mlx_pci *pci + ); + +mlx_status +mlx_utils_teardown( + IN mlx_utils *utils + ); +mlx_status +mlx_utils_delay_in_ms( + IN mlx_uint32 msecs + ); + +mlx_status +mlx_utils_delay_in_us( + IN mlx_uint32 usecs + ); + +mlx_status +mlx_utils_ilog2( + IN mlx_uint32 i, + OUT mlx_uint32 *log + ); + +mlx_status +mlx_utils_init_lock( + IN OUT mlx_utils *utils + ); + +mlx_status +mlx_utils_free_lock( + IN OUT mlx_utils *utils + ); + +mlx_status +mlx_utils_acquire_lock ( + IN OUT mlx_utils *utils + ); + +mlx_status +mlx_utils_release_lock ( + IN OUT mlx_utils *utils + ); + +mlx_status +mlx_utils_rand ( + IN mlx_utils *utils, + OUT mlx_uint32 *rand_num + ); +#endif /* STUB_MLXUTILS_INCLUDE_PUBLIC_MLXUTILS_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c new file mode 100644 index 00000000..ba56e72f --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_lib/mlx_blink_leds/mlx_blink_leds.h" +#include "../../include/public/mlx_memory.h" +#include "../../include/public/mlx_bail.h" + +mlx_status +mlx_blink_leds( + IN mlx_utils *utils, + IN mlx_uint16 secs + ) +{ + mlx_status status = MLX_SUCCESS; + struct mlx_led_control led_control; + mlx_uint32 reg_status; + + if (utils == NULL ) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + mlx_memory_set(utils, &led_control, 0, sizeof(led_control)); + led_control.beacon_duration = secs; + status = mlx_reg_access(utils, REG_ID_MLCR, REG_ACCESS_WRITE, &led_control, sizeof(led_control), + ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } +reg_err: +bad_param: + return status; +} + diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h new file mode 100644 index 00000000..886645fe --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds/mlx_blink_leds.h @@ -0,0 +1,46 @@ +#ifndef MLX_BLINK_LEDS_H_ 
+#define MLX_BLINK_LEDS_H_
+
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER );
+
+#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"
+#include "../../include/public/mlx_utils.h"
+
+struct mlx_led_control {
+	mlx_uint32 reserved1	:16;
+	mlx_uint32 port	:8;
+	mlx_uint32 bla	:8;
+/* -------------- */
+	mlx_uint32 beacon_duration	:16;
+	mlx_uint32 reserved2	:16;
+/* -------------- */
+	mlx_uint32 beacon_remain	:16;
+	mlx_uint32 reserved3	:16;
+};
+
+mlx_status
+mlx_blink_leds(
+		IN mlx_utils *utils,
+		IN mlx_uint16 secs
+	);
+
+#endif /* MLX_BLINK_LEDS_H_ */
diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c
new file mode 100644
index 00000000..d3155302
--- /dev/null
+++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2015 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_lib/mlx_link_speed/mlx_link_speed.h" +#include "../../include/public/mlx_memory.h" +#include "../../include/public/mlx_bail.h" + +mlx_status +mlx_set_link_speed( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + IN LINK_SPEED_TYPE type, + IN LINK_SPEED speed + ) +{ + mlx_status status = MLX_SUCCESS; + struct mlx_link_speed link_speed; + mlx_uint32 reg_status; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_memory_set(utils, &link_speed, 0, sizeof(link_speed)); + + link_speed.loacl_port = port_num; + link_speed.proto_mask = 1 << type; + + status = mlx_reg_access(utils, REG_ID_PTYS, REG_ACCESS_READ, &link_speed, + sizeof(link_speed), ®_status); + + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } + switch (speed) { + case LINK_SPEED_1GB: + link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_1GB_MASK; + break; + case LINK_SPEED_10GB: + link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_10GB_MASK; + break; + case LINK_SPEED_40GB: + link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_40GB_MASK; + break; + case LINK_SPEED_100GB: + link_speed.eth_proto_admin = link_speed.eth_proto_capability & LINK_SPEED_100GB_MASK; + break; + case LINK_SPEED_SDR: + 
link_speed.ib_proto_admin = link_speed.ib_proto_capability & LINK_SPEED_SDR_MASK; + break; + case LINK_SPEED_DEFAULT: + if (type == LINK_SPEED_ETH) { + link_speed.eth_proto_admin = link_speed.eth_proto_capability; + } else { + link_speed.ib_proto_admin = link_speed.ib_proto_capability; + } + break; + } + status = mlx_reg_access(utils, REG_ID_PTYS, REG_ACCESS_WRITE, &link_speed, + sizeof(link_speed), ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } +reg_err: +bad_param: + return status; +} + +mlx_status +mlx_get_max_speed( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + IN LINK_SPEED_TYPE type, + OUT mlx_uint64 *speed + ) +{ + mlx_status status = MLX_SUCCESS; + struct mlx_link_speed link_speed; + mlx_uint32 reg_status; + mlx_uint64 speed_giga = 0; + mlx_uint8 lanes_number = 1; + + *speed = 0; + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_memory_set(utils, &link_speed, 0, sizeof(link_speed)); + + link_speed.loacl_port = port_num; + link_speed.proto_mask = 1 << type; + + status = mlx_reg_access(utils, REG_ID_PTYS, REG_ACCESS_READ, &link_speed, + sizeof(link_speed), ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } + + if ( type == LINK_SPEED_ETH ) { + if ( link_speed.eth_proto_capability & LINK_SPEED_100GB_MASK ) { + speed_giga = 100; + } else if ( link_speed.eth_proto_capability & LINK_SPEED_56GB_MASK ) { + speed_giga = 56; + } else if ( link_speed.eth_proto_capability & LINK_SPEED_50GB_MASK ) { + speed_giga = 50; + } else if ( link_speed.eth_proto_capability & LINK_SPEED_40GB_MASK ) { + speed_giga = 40; + } else if (link_speed.eth_proto_capability & 
LINK_SPEED_25GB_MASK) { + speed_giga = 25; + } else if ( link_speed.eth_proto_capability & LINK_SPEED_20GB_MASK ) { + speed_giga = 20; + } else if ( link_speed.eth_proto_capability & LINK_SPEED_10GB_MASK) { + speed_giga = 10; + } else if ( link_speed.eth_proto_capability & LINK_SPEED_1GB_MASK ) { + speed_giga = 1; + } + } else { + if ( link_speed.ib_proto_capability & LINK_SPEED_EDR_MASK ) { + speed_giga = 25; + } else if ( link_speed.ib_proto_capability & LINK_SPEED_EDR20_MASK ) { + speed_giga = 20; + } else if ( link_speed.ib_proto_capability & LINK_SPEED_FDR_MASK ) { + speed_giga = 14; + } else if ( link_speed.ib_proto_capability & LINK_SPEED_QDR_MASK ) { + speed_giga = 10; + } else if ( link_speed.ib_proto_capability & LINK_SPEED_DDR_MASK ) { + speed_giga = 5; + } else if ( link_speed.ib_proto_capability & LINK_SPEED_SDR_MASK ) { + speed_giga = 2.5; + } + if ( link_speed.ib_link_width_capability & LINK_SPEED_WITDH_12_MASK ) { + lanes_number = 12; + } else if ( link_speed.ib_link_width_capability & LINK_SPEED_WITDH_8_MASK ) { + lanes_number = 8; + } else if (link_speed.ib_link_width_capability & LINK_SPEED_WITDH_4_MASK ) { + lanes_number = 4; + } else if (link_speed.ib_link_width_capability & LINK_SPEED_WITDH_2_MASK ) { + lanes_number = 2; + } else if (link_speed.ib_link_width_capability & LINK_SPEED_WITDH_1_MASK ) { + lanes_number = 1; + } + speed_giga = speed_giga * lanes_number; + } + // Return data in bits + *speed = speed_giga * GIGA_TO_BIT; +reg_err: +bad_param: + return status; +} + + diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h new file mode 100644 index 00000000..cb167d6a --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h @@ -0,0 +1,150 @@ +#ifndef MLX_LINK_SPEED_H_ +#define MLX_LINK_SPEED_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h" +#include "../../include/public/mlx_utils.h" + +#define LINK_SPEED_100GB_MASK (ETH_SPEED_ENABLE_MASK_100GBASECR4 | ETH_SPEED_ENABLE_MASK_100GBASESR4 | ETH_SPEED_ENABLE_MASK_100GBASEKR4 | ETH_SPEED_ENABLE_MASK_100GBASELR4) +#define LINK_SPEED_56GB_MASK (ETH_SPEED_ENABLE_MASK_56GBASER4) +#define LINK_SPEED_50GB_MASK (ETH_SPEED_ENABLE_MASK_50GBASECR2 | ETH_SPEED_ENABLE_MASK_50GBASEKR2) +#define LINK_SPEED_40GB_MASK (ETH_SPEED_ENABLE_MASK_40GBASECR4 | ETH_SPEED_ENABLE_MASK_40GBASEKR4 | ETH_SPEED_ENABLE_MASK_40GBASESR4 | ETH_SPEED_ENABLE_MASK_40GBASELR4) +#define LINK_SPEED_25GB_MASK (ETH_SPEED_ENABLE_MASK_25GBASECR | ETH_SPEED_ENABLE_MASK_25GBASEKR | ETH_SPEED_ENABLE_MASK_25GBASESR) +#define LINK_SPEED_20GB_MASK (ETH_SPEED_ENABLE_MASK_20GBASER2) +#define LINK_SPEED_10GB_MASK (ETH_SPEED_ENABLE_MASK_10GBASECR | ETH_SPEED_ENABLE_MASK_10GBASESR | ETH_SPEED_ENABLE_MASK_10GBASELR | ETH_SPEED_ENABLE_MASK_10GBASEKR) +#define LINK_SPEED_1GB_MASK (ETH_SPEED_ENABLE_MASK_1000BASECX | ETH_SPEED_ENABLE_MASK_1000BASEKX | ETH_SPEED_ENABLE_MASK_100BaseTX | ETH_SPEED_ENABLE_MASK_1000BASET) + +#define LINK_SPEED_SDR_MASK 0x1 +#define LINK_SPEED_DDR_MASK 0x2 +#define 
LINK_SPEED_QDR_MASK 0xC +#define LINK_SPEED_FDR_MASK 0x10 +#define LINK_SPEED_EDR20_MASK 0x200 +#define LINK_SPEED_EDR_MASK 0x20 + +#define LINK_SPEED_WITDH_1_MASK 0x1 +#define LINK_SPEED_WITDH_2_MASK 0x2 +#define LINK_SPEED_WITDH_4_MASK 0x4 +#define LINK_SPEED_WITDH_8_MASK 0x8 +#define LINK_SPEED_WITDH_12_MASK 0x10 + +#define GIGA_TO_BIT 0x40000000 + +enum { + ETH_SPEED_ENABLE_MASK_1000BASECX = 0x0001, + ETH_SPEED_ENABLE_MASK_1000BASEKX = 0x0002, + ETH_SPEED_ENABLE_MASK_10GBASECX4 = 0x0004, + ETH_SPEED_ENABLE_MASK_10GBASEKX4 = 0x0008, + ETH_SPEED_ENABLE_MASK_10GBASEKR = 0x0010, + ETH_SPEED_ENABLE_MASK_20GBASER2 = 0x0020, + ETH_SPEED_ENABLE_MASK_40GBASECR4 = 0x0040, + ETH_SPEED_ENABLE_MASK_40GBASEKR4 = 0x0080, + ETH_SPEED_ENABLE_MASK_56GBASER4 = 0x0100, + ETH_SPEED_ENABLE_MASK_10GBASECR = 0x1000, + ETH_SPEED_ENABLE_MASK_10GBASESR = 0x2000, + ETH_SPEED_ENABLE_MASK_10GBASELR = 0x4000, + ETH_SPEED_ENABLE_MASK_40GBASESR4 = 0x8000, + ETH_SPEED_ENABLE_MASK_40GBASELR4 = 0x10000, + ETH_SPEED_ENABLE_MASK_50GBASEKR4 = 0x80000, + ETH_SPEED_ENABLE_MASK_100GBASECR4 = 0x100000, + ETH_SPEED_ENABLE_MASK_100GBASESR4 = 0x200000, + ETH_SPEED_ENABLE_MASK_100GBASEKR4 = 0x400000, + ETH_SPEED_ENABLE_MASK_100GBASELR4 = 0x800000, + ETH_SPEED_ENABLE_MASK_100BaseTX = 0x1000000, + ETH_SPEED_ENABLE_MASK_1000BASET = 0x2000000, + ETH_SPEED_ENABLE_MASK_10GBASET = 0x4000000, + ETH_SPEED_ENABLE_MASK_25GBASECR = 0x8000000, + ETH_SPEED_ENABLE_MASK_25GBASEKR = 0x10000000, + ETH_SPEED_ENABLE_MASK_25GBASESR = 0x20000000, + ETH_SPEED_ENABLE_MASK_50GBASECR2 = 0x40000000, + ETH_SPEED_ENABLE_MASK_50GBASEKR2 = 0x80000000, + ETH_SPEED_ENABLE_MASK_BAD = 0xffff, +}; + + +typedef enum { + LINK_SPEED_IB = 0, + LINK_SPEED_FC, + LINK_SPEED_ETH, +} LINK_SPEED_TYPE; + +typedef enum { + LINK_SPEED_1GB = 0, + LINK_SPEED_10GB, + LINK_SPEED_40GB, + LINK_SPEED_100GB, + LINK_SPEED_SDR, + LINK_SPEED_DEFAULT, +} LINK_SPEED; + +struct mlx_link_speed { + mlx_uint32 proto_mask :3; + mlx_uint32 reserved1 :13; + mlx_uint32 
loacl_port :8; + mlx_uint32 reserved2 :8; + /* -------------- */ + mlx_uint32 reserved3 :32; + /* -------------- */ + mlx_uint32 reserved4 :32; + /* -------------- */ + mlx_uint32 eth_proto_capability :32; + /* -------------- */ + mlx_uint32 ib_proto_capability :16; + mlx_uint32 ib_link_width_capability :16; + /* -------------- */ + mlx_uint32 reserved5 :32; + /* -------------- */ + mlx_uint32 eth_proto_admin :32; + /* -------------- */ + mlx_uint32 ib_proto_admin :16; + mlx_uint32 ib_link_width_admin :16; + /* -------------- */ + mlx_uint32 reserved6 :32; + /* -------------- */ + mlx_uint32 eth_proto_oper :32; + /* -------------- */ + mlx_uint32 ib_proto_oper :16; + mlx_uint32 ib_link_width_oper :16; + /* -------------- */ + mlx_uint32 reserved7 :32; + /* -------------- */ + mlx_uint32 eth_proto_lp_advertise :32; + mlx_uint32 reserved[3]; +}; + +mlx_status +mlx_set_link_speed( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + IN LINK_SPEED_TYPE type, + IN LINK_SPEED speed + ); + +mlx_status +mlx_get_max_speed( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + IN LINK_SPEED_TYPE type, + OUT mlx_uint64 *speed + ); + +#endif /* MLX_LINK_SPEED_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c new file mode 100644 index 00000000..f0af1ecf --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.c @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "mlx_mtu.h" +#include "../../include/public/mlx_memory.h" +#include "../../include/public/mlx_bail.h" + +mlx_status +mlx_get_max_mtu( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + OUT mlx_uint32 *max_mtu + ) +{ + mlx_status status = MLX_SUCCESS; + struct mlx_mtu mtu; + mlx_uint32 reg_status; + *max_mtu = 0; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_memory_set(utils, &mtu, 0, sizeof(mtu)); + + mtu.local_port = port_num; + + status = mlx_reg_access(utils, REG_ID_PMTU, REG_ACCESS_READ, &mtu, + sizeof(mtu), ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } + // Return data in bits + *max_mtu = mtu.max_mtu * BYTE_TO_BIT; +reg_err: +bad_param: + return status; +} + +mlx_status +mlx_set_admin_mtu( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + IN mlx_uint32 admin_mtu + ) +{ + mlx_status status = MLX_SUCCESS; + struct mlx_mtu mtu; + mlx_uint32 reg_status; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_memory_set(utils, &mtu, 0, sizeof(mtu)); + + mtu.local_port = port_num; + mtu.admin_mtu = admin_mtu; + + status = mlx_reg_access(utils, REG_ID_PMTU, REG_ACCESS_WRITE, &mtu, + sizeof(mtu), ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } +reg_err: +bad_param: + return status; +} diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h 
b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h new file mode 100644 index 00000000..bd3ded3f --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h @@ -0,0 +1,58 @@ +#ifndef MLX_MTU_H_ +#define MLX_MTU_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_utils.h" +#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h" + +#define BYTE_TO_BIT 0x8 + +struct mlx_mtu { + mlx_uint32 reserved1 :16; + mlx_uint32 local_port :8; + mlx_uint32 reserved2 :8; + /* -------------- */ + mlx_uint32 reserved3 :16; + mlx_uint32 max_mtu :16; + /* -------------- */ + mlx_uint32 reserved4 :16; + mlx_uint32 admin_mtu :16; + /* -------------- */ + mlx_uint32 reserved5 :16; + mlx_uint32 oper_mtu :16; +}; + +mlx_status +mlx_get_max_mtu( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + OUT mlx_uint32 *max_mtu + ); + +mlx_status +mlx_set_admin_mtu( + IN mlx_utils *utils, + IN mlx_uint8 port_num, + IN mlx_uint32 admin_mtu + ); +#endif /* MLX_MTU_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c new file mode 100644 index 00000000..028ba5ce --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.c @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_lib/mlx_nvconfig/mlx_nvconfig.h" +#include "../../include/public/mlx_memory.h" +#include "../../include/public/mlx_bail.h" + +#define TlvMappingEntry( _tlv_type, _real_tlv_type, _class_code, _fw_reset_needed) { \ + .tlv_type = _tlv_type, \ + .real_tlv_type = _real_tlv_type, \ + .class_code = _class_code, \ + .fw_reset_needed = _fw_reset_needed, \ + } + +struct nvconfig_tlv_mapping nvconfig_tlv_mapping[] = { + TlvMappingEntry(0x10, 0x10, NVRAM_TLV_CLASS_HOST, TRUE), + TlvMappingEntry(0x12, 0x12, NVRAM_TLV_CLASS_PHYSICAL_PORT, TRUE), + TlvMappingEntry(0x80, 0x80, NVRAM_TLV_CLASS_GLOBAL, TRUE), + TlvMappingEntry(0x81, 0x81, NVRAM_TLV_CLASS_GLOBAL, TRUE), + TlvMappingEntry(0x100, 0x100, NVRAM_TLV_CLASS_GLOBAL, TRUE), + TlvMappingEntry(0x2001, 0x195, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2010, 0x210, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2011, 0x211, NVRAM_TLV_CLASS_GLOBAL, FALSE), + TlvMappingEntry(0x2021, 0x221, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2023, 0x223, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2006, 0x206, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2100, 0x230, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2101, 0x231, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2102, 0x232, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2103, 0x233, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2104, 0x234, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2105, 0x235, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2106, 0x236, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2107, 0x237, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2108, 0x238, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2109, 0x239, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x210A, 0x23A, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2022, 0x222, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2200, 0x240, NVRAM_TLV_CLASS_HOST, FALSE), + 
TlvMappingEntry(0x2201, 0x241, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2202, 0x242, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2203, 0x243, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2204, 0x244, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2205, 0x245, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2207, 0x247, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2002, 0x202, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x2004, 0x204, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x110, 0x110, NVRAM_TLV_CLASS_HOST, FALSE), + TlvMappingEntry(0x192, 0x192, NVRAM_TLV_CLASS_GLOBAL, FALSE), + TlvMappingEntry(0x101, 0x101, NVRAM_TLV_CLASS_GLOBAL, TRUE), + TlvMappingEntry(0x194, 0x194, NVRAM_TLV_CLASS_GLOBAL, FALSE), + TlvMappingEntry(0, 0, 0, 0), +}; + +static +mlx_status +nvconfig_set_fw_reset_level( + IN mlx_utils *utils, + IN mlx_uint16 tlv_type + ) +{ +#define WARM_REBOOT_RESET ((mlx_uint64)0x1 << 38) + mlx_status status = MLX_SUCCESS; + mlx_uint32 reg_status; + mlx_uint64 mfrl = WARM_REBOOT_RESET ; + mlx_uint8 index = 0; + mlx_boolean reset_needed = FALSE; + + for (index = 0 ; nvconfig_tlv_mapping[index].tlv_type != 0 ; index++) { + if (nvconfig_tlv_mapping[index].tlv_type == tlv_type) { + reset_needed = nvconfig_tlv_mapping[index].fw_reset_needed; + } + } + + if (reset_needed == FALSE) { + goto no_fw_reset_needed; + } + status = mlx_reg_access(utils, REG_ID_MFRL, REG_ACCESS_WRITE, &mfrl, sizeof(mfrl), + ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"nvconfig_set_fw_reset_level failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } +reg_err: +no_fw_reset_needed: + return status; +} + + +static +mlx_status +nvconfig_get_tlv_type_and_class( + IN mlx_uint16 tlv_type, + OUT mlx_uint16 *real_tlv_type, + OUT NVRAM_CLASS_CODE *class_code + ) +{ + mlx_uint8 index = 0; + for ( ; nvconfig_tlv_mapping[index].tlv_type != 0 ; index 
++) { + if ( nvconfig_tlv_mapping[index].tlv_type == tlv_type) { + *real_tlv_type = nvconfig_tlv_mapping[index].real_tlv_type; + *class_code = nvconfig_tlv_mapping[index].class_code; + return MLX_SUCCESS; + } + } + return MLX_NOT_FOUND; +} +static +void +nvconfig_fill_tlv_type( + IN mlx_uint8 port, + IN NVRAM_CLASS_CODE class_code, + IN mlx_uint16 tlv_type, + OUT union nvconfig_tlv_type *nvconfig_tlv_type + ) +{ + switch (class_code) { + case NVRAM_TLV_CLASS_GLOBAL: + nvconfig_tlv_type->global.param_class = NVRAM_TLV_CLASS_GLOBAL; + nvconfig_tlv_type->global.param_idx = tlv_type; + break; + case NVRAM_TLV_CLASS_HOST: + nvconfig_tlv_type->per_host.param_class = NVRAM_TLV_CLASS_HOST; + nvconfig_tlv_type->per_host.param_idx = tlv_type; + break; + case NVRAM_TLV_CLASS_PHYSICAL_PORT: + nvconfig_tlv_type->per_port.param_class = NVRAM_TLV_CLASS_PHYSICAL_PORT; + nvconfig_tlv_type->per_port.param_idx = tlv_type; + nvconfig_tlv_type->per_port.port = port; + break; + } +} +mlx_status +nvconfig_query_capability( + IN mlx_utils *utils, + IN mlx_uint8 port, + IN mlx_uint16 tlv_type, + OUT mlx_boolean *read_supported, + OUT mlx_boolean *write_supported + ) +{ + mlx_status status = MLX_SUCCESS; + struct nvconfig_nvqc nvqc; + mlx_uint32 reg_status; + NVRAM_CLASS_CODE class_code; + mlx_uint16 real_tlv_type; + + if (utils == NULL || read_supported == NULL || write_supported == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = nvconfig_get_tlv_type_and_class(tlv_type, &real_tlv_type, &class_code); + MLX_CHECK_STATUS(utils, status, tlv_not_supported, "tlv not supported"); + + mlx_memory_set(utils, &nvqc, 0, sizeof(nvqc)); + nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvqc.tlv_type); + + status = mlx_reg_access(utils, REG_ID_NVQC, REG_ACCESS_READ, &nvqc, sizeof(nvqc), + ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", 
reg_status); + status = MLX_FAILED; + goto reg_err; + } + *read_supported = nvqc.support_rd; + *write_supported = nvqc.support_wr; +reg_err: +tlv_not_supported: +bad_param: + return status; +} + +mlx_status +nvconfig_nvdata_invalidate( + IN mlx_utils *utils, + IN mlx_uint8 port, + IN mlx_uint16 tlv_type + ) +{ + mlx_status status = MLX_SUCCESS; + struct nvconfig_header nv_header; + mlx_uint32 reg_status; + NVRAM_CLASS_CODE class_code; + mlx_uint16 real_tlv_type; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = nvconfig_get_tlv_type_and_class(tlv_type, &real_tlv_type, &class_code); + MLX_CHECK_STATUS(utils, status, tlv_not_supported, "tlv not supported"); + + mlx_memory_set(utils, &nv_header, 0, sizeof(nv_header)); + nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nv_header.tlv_type); + + status = mlx_reg_access(utils, REG_ID_NVDI, REG_ACCESS_WRITE, &nv_header, sizeof(nv_header), + ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } +reg_err: +tlv_not_supported: +bad_param: + return status; +} + +mlx_status +nvconfig_nvdata_access( + IN mlx_utils *utils, + IN mlx_uint8 port, + IN mlx_uint16 tlv_type, + IN REG_ACCESS_OPT opt, + IN mlx_size data_size, + IN NV_DEFAULT_OPT def_en, + IN NVDA_WRITER_ID writer_id, + IN OUT mlx_uint8 *version, + IN OUT mlx_void *data + ) +{ + mlx_status status = MLX_SUCCESS; + struct nvconfig_nvda nvda; + mlx_uint32 reg_status; + mlx_uint32 real_size_to_read; + mlx_uint32 index; + NVRAM_CLASS_CODE class_code; + mlx_uint16 real_tlv_type; + mlx_size data_size_align_to_dword; + + if (utils == NULL || data == NULL || data_size > NVCONFIG_MAX_TLV_SIZE) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = nvconfig_get_tlv_type_and_class(tlv_type, &real_tlv_type, &class_code); + MLX_CHECK_STATUS(utils, 
status, tlv_not_supported, "tlv not supported"); + + data_size_align_to_dword = ((data_size + 3) / sizeof(mlx_uint32)) * sizeof(mlx_uint32); + mlx_memory_set(utils, &nvda, 0, sizeof(nvda)); + nvda.nv_header.length = data_size_align_to_dword; + nvda.nv_header.access_mode = def_en; + nvda.nv_header.version = *version; + nvda.nv_header.writer_id = writer_id; + + nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvda.nv_header.tlv_type); + + mlx_memory_cpy(utils, nvda.data, data, data_size); + for (index = 0 ; index * 4 < NVCONFIG_MAX_TLV_SIZE ; index++) { + mlx_memory_be32_to_cpu(utils,(((mlx_uint32 *)nvda.data)[index]), ((mlx_uint32 *)nvda.data) + index); + } + status = mlx_reg_access(utils, REG_ID_NVDA, opt, &nvda, + data_size_align_to_dword + sizeof(nvda.nv_header), ®_status); + MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed "); + if (reg_status != 0) { + MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status); + status = MLX_FAILED; + goto reg_err; + } + for (index = 0 ; index * 4 < NVCONFIG_MAX_TLV_SIZE ; index++) { + mlx_memory_cpu_to_be32(utils,(((mlx_uint32 *)nvda.data)[index]), ((mlx_uint32 *)nvda.data) + index); + } + if (opt == REG_ACCESS_READ) { + real_size_to_read = (nvda.nv_header.length > data_size) ? data_size : + nvda.nv_header.length; + mlx_memory_cpy(utils, data, nvda.data, real_size_to_read); + *version = nvda.nv_header.version; + } else { + nvconfig_set_fw_reset_level(utils, tlv_type); + } +reg_err: +tlv_not_supported: +bad_param: + return status; +} + + diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h new file mode 100644 index 00000000..3058c781 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h @@ -0,0 +1,166 @@ +#ifndef MLX_NVCONFIG_H_ +#define MLX_NVCONFIG_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../mlx_reg_access/mlx_reg_access.h" +#include "../../include/public/mlx_utils.h" + +typedef enum { + NVRAM_TLV_CLASS_GLOBAL = 0, + NVRAM_TLV_CLASS_PHYSICAL_PORT = 1, + NVRAM_TLV_CLASS_HOST = 3, +} NVRAM_CLASS_CODE; + +typedef enum { + NVDA_NV_HEADER_WRITER_ID_UEFI_HII = 0x6, + NVDA_NV_HEADER_WRITER_ID_FLEXBOOT = 0x8, +} NVDA_WRITER_ID; + +typedef enum { + TLV_ACCESS_DEFAULT_DIS = 0, + TLV_ACCESS_CURRENT = 1, + TLV_ACCESS_DEFAULT_EN = 2, +} NV_DEFAULT_OPT; + +struct nvconfig_tlv_type_per_port { + mlx_uint32 param_idx :16; + mlx_uint32 port :8; + mlx_uint32 param_class :8; +}; + +struct nvconfig_tlv_type_per_host { + mlx_uint32 param_idx :10; + mlx_uint32 function :8; + mlx_uint32 host :6; + mlx_uint32 param_class :8; +}; + +struct nvconfig_tlv_type_global { + mlx_uint32 param_idx :24; + mlx_uint32 param_class :8; +}; + +struct nvconfig_tlv_mapping{ + mlx_uint16 tlv_type; + mlx_uint16 real_tlv_type; + NVRAM_CLASS_CODE class_code; + mlx_boolean fw_reset_needed; +}; + +union nvconfig_tlv_type { + struct nvconfig_tlv_type_per_port per_port; + struct nvconfig_tlv_type_per_host per_host; + struct nvconfig_tlv_type_global global; +}; + + +struct nvconfig_nvqc { + union nvconfig_tlv_type tlv_type; +/* 
-------------- */ + mlx_uint32 support_rd :1; /*the configuration item is supported and can be read */ + mlx_uint32 support_wr :1; /*the configuration item is supported and can be updated */ + mlx_uint32 reserved1 :2; + mlx_uint32 version :4; /*The maximum version of the configuration item currently supported by the firmware. */ + mlx_uint32 reserved2 :24; +}; + + +struct nvconfig_header { + mlx_uint32 length :9; /*Size of configuration item data in bytes between 0..256 */ + mlx_uint32 reserved0 :3; + mlx_uint32 version :4; /* Configuration item version */ + mlx_uint32 writer_id :5; + mlx_uint32 reserved1 :1; + + mlx_uint32 access_mode :2; /*Defines which value of the Configuration Item will be accessed. + 0x0: NEXT - Next value to be applied + 0x1: CURRENT - Currently set values (only valid for Query operation) Supported only if NVGC.nvda_read_current_settings==1. + 0x2: FACTORY - Default factory values (only valid for Query operation). Supported only if NVGC.nvda_read_factory_settings==1.*/ + + mlx_uint32 reserved2 :2; + mlx_uint32 header_type :2; + mlx_uint32 reserved3 :2; + mlx_uint32 valid :2; +/* -------------- */ + union nvconfig_tlv_type tlv_type;; +/* -------------- */ + mlx_uint32 crc :16; + mlx_uint32 reserved :16; + +}; + +#define NVCONFIG_MAX_TLV_SIZE 256 + +struct nvconfig_nvda { + struct nvconfig_header nv_header; + mlx_uint8 data[NVCONFIG_MAX_TLV_SIZE]; +}; + +struct nv_conf_cap { + /** WOL En/Dis **/ + mlx_uint8 wol_en; + /** VPI En/Dis **/ + mlx_uint8 vpi_en; +}; + +struct mlx_nvconfig_virt_net_addr { + mlx_uint32 reserved1 :29; + mlx_uint32 erase_on_powerup:1; + mlx_uint32 reserverd2 :1; + mlx_uint32 virtual_mac_en :1; + mlx_uint32 virtual_mac_high; + mlx_uint32 virtual_mac_low; +}; + + +mlx_status +nvconfig_query_capability( + IN mlx_utils *utils, + IN mlx_uint8 port, + IN mlx_uint16 tlv_type, + OUT mlx_boolean *read_supported, + OUT mlx_boolean *write_supported + ); + + +mlx_status +nvconfig_nvdata_invalidate( + IN mlx_utils *utils, + IN 
mlx_uint8 port, + IN mlx_uint16 tlv_type + ); + +mlx_status +nvconfig_nvdata_access( + IN mlx_utils *utils, + IN mlx_uint8 port, + IN mlx_uint16 tlv_type, + IN REG_ACCESS_OPT opt, + IN mlx_size data_size, + IN NV_DEFAULT_OPT def_en, + IN NVDA_WRITER_ID writer_id, + IN OUT mlx_uint8 *version, + IN OUT mlx_void *data + ); + +#endif /* MLX_NVCONFIG_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c new file mode 100644 index 00000000..ca5a6591 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.c @@ -0,0 +1,519 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE( GPL2_OR_LATER); + +#include "../../mlx_lib/mlx_nvconfig/mlx_nvconfig.h" +#include "../../include/public/mlx_memory.h" +#include "../../include/public/mlx_bail.h" +#include "../../mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h" + +struct tlv_default { + mlx_uint16 tlv_type; + mlx_size data_size; + mlx_status (*set_defaults)( IN void *data, IN int status, + OUT void *def_struct); +}; + +#define TlvDefaultEntry( _tlv_type, _data_size, _set_defaults) { \ + .tlv_type = _tlv_type, \ + .data_size = sizeof ( _data_size ), \ + .set_defaults = _set_defaults, \ + } + +static +mlx_status +nvconfig_get_boot_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_nic_boot_conf *nic_boot_conf = + (union mlx_nvconfig_nic_boot_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + /* boot_option_rom_en is deprecated - enabled always */ + port_conf_def->boot_option_rom_en = DEFAULT_OPTION_ROM_EN; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. Using hard-coded defaults "); + port_conf_def->boot_vlan = nic_boot_conf->vlan_id; + port_conf_def->boot_protocol = nic_boot_conf->legacy_boot_prot; + port_conf_def->boot_retry_count = nic_boot_conf->boot_retry_count; + port_conf_def->boot_vlan_en = nic_boot_conf->en_vlan; + + return MLX_SUCCESS; + +nvdata_access_err: + port_conf_def->boot_vlan = DEFAULT_BOOT_VLAN; + port_conf_def->boot_protocol = DEFAULT_BOOT_PROTOCOL; + + return status; +} + +static +mlx_status +nvconfig_get_boot_ext_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_nic_boot_ext_conf *nic_boot_ext_conf = + (union mlx_nvconfig_nic_boot_ext_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. 
Using hard-coded defaults "); + port_conf_def->linkup_timeout = nic_boot_ext_conf->linkup_timeout; + port_conf_def->ip_ver = nic_boot_ext_conf->ip_ver; + port_conf_def->undi_network_wait_to = nic_boot_ext_conf->undi_network_wait_to; + return MLX_SUCCESS; + +nvdata_access_err: + port_conf_def->linkup_timeout = DEFAULT_BOOT_LINK_UP_TO; + port_conf_def->ip_ver = DEFAULT_BOOT_IP_VER; + port_conf_def->undi_network_wait_to = DEFAULT_BOOT_UNDI_NETWORK_WAIT_TO; + return status; +} + +static +mlx_status +nvconfig_get_iscsi_init_dhcp_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_iscsi_init_dhcp_conf *iscsi_init_dhcp_conf = + (union mlx_nvconfig_iscsi_init_dhcp_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. Using hard-coded defaults "); + port_conf_def->iscsi_dhcp_params_en = iscsi_init_dhcp_conf->dhcp_iscsi_en; + port_conf_def->iscsi_ipv4_dhcp_en = iscsi_init_dhcp_conf->ipv4_dhcp_en; + + return MLX_SUCCESS; + +nvdata_access_err: + port_conf_def->iscsi_dhcp_params_en = DEFAULT_ISCSI_DHCP_PARAM_EN; + port_conf_def->iscsi_ipv4_dhcp_en = DEFAULT_ISCSI_IPV4_DHCP_EN; + + return status; +} + +static +mlx_status +nvconfig_get_ib_boot_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_nic_ib_boot_conf *ib_boot_conf = + (union mlx_nvconfig_nic_ib_boot_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_default_access failed "); + port_conf_def->boot_pkey = ib_boot_conf->boot_pkey; + +nvdata_access_err: + return status; +} + +static +mlx_status +nvconfig_get_wol_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_wol_conf *wol_conf = (union 
mlx_nvconfig_wol_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_default_access failed "); + port_conf_def->en_wol_magic = wol_conf->en_wol_magic; + +nvdata_access_err: + return status; +} + +static +mlx_status +nvconfig_get_iscsi_gen_default_conf( + IN void *data, + IN int status, + OUT void *def_struct) +{ + union mlx_nvconfig_iscsi_general *iscsi_gen = + (union mlx_nvconfig_iscsi_general *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_default_access failed "); + port_conf_def->iscsi_boot_to_target = iscsi_gen->boot_to_target; + port_conf_def->iscsi_vlan_en = iscsi_gen->vlan_en; + port_conf_def->iscsi_tcp_timestamps_en = iscsi_gen->tcp_timestamps_en; + port_conf_def->iscsi_chap_mutual_auth_en = iscsi_gen->chap_mutual_auth_en; + port_conf_def->iscsi_chap_auth_en = iscsi_gen->chap_auth_en; + port_conf_def->iscsi_lun_busy_retry_count = iscsi_gen->lun_busy_retry_count; + port_conf_def->iscsi_link_up_delay_time = iscsi_gen->link_up_delay_time; + port_conf_def->iscsi_drive_num = iscsi_gen->drive_num; + + return MLX_SUCCESS; + +nvdata_access_err: + port_conf_def->iscsi_drive_num = DEFAULT_ISCSI_DRIVE_NUM; + return status; +} + +static +mlx_status +nvconfig_get_ib_dhcp_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_ib_dhcp_conf *ib_dhcp = + (union mlx_nvconfig_ib_dhcp_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_default_access failed "); + port_conf_def->client_identifier = ib_dhcp->client_identifier; + port_conf_def->mac_admin_bit = ib_dhcp->mac_admin_bit; + 
+nvdata_access_err: + return status; +} + +static +mlx_status +nvconfig_get_ocsd_ocbb_default_conf( IN void *data, + IN int status, OUT void *def_struct) { + union mlx_nvconfig_ocsd_ocbb_conf *ocsd_ocbb = + (union mlx_nvconfig_ocsd_ocbb_conf *) data; + struct mlx_nvconfig_conf_defaults *conf_def = + (struct mlx_nvconfig_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. Using hard-coded defaults "); + conf_def->ocsd_ocbb_en = ocsd_ocbb->ocsd_ocbb_en; + + return MLX_SUCCESS; + +nvdata_access_err: + conf_def->ocsd_ocbb_en = DEFAULT_OCSD_OCBB_EN; + + return status; +} + +static +mlx_status +nvconfig_get_vpi_link_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_vpi_link_conf *vpi_link = + (union mlx_nvconfig_vpi_link_conf *) data; + struct mlx_nvconfig_port_conf_defaults *port_conf_def = + (struct mlx_nvconfig_port_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_default_access failed "); + port_conf_def->network_link_type = vpi_link->network_link_type; + port_conf_def->default_link_type = vpi_link->default_link_type; + +nvdata_access_err: + return status; +} + +static +mlx_status +nvconfig_get_rom_banner_to_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_rom_banner_timeout_conf *rom_banner_timeout_conf = + (union mlx_nvconfig_rom_banner_timeout_conf *) data; + struct mlx_nvconfig_conf_defaults *conf_def = + (struct mlx_nvconfig_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. 
Using hard-coded defaults "); + conf_def->flexboot_menu_to = rom_banner_timeout_conf->rom_banner_to; + + return MLX_SUCCESS; + +nvdata_access_err: + conf_def->flexboot_menu_to = DEFAULT_FLEXBOOT_MENU_TO; + + return status; +} + +static +mlx_status +nvconfig_get_nv_virt_caps_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_virt_caps *nv_virt_caps = + (union mlx_nvconfig_virt_caps *) data; + struct mlx_nvconfig_conf_defaults *conf_def = + (struct mlx_nvconfig_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. Using hard-coded defaults "); + conf_def->max_vfs = nv_virt_caps->max_vfs_per_pf; + + return MLX_SUCCESS; + +nvdata_access_err: + conf_def->max_vfs = DEFAULT_MAX_VFS; + + return status; +} + +static +mlx_status +nvconfig_get_nv_virt_default_conf( + IN void *data, + IN int status, + OUT void *def_struct + ) +{ + union mlx_nvconfig_virt_conf *nv_virt_conf = + (union mlx_nvconfig_virt_conf *) data; + struct mlx_nvconfig_conf_defaults *conf_def = + (struct mlx_nvconfig_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_default_access failed "); + conf_def->total_vfs = nv_virt_conf->num_of_vfs; + conf_def->sriov_en = nv_virt_conf->virt_mode; + +nvdata_access_err: + return status; +} + +static +mlx_status +nvconfig_get_rom_cap_default_conf( IN void *data, + IN int status, OUT void *def_struct) { + union mlx_nvconfig_rom_cap_conf *rom_cap_conf = + (union mlx_nvconfig_rom_cap_conf *) data; + struct mlx_nvconfig_conf_defaults *conf_def = + (struct mlx_nvconfig_conf_defaults *) def_struct; + + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "TLV not found. 
Using hard-coded defaults "); + conf_def->boot_ip_ver_en = rom_cap_conf->boot_ip_ver_en; + + return MLX_SUCCESS; + +nvdata_access_err: + rom_cap_conf->boot_ip_ver_en = DEFAULT_BOOT_IP_VERSION_EN; + + return status; +} + +static struct tlv_default tlv_port_defaults[] = { + TlvDefaultEntry(BOOT_SETTINGS_TYPE, union mlx_nvconfig_nic_boot_conf, &nvconfig_get_boot_default_conf), + TlvDefaultEntry(BOOT_SETTINGS_EXT_TYPE, union mlx_nvconfig_nic_boot_ext_conf, &nvconfig_get_boot_ext_default_conf), + TlvDefaultEntry(ISCSI_INITIATOR_DHCP_CONF_TYPE, union mlx_nvconfig_iscsi_init_dhcp_conf, &nvconfig_get_iscsi_init_dhcp_default_conf), + TlvDefaultEntry(IB_BOOT_SETTING_TYPE, union mlx_nvconfig_nic_ib_boot_conf, &nvconfig_get_ib_boot_default_conf), + TlvDefaultEntry(WAKE_ON_LAN_TYPE, union mlx_nvconfig_wol_conf, &nvconfig_get_wol_default_conf), + TlvDefaultEntry(ISCSI_GENERAL_SETTINGS_TYPE, union mlx_nvconfig_iscsi_general, &nvconfig_get_iscsi_gen_default_conf), + TlvDefaultEntry(IB_DHCP_SETTINGS_TYPE, union mlx_nvconfig_ib_dhcp_conf, &nvconfig_get_ib_dhcp_default_conf), + TlvDefaultEntry(VPI_LINK_TYPE, union mlx_nvconfig_vpi_link_conf, &nvconfig_get_vpi_link_default_conf), +}; + +static struct tlv_default tlv_general_defaults[] = { + TlvDefaultEntry(BANNER_TO_TYPE, union mlx_nvconfig_rom_banner_timeout_conf, &nvconfig_get_rom_banner_to_default_conf), + TlvDefaultEntry(GLOPAL_PCI_CAPS_TYPE, union mlx_nvconfig_virt_caps, &nvconfig_get_nv_virt_caps_default_conf), + TlvDefaultEntry(GLOPAL_PCI_SETTINGS_TYPE, union mlx_nvconfig_virt_conf, &nvconfig_get_nv_virt_default_conf), + TlvDefaultEntry(OCSD_OCBB_TYPE, union mlx_nvconfig_ocsd_ocbb_conf, &nvconfig_get_ocsd_ocbb_default_conf), + TlvDefaultEntry(NV_ROM_CAP_TYPE, union mlx_nvconfig_rom_cap_conf, &nvconfig_get_rom_cap_default_conf), +}; + +static +mlx_status +nvconfig_nvdata_default_access( + IN mlx_utils *utils, + IN mlx_uint8 port, + IN mlx_uint16 tlv_type, + IN mlx_size data_size, + OUT mlx_void *data + ) +{ + mlx_status status = 
MLX_SUCCESS; + mlx_uint32 index; + mlx_uint8 version = 0; + + status = nvconfig_nvdata_access(utils, port, tlv_type, REG_ACCESS_READ, + data_size, TLV_ACCESS_DEFAULT_EN, 0, + &version, data); + MLX_CHECK_STATUS(NULL, status, nvdata_access_err, + "nvconfig_nvdata_access failed "); + for (index = 0; index * 4 < data_size; index++) { + mlx_memory_be32_to_cpu(utils, (((mlx_uint32 *) data)[index]), + ((mlx_uint32 *) data) + index); + } + +nvdata_access_err: + return status; +} + +static +mlx_status +nvconfig_nvdata_read_default_value( + IN mlx_utils *utils, + IN mlx_uint8 modifier, + IN struct tlv_default *def, + OUT void *def_struct + ) +{ + mlx_status status = MLX_SUCCESS; + void *data = NULL; + + status = mlx_memory_zalloc(utils, def->data_size,&data); + MLX_CHECK_STATUS(utils, status, memory_err, + "mlx_memory_zalloc failed "); + status = nvconfig_nvdata_default_access(utils, modifier, def->tlv_type, + def->data_size, data); + def->set_defaults(data, status, def_struct); + mlx_memory_free(utils, &data); + +memory_err: + return status; +} + +static +void +nvconfig_nvdata_read_default_values( + IN mlx_utils *utils, + IN mlx_uint8 modifier, + IN struct tlv_default defaults_table[], + IN mlx_uint8 defaults_table_size, + OUT void *def_strct + ) +{ + struct tlv_default *defs; + unsigned int i; + + for (i = 0; i < defaults_table_size; i++) { + defs = &defaults_table[i]; + nvconfig_nvdata_read_default_value(utils, modifier, defs, def_strct); + } +} + +mlx_status +nvconfig_read_port_default_values( + IN mlx_utils *utils, + IN mlx_uint8 port, + OUT struct mlx_nvconfig_port_conf_defaults *port_conf_def + ) +{ + mlx_status status = MLX_SUCCESS; + + if (utils == NULL || port_conf_def == NULL) { + status = MLX_INVALID_PARAMETER; + MLX_DEBUG_ERROR(utils,"bad params."); + goto bad_param; + } + mlx_memory_set(utils, port_conf_def, 0, sizeof(*port_conf_def)); + nvconfig_nvdata_read_default_values(utils, port, tlv_port_defaults, + 
(sizeof(tlv_port_defaults)/sizeof(tlv_port_defaults[0])), + port_conf_def); + +bad_param: + return status; +} + +mlx_status +nvconfig_read_general_default_values( + IN mlx_utils *utils, + OUT struct mlx_nvconfig_conf_defaults *conf_def + ) +{ + mlx_status status = MLX_SUCCESS; + + if (utils == NULL || conf_def == NULL) { + status = MLX_INVALID_PARAMETER; + MLX_DEBUG_ERROR(utils,"bad params."); + goto bad_param; + } + mlx_memory_set(utils, conf_def, 0, sizeof(*conf_def)); + nvconfig_nvdata_read_default_values(utils, 0, tlv_general_defaults, + (sizeof(tlv_general_defaults)/sizeof(tlv_general_defaults[0])), + conf_def); + +bad_param: + return status; +} + +mlx_status +nvconfig_read_rom_ini_values( + IN mlx_utils *utils, + OUT struct mlx_nvcofnig_romini *rom_ini + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint8 version = 0; + mlx_uint32 index; + + if (utils == NULL || rom_ini == NULL) { + status = MLX_INVALID_PARAMETER; + MLX_DEBUG_ERROR(utils,"bad params."); + goto bad_param; + } + mlx_memory_set(utils, rom_ini, 0, sizeof(*rom_ini)); + + status = nvconfig_nvdata_access(utils, 0, GLOBAL_ROM_INI_TYPE, REG_ACCESS_READ, + sizeof(*rom_ini), TLV_ACCESS_DEFAULT_DIS, 0, + &version, rom_ini); + MLX_CHECK_STATUS(NULL, status, bad_param, + "nvconfig_nvdata_access failed "); + for (index = 0; index * 4 < sizeof(*rom_ini); index++) { + mlx_memory_be32_to_cpu(utils, (((mlx_uint32 *) rom_ini)[index]), + ((mlx_uint32 *) rom_ini) + index); + } + +bad_param: + return status; +} diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h new file mode 100644 index 00000000..48699c35 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h @@ -0,0 +1,100 @@ +#ifndef MLX_NVCONFIG_DEFAULTS_H_ +#define MLX_NVCONFIG_DEFAULTS_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); +#include "mlx_nvconfig_prm.h" +/* + * Default values + */ +#define DEFAULT_FLEXBOOT_MENU_TO 4 +#define DEFAULT_MAX_VFS 8 +#define DEFAULT_BOOT_PROTOCOL 1 +#define DEFAULT_OPTION_ROM_EN 1 +#define DEFAULT_BOOT_VLAN 1 +#define DEFAULT_ISCSI_DHCP_PARAM_EN 1 +#define DEFAULT_ISCSI_IPV4_DHCP_EN 1 +#define DEFAULT_ISCSI_DRIVE_NUM 0x80 +#define DEFAULT_OCSD_OCBB_EN 1 +#define DEFAULT_BOOT_IP_VER 0 +#define DEFAULT_BOOT_LINK_UP_TO 0 +#define DEFAULT_BOOT_UNDI_NETWORK_WAIT_TO 30 +#define DEFAULT_BOOT_IP_VERSION_EN 1 + +struct mlx_nvconfig_port_conf_defaults { + mlx_uint8 pptx; + mlx_uint8 pprx; + mlx_boolean boot_option_rom_en; + mlx_boolean boot_vlan_en; + mlx_uint8 boot_retry_count; + mlx_uint8 boot_protocol; + mlx_uint8 boot_vlan; + mlx_uint8 boot_pkey; + mlx_boolean en_wol_magic; + mlx_uint8 network_link_type; + mlx_uint8 iscsi_boot_to_target; + mlx_boolean iscsi_vlan_en; + mlx_boolean iscsi_tcp_timestamps_en; + mlx_boolean iscsi_chap_mutual_auth_en; + mlx_boolean iscsi_chap_auth_en; + mlx_boolean iscsi_dhcp_params_en; + mlx_boolean iscsi_ipv4_dhcp_en; + mlx_uint8 iscsi_lun_busy_retry_count; + mlx_uint8 iscsi_link_up_delay_time; + mlx_uint8 iscsi_drive_num; + mlx_uint8 client_identifier; + mlx_uint8 mac_admin_bit; + 
mlx_uint8 default_link_type; + mlx_uint8 linkup_timeout; + mlx_uint8 ip_ver; + mlx_uint8 undi_network_wait_to; +}; + +struct mlx_nvconfig_conf_defaults { + mlx_uint8 max_vfs; + mlx_uint8 total_vfs; + mlx_uint8 sriov_en; + mlx_uint8 maximum_uar_bar_size; + mlx_uint8 uar_bar_size; + mlx_uint8 flexboot_menu_to; + mlx_boolean ocsd_ocbb_en; + mlx_boolean boot_ip_ver_en; +}; + +mlx_status +nvconfig_read_port_default_values( + IN mlx_utils *utils, + IN mlx_uint8 port, + OUT struct mlx_nvconfig_port_conf_defaults *port_conf_def + ); + +mlx_status +nvconfig_read_general_default_values( + IN mlx_utils *utils, + OUT struct mlx_nvconfig_conf_defaults *conf_def + ); + +mlx_status +nvconfig_read_rom_ini_values( + IN mlx_utils *utils, + OUT struct mlx_nvcofnig_romini *rom_ini + ); +#endif /* MLX_NVCONFIG_DEFAULTS_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h new file mode 100644 index 00000000..7fd52acc --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_prm.h @@ -0,0 +1,331 @@ +#ifndef MLX_NVCONFIG_PRM_H_ +#define MLX_NVCONFIG_PRM_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_types.h" + +enum { + WAKE_ON_LAN_TYPE = 0x10, + VIRTUALIZATION_TYPE = 0x11, + VPI_LINK_TYPE = 0x12, + BOOT_SETTINGS_EXT_TYPE = 0x2001, + BANNER_TO_TYPE = 0x2010, + OCSD_OCBB_TYPE = 0x2011, + FLOW_CONTROL_TYPE = 0x2020, + BOOT_SETTINGS_TYPE = 0x2021, + NV_ROM_FLEXBOOT_DEBUG = 0x2004, + + ISCSI_GENERAL_SETTINGS_TYPE = 0x2100, + IB_BOOT_SETTING_TYPE = 0x2022, + IB_DHCP_SETTINGS_TYPE = 0x2023, + GLOPAL_PCI_SETTINGS_TYPE = 0x80, + GLOPAL_PCI_CAPS_TYPE = 0x81, + GLOBAL_ROM_INI_TYPE = 0x100, + NV_VIRT_NET_ADDR = 0x110, + + // Types for iSCSI strings + DHCP_VEND_ID = 0x2101, + ISCSI_INITIATOR_IPV4_ADDR = 0x2102, + ISCSI_INITIATOR_SUBNET = 0x2103, + ISCSI_INITIATOR_IPV4_GATEWAY = 0x2104, + ISCSI_INITIATOR_IPV4_PRIM_DNS = 0x2105, + ISCSI_INITIATOR_IPV4_SECDNS = 0x2106, + ISCSI_INITIATOR_NAME = 0x2107, + ISCSI_INITIATOR_CHAP_ID = 0x2108, + ISCSI_INITIATOR_CHAP_PWD = 0x2109, + ISCSI_INITIATOR_DHCP_CONF_TYPE = 0x210a, + + CONNECT_FIRST_TGT = 0x2200, + FIRST_TGT_IP_ADDRESS = 0x2201, + FIRST_TGT_TCP_PORT = 0x2202, + FIRST_TGT_BOOT_LUN = 0x2203, + FIRST_TGT_ISCSI_NAME = 0x2204, + FIRST_TGT_CHAP_ID = 0x2205, + FIRST_TGT_CHAP_PWD = 0x2207, + NV_ROM_DEBUG_LEVEL = 0x2002, + NV_ROM_CAP_TYPE = 0x101, +}; + +union mlx_nvconfig_nic_boot_conf { + struct { + mlx_uint32 vlan_id : 12; + mlx_uint32 link_speed : 4; + mlx_uint32 legacy_boot_prot : 8; + mlx_uint32 boot_retry_count : 3; + mlx_uint32 boot_strap_type : 3; + mlx_uint32 en_vlan : 1; + mlx_uint32 en_option_rom : 1; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_nic_boot_ext_conf { + struct { + mlx_uint32 linkup_timeout : 8; + mlx_uint32 ip_ver : 2; + mlx_uint32 reserved0 : 6; + mlx_uint32 undi_network_wait_to : 8; + mlx_uint32 reserved1 : 8; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_rom_banner_timeout_conf { + struct { + mlx_uint32 rom_banner_to : 4; + mlx_uint32 reserved : 28; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_virt_conf { + 
struct { + mlx_uint32 reserved0 :24; + mlx_uint32 pf_bar_size_valid :1; + mlx_uint32 vf_bar_size_valid :1; + mlx_uint32 num_pf_msix_valid :1; + mlx_uint32 num_vf_msix_valid :1; + mlx_uint32 num_pfs_valid :1; + mlx_uint32 fpp_valid :1; + mlx_uint32 full_vf_qos_valid :1; + mlx_uint32 sriov_valid :1; + /*-------------------*/ + mlx_uint32 num_of_vfs :16; + mlx_uint32 num_of_pfs :4; + mlx_uint32 reserved1 :9; + mlx_uint32 fpp_en :1; + mlx_uint32 full_vf_qos :1; + mlx_uint32 virt_mode :1; //sriov_en + /*-------------------*/ + mlx_uint32 log_pf_uar_bar_size :6; + mlx_uint32 log_vf_uar_bar_size :6; + mlx_uint32 num_pf_msix :10; + mlx_uint32 num_vf_msix :10; + }; + mlx_uint32 dword[3]; +}; + +union mlx_nvconfig_virt_caps { + struct { + mlx_uint32 reserved0 :24; + mlx_uint32 max_vfs_per_pf_valid :1; + mlx_uint32 max_total_msix_valid :1; + mlx_uint32 max_total_bar_valid :1; + mlx_uint32 num_pfs_supported :1; + mlx_uint32 num_vf_msix_supported :1; + mlx_uint32 num_pf_msix_supported :1; + mlx_uint32 vf_bar_size_supported :1; + mlx_uint32 pf_bar_size_supported :1; + /*-------------------*/ + mlx_uint32 max_vfs_per_pf :16; + mlx_uint32 max_num_pfs :4; + mlx_uint32 reserved1 :9; + mlx_uint32 fpp_support :1; + mlx_uint32 vf_qos_control_support :1; + mlx_uint32 sriov_support :1; + /*-------------------*/ + mlx_uint32 max_log_pf_uar_bar_size :6; + mlx_uint32 max_log_vf_uar_bar_size :6; + mlx_uint32 max_num_pf_msix :10; + mlx_uint32 max_num_vf_msix :10; + /*-------------------*/ + mlx_uint32 max_total_msix; + /*-------------------*/ + mlx_uint32 max_total_bar; + }; + mlx_uint32 dword[5]; +}; + +union mlx_nvconfig_iscsi_init_dhcp_conf { + struct { + mlx_uint32 reserved0 :30; + mlx_uint32 dhcp_iscsi_en :1; + mlx_uint32 ipv4_dhcp_en :1; + + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_nic_ib_boot_conf { + struct { + mlx_uint32 boot_pkey : 16; + mlx_uint32 reserved0 : 16; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_wol_conf { + struct { + mlx_uint32 reserved0 :9; + mlx_uint32 
en_wol_passwd :1; + mlx_uint32 en_wol_magic :1; + mlx_uint32 reserved1 :21; + mlx_uint32 reserved2 :32; + }; + mlx_uint32 dword[2]; +}; + +union mlx_nvconfig_iscsi_general { + struct { + mlx_uint32 reserved0 :22; + mlx_uint32 boot_to_target :2; + mlx_uint32 reserved1 :2; + mlx_uint32 vlan_en :1; + mlx_uint32 tcp_timestamps_en :1; + mlx_uint32 chap_mutual_auth_en :1; + mlx_uint32 chap_auth_en :1; + mlx_uint32 reserved2 :2; + /*-------------------*/ + mlx_uint32 vlan :12; + mlx_uint32 reserved3 :20; + /*-------------------*/ + mlx_uint32 lun_busy_retry_count:8; + mlx_uint32 link_up_delay_time :8; + mlx_uint32 drive_num :8; + mlx_uint32 reserved4 :8; + }; + mlx_uint32 dword[3]; +}; + +union mlx_nvconfig_ib_dhcp_conf { + struct { + mlx_uint32 reserved :24; + mlx_uint32 client_identifier :4; + mlx_uint32 mac_admin_bit :4; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_ocsd_ocbb_conf { + struct { + mlx_uint32 reserved :31; + mlx_uint32 ocsd_ocbb_en :1; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_vpi_link_conf { + struct { + mlx_uint32 network_link_type :2; + mlx_uint32 default_link_type :2; + mlx_uint32 reserved :28; + }; + mlx_uint32 dword; +}; + +struct mlx_nvcofnig_romini { + mlx_uint32 reserved0 :1; + mlx_uint32 shared_memory_en :1; + mlx_uint32 hii_vpi_en :1; + mlx_uint32 tech_enum :1; + mlx_uint32 reserved1 :4; + mlx_uint32 static_component_name_string :1; + mlx_uint32 hii_iscsi_configuration :1; + mlx_uint32 hii_ibm_aim :1; + mlx_uint32 hii_platform_setup :1; + mlx_uint32 hii_bdf_decimal :1; + mlx_uint32 hii_read_only :1; + mlx_uint32 reserved2 :10; + mlx_uint32 mac_enum :1; + mlx_uint32 port_enum :1; + mlx_uint32 flash_en :1; + mlx_uint32 fmp_en :1; + mlx_uint32 bofm_en :1; + mlx_uint32 platform_to_driver_en:1; + mlx_uint32 hii_en :1; + mlx_uint32 undi_en :1; + /* -------------- */ + mlx_uint64 dhcp_user_class; + /* -------------- */ + mlx_uint32 reserved3 :10; + mlx_uint32 ucm_single_port :1; + mlx_uint32 tivoli_wa_en :1; + mlx_uint32 
dhcp_pxe_discovery_control_dis :1; + mlx_uint32 hii_flexaddr_override:1; + mlx_uint32 hii_flexaddr_setting :1; + mlx_uint32 guided_ops :1; + mlx_uint32 hii_type :4; + mlx_uint32 hii_mriname2 :1; + mlx_uint32 hii_aim_ucm_ver2 :1; + mlx_uint32 uri_boot_retry_delay :4; + mlx_uint32 uri_boot_retry :4; + mlx_uint32 option_rom_debug :1; + mlx_uint32 promiscuous_vlan :1; + +} __attribute__ ((packed)); + +union mlx_nvconfig_debug_conf { + struct { + mlx_uint32 dbg_log_en :1; + mlx_uint32 reserved1 :31; + /***************************************************/ + mlx_uint32 stp_dbg_lvl :2; + mlx_uint32 romprefix_dbg_lvl :2; + mlx_uint32 dhcp_dbg_lvl :2; + mlx_uint32 dhcpv6_dbg_lvl :2; + mlx_uint32 arp_dbg_lvl :2; + mlx_uint32 neighbor_dbg_lvl :2; + mlx_uint32 ndp_dbg_lvl :2; + mlx_uint32 uri_dbg_lvl :2; + mlx_uint32 driver_dbg_lvl :2; + mlx_uint32 nodnic_dbg_lvl :2; + mlx_uint32 nodnic_cmd_dbg_lvl :2; + mlx_uint32 nodnic_device_dbg_lvl :2; + mlx_uint32 nodnic_port_dbg_lvl :2; + mlx_uint32 netdevice_dbg_lvl :2; + mlx_uint32 tftp_dbg_lvl :2; + mlx_uint32 udp_dbg_lvl :2; + /***************************************************/ + mlx_uint32 tcp_dbg_lvl :2; + mlx_uint32 tcpip_dbg_lvl :2; + mlx_uint32 ipv4_dbg_lvl :2; + mlx_uint32 ipv6_dbg_lvl :2; + mlx_uint32 drv_set_dbg_lvl :2; + mlx_uint32 stat_update_dbg_lvl :2; + mlx_uint32 pxe_undi_dbg_lvl :2; + mlx_uint32 reserved2 :18; + }; + mlx_uint32 dword[3]; +}; + +union mlx_nvconfig_flexboot_debug { + struct { + mlx_uint32 reserved0 :29; + mlx_uint32 panic_behavior :2; + mlx_uint32 boot_to_shell :1; + }; + mlx_uint32 dword; +}; + +union mlx_nvconfig_rom_cap_conf { + struct { + mlx_uint32 reserved0 :28; + mlx_uint32 uefi_logs_en :1; + mlx_uint32 flexboot_debug_en :1; + mlx_uint32 boot_debug_log_en :1; + mlx_uint32 boot_ip_ver_en :1; + }; + mlx_uint32 dword; +}; + +#endif /* MLX_NVCONFIG_PRM_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c 
b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c new file mode 100644 index 00000000..143ab1b0 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h" +#include "../../include/public/mlx_icmd.h" +#include "../../include/public/mlx_bail.h" +#include "../../include/public/mlx_memory.h" + +static +mlx_status +init_operation_tlv( + IN struct mail_box_tlv *mail_box_tlv, + IN mlx_uint16 reg_id, + IN REG_ACCESS_OPT reg_opt + ) +{ +#define TLV_OPERATION 1 + mail_box_tlv->operation_tlv.Type = TLV_OPERATION; +#define MAD_CLASS_REG_ACCESS 1 + mail_box_tlv->operation_tlv.cls = MAD_CLASS_REG_ACCESS; +#define TLV_OPERATION_SIZE 4 + mail_box_tlv->operation_tlv.len = TLV_OPERATION_SIZE; + mail_box_tlv->operation_tlv.method = reg_opt; + mail_box_tlv->operation_tlv.register_id = reg_id; + return MLX_SUCCESS; +} + +mlx_status +mlx_reg_access( + IN mlx_utils *utils, + IN mlx_uint16 reg_id, + IN REG_ACCESS_OPT reg_opt, + IN OUT mlx_void *reg_data, + IN mlx_size reg_size, + OUT mlx_uint32 *reg_status + ) +{ + mlx_status status = MLX_SUCCESS; + struct mail_box_tlv mail_box_tlv; + + if (utils == NULL || reg_data == NULL || reg_status == NULL + || reg_size > REG_ACCESS_MAX_REG_SIZE) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_memory_set(utils, &mail_box_tlv, 0, sizeof(mail_box_tlv)); + + init_operation_tlv(&mail_box_tlv, reg_id, reg_opt); + +#define REG_ACCESS_TLV_REG 3 +#define REG_TLV_HEADER_LEN 4 +#define OP_TLV_SIZE 16 + mail_box_tlv.reg_tlv.Type = REG_ACCESS_TLV_REG; + mail_box_tlv.reg_tlv.len = ((reg_size + REG_TLV_HEADER_LEN + 3) >> 2); // length is in dwords round up + mlx_memory_cpy(utils, &mail_box_tlv.reg_tlv.data, reg_data, reg_size); + + reg_size += OP_TLV_SIZE + REG_TLV_HEADER_LEN; + + status = mlx_icmd_send_command(utils, FLASH_REG_ACCESS, &mail_box_tlv, reg_size, reg_size); + MLX_CHECK_STATUS(utils, status, icmd_err, "failed to send icmd"); + + mlx_memory_cpy(utils, reg_data, &mail_box_tlv.reg_tlv.data, + reg_size - (OP_TLV_SIZE + REG_TLV_HEADER_LEN)); + + *reg_status = 
mail_box_tlv.operation_tlv.status; +icmd_err: +bad_param: + return status; +} + + diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h new file mode 100644 index 00000000..ca7ca2f8 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_reg_access/mlx_reg_access.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#ifndef MLX_REG_ACCESS_H_ +#define MLX_REG_ACCESS_H_ + +#include "../../include/public/mlx_icmd.h" + +#define REG_ACCESS_MAX_REG_SIZE 236 + +typedef enum { + REG_ACCESS_READ = 1, + REG_ACCESS_WRITE = 2, +} REG_ACCESS_OPT; + +#define REG_ID_NVDA 0x9024 +#define REG_ID_NVDI 0x9025 +#define REG_ID_NVIA 0x9029 +#define REG_ID_MLCR 0x902b +#define REG_ID_NVQC 0x9030 +#define REG_ID_MFRL 0x9028 +#define REG_ID_PTYS 0x5004 +#define REG_ID_PMTU 0x5003 + +struct operation_tlv { + mlx_uint32 reserved0 :8; /* bit_offset:0 */ /* element_size: 8 */ + mlx_uint32 status :7; /* bit_offset:8 */ /* element_size: 7 */ + mlx_uint32 dr :1; /* bit_offset:15 */ /* element_size: 1 */ + mlx_uint32 len :11; /* bit_offset:16 */ /* element_size: 11 */ + mlx_uint32 Type :5; /* bit_offset:27 */ /* element_size: 5 */ + mlx_uint32 cls :8; /* bit_offset:32 */ /* element_size: 8 */ + mlx_uint32 method :7; /* bit_offset:40 */ /* element_size: 7 */ + mlx_uint32 r :1; /* bit_offset:47 */ /* element_size: 1 */ + mlx_uint32 register_id :16; /* bit_offset:48 */ /* element_size: 16 */ + mlx_uint64 tid ; /* bit_offset:64 */ /* element_size: 64 */ +}; + +struct reg_tlv { + mlx_uint32 reserved0 :16; /* bit_offset:0 */ /* element_size: 16 */ + mlx_uint32 len :11; /* bit_offset:16 */ /* element_size: 11 */ + mlx_uint32 Type :5; /* bit_offset:27 */ /* element_size: 5 */ + mlx_uint8 data[REG_ACCESS_MAX_REG_SIZE]; +}; + +struct mail_box_tlv { + struct operation_tlv operation_tlv; + struct reg_tlv reg_tlv; +}; +mlx_status +mlx_reg_access( + IN mlx_utils *utils, + IN mlx_uint16 reg_id, + IN REG_ACCESS_OPT reg_opt, + IN OUT mlx_void *reg_data, + IN mlx_size reg_size, + OUT mlx_uint32 *reg_status + ); + +#endif /* MLX_REG_ACCESS_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c new file mode 100644 index 00000000..65d04c96 --- /dev/null +++ 
b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.c @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../mlx_lib/mlx_vmac/mlx_vmac.h" +#include "../../include/public/mlx_icmd.h" +#include "../../include/public/mlx_bail.h" + +mlx_status +mlx_vmac_query_virt_mac ( + IN mlx_utils *utils, + OUT struct mlx_vmac_query_virt_mac *virt_mac + ) +{ + mlx_status status = MLX_SUCCESS; + if (utils == NULL || virt_mac == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = mlx_icmd_send_command( + utils, + QUERY_VIRTUAL_MAC, + virt_mac, + 0, + sizeof(*virt_mac) + ); + MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed"); +icmd_err: +bad_param: + return status; +} + +mlx_status +mlx_vmac_set_virt_mac ( + IN mlx_utils *utils, + OUT struct mlx_vmac_set_virt_mac *virt_mac + ) +{ + mlx_status status = MLX_SUCCESS; + if (utils == NULL || virt_mac == NULL) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + status = mlx_icmd_send_command( + utils, + SET_VIRTUAL_MAC, + virt_mac, + sizeof(*virt_mac), + 0 + ); + MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed"); +icmd_err: +bad_param: + return status; +} diff --git 
a/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h new file mode 100644 index 00000000..2214d918 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h @@ -0,0 +1,60 @@ +#ifndef MLX_VMAC_H_ +#define MLX_VMAC_H_ + +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_utils.h" + +struct mlx_vmac_query_virt_mac { + mlx_uint32 reserved0 :30; + mlx_uint32 mac_aux_v :1; + mlx_uint32 virtual_mac_en :1; + mlx_uint32 parmanent_mac_high :16; + mlx_uint32 reserved1 :16; + mlx_uint32 parmanent_mac_low :32; + mlx_uint32 virtual_mac_high :16; + mlx_uint32 Reserved2 :16; + mlx_uint32 virtual_mac_low :32; +}; + +struct mlx_vmac_set_virt_mac { + mlx_uint32 Reserved0 :30; + mlx_uint32 mac_aux_v :1; + mlx_uint32 virtual_mac_en :1; + mlx_uint32 reserved1 :32; + mlx_uint32 reserved2 :32; + mlx_uint32 virtual_mac_high; + mlx_uint32 virtual_mac_low; +}; + +mlx_status +mlx_vmac_query_virt_mac ( + IN mlx_utils *utils, + OUT struct mlx_vmac_query_virt_mac *virt_mac + ); + +mlx_status +mlx_vmac_set_virt_mac ( + IN mlx_utils *utils, + OUT struct mlx_vmac_set_virt_mac *virt_mac + ); +#endif /* MLX_VMAC_H_ */ diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c new file mode 100644 index 00000000..e4ab5f0a --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_icmd.c @@ -0,0 +1,371 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_bail.h" +#include "../../include/public/mlx_icmd.h" +#include "../../include/public/mlx_pci_gw.h" +#include "../../include/public/mlx_utils.h" + +static +mlx_status +mlx_icmd_get_semaphore( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 retries = 0; + mlx_uint32 semaphore_id; + mlx_uint32 buffer; + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + status = mlx_utils_rand(utils, &semaphore_id); + MLX_CHECK_STATUS(utils, status, rand_err, "failed to get random number"); +#define ICMD_GET_SEMAPHORE_TRIES 2560 + for (retries = 0 ; retries < ICMD_GET_SEMAPHORE_TRIES ; retries++) { + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_SEMAPHORE, + MLX_ICMD_SEMAPHORE_ADDR, &buffer); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd semaphore"); + if (buffer != 0) { + mlx_utils_delay_in_ms(10); + continue; + } + mlx_pci_gw_write( utils, PCI_GW_SPACE_SEMAPHORE, + MLX_ICMD_SEMAPHORE_ADDR, semaphore_id); + MLX_CHECK_STATUS(utils, status, set_err, "failed to set icmd semaphore"); + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_SEMAPHORE, + MLX_ICMD_SEMAPHORE_ADDR, &buffer); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd semaphore"); + if (semaphore_id == buffer) { + status = MLX_SUCCESS; + utils->icmd.took_semaphore = TRUE; + break; + } + mlx_utils_delay_in_ms(10); + } + if (semaphore_id != buffer) { + status = MLX_FAILED; + } +read_err: +set_err: +rand_err: +invalid_param: + return status; +} +static +mlx_status +mlx_icmd_clear_semaphore( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + if (utils->icmd.took_semaphore == FALSE) { + goto semaphore_not_taken; + } + status = mlx_pci_gw_write( utils, PCI_GW_SPACE_SEMAPHORE, + MLX_ICMD_SEMAPHORE_ADDR, 0); + MLX_CHECK_STATUS(utils, status, read_err, "failed to 
clear icmd semaphore"); + + utils->icmd.took_semaphore = FALSE; +read_err: +semaphore_not_taken: +invalid_param: + return status; +} + +static +mlx_status +mlx_icmd_init( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + if (utils->icmd.icmd_opened == TRUE) { + goto already_opened; + } + + utils->icmd.took_semaphore = FALSE; + + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_MB_SIZE_ADDR, &utils->icmd.max_cmd_size); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd mail box size"); + + utils->icmd.icmd_opened = TRUE; +read_err: +already_opened: +invalid_param: + return status; +} + +static +mlx_status +mlx_icmd_set_opcode( + IN mlx_utils *utils, + IN mlx_uint16 opcode + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_CTRL_ADDR, &buffer); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl"); + +#define MLX_ICMD_OPCODE_ALIGN 16 +#define MLX_ICMD_OPCODE_MASK 0xffff + + buffer = buffer & ~(MLX_ICMD_OPCODE_MASK << MLX_ICMD_OPCODE_ALIGN); + buffer = buffer | (opcode << MLX_ICMD_OPCODE_ALIGN); + + status = mlx_pci_gw_write( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_CTRL_ADDR, buffer); + MLX_CHECK_STATUS(utils, status, write_err, "failed to write icmd ctrl"); +write_err: +read_err: +invalid_param: + return status; +} + +static +mlx_status +mlx_icmd_go( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer; + mlx_uint32 busy; + mlx_uint32 wait_iteration = 0; + + if (utils == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_CTRL_ADDR, &buffer); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl"); + +#define 
MLX_ICMD_BUSY_ALIGN 0 +#define MLX_ICMD_BUSY_MASK 0x1 + + busy = (buffer >> MLX_ICMD_BUSY_ALIGN) & MLX_ICMD_BUSY_MASK; + if (busy != 0) { + status = MLX_FAILED; + goto already_busy; + } + + buffer = buffer | (1 << MLX_ICMD_BUSY_ALIGN); + + status = mlx_pci_gw_write( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_CTRL_ADDR, buffer); + MLX_CHECK_STATUS(utils, status, write_err, "failed to write icmd ctrl"); + +#define MLX_ICMD_BUSY_MAX_ITERATIONS 1024 + do { + if (++wait_iteration > MLX_ICMD_BUSY_MAX_ITERATIONS) { + status = MLX_FAILED; + MLX_DEBUG_ERROR(utils, "ICMD time out"); + goto busy_timeout; + } + + mlx_utils_delay_in_ms(10); + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_CTRL_ADDR, &buffer); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl"); + busy = (buffer >> MLX_ICMD_BUSY_ALIGN) & MLX_ICMD_BUSY_MASK; + } while (busy != 0); + +busy_timeout: +write_err: +already_busy: +read_err: +invalid_param: + return status; +} + +static +mlx_status +mlx_icmd_get_status( + IN mlx_utils *utils, + OUT mlx_uint32 *out_status + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 buffer; + + if (utils == NULL || out_status == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_CTRL_ADDR, &buffer); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd ctrl"); + +#define MLX_ICMD_STATUS_ALIGN 8 +#define MLX_ICMD_STATUS_MASK 0xff + + *out_status = (buffer >> MLX_ICMD_STATUS_ALIGN) & MLX_ICMD_STATUS_MASK; + +read_err: +invalid_param: + return status; +} + +static +mlx_status +mlx_icmd_write_buffer( + IN mlx_utils *utils, + IN mlx_void* data, + IN mlx_uint32 data_size + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 data_offset = 0; + mlx_size dword_size = sizeof(mlx_uint32); + + if (utils == NULL || data == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + for (data_offset = 0 ; data_offset*dword_size < data_size ; 
data_offset++) { + status = mlx_pci_gw_write( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_MB_ADDR + data_offset*dword_size, + ((mlx_uint32*)data)[data_offset]); + MLX_CHECK_STATUS(utils, status, write_err, "failed to write icmd MB"); + } +write_err: +invalid_param: + return status; +} + + +static +mlx_status +mlx_icmd_read_buffer( + IN mlx_utils *utils, + OUT mlx_void* data, + IN mlx_uint32 data_size + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 data_offset = 0; + mlx_size dword_size = sizeof(mlx_uint32); + + if (utils == NULL || data == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + + for (data_offset = 0 ; data_offset*dword_size < data_size ; data_offset++) { + status = mlx_pci_gw_read( utils, PCI_GW_SPACE_ALL_ICMD, + MLX_ICMD_MB_ADDR + data_offset*dword_size, + (mlx_uint32*)data + data_offset); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd MB"); + } +read_err: +invalid_param: + return status; +} +mlx_status +mlx_icmd_send_command( + IN mlx_utils *utils, + IN mlx_uint16 opcode, + IN OUT mlx_void* data, + IN mlx_uint32 write_data_size, + IN mlx_uint32 read_data_size + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 icmd_status = 0; + + if (utils == NULL || data == NULL) { + status = MLX_INVALID_PARAMETER; + goto invalid_param; + } + status = mlx_icmd_init(utils); + MLX_CHECK_STATUS(utils, status, open_err, "failed to open icmd"); + + if (write_data_size > utils->icmd.max_cmd_size || + read_data_size > utils->icmd.max_cmd_size) { + status = MLX_INVALID_PARAMETER; + goto size_err; + } + + status = mlx_icmd_get_semaphore(utils); + MLX_CHECK_STATUS(utils, status, semaphore_err, "failed to get icmd semaphore"); + + status = mlx_icmd_set_opcode(utils, opcode); + MLX_CHECK_STATUS(utils, status, opcode_err, "failed to set icmd opcode"); + + if (write_data_size != 0) { + status = mlx_icmd_write_buffer(utils, data, write_data_size); + MLX_CHECK_STATUS(utils, status, opcode_err, "failed to write icmd MB"); + } + + status = 
mlx_icmd_go(utils); + MLX_CHECK_STATUS(utils, status, go_err, "failed to activate icmd"); + + status = mlx_icmd_get_status(utils, &icmd_status); + MLX_CHECK_STATUS(utils, status, get_status_err, "failed to set icmd opcode"); + + if (icmd_status != 0) { + MLX_DEBUG_ERROR(utils, "icmd failed with status = %d\n", icmd_status); + status = MLX_FAILED; + goto icmd_failed; + } + if (read_data_size != 0) { + status = mlx_icmd_read_buffer(utils, data, read_data_size); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read icmd MB"); + } +read_err: +icmd_failed: +get_status_err: +go_err: +opcode_err: + mlx_icmd_clear_semaphore(utils); +semaphore_err: +size_err: +open_err: +invalid_param: + return status; +} diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c new file mode 100644 index 00000000..5aa5a53d --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_memory.c @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include "../../include/private/mlx_memory_priv.h" +#include "../../include/public/mlx_memory.h" + +mlx_status +mlx_memory_alloc( + IN mlx_utils *utils, + IN mlx_size size, + OUT mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + *ptr = NULL; + if ( utils == NULL || size == 0 || *ptr != NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_alloc_priv(utils, size, ptr); +bad_param: + return status; +} + +mlx_status +mlx_memory_zalloc( + IN mlx_utils *utils, + IN mlx_size size, + OUT mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + *ptr = NULL; + if ( utils == NULL || size == 0 || *ptr != NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_zalloc_priv(utils, size, ptr); +bad_param: + return status; +} + +mlx_status +mlx_memory_free( + IN mlx_utils *utils, + IN mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || ptr == NULL || *ptr == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_free_priv(utils, *ptr); + *ptr = NULL; +bad_param: + return status; +} +mlx_status +mlx_memory_alloc_dma( + IN mlx_utils *utils, + IN mlx_size size , + IN mlx_size align, + OUT mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + *ptr = NULL; + if ( utils == NULL || size == 0 || *ptr != NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_alloc_dma_priv(utils, size, align, ptr); +bad_param: + return status; +} + +mlx_status +mlx_memory_free_dma( + IN mlx_utils *utils, + IN mlx_size size , + IN mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || size == 0 || ptr == NULL || *ptr == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_free_dma_priv(utils, size, *ptr); + *ptr = NULL; +bad_param: + return status; +} + +mlx_status +mlx_memory_map_dma( + IN mlx_utils *utils, + IN mlx_void *addr , + 
IN mlx_size number_of_bytes, + OUT mlx_physical_address *phys_addr, + OUT mlx_void **mapping + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || phys_addr == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_map_dma_priv(utils, addr, number_of_bytes, phys_addr, mapping); +bad_param: + return status; +} + +mlx_status +mlx_memory_ummap_dma( + IN mlx_utils *utils, + IN mlx_void *mapping + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_ummap_dma_priv(utils, mapping); +bad_param: + return status; +} + +mlx_status +mlx_memory_cmp( + IN mlx_utils *utils, + IN mlx_void *first_block, + IN mlx_void *second_block, + IN mlx_size size, + OUT mlx_uint32 *out + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || first_block == NULL || second_block == NULL || + out == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_cmp_priv(utils, first_block, second_block, size, out); +bad_param: + return status; +} + +mlx_status +mlx_memory_set( + IN mlx_utils *utils, + IN mlx_void *block, + IN mlx_int32 value, + IN mlx_size size + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || block == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_set_priv(utils, block, value, size); +bad_param: + return status; +} + +mlx_status +mlx_memory_cpy( + IN mlx_utils *utils, + OUT mlx_void *destination_buffer, + IN mlx_void *source_buffer, + IN mlx_size length + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || destination_buffer == NULL || source_buffer == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_cpy_priv(utils, destination_buffer, source_buffer, length); +bad_param: + return status; +} + +mlx_status +mlx_memory_cpu_to_be32( + IN mlx_utils *utils, + IN mlx_uint32 source, + IN mlx_uint32 *destination + ) +{ + 
mlx_status status = MLX_SUCCESS; + if ( utils == NULL || destination == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_cpu_to_be32_priv(utils, source, destination); +bad_param: + return status; +} + +mlx_status +mlx_memory_be32_to_cpu( + IN mlx_utils *utils, + IN mlx_uint32 source, + IN mlx_uint32 *destination + ) +{ + mlx_status status = MLX_SUCCESS; + if ( utils == NULL || destination == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + status = mlx_memory_be32_to_cpu_priv(utils, source, destination); +bad_param: + return status; +} diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c new file mode 100644 index 00000000..d4ff1b9a --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include + +#include "../../include/private/mlx_pci_priv.h" +#include "../../include/public/mlx_pci.h" + +mlx_status +mlx_pci_init( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + if( utils == NULL){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + status = mlx_pci_init_priv(utils); +bail: + return status; +} + +mlx_status +mlx_pci_teardown( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + if( utils == NULL){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + status = mlx_pci_teardown_priv(utils); +bail: + return status; +} + +mlx_status +mlx_pci_read( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if( utils == NULL || count == 0){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + status = mlx_pci_read_priv(utils, width, offset, count, buffer); +bail: + return status; +} + +mlx_status +mlx_pci_write( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if( utils == NULL || count == 0){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + status = mlx_pci_write_priv(utils, width, offset, count, buffer); +bail: + return status; +} + +mlx_status +mlx_pci_mem_read( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint8 bar_index, + IN mlx_uint64 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if( utils == NULL || count == 0){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + status = mlx_pci_mem_read_priv(utils, width,bar_index, offset, count, buffer); +bail: + return status; +} + +mlx_status +mlx_pci_mem_write( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint8 bar_index, + IN mlx_uint64 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if( utils 
== NULL || count == 0){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + status = mlx_pci_mem_write_priv(utils, width, bar_index, offset, count, buffer); +bail: + return status; +} diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c new file mode 100644 index 00000000..30c1e644 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_pci_gw.c @@ -0,0 +1,392 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include "../../include/public/mlx_pci_gw.h" +#include "../../include/public/mlx_bail.h" +#include "../../include/public/mlx_pci.h" +#include "../../include/public/mlx_logging.h" + +/* Lock/unlock GW on each VSEC access */ +#undef VSEC_DEBUG + +static +mlx_status +mlx_pci_gw_check_capability_id( + IN mlx_utils *utils, + IN mlx_uint8 cap_pointer, + OUT mlx_boolean *bool + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint8 offset = cap_pointer + PCI_GW_CAPABILITY_ID_OFFSET; + mlx_uint8 id = 0; + status = mlx_pci_read(utils, MlxPciWidthUint8, offset, + 1, &id); + MLX_CHECK_STATUS(utils, status, read_err,"failed to read capability id"); + *bool = ( id == PCI_GW_CAPABILITY_ID ); +read_err: + return status; +} + +static +mlx_status +mlx_pci_gw_get_ownership( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset; + mlx_uint32 semaphore = 0; + mlx_uint32 counter = 0; + mlx_uint32 get_semaphore_try = 0; + mlx_uint32 get_ownership_try = 0; + + for( ; get_ownership_try < PCI_GW_GET_OWNERSHIP_TRIES; get_ownership_try ++){ + for( ; get_semaphore_try <= PCI_GW_SEMPHORE_TRIES ; get_semaphore_try++){ + status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET, + 1, &semaphore); + MLX_CHECK_STATUS(utils, status, read_err,"failed to read semaphore"); + if( semaphore == 0 ){ + break; + } + mlx_utils_delay_in_us(10); + } + if( semaphore != 0 ){ + status = MLX_FAILED; + goto semaphore_err; + } + + status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_COUNTER_OFFSET, + 1, &counter); + MLX_CHECK_STATUS(utils, status, read_err, "failed to read counter"); + + status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET, + 1, &counter); + MLX_CHECK_STATUS(utils, status, write_err,"failed to write semaphore"); + + status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + 
PCI_GW_CAPABILITY_SEMAPHORE_OFFSET, + 1, &semaphore); + MLX_CHECK_STATUS(utils, status, read_err,"failed to read semaphore"); + if( counter == semaphore ){ + break; + } + } + if( counter != semaphore ){ + status = MLX_FAILED; + } +write_err: +read_err: +semaphore_err: + return status; +} + +static +mlx_status +mlx_pci_gw_free_ownership( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset; + mlx_uint32 value = 0; + + status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_SEMAPHORE_OFFSET, + 1, &value); + MLX_CHECK_STATUS(utils, status, write_err,"failed to write semaphore"); +write_err: + return status; +} + +static +mlx_status +mlx_pci_gw_set_space( + IN mlx_utils *utils, + IN mlx_pci_gw_space space + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset;; + mlx_uint8 space_status = 0; + + /* set nodnic*/ + status = mlx_pci_write(utils, MlxPciWidthUint16, cap_offset + PCI_GW_CAPABILITY_SPACE_OFFSET, 1, &space); + MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability space"); + + status = mlx_pci_read(utils, MlxPciWidthUint8, cap_offset + PCI_GW_CAPABILITY_STATUS_OFFSET, 1, &space_status); + MLX_CHECK_STATUS(utils, status, read_error,"failed to read capability status"); + if( (space_status & 0x20) == 0){ + status = MLX_FAILED; + goto space_unsupported; + } +read_error: +space_unsupported: + return status; +} + +static +mlx_status +mlx_pci_gw_wait_for_flag_value( + IN mlx_utils *utils, + IN mlx_boolean value + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint32 try = 0; + mlx_uint32 cap_offset = utils->pci_gw.pci_cmd_offset; + mlx_uint32 flag = 0; + + for(; try < PCI_GW_READ_FLAG_TRIES ; try ++ ) { + status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_FLAG_OFFSET, 1, &flag); + MLX_CHECK_STATUS(utils, status, read_error, "failed to read capability flag"); + if( ((flag & 0x80000000) != 0) == 
value ){ + goto flag_valid; + } + mlx_utils_delay_in_us(10); + } + status = MLX_FAILED; +flag_valid: +read_error: + return status; +} +static +mlx_status +mlx_pci_gw_search_capability( + IN mlx_utils *utils, + OUT mlx_uint32 *cap_offset + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint8 cap_pointer = 0; + mlx_boolean is_capability = FALSE; + + if( cap_offset == NULL || utils == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + //get first capability pointer + status = mlx_pci_read(utils, MlxPciWidthUint8, PCI_GW_FIRST_CAPABILITY_POINTER_OFFSET, + 1, &cap_pointer); + MLX_CHECK_STATUS(utils, status, read_err, + "failed to read capability pointer"); + + //search the right capability + while( cap_pointer != 0 ){ + status = mlx_pci_gw_check_capability_id(utils, cap_pointer, &is_capability); + MLX_CHECK_STATUS(utils, status, check_err + ,"failed to check capability id"); + + if( is_capability == TRUE ){ + *cap_offset = cap_pointer; + break; + } + + status = mlx_pci_read(utils, MlxPciWidthUint8, cap_pointer + + PCI_GW_CAPABILITY_NEXT_POINTER_OFFSET , + 1, &cap_pointer); + MLX_CHECK_STATUS(utils, status, read_err, + "failed to read capability pointer"); + } + if( is_capability != TRUE ){ + status = MLX_NOT_FOUND; + } +check_err: +read_err: +bad_param: + return status; +} + +mlx_status +mlx_pci_gw_init( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_pci_gw *pci_gw = NULL; + + if( utils == NULL){ + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + pci_gw = &utils->pci_gw; + + status = mlx_pci_gw_search_capability(utils, &pci_gw->pci_cmd_offset); + MLX_CHECK_STATUS(utils, status, cap_err, + "mlx_pci_gw_search_capability failed"); + +#if ! 
defined ( VSEC_DEBUG ) + status = mlx_pci_gw_get_ownership(utils); + MLX_CHECK_STATUS(utils, status, ownership_err,"failed to get ownership"); +ownership_err: +#endif +cap_err: +bad_param: + return status; +} + +mlx_status +mlx_pci_gw_teardown( + IN mlx_utils *utils __attribute__ ((unused)) + ) +{ +#if ! defined ( VSEC_DEBUG ) + mlx_pci_gw_free_ownership(utils); +#endif + return MLX_SUCCESS; +} + +mlx_status +mlx_pci_gw_read( + IN mlx_utils *utils, + IN mlx_pci_gw_space space, + IN mlx_uint32 address, + OUT mlx_pci_gw_buffer *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_pci_gw *pci_gw = NULL; + mlx_uint32 cap_offset = 0; + + if (utils == NULL || buffer == NULL || utils->pci_gw.pci_cmd_offset == 0) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_utils_acquire_lock(utils); + + pci_gw = &utils->pci_gw; + cap_offset = pci_gw->pci_cmd_offset; + +#if ! defined ( VSEC_DEBUG ) + if (pci_gw->space != space) { + status = mlx_pci_gw_set_space(utils, space); + MLX_CHECK_STATUS(utils, status, space_error,"failed to set space"); + pci_gw->space = space; + } +#else + status = mlx_pci_gw_get_ownership(utils); + MLX_CHECK_STATUS(utils, status, ownership_err,"failed to get ownership"); + + status = mlx_pci_gw_set_space(utils, space); + MLX_CHECK_STATUS(utils, status, space_error,"failed to set space"); + pci_gw->space = space; +#endif + + status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_ADDRESS_OFFSET, 1, &address); + MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability address"); + +#if defined ( DEVICE_CX3 ) + /* WA for PCI issue (race) */ + mlx_utils_delay_in_us ( 10 ); +#endif + + status = mlx_pci_gw_wait_for_flag_value(utils, TRUE); + MLX_CHECK_STATUS(utils, status, read_error, "flag failed to change"); + + status = mlx_pci_read(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_DATA_OFFSET, 1, buffer); + MLX_CHECK_STATUS(utils, status, read_error,"failed to read capability data"); + +#if defined 
( VSEC_DEBUG ) + status = mlx_pci_gw_free_ownership(utils); + MLX_CHECK_STATUS(utils, status, free_err, + "mlx_pci_gw_free_ownership failed"); +free_err: + mlx_utils_release_lock(utils); + return status; +#endif +read_error: +space_error: +#if defined ( VSEC_DEBUG ) + mlx_pci_gw_free_ownership(utils); +ownership_err: +#endif +mlx_utils_release_lock(utils); +bad_param: + return status; +} + +mlx_status +mlx_pci_gw_write( + IN mlx_utils *utils, + IN mlx_pci_gw_space space, + IN mlx_uint32 address, + IN mlx_pci_gw_buffer buffer + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_pci_gw *pci_gw = NULL; + mlx_uint32 cap_offset = 0; + mlx_uint32 fixed_address = address | PCI_GW_WRITE_FLAG; + + if (utils == NULL || utils->pci_gw.pci_cmd_offset == 0) { + status = MLX_INVALID_PARAMETER; + goto bad_param; + } + + mlx_utils_acquire_lock(utils); + + pci_gw = &utils->pci_gw; + cap_offset = pci_gw->pci_cmd_offset; + +#if ! defined ( VSEC_DEBUG ) + if (pci_gw->space != space) { + status = mlx_pci_gw_set_space(utils, space); + MLX_CHECK_STATUS(utils, status, space_error,"failed to set space"); + pci_gw->space = space; + } +#else + status = mlx_pci_gw_get_ownership(utils); + MLX_CHECK_STATUS(utils, status, ownership_err,"failed to get ownership"); + + status = mlx_pci_gw_set_space(utils, space); + MLX_CHECK_STATUS(utils, status, space_error,"failed to set space"); + pci_gw->space = space; +#endif + status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_DATA_OFFSET, 1, &buffer); + MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability data"); + + status = mlx_pci_write(utils, MlxPciWidthUint32, cap_offset + PCI_GW_CAPABILITY_ADDRESS_OFFSET, 1, &fixed_address); + MLX_CHECK_STATUS(utils, status, read_error,"failed to write capability address"); + + status = mlx_pci_gw_wait_for_flag_value(utils, FALSE); + MLX_CHECK_STATUS(utils, status, read_error, "flag failed to change"); +#if defined ( VSEC_DEBUG ) + status = mlx_pci_gw_free_ownership(utils); + 
MLX_CHECK_STATUS(utils, status, free_err, + "mlx_pci_gw_free_ownership failed"); +free_err: +mlx_utils_release_lock(utils); + return status; +#endif +read_error: +space_error: +#if defined ( VSEC_DEBUG ) + mlx_pci_gw_free_ownership(utils); +ownership_err: +#endif +mlx_utils_release_lock(utils); +bad_param: + return status; +} + + + diff --git a/src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c b/src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c new file mode 100644 index 00000000..7ae35355 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils/src/public/mlx_utils.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include + +#include "../../include/private/mlx_utils_priv.h" +#include "../../include/public/mlx_pci.h" +#include "../../include/public/mlx_utils.h" +mlx_status +mlx_utils_init( + IN mlx_utils *utils, + IN mlx_pci *pci + ) +{ + mlx_status status = MLX_SUCCESS; + if( pci == NULL || utils == NULL ){ + status = MLX_INVALID_PARAMETER; + goto bail; + } + utils->pci = pci; + status = mlx_pci_init(utils); + status = mlx_utils_init_lock(utils); +bail: + return status; +} + +mlx_status +mlx_utils_teardown( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_utils_free_lock(utils); + mlx_pci_teardown(utils); + return status; +} + +mlx_status +mlx_utils_delay_in_ms( + IN mlx_uint32 msecs + ) +{ + mlx_utils_delay_in_ms_priv(msecs); + return MLX_SUCCESS; +} +mlx_status +mlx_utils_delay_in_us( + IN mlx_uint32 usecs + ) +{ + mlx_utils_delay_in_us_priv(usecs); + return MLX_SUCCESS; +} +mlx_status +mlx_utils_ilog2( + IN mlx_uint32 i, + OUT mlx_uint32 *log + ) +{ + mlx_utils_ilog2_priv(i, log); + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_init_lock( + IN OUT mlx_utils *utils + ) +{ + return mlx_utils_init_lock_priv(&(utils->lock)); + +} + +mlx_status +mlx_utils_free_lock( + IN OUT mlx_utils *utils + ) +{ + return mlx_utils_free_lock_priv(utils->lock); +} + +mlx_status +mlx_utils_acquire_lock ( + IN OUT mlx_utils *utils + ) +{ + return mlx_utils_acquire_lock_priv(utils->lock); +} + +mlx_status +mlx_utils_release_lock ( + IN OUT mlx_utils *utils + ) +{ + return mlx_utils_release_lock_priv(utils->lock); +} + +mlx_status +mlx_utils_rand ( + IN mlx_utils *utils, + OUT mlx_uint32 *rand_num + ) +{ + return mlx_utils_rand_priv(utils, rand_num); +} diff --git a/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h new file mode 100644 index 00000000..3acc1d9d --- /dev/null +++ 
b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_logging_priv.h @@ -0,0 +1,62 @@ +/* + * DebugPriv.h + * + * Created on: Jan 19, 2015 + * Author: maord + */ + +#ifndef STUB_MLXUTILS_INCLUDE_PRIVATE_FLEXBOOT_DEBUG_H_ +#define STUB_MLXUTILS_INCLUDE_PRIVATE_FLEXBOOT_DEBUG_H_ + +#include +#include + +#define MLX_DEBUG_FATAL_ERROR_PRIVATE(...) do { \ + printf("%s: ",__func__); \ + printf(__VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DEBUG_ERROR_PRIVATE(id, ...) do { \ + DBGC(id, "%s: ",__func__); \ + DBGC(id, __VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DEBUG_WARN_PRIVATE(id, ...) do { \ + DBGC(id, "%s: ",__func__); \ + DBGC(id, __VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DEBUG_INFO1_PRIVATE(id, ...) do { \ + DBGC(id, "%s: ",__func__); \ + DBGC(id, __VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DEBUG_INFO2_PRIVATE(id, ...) do { \ + DBGC2(id, "%s: ",__func__); \ + DBGC2(id, __VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DBG_ERROR_PRIVATE(...) do { \ + DBG("%s: ",__func__); \ + DBG(__VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DBG_WARN_PRIVATE(...) do { \ + DBG("%s: ",__func__); \ + DBG(__VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DBG_INFO1_PRIVATE(...) do { \ + DBG("%s: ",__func__); \ + DBG(__VA_ARGS__); \ + } while ( 0 ) + +#define MLX_DBG_INFO2_PRIVATE(...) do { \ + DBG2("%s: ",__func__); \ + DBG2(__VA_ARGS__); \ + } while ( 0 ) + +#define MLX_PRINT_PRIVATE(...) 
printf(__VA_ARGS__) + + +#endif /* STUB_MLXUTILS_INCLUDE_PRIVATE_FLEXBOOT_DEBUG_H_ */ diff --git a/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h new file mode 100644 index 00000000..fe0d5c05 --- /dev/null +++ b/src/drivers/infiniband/mlx_utils_flexboot/include/mlx_types_priv.h @@ -0,0 +1,60 @@ +/* + * types.h + * + * Created on: Jan 18, 2015 + * Author: maord + */ + +#ifndef A_MLXUTILS_INCLUDE_PUBLIC_TYPES_H_ +#define A_MLXUTILS_INCLUDE_PUBLIC_TYPES_H_ +#include +//#include +#include + +#define MLX_SUCCESS 0 +#define MLX_OUT_OF_RESOURCES (-1) +//(-ENOMEM) +#define MLX_INVALID_PARAMETER (-2) +//(-EINVAL) +#define MLX_UNSUPPORTED (-3) +//(-ENOSYS) +#define MLX_NOT_FOUND (-4) + +#define MLX_FAILED (-5) + +#undef TRUE +#define TRUE 1 +#undef FALSE +#define FALSE !TRUE + +typedef int mlx_status; + +typedef uint8_t mlx_uint8; +typedef uint16_t mlx_uint16; +typedef uint32_t mlx_uint32; +typedef uint64_t mlx_uint64; +typedef unsigned long mlx_uintn; + +typedef int8_t mlx_int8; +typedef int16_t mlx_int16;; +typedef int32_t mlx_int32; +typedef int64_t mlx_int64; +typedef uint8_t mlx_boolean; + +typedef struct pci_device mlx_pci; + +typedef size_t mlx_size; + +typedef void mlx_void; + +#define MAC_ADDR_LEN 6 +typedef unsigned long mlx_physical_address; +typedef union { + struct { + uint32_t low; + uint32_t high; + } __attribute__ (( packed )); + uint8_t addr[MAC_ADDR_LEN]; +} mlx_mac_address; + +#endif /* A_MLXUTILS_INCLUDE_PUBLIC_TYPES_H_ */ diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c new file mode 100644 index 00000000..cb9e759b --- /dev/null +++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c @@ -0,0 +1,172 @@ +/* + * MemoryPriv.c + * + * Created on: Jan 21, 2015 + * Author: maord + */ + +#include +#include +#include +#include +#include 
"../../mlx_utils/include/private/mlx_memory_priv.h" + + +mlx_status +mlx_memory_alloc_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_size size, + OUT mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + *ptr = malloc(size); + if(*ptr == NULL){ + status = MLX_OUT_OF_RESOURCES; + } + return status; +} + +mlx_status +mlx_memory_zalloc_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_size size, + OUT mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + *ptr = zalloc(size); + if(*ptr == NULL){ + status = MLX_OUT_OF_RESOURCES; + } + return status; +} + +mlx_status +mlx_memory_free_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_void *ptr + ) +{ + mlx_status status = MLX_SUCCESS; + free(ptr); + return status; +} +mlx_status +mlx_memory_alloc_dma_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_size size , + IN mlx_size align, + OUT mlx_void **ptr + ) +{ + mlx_status status = MLX_SUCCESS; + *ptr = malloc_dma(size, align); + if (*ptr == NULL) { + status = MLX_OUT_OF_RESOURCES; + } else { + memset(*ptr, 0, size); + } + return status; +} + +mlx_status +mlx_memory_free_dma_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_size size , + IN mlx_void *ptr + ) +{ + mlx_status status = MLX_SUCCESS; + free_dma(ptr, size); + return status; +} +mlx_status +mlx_memory_map_dma_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_void *addr , + IN mlx_size number_of_bytes __attribute__ ((unused)), + OUT mlx_physical_address *phys_addr, + OUT mlx_void **mapping __attribute__ ((unused)) + ) +{ + mlx_status status = MLX_SUCCESS; + *phys_addr = virt_to_bus(addr); + return status; +} + +mlx_status +mlx_memory_ummap_dma_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_void *mapping __attribute__ ((unused)) + ) +{ + mlx_status status = MLX_SUCCESS; + return status; +} + +mlx_status +mlx_memory_cmp_priv( + IN mlx_utils *utils __unused, + IN mlx_void *first_block, + IN mlx_void 
*second_block, + IN mlx_size size, + OUT mlx_uint32 *out + ) +{ + mlx_status status = MLX_SUCCESS; + *out = memcmp(first_block, second_block, size); + return status; +} + +mlx_status +mlx_memory_set_priv( + IN mlx_utils *utils __unused, + IN mlx_void *block, + IN mlx_int32 value, + IN mlx_size size + ) +{ + mlx_status status = MLX_SUCCESS; + memset(block, value, size); + return status; +} + +mlx_status +mlx_memory_cpy_priv( + IN mlx_utils *utils __unused, + OUT mlx_void *destination_buffer, + IN mlx_void *source_buffer, + IN mlx_size length + ) +{ + mlx_status status = MLX_SUCCESS; + memcpy(destination_buffer, source_buffer, length); + return status; +} + +mlx_status +mlx_memory_cpu_to_be32_priv( + IN mlx_utils *utils __unused, + IN mlx_uint32 source, + IN mlx_uint32 *destination + ) +{ + mlx_status status = MLX_SUCCESS; + *destination = cpu_to_be32(source); + return status; +} + + +mlx_status +mlx_memory_be32_to_cpu_priv( + IN mlx_utils *utils __unused, + IN mlx_uint32 source, + IN mlx_uint32 *destination + ) +{ + mlx_status status = MLX_SUCCESS; + *destination = be32_to_cpu(source); + return status; +} + diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c new file mode 100644 index 00000000..6b42bcaf --- /dev/null +++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c @@ -0,0 +1,195 @@ +/* + * MlxPciPriv.c + * + * Created on: Jan 21, 2015 + * Author: maord + */ + +#include + +#include "../../mlx_utils/include/private/mlx_pci_priv.h" + + +static +mlx_status +mlx_pci_config_byte( + IN mlx_utils *utils, + IN mlx_boolean read, + IN mlx_uint32 offset, + IN OUT mlx_uint8 *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if (read) { + status = pci_read_config_byte(utils->pci, offset, buffer); + }else { + status = pci_write_config_byte(utils->pci, offset, *buffer); + } + return status; +} + +static +mlx_status +mlx_pci_config_word( + IN mlx_utils *utils, + IN mlx_boolean 
read, + IN mlx_uint32 offset, + IN OUT mlx_uint16 *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if (read) { + status = pci_read_config_word(utils->pci, offset, buffer); + }else { + status = pci_write_config_word(utils->pci, offset, *buffer); + } + return status; +} + +static +mlx_status +mlx_pci_config_dword( + IN mlx_utils *utils, + IN mlx_boolean read, + IN mlx_uint32 offset, + IN OUT mlx_uint32 *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + if (read) { + status = pci_read_config_dword(utils->pci, offset, buffer); + }else { + status = pci_write_config_dword(utils->pci, offset, *buffer); + } + return status; +} +static +mlx_status +mlx_pci_config( + IN mlx_utils *utils, + IN mlx_boolean read, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + IN OUT mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + mlx_uint8 *tmp = (mlx_uint8*)buffer; + mlx_uintn iteration = 0; + if( width == MlxPciWidthUint64) { + width = MlxPciWidthUint32; + count = count * 2; + } + + for(;iteration < count ; iteration++) { + switch (width){ + case MlxPciWidthUint8: + status = mlx_pci_config_byte(utils, read , offset++, tmp++); + break; + case MlxPciWidthUint16: + status = mlx_pci_config_word(utils, read , offset, (mlx_uint16*)tmp); + tmp += 2; + offset += 2; + break; + case MlxPciWidthUint32: + status = mlx_pci_config_dword(utils, read , offset, (mlx_uint32*)tmp); + tmp += 4; + offset += 4; + break; + default: + status = MLX_INVALID_PARAMETER; + } + if(status != MLX_SUCCESS) { + goto config_error; + } + } +config_error: + return status; +} +mlx_status +mlx_pci_init_priv( + IN mlx_utils *utils + ) +{ + mlx_status status = MLX_SUCCESS; + adjust_pci_device ( utils->pci ); +#ifdef DEVICE_CX3 + utils->config = pci_ioremap ( utils->pci, pci_bar_start ( utils->pci, PCI_BASE_ADDRESS_0), + 0x100000 ); +#endif + return status; +} + +mlx_status +mlx_pci_teardown_priv( + IN mlx_utils *utils __attribute__ ((unused)) + ) +{ + mlx_status status = MLX_SUCCESS; 
+#ifdef DEVICE_CX3 + iounmap( utils->config ); +#endif + return status; +} + +mlx_status +mlx_pci_read_priv( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + OUT mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + status = mlx_pci_config(utils, TRUE, width, offset, count, buffer); + return status; +} + +mlx_status +mlx_pci_write_priv( + IN mlx_utils *utils, + IN mlx_pci_width width, + IN mlx_uint32 offset, + IN mlx_uintn count, + IN mlx_void *buffer + ) +{ + mlx_status status = MLX_SUCCESS; + status = mlx_pci_config(utils, FALSE, width, offset, count, buffer); + return status; +} + +mlx_status +mlx_pci_mem_read_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_pci_width width __attribute__ ((unused)), + IN mlx_uint8 bar_index __attribute__ ((unused)), + IN mlx_uint64 offset, + IN mlx_uintn count __attribute__ ((unused)), + OUT mlx_void *buffer + ) +{ + if (buffer == NULL || width != MlxPciWidthUint32) + return MLX_INVALID_PARAMETER; + *((mlx_uint32 *)buffer) = readl(offset); + return MLX_SUCCESS; +} + +mlx_status +mlx_pci_mem_write_priv( + IN mlx_utils *utils __attribute__ ((unused)), + IN mlx_pci_width width __attribute__ ((unused)), + IN mlx_uint8 bar_index __attribute__ ((unused)), + IN mlx_uint64 offset, + IN mlx_uintn count __attribute__ ((unused)), + IN mlx_void *buffer + ) +{ + if (buffer == NULL || width != MlxPciWidthUint32) + return MLX_INVALID_PARAMETER; + barrier(); + writel(*((mlx_uint32 *)buffer), offset); + return MLX_SUCCESS; +} diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c new file mode 100644 index 00000000..5fca406f --- /dev/null +++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_utils_priv.c @@ -0,0 +1,83 @@ +/* + * MlxUtilsPriv.c + * + * Created on: Jan 25, 2015 + * Author: maord + */ + +#include +#include +#include +#include "../../mlx_utils/include/private/mlx_utils_priv.h" + 
+mlx_status +mlx_utils_delay_in_ms_priv( + IN mlx_uint32 msecs + ) +{ + mdelay(msecs); + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_delay_in_us_priv( + IN mlx_uint32 usecs + ) +{ + udelay(usecs); + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_ilog2_priv( + IN mlx_uint32 i, + OUT mlx_uint32 *log + ) +{ + *log = ( fls ( i ) - 1 ); + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_init_lock_priv( + OUT void **lock __unused + ) +{ + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_free_lock_priv( + IN void *lock __unused + ) +{ + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_acquire_lock_priv ( + IN void *lock __unused + ) +{ + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_release_lock_priv ( + IN void *lock __unused + ) +{ + return MLX_SUCCESS; +} + +mlx_status +mlx_utils_rand_priv ( + IN mlx_utils *utils __unused, + OUT mlx_uint32 *rand_num + ) +{ + do { + *rand_num = rand(); + } while ( *rand_num == 0 ); + return MLX_SUCCESS; +} diff --git a/src/drivers/infiniband/nodnic_prm.h b/src/drivers/infiniband/nodnic_prm.h new file mode 100644 index 00000000..5e0fa989 --- /dev/null +++ b/src/drivers/infiniband/nodnic_prm.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#ifndef SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_ +#define SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_ + +#include "mlx_bitops.h" + +struct nodnic_wqe_segment_data_ptr_st { /* Little Endian */ + pseudo_bit_t byte_count[0x0001f]; + pseudo_bit_t always0[0x00001]; +/* -------------- */ + pseudo_bit_t l_key[0x00020]; +/* -------------- */ + pseudo_bit_t local_address_h[0x00020]; +/* -------------- */ + pseudo_bit_t local_address_l[0x00020]; +/* -------------- */ +}; + +struct MLX_DECLARE_STRUCT ( nodnic_wqe_segment_data_ptr ); + +#define HERMON_MAX_SCATTER 1 + +struct nodnic_recv_wqe { + struct nodnic_wqe_segment_data_ptr data[HERMON_MAX_SCATTER]; +} __attribute__ (( packed )); + +#endif /* SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_ */ diff --git a/src/drivers/infiniband/nodnic_shomron_prm.h b/src/drivers/infiniband/nodnic_shomron_prm.h new file mode 100644 index 00000000..85cd9718 --- /dev/null +++ b/src/drivers/infiniband/nodnic_shomron_prm.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2015 Mellanox Technologies Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#ifndef SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_SHOMRON_PRM_H_ +#define SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_SHOMRON_PRM_H_ + + + +#include "nodnic_prm.h" + + +#define SHOMRON_MAX_GATHER 1 + +/* Send wqe segment ctrl */ + +struct shomronprm_wqe_segment_ctrl_send_st { /* Little Endian */ + pseudo_bit_t opcode[0x00008]; + pseudo_bit_t wqe_index[0x00010]; + pseudo_bit_t reserved1[0x00008]; +/* -------------- */ + pseudo_bit_t ds[0x00006]; /* descriptor (wqe) size in 16bytes chunk */ + pseudo_bit_t reserved2[0x00002]; + pseudo_bit_t qpn[0x00018]; +/* -------------- */ + pseudo_bit_t reserved3[0x00002]; + pseudo_bit_t ce[0x00002]; + pseudo_bit_t reserved4[0x0001c]; +/* -------------- */ + pseudo_bit_t reserved5[0x00040]; +/* -------------- */ + pseudo_bit_t mss[0x0000e]; + pseudo_bit_t reserved6[0x0000e]; + pseudo_bit_t cs13_inner[0x00001]; + pseudo_bit_t cs14_inner[0x00001]; + pseudo_bit_t cs13[0x00001]; + pseudo_bit_t cs14[0x00001]; +/* -------------- */ + pseudo_bit_t reserved7[0x00020]; +/* -------------- */ + pseudo_bit_t inline_headers1[0x00010]; + pseudo_bit_t inline_headers_size[0x0000a]; //sum size of inline_hdr1+inline_hdrs (0x10) + pseudo_bit_t reserved8[0x00006]; +/* -------------- */ + pseudo_bit_t inline_headers2[0x00020]; +/* -------------- */ + pseudo_bit_t inline_headers3[0x00020]; +/* -------------- */ + pseudo_bit_t inline_headers4[0x00020]; +/* -------------- */ + pseudo_bit_t inline_headers5[0x00020]; +}; + + + +/* Completion Queue Entry Format #### michal - fixed by gdror */ + +struct shomronprm_completion_queue_entry_st { /* Little Endian */ + + pseudo_bit_t reserved1[0x00080]; +/* -------------- */ + pseudo_bit_t reserved2[0x00010]; + pseudo_bit_t ml_path[0x00007]; + pseudo_bit_t reserved3[0x00009]; +/* -------------- */ + pseudo_bit_t slid[0x00010]; + pseudo_bit_t reserved4[0x00010]; +/* -------------- */ + pseudo_bit_t rqpn[0x00018]; + pseudo_bit_t sl[0x00004]; + 
pseudo_bit_t l3_hdr[0x00002]; + pseudo_bit_t reserved5[0x00002]; +/* -------------- */ + pseudo_bit_t reserved10[0x00020]; +/* -------------- */ + pseudo_bit_t srqn[0x00018]; + pseudo_bit_t reserved11[0x0008]; +/* -------------- */ + pseudo_bit_t pkey_index[0x00020]; +/* -------------- */ + pseudo_bit_t reserved6[0x00020]; +/* -------------- */ + pseudo_bit_t byte_cnt[0x00020]; +/* -------------- */ + pseudo_bit_t reserved7[0x00040]; +/* -------------- */ + pseudo_bit_t qpn[0x00018]; + pseudo_bit_t rx_drop_counter[0x00008]; +/* -------------- */ + pseudo_bit_t owner[0x00001]; + pseudo_bit_t reserved8[0x00003]; + pseudo_bit_t opcode[0x00004]; + pseudo_bit_t reserved9[0x00008]; + pseudo_bit_t wqe_counter[0x00010]; +}; + + +/* Completion with Error CQE #### michal - gdror fixed */ + +struct shomronprm_completion_with_error_st { /* Little Endian */ + pseudo_bit_t reserved1[0x001a0]; + /* -------------- */ + pseudo_bit_t syndrome[0x00008]; + pseudo_bit_t vendor_error_syndrome[0x00008]; + pseudo_bit_t reserved2[0x00010]; + /* -------------- */ + pseudo_bit_t reserved3[0x00040]; +}; + + +struct MLX_DECLARE_STRUCT ( shomronprm_wqe_segment_ctrl_send ); +struct MLX_DECLARE_STRUCT ( shomronprm_completion_queue_entry ); +struct MLX_DECLARE_STRUCT ( shomronprm_completion_with_error ); + +struct shomron_nodnic_eth_send_wqe { + struct shomronprm_wqe_segment_ctrl_send ctrl; + struct nodnic_wqe_segment_data_ptr data[SHOMRON_MAX_GATHER]; +} __attribute__ (( packed )); + +union shomronprm_completion_entry { + struct shomronprm_completion_queue_entry normal; + struct shomronprm_completion_with_error error; +} __attribute__ (( packed )); + + +#endif /* SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_SHOMRON_PRM_H_ */ diff --git a/src/drivers/linux/af_packet.c b/src/drivers/linux/af_packet.c new file mode 100644 index 00000000..65aafc5b --- /dev/null +++ b/src/drivers/linux/af_packet.c @@ -0,0 +1,326 @@ +/* + * Copyright (C) 2016 David Decotigny + * + * This program is free 
software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* This hack prevents pre-2.6.32 headers from redefining struct sockaddr */ +#define _SYS_SOCKET_H +#define __GLIBC__ 2 +#include +#include +#include +#include +#undef __GLIBC__ +#include + +/* linux-specifc syscall params */ +#define LINUX_AF_PACKET 17 +#define LINUX_SOCK_RAW 3 +#define LINUX_SIOCGIFINDEX 0x8933 +#define LINUX_SIOCGIFHWADDR 0x8927 + +#define RX_BUF_SIZE 1536 + +/** @file + * + * The AF_PACKET driver. + * + * Bind to an existing linux network interface. 
+ */ + +struct af_packet_nic { + /** Linux network interface name */ + char * ifname; + /** Packet socket descriptor */ + int fd; + /** ifindex */ + int ifindex; +}; + +/** Open the linux interface */ +static int af_packet_nic_open ( struct net_device * netdev ) +{ + struct af_packet_nic * nic = netdev->priv; + struct sockaddr_ll socket_address; + struct ifreq if_data; + int ret; + + nic->fd = linux_socket(LINUX_AF_PACKET, LINUX_SOCK_RAW, + htons(ETH_P_ALL)); + if (nic->fd < 0) { + DBGC(nic, "af_packet %p socket(AF_PACKET) = %d (%s)\n", + nic, nic->fd, linux_strerror(linux_errno)); + return nic->fd; + } + + /* resolve ifindex of ifname */ + memset(&if_data, 0, sizeof(if_data)); + strncpy(if_data.ifr_name, nic->ifname, sizeof(if_data.ifr_name)); + ret = linux_ioctl(nic->fd, LINUX_SIOCGIFINDEX, &if_data); + if (ret < 0) { + DBGC(nic, "af_packet %p ioctl(SIOCGIFINDEX) = %d (%s)\n", + nic, ret, linux_strerror(linux_errno)); + linux_close(nic->fd); + return ret; + } + + nic->ifindex = if_data.ifr_ifindex; + + /* bind to interface */ + memset(&socket_address, 0, sizeof(socket_address)); + socket_address.sll_family = LINUX_AF_PACKET; + socket_address.sll_ifindex = nic->ifindex; + socket_address.sll_protocol = htons(ETH_P_ALL); + ret = linux_bind(nic->fd, (void *) &socket_address, + sizeof(socket_address)); + if (ret == -1) { + DBGC(nic, "af_packet %p bind() = %d (%s)\n", + nic, ret, linux_strerror(linux_errno)); + linux_close(nic->fd); + return ret; + } + + /* Set nonblocking mode to make af_packet_nic_poll() easier */ + ret = linux_fcntl(nic->fd, F_SETFL, O_NONBLOCK); + if (ret != 0) { + DBGC(nic, "af_packet %p fcntl(%d, ...) = %d (%s)\n", + nic, nic->fd, ret, linux_strerror(linux_errno)); + linux_close(nic->fd); + return ret; + } + + return 0; +} + +/** Close the packet socket */ +static void af_packet_nic_close ( struct net_device *netdev ) +{ + struct af_packet_nic * nic = netdev->priv; + linux_close(nic->fd); +} + +/** + * Transmit an ethernet packet. 
+ * + * The packet can be written to the socket and marked as complete immediately. + */ +static int af_packet_nic_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) +{ + struct af_packet_nic * nic = netdev->priv; + struct sockaddr_ll socket_address; + const struct ethhdr * eh; + int rc; + + memset(&socket_address, 0, sizeof(socket_address)); + socket_address.sll_family = LINUX_AF_PACKET; + socket_address.sll_ifindex = nic->ifindex; + socket_address.sll_halen = ETH_ALEN; + + eh = iobuf->data; + memcpy(socket_address.sll_addr, eh->h_dest, ETH_ALEN); + + rc = linux_sendto(nic->fd, iobuf->data, iobuf->tail - iobuf->data, + 0, (struct sockaddr *)&socket_address, + sizeof(socket_address)); + + DBGC2(nic, "af_packet %p wrote %d bytes\n", nic, rc); + netdev_tx_complete(netdev, iobuf); + + return 0; +} + +/** Poll for new packets */ +static void af_packet_nic_poll ( struct net_device *netdev ) +{ + struct af_packet_nic * nic = netdev->priv; + struct pollfd pfd; + struct io_buffer * iobuf; + int r; + + pfd.fd = nic->fd; + pfd.events = POLLIN; + if (linux_poll(&pfd, 1, 0) == -1) { + DBGC(nic, "af_packet %p poll failed (%s)\n", + nic, linux_strerror(linux_errno)); + return; + } + if ((pfd.revents & POLLIN) == 0) + return; + + /* At this point we know there is at least one new packet to be read */ + + iobuf = alloc_iob(RX_BUF_SIZE); + if (! iobuf) + goto allocfail; + + while ((r = linux_read(nic->fd, iobuf->data, RX_BUF_SIZE)) > 0) { + DBGC2(nic, "af_packet %p read %d bytes\n", nic, r); + + iob_put(iobuf, r); + netdev_rx(netdev, iobuf); + + iobuf = alloc_iob(RX_BUF_SIZE); + if (! iobuf) + goto allocfail; + } + + free_iob(iobuf); + return; + +allocfail: + DBGC(nic, "af_packet %p alloc_iob failed\n", nic); +} + +/** + * Set irq. + * + * Not used on linux, provide a dummy implementation. 
+ */ +static void af_packet_nic_irq ( struct net_device *netdev, int enable ) +{ + struct af_packet_nic *nic = netdev->priv; + + DBGC(nic, "af_packet %p irq enable = %d\n", nic, enable); +} + + +static int af_packet_update_properties ( struct net_device *netdev ) +{ + struct af_packet_nic *nic = netdev->priv; + struct ifreq if_data; + int ret; + + /* retrieve default MAC address */ + int fd = linux_socket(LINUX_AF_PACKET, LINUX_SOCK_RAW, 0); + if (fd < 0) { + DBGC(nic, "af_packet %p cannot create raw socket (%s)\n", + nic, linux_strerror(linux_errno)); + return fd; + } + + /* retrieve host's MAC address */ + memset(&if_data, 0, sizeof(if_data)); + strncpy(if_data.ifr_name, nic->ifname, sizeof(if_data.ifr_name)); + ret = linux_ioctl(fd, LINUX_SIOCGIFHWADDR, &if_data); + if (ret < 0) { + DBGC(nic, "af_packet %p cannot get mac addr (%s)\n", + nic, linux_strerror(linux_errno)); + linux_close(fd); + return ret; + } + + linux_close(fd); + /* struct sockaddr = { u16 family, u8 pad[14] (equiv. sa_data) }; */ + memcpy(netdev->ll_addr, if_data.ifr_hwaddr.pad, ETH_ALEN); + return 0; +} + +/** AF_PACKET operations */ +static struct net_device_operations af_packet_nic_operations = { + .open = af_packet_nic_open, + .close = af_packet_nic_close, + .transmit = af_packet_nic_transmit, + .poll = af_packet_nic_poll, + .irq = af_packet_nic_irq, +}; + +/** Handle a device request for the af_packet driver */ +static int af_packet_nic_probe ( struct linux_device *device, + struct linux_device_request *request ) +{ + struct linux_setting *if_setting; + struct net_device *netdev; + struct af_packet_nic *nic; + int rc; + + netdev = alloc_etherdev(sizeof(*nic)); + if (! 
netdev) + return -ENOMEM; + + netdev_init(netdev, &af_packet_nic_operations); + nic = netdev->priv; + linux_set_drvdata(device, netdev); + netdev->dev = &device->dev; + + memset(nic, 0, sizeof(*nic)); + + /* Look for the mandatory if setting */ + if_setting = linux_find_setting("if", &request->settings); + + /* No if setting */ + if (! if_setting) { + printf("af_packet missing a mandatory if setting\n"); + rc = -EINVAL; + goto err_settings; + } + + nic->ifname = if_setting->value; + snprintf ( device->dev.name, sizeof ( device->dev.name ), "%s", + nic->ifname ); + device->dev.desc.bus_type = BUS_TYPE_TAP; + af_packet_update_properties(netdev); + if_setting->applied = 1; + + /* Apply rest of the settings */ + linux_apply_settings(&request->settings, &netdev->settings.settings); + + /* Register network device */ + if ((rc = register_netdev(netdev)) != 0) + goto err_register; + + netdev_link_up(netdev); + + return 0; + +err_settings: + unregister_netdev(netdev); +err_register: + netdev_nullify(netdev); + netdev_put(netdev); + return rc; +} + +/** Remove the device */ +static void af_packet_nic_remove ( struct linux_device *device ) +{ + struct net_device *netdev = linux_get_drvdata(device); + unregister_netdev(netdev); + netdev_nullify(netdev); + netdev_put(netdev); +} + +/** AF_PACKET linux_driver */ +struct linux_driver af_packet_nic_driver __linux_driver = { + .name = "af_packet", + .probe = af_packet_nic_probe, + .remove = af_packet_nic_remove, + .can_probe = 1, +}; diff --git a/src/drivers/net/acm.c b/src/drivers/net/acm.c new file mode 100644 index 00000000..16dab4be --- /dev/null +++ b/src/drivers/net/acm.c @@ -0,0 +1,529 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include "acm.h" + +/** @file + * + * USB RNDIS driver + * + */ + +/** Interrupt completion profiler */ +static struct profiler acm_intr_profiler __profiler = + { .name = "acm.intr" }; + +/** Bulk IN completion profiler */ +static struct profiler acm_in_profiler __profiler = + { .name = "acm.in" }; + +/** Bulk OUT profiler */ +static struct profiler acm_out_profiler __profiler = + { .name = "acm.out" }; + +/****************************************************************************** + * + * USB RNDIS communications interface + * + ****************************************************************************** + */ + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void acm_intr_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct acm_device *acm = container_of ( ep, struct acm_device, + usbnet.intr ); + struct rndis_device *rndis = acm->rndis; + struct usb_setup_packet *message; + + /* Profile completions */ + profile_start ( &acm_intr_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto ignore; + + /* Drop packets with errors */ + if ( rc != 0 ) { + DBGC ( acm, "ACM %p interrupt failed: %s\n", + acm, strerror ( rc ) ); + DBGC_HDA ( acm, 0, iobuf->data, iob_len ( iobuf ) ); + goto error; + } + + /* Extract message header */ + if ( iob_len ( iobuf ) < sizeof ( *message ) ) { + DBGC ( acm, "ACM %p underlength interrupt:\n", acm ); + DBGC_HDA ( acm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + message = iobuf->data; + + /* Parse message header */ + switch ( message->request ) { + + case cpu_to_le16 ( CDC_RESPONSE_AVAILABLE ) : + case cpu_to_le16 ( 0x0001 ) : /* qemu seems to use this value */ + acm->responded = 1; + break; + + default: + DBGC ( acm, "ACM %p unrecognised interrupt:\n", acm ); + DBGC_HDA ( acm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -ENOTSUP; + goto error; + } + + /* Free I/O buffer */ + free_iob ( iobuf ); + profile_stop ( &acm_intr_profiler ); + + return; + + error: + rndis_rx_err ( rndis, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); + return; +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations acm_intr_operations = { + .complete = acm_intr_complete, +}; + +/****************************************************************************** + * + * USB RNDIS data interface + * + ****************************************************************************** + */ + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void acm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct acm_device *acm = container_of ( ep, struct acm_device, + usbnet.in ); + struct rndis_device *rndis = acm->rndis; + + /* Profile receive completions */ + profile_start ( &acm_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto ignore; + + /* Record USB errors against the RNDIS device */ + if ( rc != 0 ) { + DBGC ( acm, "ACM %p bulk IN failed: %s\n", + acm, strerror ( rc ) ); + goto error; + } + + /* Hand off to RNDIS */ + rndis_rx ( rndis, iob_disown ( iobuf ) ); + + profile_stop ( &acm_in_profiler ); + return; + + error: + rndis_rx_err ( rndis, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations acm_in_operations = { + .complete = acm_in_complete, +}; + +/** + * Transmit packet + * + * @v acm USB RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int acm_out_transmit ( struct acm_device *acm, + struct io_buffer *iobuf ) { + int rc; + + /* Profile transmissions */ + profile_start ( &acm_out_profiler ); + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &acm->usbnet.out, iobuf, 0 ) ) != 0 ) + return rc; + + profile_stop ( &acm_out_profiler ); + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void acm_out_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct acm_device *acm = container_of ( ep, struct acm_device, + usbnet.out ); + struct rndis_device *rndis = acm->rndis; + + /* Report TX completion */ + rndis_tx_complete_err ( rndis, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations acm_out_operations = { + .complete = acm_out_complete, +}; + +/****************************************************************************** + * + * USB RNDIS control interface + * + ****************************************************************************** + */ + +/** + * Send control packet + * + * @v acm USB RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int acm_control_transmit ( struct acm_device *acm, + struct io_buffer *iobuf ) { + struct 
rndis_device *rndis = acm->rndis; + struct usb_device *usb = acm->usb; + int rc; + + /* Send packet as an encapsulated command */ + if ( ( rc = cdc_send_encapsulated_command ( usb, acm->usbnet.comms, + iobuf->data, + iob_len ( iobuf ) ) ) != 0){ + DBGC ( acm, "ACM %p could not send encapsulated command: %s\n", + acm, strerror ( rc ) ); + return rc; + } + + /* Complete packet immediately */ + rndis_tx_complete ( rndis, iobuf ); + + return 0; +} + +/** + * Receive control packet + * + * @v acm USB RNDIS device + * @ret rc Return status code + */ +static int acm_control_receive ( struct acm_device *acm ) { + struct rndis_device *rndis = acm->rndis; + struct usb_device *usb = acm->usb; + struct io_buffer *iobuf; + struct rndis_header *header; + size_t mtu = ACM_RESPONSE_MTU; + size_t len; + int rc; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( mtu ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Get encapsulated response */ + if ( ( rc = cdc_get_encapsulated_response ( usb, acm->usbnet.comms, + iobuf->data, mtu ) ) != 0 ){ + DBGC ( acm, "ACM %p could not get encapsulated response: %s\n", + acm, strerror ( rc ) ); + goto err_get_response; + } + + /* Fix up buffer length */ + header = iobuf->data; + len = le32_to_cpu ( header->len ); + if ( len > mtu ) { + DBGC ( acm, "ACM %p overlength encapsulated response\n", acm ); + DBGC_HDA ( acm, 0, iobuf->data, mtu ); + rc = -EPROTO; + goto err_len; + } + iob_put ( iobuf, len ); + + /* Hand off to RNDIS */ + rndis_rx ( rndis, iob_disown ( iobuf ) ); + + return 0; + + err_len: + err_get_response: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/****************************************************************************** + * + * RNDIS interface + * + ****************************************************************************** + */ + +/** + * Open RNDIS device + * + * @v rndis RNDIS device + * @ret rc Return status code + */ +static int acm_open ( struct rndis_device *rndis ) { + struct acm_device 
*acm = rndis->priv; + int rc; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &acm->usbnet ) ) != 0 ) + goto err_open; + + return 0; + + usbnet_close ( &acm->usbnet ); + err_open: + return rc; +} + +/** + * Close RNDIS device + * + * @v rndis RNDIS device + */ +static void acm_close ( struct rndis_device *rndis ) { + struct acm_device *acm = rndis->priv; + + /* Close USB network device */ + usbnet_close ( &acm->usbnet ); +} + +/** + * Transmit packet + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int acm_transmit ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct acm_device *acm = rndis->priv; + struct rndis_header *header = iobuf->data; + + /* Sanity check */ + assert ( iob_len ( iobuf ) >= sizeof ( *header ) ); + assert ( iob_len ( iobuf ) == le32_to_cpu ( header->len ) ); + + /* Transmit packet via appropriate mechanism */ + if ( header->type == cpu_to_le32 ( RNDIS_PACKET_MSG ) ) { + return acm_out_transmit ( acm, iobuf ); + } else { + return acm_control_transmit ( acm, iobuf ); + } +} + +/** + * Poll for completed and received packets + * + * @v rndis RNDIS device + */ +static void acm_poll ( struct rndis_device *rndis ) { + struct acm_device *acm = rndis->priv; + int rc; + + /* Poll USB bus */ + usb_poll ( acm->bus ); + + /* Refill rings */ + if ( ( rc = usbnet_refill ( &acm->usbnet ) ) != 0 ) + rndis_rx_err ( rndis, NULL, rc ); + + /* Retrieve encapsulated response, if applicable */ + if ( acm->responded ) { + + /* Clear flag */ + acm->responded = 0; + + /* Get encapsulated response */ + if ( ( rc = acm_control_receive ( acm ) ) != 0 ) + rndis_rx_err ( rndis, NULL, rc ); + } +} + +/** USB RNDIS operations */ +static struct rndis_operations acm_operations = { + .open = acm_open, + .close = acm_close, + .transmit = acm_transmit, + .poll = acm_poll, +}; + +/****************************************************************************** + * + * USB interface + * + 
****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int acm_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct rndis_device *rndis; + struct acm_device *acm; + int rc; + + /* Allocate and initialise structure */ + rndis = alloc_rndis ( sizeof ( *acm ) ); + if ( ! rndis ) { + rc = -ENOMEM; + goto err_alloc; + } + rndis_init ( rndis, &acm_operations ); + rndis->netdev->dev = &func->dev; + acm = rndis->priv; + acm->usb = usb; + acm->bus = usb->port->hub->bus; + acm->rndis = rndis; + usbnet_init ( &acm->usbnet, func, &acm_intr_operations, + &acm_in_operations, &acm_out_operations ); + usb_refill_init ( &acm->usbnet.intr, 0, 0, ACM_INTR_MAX_FILL ); + usb_refill_init ( &acm->usbnet.in, 0, ACM_IN_MTU, ACM_IN_MAX_FILL ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &acm->usbnet, config ) ) != 0 ) { + DBGC ( acm, "ACM %p could not describe: %s\n", + acm, strerror ( rc ) ); + goto err_describe; + } + + /* Register RNDIS device */ + if ( ( rc = register_rndis ( rndis ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, acm ); + return 0; + + unregister_rndis ( rndis ); + err_register: + err_describe: + free_rndis ( rndis ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void acm_remove ( struct usb_function *func ) { + struct acm_device *acm = usb_func_get_drvdata ( func ); + struct rndis_device *rndis = acm->rndis; + + /* Unregister RNDIS device */ + unregister_rndis ( rndis ); + + /* Free RNDIS device */ + free_rndis ( rndis ); +} + +/** USB CDC-ACM device IDs */ +static struct usb_device_id cdc_acm_ids[] = { + { + .name = "cdc-acm", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** USB CDC-ACM driver */ +struct usb_driver 
cdc_acm_driver __usb_driver = { + .ids = cdc_acm_ids, + .id_count = ( sizeof ( cdc_acm_ids ) / sizeof ( cdc_acm_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_CDC, USB_SUBCLASS_CDC_ACM, + USB_PROTOCOL_ACM_RNDIS ), + .score = USB_SCORE_DEPRECATED, + .probe = acm_probe, + .remove = acm_remove, +}; + +/** USB RF-RNDIS device IDs */ +static struct usb_device_id rf_rndis_ids[] = { + { + .name = "rf-rndis", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** USB RF-RNDIS driver */ +struct usb_driver rf_rndis_driver __usb_driver = { + .ids = rf_rndis_ids, + .id_count = ( sizeof ( rf_rndis_ids ) / sizeof ( rf_rndis_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_WIRELESS, USB_SUBCLASS_WIRELESS_RADIO, + USB_PROTOCOL_RADIO_RNDIS ), + .score = USB_SCORE_DEPRECATED, + .probe = acm_probe, + .remove = acm_remove, +}; diff --git a/src/drivers/net/acm.h b/src/drivers/net/acm.h new file mode 100644 index 00000000..d4944967 --- /dev/null +++ b/src/drivers/net/acm.h @@ -0,0 +1,69 @@ +#ifndef _ACM_H +#define _ACM_H + +/** @file + * + * USB RNDIS Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** CDC-ACM subclass */ +#define USB_SUBCLASS_CDC_ACM 0x02 + +/** CDC-ACM RNDIS device protocol */ +#define USB_PROTOCOL_ACM_RNDIS 0xff + +/** Class code for wireless devices */ +#define USB_CLASS_WIRELESS 0xe0 + +/** Radio frequency device subclass */ +#define USB_SUBCLASS_WIRELESS_RADIO 0x01 + +/** Radio frequency RNDIS device protocol */ +#define USB_PROTOCOL_RADIO_RNDIS 0x03 + +/** A USB RNDIS network device */ +struct acm_device { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** RNDIS device */ + struct rndis_device *rndis; + /** USB network device */ + struct usbnet_device usbnet; + + /** An encapsulated response is available */ + int responded; +}; + +/** Interrupt maximum fill level + * + * This is a policy decision. 
+ */ +#define ACM_INTR_MAX_FILL 2 + +/** Bulk IN maximum fill level + * + * This is a policy decision. + */ +#define ACM_IN_MAX_FILL 8 + +/** Bulk IN buffer size + * + * This is a policy decision. + */ +#define ACM_IN_MTU 2048 + +/** Encapsulated response buffer size + * + * This is a policy decision. + */ +#define ACM_RESPONSE_MTU 128 + +#endif /* _ACM_H */ diff --git a/src/drivers/net/axge.c b/src/drivers/net/axge.c new file mode 100644 index 00000000..fb274d24 --- /dev/null +++ b/src/drivers/net/axge.c @@ -0,0 +1,821 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "axge.h" + +/** @file + * + * Asix 10/100/1000 USB Ethernet driver + * + * Large chunks of functionality are undocumented in the available + * datasheets. The gaps are deduced from combinations of the Linux + * driver, the FreeBSD driver, and experimentation with the hardware. 
+ */ + +/** Interrupt completion profiler */ +static struct profiler axge_intr_profiler __profiler = + { .name = "axge.intr" }; + +/** Bulk IN completion profiler */ +static struct profiler axge_in_profiler __profiler = + { .name = "axge.in" }; + +/** Bulk OUT profiler */ +static struct profiler axge_out_profiler __profiler = + { .name = "axge.out" }; + +/** Default bulk IN configuration + * + * The Linux and FreeBSD drivers have set of magic constants which are + * chosen based on both the Ethernet and USB link speeds. + * + * Experimentation shows that setting the "timer" value to zero seems + * to prevent the device from ever coalescing multiple packets into a + * single bulk IN transfer. This allows us to get away with using a + * 2kB receive I/O buffer and a zerocopy receive path. + */ +static struct axge_bulk_in_control axge_bicr = { + .ctrl = 7, + .timer = cpu_to_le16 ( 0 ), + .size = 0, + .ifg = 0, +}; + +/****************************************************************************** + * + * Register access + * + ****************************************************************************** + */ + +/** + * Read register + * + * @v asix AXGE device + * @v offset Register offset + * @v data Data buffer + * @v len Length of data + * @ret rc Return status code + */ +static inline int axge_read_register ( struct axge_device *axge, + unsigned int offset, void *data, + size_t len ) { + + return usb_control ( axge->usb, AXGE_READ_MAC_REGISTER, + offset, len, data, len ); +} + +/** + * Read one-byte register + * + * @v asix AXGE device + * @v offset Register offset + * @v value Value to fill in + * @ret rc Return status code + */ +static inline int axge_read_byte ( struct axge_device *axge, + unsigned int offset, uint8_t *value ) { + + return axge_read_register ( axge, offset, value, sizeof ( *value ) ); +} + +/** + * Read two-byte register + * + * @v asix AXGE device + * @v offset Register offset + * @v value Value to fill in + * @ret rc Return status code + */ 
+static inline int axge_read_word ( struct axge_device *axge, + unsigned int offset, uint16_t *value ) { + + return axge_read_register ( axge, offset, value, sizeof ( *value ) ); +} + +/** + * Read four-byte register + * + * @v asix AXGE device + * @v offset Register offset + * @v value Value to fill in + * @ret rc Return status code + */ +static inline int axge_read_dword ( struct axge_device *axge, + unsigned int offset, uint32_t *value ) { + + return axge_read_register ( axge, offset, value, sizeof ( *value ) ); +} + +/** + * Write register + * + * @v asix AXGE device + * @v offset Register offset + * @v data Data buffer + * @v len Length of data + * @ret rc Return status code + */ +static inline int axge_write_register ( struct axge_device *axge, + unsigned int offset, void *data, + size_t len ) { + + return usb_control ( axge->usb, AXGE_WRITE_MAC_REGISTER, + offset, len, data, len ); +} + +/** + * Write one-byte register + * + * @v asix AXGE device + * @v offset Register offset + * @v value Value + * @ret rc Return status code + */ +static inline int axge_write_byte ( struct axge_device *axge, + unsigned int offset, uint8_t value ) { + + return axge_write_register ( axge, offset, &value, sizeof ( value )); +} + +/** + * Write two-byte register + * + * @v asix AXGE device + * @v offset Register offset + * @v value Value + * @ret rc Return status code + */ +static inline int axge_write_word ( struct axge_device *axge, + unsigned int offset, uint16_t value ) { + + return axge_write_register ( axge, offset, &value, sizeof ( value )); +} + +/** + * Write one-byte register + * + * @v asix AXGE device + * @v offset Register offset + * @v value Value + * @ret rc Return status code + */ +static inline int axge_write_dword ( struct axge_device *axge, + unsigned int offset, uint32_t value ) { + + return axge_write_register ( axge, offset, &value, sizeof ( value )); +} + +/****************************************************************************** + * + * Link status + 
* + ****************************************************************************** + */ + +/** + * Get link status + * + * @v asix AXGE device + * @ret rc Return status code + */ +static int axge_check_link ( struct axge_device *axge ) { + struct net_device *netdev = axge->netdev; + uint8_t plsr; + uint16_t msr; + int rc; + + /* Read physical link status register */ + if ( ( rc = axge_read_byte ( axge, AXGE_PLSR, &plsr ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not read PLSR: %s\n", + axge, strerror ( rc ) ); + return rc; + } + + /* Write medium status register */ + msr = cpu_to_le16 ( AXGE_MSR_FD | AXGE_MSR_RFC | AXGE_MSR_TFC | + AXGE_MSR_RE ); + if ( plsr & AXGE_PLSR_EPHY_1000 ) { + msr |= cpu_to_le16 ( AXGE_MSR_GM ); + } else if ( plsr & AXGE_PLSR_EPHY_100 ) { + msr |= cpu_to_le16 ( AXGE_MSR_PS ); + } + if ( ( rc = axge_write_word ( axge, AXGE_MSR, msr ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not write MSR: %s\n", + axge, strerror ( rc ) ); + return rc; + } + + /* Update link status */ + if ( plsr & AXGE_PLSR_EPHY_ANY ) { + DBGC ( axge, "AXGE %p link up (PLSR %02x MSR %04x)\n", + axge, plsr, msr ); + netdev_link_up ( netdev ); + } else { + DBGC ( axge, "AXGE %p link down (PLSR %02x MSR %04x)\n", + axge, plsr, msr ); + netdev_link_down ( netdev ); + } + + return 0; +} + +/****************************************************************************** + * + * AXGE communications interface + * + ****************************************************************************** + */ + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void axge_intr_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct axge_device *axge = container_of ( ep, struct axge_device, + usbnet.intr ); + struct net_device *netdev = axge->netdev; + struct axge_interrupt *intr; + size_t len = iob_len ( iobuf ); + unsigned int link_ok; + + /* Profile completions */ + profile_start ( 
&axge_intr_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) + goto ignore; + + /* Drop packets with errors */ + if ( rc != 0 ) { + DBGC ( axge, "AXGE %p interrupt failed: %s\n", + axge, strerror ( rc ) ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + goto error; + } + + /* Extract message header */ + if ( len < sizeof ( *intr ) ) { + DBGC ( axge, "AXGE %p underlength interrupt:\n", axge ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + intr = iobuf->data; + + /* Check magic signature */ + if ( intr->magic != cpu_to_le16 ( AXGE_INTR_MAGIC ) ) { + DBGC ( axge, "AXGE %p malformed interrupt:\n", axge ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + + /* Extract link status */ + link_ok = ( intr->link & cpu_to_le16 ( AXGE_INTR_LINK_PPLS ) ); + if ( ( !! link_ok ) ^ ( !! netdev_link_ok ( netdev ) ) ) + axge->check_link = 1; + + /* Free I/O buffer */ + free_iob ( iobuf ); + profile_stop ( &axge_intr_profiler ); + + return; + + error: + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); + return; +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations axge_intr_operations = { + .complete = axge_intr_complete, +}; + +/****************************************************************************** + * + * AXGE data interface + * + ****************************************************************************** + */ + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void axge_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct axge_device *axge = container_of ( ep, struct axge_device, + usbnet.in ); + struct net_device *netdev = axge->netdev; + struct axge_rx_footer *ftr; + struct axge_rx_descriptor *desc; + struct io_buffer *pkt; + unsigned int count; + 
unsigned int offset; + size_t len; + size_t padded_len; + + /* Profile receive completions */ + profile_start ( &axge_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) + goto ignore; + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( axge, "AXGE %p bulk IN failed: %s\n", + axge, strerror ( rc ) ); + goto error; + } + + /* Sanity check */ + if ( iob_len ( iobuf ) < sizeof ( *ftr ) ) { + DBGC ( axge, "AXGE %p underlength bulk IN:\n", axge ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + + /* Parse ftr, strip ftr and descriptors */ + iob_unput ( iobuf, sizeof ( *ftr ) ); + ftr = ( iobuf->data + iob_len ( iobuf ) ); + count = le16_to_cpu ( ftr->count ); + if ( count == 0 ) { + DBGC ( axge, "AXGE %p zero-packet bulk IN:\n", axge ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + goto ignore; + } + offset = le16_to_cpu ( ftr->offset ); + if ( ( iob_len ( iobuf ) < offset ) || + ( ( iob_len ( iobuf ) - offset ) < ( count * sizeof ( *desc ) ) )){ + DBGC ( axge, "AXGE %p malformed bulk IN footer:\n", axge ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + desc = ( iobuf->data + offset ); + iob_unput ( iobuf, ( iob_len ( iobuf ) - offset ) ); + + /* Process packets */ + for ( ; count-- ; desc++ ) { + + /* Parse descriptor */ + len = ( le16_to_cpu ( desc->len_flags ) & AXGE_RX_LEN_MASK ); + padded_len = ( ( len + AXGE_RX_LEN_PAD_ALIGN - 1 ) & + ~( AXGE_RX_LEN_PAD_ALIGN - 1 ) ); + if ( iob_len ( iobuf ) < padded_len ) { + DBGC ( axge, "AXGE %p malformed bulk IN descriptor:\n", + axge ); + DBGC_HDA ( axge, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + + /* Check for previous dropped packets */ + if ( desc->len_flags & cpu_to_le16 ( AXGE_RX_CRC_ERROR ) ) + netdev_rx_err ( netdev, NULL, -EIO ); + if ( desc->len_flags & cpu_to_le16 ( AXGE_RX_DROP_ERROR ) ) + netdev_rx_err ( 
netdev, NULL, -ENOBUFS ); + + /* Allocate new I/O buffer, if applicable */ + if ( count ) { + + /* More packets remain: allocate a new buffer */ + pkt = alloc_iob ( AXGE_IN_RESERVE + len ); + if ( ! pkt ) { + /* Record error and continue */ + netdev_rx_err ( netdev, NULL, -ENOMEM ); + iob_pull ( iobuf, padded_len ); + continue; + } + iob_reserve ( pkt, AXGE_IN_RESERVE ); + memcpy ( iob_put ( pkt, len ), iobuf->data, len ); + iob_pull ( iobuf, padded_len ); + + } else { + + /* This is the last (or only) packet: use this buffer */ + iob_unput ( iobuf, ( padded_len - len ) ); + pkt = iob_disown ( iobuf ); + } + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( pkt ) ); + } + + assert ( iobuf == NULL ); + profile_stop ( &axge_in_profiler ); + return; + + error: + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations axge_in_operations = { + .complete = axge_in_complete, +}; + +/** + * Transmit packet + * + * @v asix AXGE device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int axge_out_transmit ( struct axge_device *axge, + struct io_buffer *iobuf ) { + struct axge_tx_header *hdr; + size_t len = iob_len ( iobuf ); + int rc; + + /* Profile transmissions */ + profile_start ( &axge_out_profiler ); + + /* Prepend header */ + if ( ( rc = iob_ensure_headroom ( iobuf, sizeof ( *hdr ) ) ) != 0 ) + return rc; + hdr = iob_push ( iobuf, sizeof ( *hdr ) ); + hdr->len = cpu_to_le32 ( len ); + hdr->wtf = 0; + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &axge->usbnet.out, iobuf, 0 ) ) != 0 ) + return rc; + + profile_stop ( &axge_out_profiler ); + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void axge_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct axge_device *axge = container_of 
( ep, struct axge_device, + usbnet.out ); + struct net_device *netdev = axge->netdev; + + /* Report TX completion */ + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations axge_out_operations = { + .complete = axge_out_complete, +}; + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int axge_open ( struct net_device *netdev ) { + struct axge_device *axge = netdev->priv; + uint16_t rcr; + int rc; + + /* Reapply device configuration to avoid transaction errors */ + if ( ( rc = usb_set_configuration ( axge->usb, axge->config ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not set configuration: %s\n", + axge, strerror ( rc ) ); + goto err_set_configuration; + } + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &axge->usbnet ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not open: %s\n", + axge, strerror ( rc ) ); + goto err_open; + } + + /* Set MAC address */ + if ( ( rc = axge_write_register ( axge, AXGE_NIDR, + netdev->ll_addr, ETH_ALEN ) ) !=0){ + DBGC ( axge, "AXGE %p could not set MAC address: %s\n", + axge, strerror ( rc ) ); + goto err_write_mac; + } + + /* Enable receiver */ + rcr = cpu_to_le16 ( AXGE_RCR_PRO | AXGE_RCR_AMALL | + AXGE_RCR_AB | AXGE_RCR_SO ); + if ( ( rc = axge_write_word ( axge, AXGE_RCR, rcr ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not write RCR: %s\n", + axge, strerror ( rc ) ); + goto err_write_rcr; + } + + /* Update link status */ + if ( ( rc = axge_check_link ( axge ) ) != 0 ) + goto err_check_link; + + return 0; + + err_check_link: + axge_write_word ( axge, AXGE_RCR, 0 ); + err_write_rcr: + err_write_mac: + usbnet_close ( &axge->usbnet ); + err_open: + err_set_configuration: + return rc; +} + 
+/** + * Close network device + * + * @v netdev Network device + */ +static void axge_close ( struct net_device *netdev ) { + struct axge_device *axge = netdev->priv; + + /* Disable receiver */ + axge_write_word ( axge, AXGE_RCR, 0 ); + + /* Close USB network device */ + usbnet_close ( &axge->usbnet ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int axge_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct axge_device *axge = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = axge_out_transmit ( axge, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void axge_poll ( struct net_device *netdev ) { + struct axge_device *axge = netdev->priv; + int rc; + + /* Poll USB bus */ + usb_poll ( axge->bus ); + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &axge->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); + + /* Update link state, if applicable */ + if ( axge->check_link ) { + if ( ( rc = axge_check_link ( axge ) ) == 0 ) { + axge->check_link = 0; + } else { + netdev_rx_err ( netdev, NULL, rc ); + } + } +} + +/** AXGE network device operations */ +static struct net_device_operations axge_operations = { + .open = axge_open, + .close = axge_close, + .transmit = axge_transmit, + .poll = axge_poll, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int axge_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct net_device *netdev; + struct axge_device *axge; + uint16_t epprcr; + uint8_t csr; 
+ int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *axge ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &axge_operations ); + netdev->dev = &func->dev; + axge = netdev->priv; + memset ( axge, 0, sizeof ( *axge ) ); + axge->usb = usb; + axge->bus = usb->port->hub->bus; + axge->netdev = netdev; + axge->config = config->config; + usbnet_init ( &axge->usbnet, func, &axge_intr_operations, + &axge_in_operations, &axge_out_operations ); + usb_refill_init ( &axge->usbnet.intr, 0, 0, AXGE_INTR_MAX_FILL ); + usb_refill_init ( &axge->usbnet.in, AXGE_IN_RESERVE, + AXGE_IN_MTU, AXGE_IN_MAX_FILL ); + DBGC ( axge, "AXGE %p on %s\n", axge, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &axge->usbnet, config ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not describe: %s\n", + axge, strerror ( rc ) ); + goto err_describe; + } + + /* Fetch MAC address */ + if ( ( rc = axge_read_register ( axge, AXGE_NIDR, netdev->hw_addr, + ETH_ALEN ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not fetch MAC address: %s\n", + axge, strerror ( rc ) ); + goto err_read_mac; + } + + /* Power up PHY */ + if ( ( rc = axge_write_word ( axge, AXGE_EPPRCR, 0 ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not write EPPRCR: %s\n", + axge, strerror ( rc ) ); + goto err_write_epprcr_off; + } + epprcr = cpu_to_le16 ( AXGE_EPPRCR_IPRL ); + if ( ( rc = axge_write_word ( axge, AXGE_EPPRCR, epprcr ) ) != 0){ + DBGC ( axge, "AXGE %p could not write EPPRCR: %s\n", + axge, strerror ( rc ) ); + goto err_write_epprcr_on; + } + mdelay ( AXGE_EPPRCR_DELAY_MS ); + + /* Select clocks */ + csr = ( AXGE_CSR_BCS | AXGE_CSR_ACS ); + if ( ( rc = axge_write_byte ( axge, AXGE_CSR, csr ) ) != 0){ + DBGC ( axge, "AXGE %p could not write CSR: %s\n", + axge, strerror ( rc ) ); + goto err_write_csr; + } + mdelay ( AXGE_CSR_DELAY_MS ); + + /* Configure bulk IN pipeline */ + if ( ( rc = axge_write_register ( axge, AXGE_BICR, 
&axge_bicr, + sizeof ( axge_bicr ) ) ) != 0 ){ + DBGC ( axge, "AXGE %p could not write BICR: %s\n", + axge, strerror ( rc ) ); + goto err_write_bicr; + } + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + /* Update link status */ + if ( ( rc = axge_check_link ( axge ) ) != 0 ) + goto err_check_link; + + usb_func_set_drvdata ( func, axge ); + return 0; + + err_check_link: + unregister_netdev ( netdev ); + err_register: + err_write_bicr: + err_write_csr: + err_write_epprcr_on: + err_write_epprcr_off: + err_read_mac: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void axge_remove ( struct usb_function *func ) { + struct axge_device *axge = usb_func_get_drvdata ( func ); + struct net_device *netdev = axge->netdev; + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** AXGE device IDs */ +static struct usb_device_id axge_ids[] = { + { + .name = "ax88179", + .vendor = 0x0b95, + .product = 0x1790, + }, + { + .name = "ax88178a", + .vendor = 0x0b95, + .product = 0x178a, + }, + { + .name = "dub1312", + .vendor = 0x2001, + .product = 0x4a00, + }, + { + .name = "axge-sitecom", + .vendor = 0x0df6, + .product = 0x0072, + }, + { + .name = "axge-samsung", + .vendor = 0x04e8, + .product = 0xa100, + }, + { + .name = "onelinkdock", + .vendor = 0x17ef, + .product = 0x304b, + }, +}; + +/** AXGE driver */ +struct usb_driver axge_driver __usb_driver = { + .ids = axge_ids, + .id_count = ( sizeof ( axge_ids ) / sizeof ( axge_ids[0] ) ), + .class = USB_CLASS_ID ( USB_ANY_ID, USB_ANY_ID, USB_ANY_ID ), + .score = USB_SCORE_NORMAL, + .probe = axge_probe, + .remove = axge_remove, +}; diff --git a/src/drivers/net/axge.h b/src/drivers/net/axge.h new file mode 100644 index 00000000..e22e0ec4 --- /dev/null +++ b/src/drivers/net/axge.h @@ -0,0 +1,179 @@ +#ifndef _AXGE_H +#define 
_AXGE_H + +/** @file + * + * Asix 10/100/1000 USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** Read MAC register */ +#define AXGE_READ_MAC_REGISTER \ + ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x01 ) ) + +/** Write MAC register */ +#define AXGE_WRITE_MAC_REGISTER \ + ( USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x01 ) ) + +/** Physical Link Status Register */ +#define AXGE_PLSR 0x02 +#define AXGE_PLSR_EPHY_10 0x10 /**< Ethernet at 10Mbps */ +#define AXGE_PLSR_EPHY_100 0x20 /**< Ethernet at 100Mbps */ +#define AXGE_PLSR_EPHY_1000 0x40 /**< Ethernet at 1000Mbps */ +#define AXGE_PLSR_EPHY_ANY \ + ( AXGE_PLSR_EPHY_10 | \ + AXGE_PLSR_EPHY_100 | \ + AXGE_PLSR_EPHY_1000 ) + +/** RX Control Register */ +#define AXGE_RCR 0x0b +#define AXGE_RCR_PRO 0x0001 /**< Promiscuous mode */ +#define AXGE_RCR_AMALL 0x0002 /**< Accept all multicasts */ +#define AXGE_RCR_AB 0x0008 /**< Accept broadcasts */ +#define AXGE_RCR_SO 0x0080 /**< Start operation */ + +/** Node ID Register */ +#define AXGE_NIDR 0x10 + +/** Medium Status Register */ +#define AXGE_MSR 0x22 +#define AXGE_MSR_GM 0x0001 /**< Gigabit mode */ +#define AXGE_MSR_FD 0x0002 /**< Full duplex */ +#define AXGE_MSR_RFC 0x0010 /**< RX flow control enable */ +#define AXGE_MSR_TFC 0x0020 /**< TX flow control enable */ +#define AXGE_MSR_RE 0x0100 /**< Receive enable */ +#define AXGE_MSR_PS 0x0200 /**< 100Mbps port speed */ + +/** Ethernet PHY Power and Reset Control Register */ +#define AXGE_EPPRCR 0x26 +#define AXGE_EPPRCR_IPRL 0x0020 /**< Undocumented */ + +/** Delay after initialising EPPRCR */ +#define AXGE_EPPRCR_DELAY_MS 200 + +/** Bulk IN Control Register (undocumented) */ +#define AXGE_BICR 0x2e + +/** Bulk IN Control (undocumented) */ +struct axge_bulk_in_control { + /** Control */ + uint8_t ctrl; + /** Timer */ + uint16_t timer; + /** Size */ + uint8_t size; + /** Inter-frame gap */ + uint8_t ifg; +} 
__attribute__ (( packed )); + +/** Clock Select Register (undocumented) */ +#define AXGE_CSR 0x33 +#define AXGE_CSR_BCS 0x01 /**< Undocumented */ +#define AXGE_CSR_ACS 0x02 /**< Undocumented */ + +/** Delay after initialising CSR */ +#define AXGE_CSR_DELAY_MS 100 + +/** Transmit packet header */ +struct axge_tx_header { + /** Packet length */ + uint32_t len; + /** Answers on a postcard, please */ + uint32_t wtf; +} __attribute__ (( packed )); + +/** Receive packet footer */ +struct axge_rx_footer { + /** Packet count */ + uint16_t count; + /** Header offset */ + uint16_t offset; +} __attribute__ (( packed )); + +/** Receive packet descriptor */ +struct axge_rx_descriptor { + /** Checksum information */ + uint16_t check; + /** Length and error flags */ + uint16_t len_flags; +} __attribute__ (( packed )); + +/** Receive packet length mask */ +#define AXGE_RX_LEN_MASK 0x1fff + +/** Receive packet length alignment */ +#define AXGE_RX_LEN_PAD_ALIGN 8 + +/** Receive packet CRC error */ +#define AXGE_RX_CRC_ERROR 0x2000 + +/** Receive packet dropped error */ +#define AXGE_RX_DROP_ERROR 0x8000 + +/** Interrupt data */ +struct axge_interrupt { + /** Magic signature */ + uint16_t magic; + /** Link state */ + uint16_t link; + /** PHY register MR01 */ + uint16_t mr01; + /** PHY register MR05 */ + uint16_t mr05; +} __attribute__ (( packed )); + +/** Interrupt magic signature */ +#define AXGE_INTR_MAGIC 0x00a1 + +/** Link is up */ +#define AXGE_INTR_LINK_PPLS 0x0001 + +/** An AXGE network device */ +struct axge_device { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** Network device */ + struct net_device *netdev; + /** USB network device */ + struct usbnet_device usbnet; + /** Device configuration */ + unsigned int config; + /** Link state has changed */ + int check_link; +}; + +/** Interrupt maximum fill level + * + * This is a policy decision. 
+ */ +#define AXGE_INTR_MAX_FILL 2 + +/** Bulk IN maximum fill level + * + * This is a policy decision. + */ +#define AXGE_IN_MAX_FILL 8 + +/** Bulk IN buffer size + * + * This is a policy decision. + */ +#define AXGE_IN_MTU 2048 + +/** Amount of space to reserve at start of bulk IN buffers + * + * This is required to allow for protocols such as ARP which may reuse + * a received I/O buffer for transmission. + */ +#define AXGE_IN_RESERVE sizeof ( struct axge_tx_header ) + +#endif /* _AXGE_H */ diff --git a/src/drivers/net/bnxt/bnxt.c b/src/drivers/net/bnxt/bnxt.c new file mode 100644 index 00000000..fe84ea0e --- /dev/null +++ b/src/drivers/net/bnxt/bnxt.c @@ -0,0 +1,2170 @@ + +FILE_LICENCE ( GPL2_ONLY ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnxt.h" +#include "bnxt_dbg.h" + +static void bnxt_service_cq ( struct net_device *dev ); +static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx ); +static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ); +static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ); +static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx ); +void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ); + +/** + * Check if Virtual Function + */ +u8 bnxt_is_pci_vf ( struct pci_device *pdev ) +{ + u16 i; + + for ( i = 0; i < ARRAY_SIZE ( bnxt_vf_nics ); i++ ) { + if ( pdev->device == bnxt_vf_nics[i] ) + return 1; + } + return 0; +} + +static void bnxt_down_pci ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + if ( bp->bar2 ) { + iounmap ( bp->bar2 ); + bp->bar2 = NULL; + } + if ( bp->bar1 ) { + iounmap ( bp->bar1 ); + bp->bar1 = NULL; + } + if ( bp->bar0 ) { + iounmap ( bp->bar0 ); + bp->bar0 = NULL; + } +} + +static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg ) +{ + unsigned long reg_base, reg_size; + + reg_base = pci_bar_start ( pdev, reg ); + reg_size = pci_bar_size ( pdev, reg 
); + return pci_ioremap ( pdev, reg_base, reg_size ); +} + +static int bnxt_get_pci_info ( struct bnxt *bp ) +{ + u16 cmd_reg = 0; + + DBGP ( "%s\n", __func__ ); + /* Disable Interrupt */ + pci_read_word16 ( bp->pdev, PCI_COMMAND, &bp->cmd_reg ); + cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE; + pci_write_word ( bp->pdev, PCI_COMMAND, cmd_reg ); + pci_read_word16 ( bp->pdev, PCI_COMMAND, &cmd_reg ); + + /* SSVID */ + pci_read_word16 ( bp->pdev, + PCI_SUBSYSTEM_VENDOR_ID, + &bp->subsystem_vendor ); + + /* SSDID */ + pci_read_word16 ( bp->pdev, + PCI_SUBSYSTEM_ID, + &bp->subsystem_device ); + + /* Function Number */ + pci_read_byte ( bp->pdev, + PCICFG_ME_REGISTER, + &bp->pf_num ); + + /* Get Bar Address */ + bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 ); + bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 ); + bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 ); + + /* Virtual function */ + bp->vf = bnxt_is_pci_vf ( bp->pdev ); + + dbg_pci ( bp, __func__, cmd_reg ); + return STATUS_SUCCESS; +} + +static int bnxt_get_device_address ( struct bnxt *bp ) +{ + struct net_device *dev = bp->dev; + + DBGP ( "%s\n", __func__ ); + memcpy ( &dev->hw_addr[0], ( char * )&bp->mac_addr[0], ETH_ALEN ); + if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return -EINVAL; + } + + return STATUS_SUCCESS; +} + +static void bnxt_set_link ( struct bnxt *bp ) +{ + if ( bp->link_status == STATUS_LINK_ACTIVE ) + netdev_link_up ( bp->dev ); + else + netdev_link_down ( bp->dev ); +} + +static void thor_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag ) +{ + void *off; + u64 val; + + if ( bp->vf ) + off = ( void * ) ( bp->bar1 + DB_OFFSET_VF ); + else + off = ( void * ) ( bp->bar1 + DB_OFFSET_PF ); + + val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) | + ( u64 )DBC_MSG_IDX ( idx ); + write64 ( val, off ); +} + +static void bnxt_db_nq ( struct bnxt *bp ) +{ + if ( bp->thor ) + thor_db ( bp, ( u32 )bp->nq.cons_id, + ( u32 
)bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM ); + else + write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) ); +} + +static void bnxt_db_cq ( struct bnxt *bp ) +{ + if ( bp->thor ) + thor_db ( bp, ( u32 )bp->cq.cons_id, + ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL ); + else + write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ), + ( bp->bar1 + 0 ) ); +} + +static void bnxt_db_rx ( struct bnxt *bp, u32 idx ) +{ + if ( bp->thor ) + thor_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ ); + else + write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) ); +} + +static void bnxt_db_tx ( struct bnxt *bp, u32 idx ) +{ + if ( bp->thor ) + thor_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ ); + else + write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ), + ( bp->bar1 + 0 ) ); +} + +void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan ) +{ + char *src = ( char * )iob->data; + u16 len = iob_len ( iob ); + + memmove ( ( char * )&src[MAC_HDR_SIZE + VLAN_HDR_SIZE], + ( char * )&src[MAC_HDR_SIZE], + ( len - MAC_HDR_SIZE ) ); + + * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN ); + * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan ); + iob_put ( iob, VLAN_HDR_SIZE ); +} + +static u16 bnxt_get_pkt_vlan ( char *src ) +{ + if ( * ( ( u16 * )&src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) ) + return BYTE_SWAP_S ( * ( ( u16 * )&src[MAC_HDR_SIZE + 2] ) ); + return 0; +} + +int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan ) +{ + if ( rx_vlan ) { + if ( bp->vlan_tx ) { + if ( rx_vlan == bp->vlan_tx ) + return 0; + } else { + if ( rx_vlan == bp->vlan_id ) + return 0; + if ( rx_vlan && !bp->vlan_id ) + return 0; + } + } else { + if ( !bp->vlan_tx && !bp->vlan_id ) + return 0; + } + + return 1; +} + +static inline u32 bnxt_tx_avail ( struct bnxt *bp ) +{ + u32 avail; + u32 use; + + barrier ( ); + avail = TX_AVAIL ( bp->tx.ring_cnt ); + use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt ); + dbg_tx_avail ( bp, avail, use ); + return ( avail-use ); 
+} + +void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len ) +{ + struct tx_bd_short *prod_bd; + + prod_bd = ( struct tx_bd_short * )BD_NOW ( bp->tx.bd_virt, + entry, sizeof ( struct tx_bd_short ) ); + if ( len < 512 ) + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT512; + else if ( len < 1024 ) + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT1K; + else if ( len < 2048 ) + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT2K; + else + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K; + prod_bd->flags_type |= TX_BD_FLAGS; + prod_bd->dma.addr = mapping; + prod_bd->len = len; + prod_bd->opaque = ( u32 )entry; +} + +static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct io_buffer *iob; + + iob = bp->tx.iob[hw_idx]; + dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx ); + netdev_tx_complete ( dev, iob ); + bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt ); + bp->tx.cnt++; + dump_tx_stat ( bp ); +} + +int bnxt_free_rx_iob ( struct bnxt *bp ) +{ + unsigned int i; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) ) + return STATUS_SUCCESS; + + for ( i = 0; i < bp->rx.buf_cnt; i++ ) { + if ( bp->rx.iob[i] ) { + free_iob ( bp->rx.iob[i] ); + bp->rx.iob[i] = NULL; + } + } + bp->rx.iob_cnt = 0; + + FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB ); + return STATUS_SUCCESS; +} + +static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob, + u16 cid, u32 idx ) +{ + struct rx_prod_pkt_bd *desc; + u16 off = cid * sizeof ( struct rx_prod_pkt_bd ); + + desc = ( struct rx_prod_pkt_bd * )&buf[off]; + desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; + desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE; + desc->opaque = idx; + desc->dma.addr = virt_to_bus ( iob->data ); +} + +static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx ) +{ + struct io_buffer *iob; + + iob = alloc_iob ( BNXT_RX_STD_DMA_SZ ); + if ( !iob ) { + DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ ); + return -ENOMEM; + } + + dbg_alloc_rx_iob ( iob, iob_idx, cons_id ); + bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id, + ( u32 ) iob_idx ); + bp->rx.iob[iob_idx] = iob; + return 0; +} + +int bnxt_post_rx_buffers ( struct bnxt *bp ) +{ + u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt ); + u16 iob_idx; + + while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) { + iob_idx = ( cons_id % bp->rx.buf_cnt ); + if ( !bp->rx.iob[iob_idx] ) { + if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) { + dbg_alloc_rx_iob_fail ( iob_idx, cons_id ); + break; + } + } + cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt ); + bp->rx.iob_cnt++; + } + + if ( cons_id != bp->rx.cons_id ) { + dbg_rx_cid ( bp->rx.cons_id, cons_id ); + bp->rx.cons_id = cons_id; + bnxt_db_rx ( bp, ( u32 )cons_id ); + } + + FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB ); + return STATUS_SUCCESS; +} + +u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob, + struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len ) +{ + u8 *rx_buf = ( u8 * )iob->data; + u16 err_flags, rx_vlan; + u8 ignore_chksum_err = 0; + int i; + + err_flags = 
rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT; + if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 ) + ignore_chksum_err = 1; + + if ( err_flags && !ignore_chksum_err ) { + bp->rx.drop_err++; + return 1; + } + + for ( i = 0; i < 6; i++ ) { + if ( rx_buf[6 + i] != bp->mac_addr[i] ) + break; + } + + /* Drop the loopback packets */ + if ( i == 6 ) { + bp->rx.drop_lb++; + return 2; + } + + /* Get VLAN ID from RX completion ring */ + if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN ) + rx_vlan = ( rx_cmp_hi->metadata & + RX_PKT_CMPL_METADATA_VID_MASK ); + else + rx_vlan = 0; + + dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan ); + if ( bnxt_vlan_drop ( bp, rx_vlan ) ) { + bp->rx.drop_vlan++; + return 3; + } + iob_put ( iob, rx_len ); + + if ( rx_vlan ) + bnxt_add_vlan ( iob, rx_vlan ); + + bp->rx.good++; + return 0; +} + +static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ) +{ + u16 cons_id; + + cons_id = bp->cq.cons_id + cnt; + if ( cons_id >= MAX_CQ_DESC_CNT ) { + /* Toggle completion bit when the ring wraps. */ + bp->cq.completion_bit ^= 1; + cons_id = cons_id - MAX_CQ_DESC_CNT; + } + bp->cq.cons_id = cons_id; +} + +void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp, + struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi ) +{ + u32 desc_idx = rx_cmp->opaque; + struct io_buffer *iob = bp->rx.iob[desc_idx]; + u8 drop; + + dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx ); + assert ( !iob ); + drop = bnxt_rx_drop ( bp, iob, rx_cmp_hi, rx_cmp->len ); + dbg_rxp ( iob->data, rx_cmp->len, drop ); + if ( drop ) + netdev_rx_err ( dev, iob, -EINVAL ); + else + netdev_rx ( dev, iob ); + + bp->rx.cnt++; + bp->rx.iob[desc_idx] = NULL; + bp->rx.iob_cnt--; + bnxt_post_rx_buffers ( bp ); + bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. 
*/ + dbg_rx_stat ( bp ); +} + +static int bnxt_rx_complete ( struct net_device *dev, + struct rx_pkt_cmpl *rx_cmp ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct rx_pkt_cmpl_hi *rx_cmp_hi; + u8 cmpl_bit = bp->cq.completion_bit; + + if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) { + rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt; + cmpl_bit ^= 0x1; /* Ring has wrapped. */ + } else + rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 ); + + if ( ! ( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) { + bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi ); + return SERVICE_NEXT_CQ_BD; + } else + return NO_MORE_CQ_BD_TO_SERVICE; +} + +void bnxt_mm_init ( struct bnxt *bp, const char *func ) +{ + DBGP ( "%s\n", __func__ ); + memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE ); + memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE ); + memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE ); + bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req ); + bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp ); + bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma ); + bp->link_status = STATUS_LINK_DOWN; + bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; + bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + bp->nq.ring_cnt = MAX_NQ_DESC_CNT; + bp->cq.ring_cnt = MAX_CQ_DESC_CNT; + bp->tx.ring_cnt = MAX_TX_DESC_CNT; + bp->rx.ring_cnt = MAX_RX_DESC_CNT; + bp->rx.buf_cnt = NUM_RX_BUFFERS; + dbg_mem ( bp, func ); +} + +void bnxt_mm_nic ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE ); + memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE ); + memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE ); + memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE ); + bp->nq.cons_id = 0; + bp->nq.completion_bit = 0x1; + bp->cq.cons_id = 0; + bp->cq.completion_bit = 0x1; + bp->tx.prod_id = 0; + bp->tx.cons_id = 0; + bp->rx.cons_id = 0; + bp->rx.iob_cnt = 0; + + bp->link_status = STATUS_LINK_DOWN; + 
bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; + bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + bp->nq.ring_cnt = MAX_NQ_DESC_CNT; + bp->cq.ring_cnt = MAX_CQ_DESC_CNT; + bp->tx.ring_cnt = MAX_TX_DESC_CNT; + bp->rx.ring_cnt = MAX_RX_DESC_CNT; + bp->rx.buf_cnt = NUM_RX_BUFFERS; +} + +void bnxt_free_mem ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + if ( bp->nq.bd_virt ) { + free_dma ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE ); + bp->nq.bd_virt = NULL; + } + + if ( bp->cq.bd_virt ) { + free_dma ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE ); + bp->cq.bd_virt = NULL; + } + + if ( bp->rx.bd_virt ) { + free_dma ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE ); + bp->rx.bd_virt = NULL; + } + + if ( bp->tx.bd_virt ) { + free_dma ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE ); + bp->tx.bd_virt = NULL; + } + + if ( bp->hwrm_addr_dma ) { + free_dma ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE ); + bp->dma_addr_mapping = 0; + bp->hwrm_addr_dma = NULL; + } + + if ( bp->hwrm_addr_resp ) { + free_dma ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE ); + bp->resp_addr_mapping = 0; + bp->hwrm_addr_resp = NULL; + } + + if ( bp->hwrm_addr_req ) { + free_dma ( bp->hwrm_addr_req, REQ_BUFFER_SIZE ); + bp->req_addr_mapping = 0; + bp->hwrm_addr_req = NULL; + } + DBGP ( "- %s ( ): - Done\n", __func__ ); +} + +int bnxt_alloc_mem ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + bp->hwrm_addr_req = malloc_dma ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->hwrm_addr_resp = malloc_dma ( RESP_BUFFER_SIZE, + BNXT_DMA_ALIGNMENT ); + bp->hwrm_addr_dma = malloc_dma ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->tx.bd_virt = malloc_dma ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); + bp->rx.bd_virt = malloc_dma ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); + bp->cq.bd_virt = malloc_dma ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->nq.bd_virt = malloc_dma ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + test_if ( bp->hwrm_addr_req && + bp->hwrm_addr_resp && + bp->hwrm_addr_dma && + bp->tx.bd_virt && + 
bp->rx.bd_virt && + bp->nq.bd_virt && + bp->cq.bd_virt ) { + bnxt_mm_init ( bp, __func__ ); + return STATUS_SUCCESS; + } + + DBGP ( "- %s ( ): Failed\n", __func__ ); + bnxt_free_mem ( bp ); + return -ENOMEM; +} + +static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len ) +{ + memset ( req, 0, len ); + req->req_type = cmd; + req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE; + req->target_id = ( u16 )HWRM_NA_SIGNATURE; + req->resp_addr = bp->resp_addr_mapping; + req->seq_id = bp->seq_id++; +} + +static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt ) +{ + u32 i = 0; + + for ( i = 0; i < cnt; i++ ) { + write32 ( ( ( u32 * )req )[i], + ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) ); + } + write32 ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) ); +} + +static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len ) +{ + struct hwrm_short_input sreq; + + memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) ); + sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type; + sreq.signature = SHORT_REQ_SIGNATURE_SHORT_CMD; + sreq.size = len; + sreq.req_addr = bp->req_addr_mapping; + mdelay ( 100 ); + dbg_short_cmd ( ( u8 * )&sreq, __func__, + sizeof ( struct hwrm_short_input ) ); + hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 ); +} + +static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func ) +{ + struct input *req = ( struct input * )bp->hwrm_addr_req; + struct output *resp = ( struct output * )bp->hwrm_addr_resp; + u8 *ptr = ( u8 * )resp; + u32 idx; + u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo ); + u16 resp_len = 0; + u16 ret = STATUS_TIMEOUT; + + if ( len > bp->hwrm_max_req_len ) + short_hwrm_cmd_req ( bp, len ); + else + hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) ); + + for ( idx = 0; idx < wait_cnt; idx++ ) { + resp_len = resp->resp_len; + test_if ( resp->seq_id == req->seq_id && + resp->req_type == req->req_type && + ptr[resp_len - 1] == 1 ) { + bp->last_resp_code 
= resp->error_code; + ret = resp->error_code; + break; + } + udelay ( HWRM_CMD_POLL_WAIT_TIME ); + } + dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret ); + return ( int )ret; +} + +static int bnxt_hwrm_ver_get ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input ); + struct hwrm_ver_get_input *req; + struct hwrm_ver_get_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len ); + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ ); + if ( rc ) + return STATUS_FAILURE; + + bp->hwrm_spec_code = + resp->hwrm_intf_maj_8b << 16 | + resp->hwrm_intf_min_8b << 8 | + resp->hwrm_intf_upd_8b; + bp->hwrm_cmd_timeout = ( u32 )resp->def_req_timeout; + if ( !bp->hwrm_cmd_timeout ) + bp->hwrm_cmd_timeout = ( u32 )HWRM_CMD_DEFAULT_TIMEOUT; + if ( resp->hwrm_intf_maj_8b >= 1 ) + bp->hwrm_max_req_len = resp->max_req_win_len; + bp->chip_id = + resp->chip_rev << 24 | + resp->chip_metal << 16 | + resp->chip_bond_id << 8 | + resp->chip_platform_type; + bp->chip_num = resp->chip_num; + test_if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) && + ( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) ) + FLAG_SET ( bp->flags, BNXT_FLAG_HWRM_SHORT_CMD_SUPP ); + bp->hwrm_max_ext_req_len = resp->max_ext_req_len; + if ( bp->chip_num == CHIP_NUM_57500 ) + bp->thor = 1; + dbg_fw_ver ( resp, bp->hwrm_cmd_timeout ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input ); + struct hwrm_func_resource_qcaps_input *req; + struct hwrm_func_resource_qcaps_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_resource_qcaps_input * 
)bp->hwrm_addr_req; + resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS, + cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc != STATUS_SUCCESS ) + return STATUS_SUCCESS; + + FLAG_SET ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ); + + // VFs + if ( !bp->vf ) { + bp->max_vfs = resp->max_vfs; + bp->vf_res_strategy = resp->vf_reservation_strategy; + } + + // vNICs + bp->min_vnics = resp->min_vnics; + bp->max_vnics = resp->max_vnics; + + // MSI-X + bp->max_msix = resp->max_msix; + + // Ring Groups + bp->min_hw_ring_grps = resp->min_hw_ring_grps; + bp->max_hw_ring_grps = resp->max_hw_ring_grps; + + // TX Rings + bp->min_tx_rings = resp->min_tx_rings; + bp->max_tx_rings = resp->max_tx_rings; + + // RX Rings + bp->min_rx_rings = resp->min_rx_rings; + bp->max_rx_rings = resp->max_rx_rings; + + // Completion Rings + bp->min_cp_rings = resp->min_cmpl_rings; + bp->max_cp_rings = resp->max_cmpl_rings; + + // RSS Contexts + bp->min_rsscos_ctxs = resp->min_rsscos_ctx; + bp->max_rsscos_ctxs = resp->max_rsscos_ctx; + + // L2 Contexts + bp->min_l2_ctxs = resp->min_l2_ctxs; + bp->max_l2_ctxs = resp->max_l2_ctxs; + + // Statistic Contexts + bp->min_stat_ctxs = resp->min_stat_ctx; + bp->max_stat_ctxs = resp->max_stat_ctx; + dbg_func_resource_qcaps ( bp ); + return STATUS_SUCCESS; +} + +static u32 bnxt_set_ring_info ( struct bnxt *bp ) +{ + u32 enables = 0; + + DBGP ( "%s\n", __func__ ); + bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS; + bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS; + bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS; + bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS; + bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS; + + if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS ) + bp->num_cmpl_rings = bp->min_cp_rings; + + if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS ) + bp->num_tx_rings = 
bp->min_tx_rings; + + if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS ) + bp->num_rx_rings = bp->min_rx_rings; + + if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS ) + bp->num_hw_ring_grps = bp->min_hw_ring_grps; + + if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS ) + bp->num_stat_ctxs = bp->min_stat_ctxs; + + dbg_num_rings ( bp ); + enables = ( FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS ); + return enables; +} + +static void bnxt_hwrm_assign_resources ( struct bnxt *bp ) +{ + struct hwrm_func_cfg_input *req; + u32 enables = 0; + + DBGP ( "%s\n", __func__ ); + if ( FLAG_TEST ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ) ) + enables = bnxt_set_ring_info ( bp ); + + req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + req->num_cmpl_rings = bp->num_cmpl_rings; + req->num_tx_rings = bp->num_tx_rings; + req->num_rx_rings = bp->num_rx_rings; + req->num_stat_ctxs = bp->num_stat_ctxs; + req->num_hw_ring_grps = bp->num_hw_ring_grps; + req->enables = enables; +} + +static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input ); + struct hwrm_func_qcaps_input *req; + struct hwrm_func_qcaps_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( bp->vf ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + bp->fid = resp->fid; + bp->port_idx = ( u8 )resp->port_id; + + /* Get MAC address for this PF */ + memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN ); + dbg_func_qcaps ( bp 
); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcfg_input ); + struct hwrm_func_qcfg_input *req; + struct hwrm_func_qcfg_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + if ( resp->flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST ) + FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST ); + + if ( resp->port_partition_type & + FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 ) + FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE ); + + bp->ordinal_value = ( u8 )resp->pci_id & 0x0F; + bp->stat_ctx_id = resp->stat_ctx_id; + + /* If VF is set to TRUE, then use some data from func_qcfg ( ). 
*/ + if ( bp->vf ) { + bp->fid = resp->fid; + bp->port_idx = ( u8 )resp->port_id; + bp->vlan_id = resp->vlan; + + /* Get MAC address for this VF */ + memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN ); + } + dbg_func_qcfg ( bp ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_reset_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input ); + struct hwrm_func_reset_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len ); + if ( !bp->vf ) + req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME; + + return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ ); +} + +static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input ); + struct hwrm_func_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( bp->vf ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + bnxt_hwrm_assign_resources ( bp ); + if ( bp->thor ) { + req->enables |= ( FUNC_CFG_REQ_ENABLES_NUM_MSIX | + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_EVB_MODE ); + req->num_msix = 1; + req->num_vnics = 1; + req->evb_mode = FUNC_CFG_REQ_EVB_MODE_NO_EVB; + } + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input ); + struct hwrm_func_drv_rgtr_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len ); + + /* Register with HWRM */ + req->enables = FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD | + FUNC_DRV_RGTR_REQ_ENABLES_VER; + 
req->async_event_fwd[0] |= 0x01; + req->os_type = FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER; + req->ver_maj = IPXE_VERSION_MAJOR; + req->ver_min = IPXE_VERSION_MINOR; + req->ver_upd = IPXE_VERSION_UPDATE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input ); + struct hwrm_func_drv_unrgtr_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len ); + req->flags = FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) + return STATUS_FAILURE; + + FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_set_async_event ( struct bnxt *bp ) +{ + int rc; + u16 idx; + + DBGP ( "%s\n", __func__ ); + if ( bp->thor ) + idx = bp->nq_ring_id; + else + idx = bp->cq_ring_id; + if ( bp->vf ) { + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input ); + struct hwrm_func_vf_cfg_input *req; + + req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG, + cmd_len ); + req->enables = VF_CFG_ENABLE_FLAGS; + req->async_event_cr = idx; + req->mtu = bp->mtu; + req->guest_vlan = bp->vlan_id; + memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr, + ETH_ALEN ); + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + } else { + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input ); + struct hwrm_func_cfg_input *req; + + req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + hwrm_init 
( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR; + req->async_event_cr = idx; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + } + return rc; +} + +static int bnxt_hwrm_cfa_l2_filter_alloc ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input ); + struct hwrm_cfa_l2_filter_alloc_input *req; + struct hwrm_cfa_l2_filter_alloc_output *resp; + int rc; + u32 flags = CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX; + u32 enables; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp; + if ( bp->vf ) + flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST; + enables = CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK; + + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_ALLOC, + cmd_len ); + req->flags = flags; + req->enables = enables; + memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0], + ETH_ALEN ); + memset ( ( char * )&req->l2_addr_mask[0], 0xff, ETH_ALEN ); + if ( !bp->vf ) { + memcpy ( ( char * )&req->t_l2_addr[0], bp->mac_addr, ETH_ALEN ); + memset ( ( char * )&req->t_l2_addr_mask[0], 0xff, ETH_ALEN ); + } + req->src_type = CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT; + req->src_id = ( u32 )bp->port_idx; + req->dst_id = bp->vnic_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) + return STATUS_FAILURE; + + FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER ); + bp->l2_filter_id = resp->l2_filter_id; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_cfa_l2_filter_free ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_free_input ); + struct hwrm_cfa_l2_filter_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE, + cmd_len ); + req->l2_filter_id = bp->l2_filter_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER ); + return STATUS_SUCCESS; +} + +u32 set_rx_mask ( u32 rx_mask ) +{ + u32 mask = 0; + + if ( !rx_mask ) + return mask; + + mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + if ( rx_mask != RX_MASK_ACCEPT_NONE ) { + if ( rx_mask & RX_MASK_ACCEPT_MULTICAST ) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST ) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + if ( rx_mask & RX_MASK_PROMISCUOUS_MODE ) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + } + return mask; +} + +static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input ); + struct hwrm_cfa_l2_set_rx_mask_input *req; + u32 mask = set_rx_mask ( rx_mask ); + + req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK, + cmd_len ); + req->vnic_id = bp->vnic_id; + req->mask = mask; + + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcfg_input ); + struct hwrm_port_phy_qcfg_input *req; + struct hwrm_port_phy_qcfg_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len ); + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ 
); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + if ( idx & SUPPORT_SPEEDS ) + bp->support_speeds = resp->support_speeds; + + if ( idx & DETECT_MEDIA ) + bp->media_detect = resp->module_status; + + if ( idx & PHY_SPEED ) + bp->current_link_speed = resp->link_speed; + + if ( idx & PHY_STATUS ) { + if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK ) + bp->link_status = STATUS_LINK_ACTIVE; + else + bp->link_status = STATUS_LINK_DOWN; + } + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_nvm_get_variable_req ( struct bnxt *bp, + u16 data_len, u16 option_num, u16 dimensions, u16 index_0 ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input ); + struct hwrm_nvm_get_variable_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len ); + req->dest_data_addr = bp->dma_addr_mapping; + req->data_len = data_len; + req->option_num = option_num; + req->dimensions = dimensions; + req->index_0 = index_0; + return wait_resp ( bp, + HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ), + cmd_len, __func__ ); +} + +static int bnxt_get_link_speed ( struct bnxt *bp ) +{ + u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma; + + DBGP ( "%s\n", __func__ ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + ( u16 )LINK_SPEED_DRV_NUM, + 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + ( u16 )LINK_SPEED_FW_NUM, + 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + ( u16 )D3_LINK_SPEED_FW_NUM, 1, + ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK, + D3_SPEED_FW_SHIFT 
); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, + ( u16 )PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM, + 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set |= SET_LINK ( *ptr32, + MEDIA_AUTO_DETECT_MASK, MEDIA_AUTO_DETECT_SHIFT ); + + switch ( bp->link_set & LINK_SPEED_DRV_MASK ) { + case LINK_SPEED_DRV_1G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_1000MBPS ); + break; + case LINK_SPEED_DRV_2_5G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_2500MBPS ); + break; + case LINK_SPEED_DRV_10G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_10GBPS ); + break; + case LINK_SPEED_DRV_25G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_25GBPS ); + break; + case LINK_SPEED_DRV_40G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_40GBPS ); + break; + case LINK_SPEED_DRV_50G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50GBPS ); + break; + case LINK_SPEED_DRV_100G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100GBPS ); + break; + case LINK_SPEED_DRV_200G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200GBPS ); + break; + case LINK_SPEED_DRV_AUTONEG: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_AUTONEG ); + break; + default: + bp->medium = SET_MEDIUM_DUPLEX ( bp, MEDIUM_FULL_DUPLEX ); + break; + } + prn_set_speed ( bp->link_set ); + return STATUS_SUCCESS; +} + +static int bnxt_get_vlan ( struct bnxt *bp ) +{ + u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma; + + /* If VF is set to TRUE, Do not issue this command */ + if ( bp->vf ) + return STATUS_SUCCESS; + + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, + ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM, 1, + ( u16 )bp->ordinal_value ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + + bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 16, + ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM, 1, + ( u16 )bp->ordinal_value ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + + bp->mba_cfg2 |= SET_MBA ( *ptr32, 
VLAN_VALUE_MASK, VLAN_VALUE_SHIFT ); + if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED ) + bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK; + else + bp->vlan_id = 0; + + if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED ) + DBGP ( "VLAN MBA Enabled ( %d )\n", + ( bp->mba_cfg2 & VLAN_VALUE_MASK ) ); + + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input ); + struct hwrm_func_backing_store_qcfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( !bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG, + cmd_len ); + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_cfg_input ); + struct hwrm_func_backing_store_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( !bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG, + cmd_len ); + req->flags = FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE; + req->enables = 0; + return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ ); +} + +static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_queue_qportcfg_input ); + struct hwrm_queue_qportcfg_input *req; + struct hwrm_queue_qportcfg_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( !bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len ); + req->flags = 0; + req->port_id = 0; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, 
cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + bp->queue_id = resp->queue_id0; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_mac_cfg_input ); + struct hwrm_port_mac_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( bp->vf ) + return STATUS_SUCCESS; + + req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len ); + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_cfg_input ); + struct hwrm_port_phy_cfg_input *req; + u32 flags; + u32 enables = 0; + u16 force_link_speed = 0; + u16 auto_link_speed_mask = 0; + u8 auto_mode = 0; + u8 auto_pause = 0; + u8 auto_duplex = 0; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req; + flags = PORT_PHY_CFG_REQ_FLAGS_FORCE | + PORT_PHY_CFG_REQ_FLAGS_RESET_PHY; + + switch ( GET_MEDIUM_SPEED ( bp->medium ) ) { + case MEDIUM_SPEED_1000MBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; + break; + case MEDIUM_SPEED_10GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; + break; + case MEDIUM_SPEED_25GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; + break; + case MEDIUM_SPEED_40GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; + break; + case MEDIUM_SPEED_50GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + break; + case MEDIUM_SPEED_100GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; + break; + case MEDIUM_SPEED_200GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB; + break; + default: + auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; + flags &= ~PORT_PHY_CFG_REQ_FLAGS_FORCE; + 
enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE | + PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK | + PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX | + PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE; + auto_pause = PORT_PHY_CFG_REQ_AUTO_PAUSE_TX | + PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; + auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH; + auto_link_speed_mask = bp->support_speeds; + break; + } + + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_CFG, cmd_len ); + req->flags = flags; + req->enables = enables; + req->port_id = bp->port_idx; + req->force_link_speed = force_link_speed; + req->auto_mode = auto_mode; + req->auto_duplex = auto_duplex; + req->auto_pause = auto_pause; + req->auto_link_speed_mask = auto_link_speed_mask; + + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_query_phy_link ( struct bnxt *bp ) +{ + u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA; + + DBGP ( "%s\n", __func__ ); + /* Query Link Status */ + if ( bnxt_hwrm_port_phy_qcfg ( bp, QCFG_PHY_ALL ) != STATUS_SUCCESS ) { + return STATUS_FAILURE; + } + + if ( bp->link_status == STATUS_LINK_ACTIVE ) + return STATUS_SUCCESS; + + /* If VF is set to TRUE, Do not issue the following commands */ + if ( bp->vf ) + return STATUS_SUCCESS; + + /* If multi_host or NPAR, Do not issue bnxt_get_link_speed */ + if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) { + dbg_flags ( __func__, bp->flags ); + return STATUS_SUCCESS; + } + + /* HWRM_NVM_GET_VARIABLE - speed */ + if ( bnxt_get_link_speed ( bp ) != STATUS_SUCCESS ) { + return STATUS_FAILURE; + } + + /* Configure link if it is not up */ + bnxt_hwrm_port_phy_cfg ( bp ); + + /* refresh link speed values after bringing link up */ + return bnxt_hwrm_port_phy_qcfg ( bp, flag ); +} + +static int bnxt_get_phy_link ( struct bnxt *bp ) +{ + u16 i; + u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA; + + DBGP ( "%s\n", __func__ ); + dbg_chip_info ( bp ); + for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) { + if ( bnxt_hwrm_port_phy_qcfg ( bp, 
flag ) != STATUS_SUCCESS ) + break; + + if ( bp->link_status == STATUS_LINK_ACTIVE ) + break; + +// if ( bp->media_detect ) +// break; + mdelay ( LINK_POLL_WAIT_TIME ); + } + dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) ); + bnxt_set_link ( bp ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_alloc_input ); + struct hwrm_stat_ctx_alloc_input *req; + struct hwrm_stat_ctx_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len ); + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX ); + bp->stat_ctx_id = ( u16 )resp->stat_ctx_id; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_free_input ); + struct hwrm_stat_ctx_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len ); + req->stat_ctx_id = ( u32 )bp->stat_ctx_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_free_input ); + struct hwrm_ring_grp_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len ); + req->ring_group_id = ( u32 )bp->ring_grp_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_alloc_input ); + struct hwrm_ring_grp_alloc_input *req; + struct hwrm_ring_grp_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len ); + req->cr = bp->cq_ring_id; + req->rr = bp->rx_ring_id; + req->ar = ( u16 )HWRM_NA_SIGNATURE; + if ( bp->vf ) + req->sc = bp->stat_ctx_id; + + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP ); + bp->ring_grp_id = ( u16 )resp->ring_group_id; + return STATUS_SUCCESS; +} + +int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_free_input ); + struct hwrm_ring_free_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len ); + req->ring_type = ring_type; + req->ring_id = ring_id; + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input 
); + struct hwrm_ring_alloc_input *req; + struct hwrm_ring_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len ); + req->ring_type = type; + switch ( type ) { + case RING_ALLOC_REQ_RING_TYPE_NQ: + req->page_size = LM_PAGE_BITS ( 12 ); + req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf ); + req->length = ( u32 )bp->nq.ring_cnt; + req->logical_id = 0xFFFF; // Required value for Thor FW? + req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt ); + break; + case RING_ALLOC_REQ_RING_TYPE_L2_CMPL: + req->page_size = LM_PAGE_BITS ( 8 ); + req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf ); + req->length = ( u32 )bp->cq.ring_cnt; + req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt ); + if ( !bp->thor ) + break; + req->enables = RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; + req->nq_ring_id = bp->nq_ring_id; + req->cq_handle = ( u64 )bp->nq_ring_id; + break; + case RING_ALLOC_REQ_RING_TYPE_TX: + req->page_size = LM_PAGE_BITS ( 8 ); + req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL; + req->length = ( u32 )bp->tx.ring_cnt; + req->queue_id = TX_RING_QID; + req->stat_ctx_id = ( u32 )bp->stat_ctx_id; + req->cmpl_ring_id = bp->cq_ring_id; + req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt ); + break; + case RING_ALLOC_REQ_RING_TYPE_RX: + req->page_size = LM_PAGE_BITS ( 8 ); + req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL; + req->length = ( u32 )bp->rx.ring_cnt; + req->stat_ctx_id = ( u32 )STAT_CTX_ID; + req->cmpl_ring_id = bp->cq_ring_id; + req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt ); + if ( !bp->thor ) + break; + req->queue_id = ( u16 )RX_RING_QID; + req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE; + req->enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID; + break; + default: + return STATUS_SUCCESS; + } + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- 
%s ( ): Failed, type = %x\n", __func__, type ); + return STATUS_FAILURE; + } + + if ( type == RING_ALLOC_REQ_RING_TYPE_L2_CMPL ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ ); + bp->cq_ring_id = resp->ring_id; + } else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_TX ); + bp->tx_ring_id = resp->ring_id; + } else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_RX ); + bp->rx_ring_id = resp->ring_id; + } else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ ); + bp->nq_ring_id = resp->ring_id; + } + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_L2_CMPL ); +} + +static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_TX ); +} + +static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_RX ); +} + +static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ ); + + return ret; +} + +static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX ); + + return ret; +} + +static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX ); + + return ret; +} + +static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp ) +{ + if ( !bp->thor ) + return STATUS_SUCCESS; + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_NQ ); +} + +static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + if ( !bp->thor ) + return STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ ); + + return ret; +} + +static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input ); + struct hwrm_vnic_alloc_input *req; + struct hwrm_vnic_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len ); + req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID ); + bp->vnic_id = resp->vnic_id; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_vnic_free ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input ); + struct hwrm_vnic_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len ); + req->vnic_id = bp->vnic_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_cfg_input ); + struct hwrm_vnic_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len ); + req->enables = VNIC_CFG_REQ_ENABLES_MRU; + req->mru = bp->mtu; + + if ( bp->thor ) { + req->enables |= ( VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | + VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID ); + req->default_rx_ring_id = bp->rx_ring_id; + req->default_cmpl_ring_id = bp->cq_ring_id; + } else { + req->enables |= VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP; + req->dflt_ring_grp = bp->ring_grp_id; + } + + req->flags = VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE; + req->vnic_id = bp->vnic_id; + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_set_rx_mask ( struct bnxt *bp ) +{ + return bnxt_hwrm_set_rx_mask ( bp, RX_MASK ); +} + +static int bnxt_reset_rx_mask ( struct bnxt *bp ) +{ + return bnxt_hwrm_set_rx_mask ( bp, 0 ); +} + +typedef int ( *hwrm_func_t ) ( struct bnxt *bp ); + +hwrm_func_t bring_down_chip[] = { + bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */ + NULL, +}; + +hwrm_func_t bring_down_nic[] = { + bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */ + bnxt_reset_rx_mask, + bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */ + bnxt_free_rx_iob, /* HWRM_FREE_IOB */ + bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */ + bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */ + 
bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */ + bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */ + bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */ + bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */ + bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */ + NULL, +}; +hwrm_func_t bring_up_chip[] = { + bnxt_hwrm_ver_get, /* HWRM_VER_GET */ + bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */ + bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */ + bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */ + bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */ + bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */ + bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */ + bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */ + bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */ + bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */ + bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */ + bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */ + bnxt_get_device_address, /* HW MAC address */ + NULL, +}; + +hwrm_func_t bring_up_nic[] = { + bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */ + bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */ + bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */ + bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */ + bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */ + bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */ + bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */ + bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */ + bnxt_post_rx_buffers, /* Post RX buffers */ + bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */ + bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */ + bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */ + bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */ + bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */ + NULL, +}; + +int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp ) +{ + hwrm_func_t *ptr; + int ret; + + for ( ptr = cmds; *ptr; ++ptr ) { + memset ( bp->hwrm_addr_req, 0, 
REQ_BUFFER_SIZE ); + memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE ); + ret = ( *ptr ) ( bp ); + if ( ret ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + } + return STATUS_SUCCESS; +} + +#define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp ) +#define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp ) +#define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp ) +#define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp ) + +static int bnxt_open ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + + DBGP ( "%s\n", __func__ ); + bnxt_mm_nic ( bp ); + return (bnxt_up_nic ( bp )); +} + +static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob ) +{ + u16 prev_len = iob_len ( iob ); + + bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data ); + if ( !bp->vlan_tx && bp->vlan_id ) + bnxt_add_vlan ( iob, bp->vlan_id ); + + dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) ); + if ( iob_len ( iob ) != prev_len ) + prev_len = iob_len ( iob ); + + iob_pad ( iob, ETH_ZLEN ); + dbg_tx_pad ( prev_len, iob_len ( iob ) ); +} + +static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob ) +{ + struct bnxt *bp = netdev_priv ( dev ); + u16 len, entry; + dma_addr_t mapping; + + if ( bnxt_tx_avail ( bp ) < 1 ) { + DBGP ( "- %s ( ): Failed no bd's available\n", __func__ ); + return -ENOBUFS; + } + + bnxt_tx_adjust_pkt ( bp, iob ); + entry = bp->tx.prod_id; + mapping = virt_to_bus ( iob->data ); + len = iob_len ( iob ); + bp->tx.iob[entry] = iob; + bnxt_set_txq ( bp, entry, mapping, len ); + entry = NEXT_IDX ( entry, bp->tx.ring_cnt ); + dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id ); + /* Packets are ready, update Tx producer idx local and on card. 
*/ + bnxt_db_tx ( bp, ( u32 )entry ); + bp->tx.prod_id = entry; + bp->tx.cnt_req++; + /* memory barrier */ + mb ( ); + return 0; +} + +static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt ) +{ + u16 cons_id; + + cons_id = bp->nq.cons_id + cnt; + if ( cons_id >= bp->nq.ring_cnt ) { + /* Toggle completion bit when the ring wraps. */ + bp->nq.completion_bit ^= 1; + cons_id = cons_id - bp->nq.ring_cnt; + } + bp->nq.cons_id = cons_id; +} + +void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ) +{ + switch ( evt->event_id ) { + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + if ( evt->event_data1 & 0x01 ) + bp->link_status = STATUS_LINK_ACTIVE; + else + bp->link_status = STATUS_LINK_DOWN; + bnxt_set_link ( bp ); + dbg_link_status ( bp ); + break; + default: + break; + } +} + +static void bnxt_service_cq ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct cmpl_base *cmp; + struct tx_cmpl *tx; + u16 old_cid = bp->cq.cons_id; + int done = SERVICE_NEXT_CQ_BD; + u32 cq_type; + + while ( done == SERVICE_NEXT_CQ_BD ) { + cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt, + bp->cq.cons_id, + sizeof ( struct cmpl_base ) ); + + if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit ) + break; + + cq_type = cmp->type & CMPL_BASE_TYPE_MASK; + dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 ); + dump_cq ( cmp, bp->cq.cons_id ); + + switch ( cq_type ) { + case CMPL_BASE_TYPE_TX_L2: + tx = ( struct tx_cmpl * )cmp; + bnxt_tx_complete ( dev, ( u16 )tx->opaque ); + /* Fall through */ + case CMPL_BASE_TYPE_STAT_EJECT: + bnxt_adv_cq_index ( bp, 1 ); + break; + case CMPL_BASE_TYPE_RX_L2: + done = bnxt_rx_complete ( dev, + ( struct rx_pkt_cmpl * )cmp ); + break; + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_link_evt ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + bnxt_adv_cq_index ( bp, 1 ); + break; + default: + done = NO_MORE_CQ_BD_TO_SERVICE; + break; + } + } + + if ( bp->cq.cons_id != old_cid ) + bnxt_db_cq ( bp ); +} 
+ +static void bnxt_service_nq ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct nq_base *nqp; + u16 old_cid = bp->nq.cons_id; + int done = SERVICE_NEXT_NQ_BD; + u32 nq_type; + + if ( !bp->thor ) + return; + + while ( done == SERVICE_NEXT_NQ_BD ) { + nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt, + bp->nq.cons_id, sizeof ( struct nq_base ) ); + if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit ) + break; + nq_type = ( nqp->type & NQ_CN_TYPE_MASK ); + dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 ); + dump_nq ( nqp, bp->nq.cons_id ); + + switch ( nq_type ) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_link_evt ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + /* Fall through */ + case NQ_CN_TYPE_CQ_NOTIFICATION: + bnxt_adv_nq_index ( bp, 1 ); + break; + default: + done = NO_MORE_NQ_BD_TO_SERVICE; + break; + } + } + + if ( bp->nq.cons_id != old_cid ) + bnxt_db_nq ( bp ); +} + +static void bnxt_poll ( struct net_device *dev ) +{ + mb ( ); + bnxt_service_cq ( dev ); + bnxt_service_nq ( dev ); +} + +static void bnxt_close ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + + DBGP ( "%s\n", __func__ ); + bnxt_down_nic (bp); + + /* iounmap PCI BAR ( s ) */ + bnxt_down_pci(bp); + + /* Get Bar Address */ + bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 ); + bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 ); + bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 ); + +} + +static struct net_device_operations bnxt_netdev_ops = { + .open = bnxt_open, + .close = bnxt_close, + .poll = bnxt_poll, + .transmit = bnxt_tx, +}; + +static int bnxt_init_one ( struct pci_device *pci ) +{ + struct net_device *netdev; + struct bnxt *bp; + int err = 0; + + DBGP ( "%s\n", __func__ ); + /* Allocate network device */ + netdev = alloc_etherdev ( sizeof ( *bp ) ); + if ( !netdev ) { + DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ ); + err = -ENOMEM; + goto disable_pdev; + } + + /* Initialise network device 
*/ + netdev_init ( netdev, &bnxt_netdev_ops ); + + /* Driver private area for this device */ + bp = netdev_priv ( netdev ); + + /* Set PCI driver private data */ + pci_set_drvdata ( pci, netdev ); + + /* Clear Private area data */ + memset ( bp, 0, sizeof ( *bp ) ); + bp->pdev = pci; + bp->dev = netdev; + netdev->dev = &pci->dev; + + /* Enable PCI device */ + adjust_pci_device ( pci ); + + /* Get PCI Information */ + bnxt_get_pci_info ( bp ); + + /* Allocate and Initialise device specific parameters */ + if ( bnxt_alloc_mem ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_alloc_mem Failed\n", __func__ ); + goto err_down_pci; + } + + /* Get device specific information */ + if ( bnxt_up_chip ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ ); + goto err_down_chip; + } + + /* Register Network device */ + if ( register_netdev ( netdev ) != 0 ) { + DBGP ( "- %s ( ): register_netdev Failed\n", __func__ ); + goto err_down_chip; + } + + return 0; + +err_down_chip: + bnxt_down_chip (bp); + bnxt_free_mem ( bp ); + +err_down_pci: + bnxt_down_pci ( bp ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); + +disable_pdev: + pci_set_drvdata ( pci, NULL ); + return err; +} + +static void bnxt_remove_one ( struct pci_device *pci ) +{ + struct net_device *netdev = pci_get_drvdata ( pci ); + struct bnxt *bp = netdev_priv ( netdev ); + + DBGP ( "%s\n", __func__ ); + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Bring down Chip */ + bnxt_down_chip(bp); + + /* Free Allocated resource */ + bnxt_free_mem ( bp ); + + /* iounmap PCI BAR ( s ) */ + bnxt_down_pci ( bp ); + + /* Stop network device */ + netdev_nullify ( netdev ); + + /* Drop reference to network device */ + netdev_put ( netdev ); +} + +/* Broadcom NXE PCI driver */ +struct pci_driver bnxt_pci_driver __pci_driver = { + .ids = bnxt_nics, + .id_count = ARRAY_SIZE ( bnxt_nics ), + .probe = bnxt_init_one, + .remove = bnxt_remove_one, +}; diff --git a/src/drivers/net/bnxt/bnxt.h 
b/src/drivers/net/bnxt/bnxt.h new file mode 100644 index 00000000..4cca07bd --- /dev/null +++ b/src/drivers/net/bnxt/bnxt.h @@ -0,0 +1,1006 @@ +/* + * Copyright © 2018 Broadcom. All Rights Reserved. + * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + + * This program is free software; you can redistribute it and/or modify it under + * the terms of version 2 of the GNU General Public License as published by the + * Free Software Foundation. + + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING + * ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR + * NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS + * ARE HELD TO BE LEGALLY INVALID. See the GNU General Public License for more + * details, a copy of which can be found in the file COPYING included with this + * package. + */ + +#undef ERRFILE +#define ERRFILE ERRFILE_tg3 + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#define dma_addr_t unsigned long + +union dma_addr64_t { + dma_addr_t addr; + u64 as_u64; +}; + +#include "bnxt_hsi.h" + +#define DRV_MODULE_NAME "bnxt" +#define IPXE_VERSION_MAJOR 1 +#define IPXE_VERSION_MINOR 0 +#define IPXE_VERSION_UPDATE 0 + +/* + * Broadcom ethernet driver defines. + */ +#define FLAG_SET(f, b) ((f) |= (b)) +#define FLAG_TEST(f, b) ((f) & (b)) +#define FLAG_RESET(f, b) ((f) &= ~(b)) +#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP 0x0001 +#define BNXT_FLAG_HWRM_SHORT_CMD_REQ 0x0002 +#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT 0x0004 +#define BNXT_FLAG_MULTI_HOST 0x0008 +#define BNXT_FLAG_NPAR_MODE 0x0010 +#define BNXT_FLAG_ATOMICS_ENABLE 0x0020 +/******************************************************************************* + * Status codes. 
+ ******************************************************************************/ +#define STATUS_SUCCESS 0 +#define STATUS_FAILURE 1 +#define STATUS_NO_RESOURCE 2 +#define STATUS_INVALID_PARAMETER 3 +#define STATUS_LINK_ACTIVE 4 +#define STATUS_LINK_DOWN 5 +#define STATUS_LINK_SETTING_MISMATCH 6 +#define STATUS_TOO_MANY_FRAGMENTS 7 +#define STATUS_TRANSMIT_ABORTED 8 +#define STATUS_TRANSMIT_ERROR 9 +#define STATUS_RECEIVE_ABORTED 10 +#define STATUS_RECEIVE_ERROR 11 +#define STATUS_INVALID_PACKET_SIZE 12 +#define STATUS_NO_MAP_REGISTER 13 +#define STATUS_UNKNOWN_ADAPTER 14 +#define STATUS_NO_COALESCE_BUFFER 15 +#define STATUS_UNKNOWN_PHY 16 +#define STATUS_PENDING 17 +#define STATUS_NO_TX_DESC 18 +#define STATUS_NO_TX_BD 19 +#define STATUS_UNKNOWN_MEDIUM 20 +#define STATUS_RESOURCE 21 +#define STATUS_ABORT_REASON_DISCONNECT 22 +#define STATUS_ABORT_REASON_UPLOAD 23 +#define STATUS_TIMEOUT 0xffff +/******************************************************************************* + * Receive filter masks. + ******************************************************************************/ +#define RX_MASK_ACCEPT_NONE 0x0000 +#define RX_MASK_ACCEPT_UNICAST 0x0001 +#define RX_MASK_ACCEPT_MULTICAST 0x0002 +#define RX_MASK_ACCEPT_ALL_MULTICAST 0x0004 +#define RX_MASK_ACCEPT_BROADCAST 0x0008 +#define RX_MASK_ACCEPT_ERROR_PACKET 0x0010 +#define RX_MASK_PROMISCUOUS_MODE 0x10000 +/******************************************************************************* + * media speed. 
+ ******************************************************************************/ +#define MEDIUM_SPEED_AUTONEG 0x0000L +#define MEDIUM_SPEED_UNKNOWN 0x0000L +#define MEDIUM_SPEED_10MBPS 0x0100L +#define MEDIUM_SPEED_100MBPS 0x0200L +#define MEDIUM_SPEED_1000MBPS 0x0300L +#define MEDIUM_SPEED_2500MBPS 0x0400L +#define MEDIUM_SPEED_10GBPS 0x0600L +#define MEDIUM_SPEED_20GBPS 0x0700L +#define MEDIUM_SPEED_25GBPS 0x0800L +#define MEDIUM_SPEED_40GBPS 0x0900L +#define MEDIUM_SPEED_50GBPS 0x0a00L +#define MEDIUM_SPEED_100GBPS 0x0b00L +#define MEDIUM_SPEED_200GBPS 0x0c00L +#define MEDIUM_SPEED_AUTONEG_1G_FALLBACK 0x8000L /* Serdes */ +#define MEDIUM_SPEED_AUTONEG_2_5G_FALLBACK 0x8100L /* Serdes */ +#define MEDIUM_SPEED_HARDWARE_DEFAULT 0xff00L /* Serdes nvram def.*/ +#define MEDIUM_SPEED_MASK 0xff00L +#define GET_MEDIUM_SPEED(m) ((m) & MEDIUM_SPEED_MASK) +#define SET_MEDIUM_SPEED(bp, s) ((bp->medium & ~MEDIUM_SPEED_MASK) | s) +#define MEDIUM_UNKNOWN_DUPLEX 0x00000L +#define MEDIUM_FULL_DUPLEX 0x00000L +#define MEDIUM_HALF_DUPLEX 0x10000L +#define GET_MEDIUM_DUPLEX(m) ((m) & MEDIUM_HALF_DUPLEX) +#define SET_MEDIUM_DUPLEX(bp, d) ((bp->medium & ~MEDIUM_HALF_DUPLEX) | d) +#define MEDIUM_SELECTIVE_AUTONEG 0x01000000L +#define GET_MEDIUM_AUTONEG_MODE(m) ((m) & 0xff000000L) +#define PCICFG_ME_REGISTER 0x98 +#define GRC_COM_CHAN_BASE 0 +#define GRC_COM_CHAN_TRIG 0x100 +#define GRC_IND_BAR_0_ADDR 0x78 +#define GRC_IND_BAR_1_ADDR 0x7C +#define GRC_IND_BAR_0_DATA 0x80 +#define GRC_IND_BAR_1_DATA 0x84 +#define GRC_BASE_WIN_0 0x400 +#define GRC_DATA_WIN_0 0x1000 +#define HWRM_CMD_DEFAULT_TIMEOUT 500 /* in Milliseconds */ +#define HWRM_CMD_POLL_WAIT_TIME 100 /* In Microseconds */ +#define HWRM_CMD_DEFAULT_MULTIPLAYER(a) ((a) * 10) +#define HWRM_CMD_FLASH_MULTIPLAYER(a) ((a) * 100) +#define HWRM_CMD_FLASH_ERASE_MULTIPLAYER(a) ((a) * 1000) +#define HWRM_CMD_WAIT(b) ((bp->hwrm_cmd_timeout) * (b)) +#define MAX_ETHERNET_PACKET_BUFFER_SIZE 1536 +#define DEFAULT_NUMBER_OF_CMPL_RINGS 0x01 
+#define DEFAULT_NUMBER_OF_TX_RINGS 0x01 +#define DEFAULT_NUMBER_OF_RX_RINGS 0x01 +#define DEFAULT_NUMBER_OF_RING_GRPS 0x01 +#define DEFAULT_NUMBER_OF_STAT_CTXS 0x01 +#define NUM_RX_BUFFERS 8 +#define MAX_RX_DESC_CNT 16 +#define MAX_TX_DESC_CNT 16 +#define MAX_CQ_DESC_CNT 64 +#define TX_RING_BUFFER_SIZE (MAX_TX_DESC_CNT * sizeof(struct tx_bd_short)) +#define RX_RING_BUFFER_SIZE \ + (MAX_RX_DESC_CNT * sizeof(struct rx_prod_pkt_bd)) +#define CQ_RING_BUFFER_SIZE (MAX_CQ_DESC_CNT * sizeof(struct cmpl_base)) +#define BNXT_DMA_ALIGNMENT 256 //64 +#define DMA_ALIGN_4K 4096 //thor tx & rx +#define REQ_BUFFER_SIZE 1024 +#define RESP_BUFFER_SIZE 1024 +#define DMA_BUFFER_SIZE 1024 +#define LM_PAGE_BITS(a) (a) +#define BNXT_RX_STD_DMA_SZ (1536 + 64 + 2) +#define NEXT_IDX(N, S) (((N) + 1) & ((S) - 1)) +#define BD_NOW(bd, entry, len) (&((u8 *)(bd))[(entry) * (len)]) +#define BNXT_CQ_INTR_MODE(vf) (\ + ((vf) ? RING_ALLOC_REQ_INT_MODE_MSIX : RING_ALLOC_REQ_INT_MODE_POLL)) +/* Set default link timeout period to 1 second */ +#define LINK_DEFAULT_TIMEOUT 1000 +#define LINK_POLL_WAIT_TIME 100 /* In Milliseconds */ +#define RX_MASK (\ + RX_MASK_ACCEPT_BROADCAST | \ + RX_MASK_ACCEPT_ALL_MULTICAST | \ + RX_MASK_ACCEPT_MULTICAST) +#define MAX_NQ_DESC_CNT 64 +#define NQ_RING_BUFFER_SIZE (MAX_NQ_DESC_CNT * sizeof(struct cmpl_base)) +#define TX_RING_QID (bp->thor ? (u16)bp->queue_id : ((u16)bp->port_idx * 10)) +#define RX_RING_QID (bp->thor ? bp->queue_id : 0) +#define STAT_CTX_ID ((bp->vf || bp->thor) ? 
bp->stat_ctx_id : 0) +#define TX_AVAIL(r) (r - 1) +#define TX_IN_USE(a, b, c) ((a - b) & (c - 1)) +#define NO_MORE_NQ_BD_TO_SERVICE 1 +#define SERVICE_NEXT_NQ_BD 0 +#define NO_MORE_CQ_BD_TO_SERVICE 1 +#define SERVICE_NEXT_CQ_BD 0 +#define MAC_HDR_SIZE 12 +#define VLAN_HDR_SIZE 4 +#define ETHERTYPE_VLAN 0x8100 +#define BYTE_SWAP_S(w) (\ + (((w) & 0xff00) >> 8) | \ + (((w) & 0x00ff) << 8)) +#define DB_OFFSET_PF 0x10000 +#define DB_OFFSET_VF 0x4000 +#define DBC_MSG_IDX(idx) (\ + ((idx) << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK) +#define DBC_MSG_XID(xid, flg) (\ + (((xid) << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | \ + DBC_DBC_PATH_L2 | (flg)) +#define PHY_STATUS 0x0001 +#define PHY_SPEED 0x0002 +#define DETECT_MEDIA 0x0004 +#define SUPPORT_SPEEDS 0x0008 +#define QCFG_PHY_ALL (\ + SUPPORT_SPEEDS | DETECT_MEDIA | PHY_SPEED | PHY_STATUS) +#define str_mbps "Mbps" +#define str_gbps "Gbps" +/* + * Broadcom ethernet driver nvm defines. + */ +/* nvm cfg 203 - u32 link_settings */ +#define LINK_SPEED_DRV_NUM 203 +#define LINK_SPEED_DRV_MASK 0x0000000F +#define LINK_SPEED_DRV_SHIFT 0 +#define LINK_SPEED_DRV_AUTONEG 0x0 +#define NS_LINK_SPEED_DRV_AUTONEG 0x0 +#define LINK_SPEED_DRV_1G 0x1 +#define NS_LINK_SPEED_DRV_1G 0x1 +#define LINK_SPEED_DRV_10G 0x2 +#define NS_LINK_SPEED_DRV_10G 0x2 +#define LINK_SPEED_DRV_25G 0x3 +#define NS_LINK_SPEED_DRV_25G 0x3 +#define LINK_SPEED_DRV_40G 0x4 +#define NS_LINK_SPEED_DRV_40G 0x4 +#define LINK_SPEED_DRV_50G 0x5 +#define NS_LINK_SPEED_DRV_50G 0x5 +#define LINK_SPEED_DRV_100G 0x6 +#define NS_LINK_SPEED_DRV_100G 0x6 +#define LINK_SPEED_DRV_200G 0x7 +#define NS_LINK_SPEED_DRV_200G 0x7 +#define LINK_SPEED_DRV_2_5G 0xE +#define NS_LINK_SPEED_DRV_2_5G 0xE +#define LINK_SPEED_DRV_100M 0xF +#define NS_LINK_SPEED_DRV_100M 0xF +/* nvm cfg 201 - u32 speed_cap_mask */ +#define SPEED_CAPABILITY_DRV_MASK 0x0000FFFF +#define SPEED_CAPABILITY_DRV_SHIFT 0 +#define SPEED_CAPABILITY_DRV_1G 0x1 +#define NS_SPEED_CAPABILITY_DRV_1G 0x1 +#define 
SPEED_CAPABILITY_DRV_10G 0x2 +#define NS_SPEED_CAPABILITY_DRV_10G 0x2 +#define SPEED_CAPABILITY_DRV_25G 0x4 +#define NS_SPEED_CAPABILITY_DRV_25G 0x4 +#define SPEED_CAPABILITY_DRV_40G 0x8 +#define NS_SPEED_CAPABILITY_DRV_40G 0x8 +#define SPEED_CAPABILITY_DRV_50G 0x10 +#define NS_SPEED_CAPABILITY_DRV_50G 0x10 +#define SPEED_CAPABILITY_DRV_100G 0x20 +#define NS_SPEED_CAPABILITY_DRV_100G 0x20 +#define SPEED_CAPABILITY_DRV_200G 0x40 +#define NS_SPEED_CAPABILITY_DRV_200G 0x40 +#define SPEED_CAPABILITY_DRV_2_5G 0x4000 +#define NS_SPEED_CAPABILITY_DRV_2_5G 0x4000 +#define SPEED_CAPABILITY_DRV_100M 0x8000 +#define NS_SPEED_CAPABILITY_DRV_100M 0x8000 +/* nvm cfg 202 */ +#define SPEED_CAPABILITY_FW_MASK 0xFFFF0000 +#define SPEED_CAPABILITY_FW_SHIFT 16 +#define SPEED_CAPABILITY_FW_1G (0x1L << 16) +#define NS_SPEED_CAPABILITY_FW_1G (0x1) +#define SPEED_CAPABILITY_FW_10G (0x2L << 16) +#define NS_SPEED_CAPABILITY_FW_10G (0x2) +#define SPEED_CAPABILITY_FW_25G (0x4L << 16) +#define NS_SPEED_CAPABILITY_FW_25G (0x4) +#define SPEED_CAPABILITY_FW_40G (0x8L << 16) +#define NS_SPEED_CAPABILITY_FW_40G (0x8) +#define SPEED_CAPABILITY_FW_50G (0x10L << 16) +#define NS_SPEED_CAPABILITY_FW_50G (0x10) +#define SPEED_CAPABILITY_FW_100G (0x20L << 16) +#define NS_SPEED_CAPABILITY_FW_100G (0x20) +#define SPEED_CAPABILITY_FW_200G (0x40L << 16) +#define NS_SPEED_CAPABILITY_FW_200G (0x40) +#define SPEED_CAPABILITY_FW_2_5G (0x4000L << 16) +#define NS_SPEED_CAPABILITY_FW_2_5G (0x4000) +#define SPEED_CAPABILITY_FW_100M (0x8000UL << 16) +#define NS_SPEED_CAPABILITY_FW_100M (0x8000) +/* nvm cfg 205 */ +#define LINK_SPEED_FW_NUM 205 +#define LINK_SPEED_FW_MASK 0x00000780 +#define LINK_SPEED_FW_SHIFT 7 +#define LINK_SPEED_FW_AUTONEG (0x0L << 7) +#define NS_LINK_SPEED_FW_AUTONEG (0x0) +#define LINK_SPEED_FW_1G (0x1L << 7) +#define NS_LINK_SPEED_FW_1G (0x1) +#define LINK_SPEED_FW_10G (0x2L << 7) +#define NS_LINK_SPEED_FW_10G (0x2) +#define LINK_SPEED_FW_25G (0x3L << 7) +#define NS_LINK_SPEED_FW_25G (0x3) 
+#define LINK_SPEED_FW_40G (0x4L << 7) +#define NS_LINK_SPEED_FW_40G (0x4) +#define LINK_SPEED_FW_50G (0x5L << 7) +#define NS_LINK_SPEED_FW_50G (0x5) +#define LINK_SPEED_FW_100G (0x6L << 7) +#define NS_LINK_SPEED_FW_100G (0x6) +#define LINK_SPEED_FW_200G (0x7L << 7) +#define NS_LINK_SPEED_FW_200G (0x7) +#define LINK_SPEED_FW_2_5G (0xEL << 7) +#define NS_LINK_SPEED_FW_2_5G (0xE) +#define LINK_SPEED_FW_100M (0xFL << 7) +#define NS_LINK_SPEED_FW_100M (0xF) +/* nvm cfg 210 */ +#define D3_LINK_SPEED_FW_NUM 210 +#define D3_LINK_SPEED_FW_MASK 0x000F0000 +#define D3_LINK_SPEED_FW_SHIFT 16 +#define D3_LINK_SPEED_FW_AUTONEG (0x0L << 16) +#define NS_D3_LINK_SPEED_FW_AUTONEG (0x0) +#define D3_LINK_SPEED_FW_1G (0x1L << 16) +#define NS_D3_LINK_SPEED_FW_1G (0x1) +#define D3_LINK_SPEED_FW_10G (0x2L << 16) +#define NS_D3_LINK_SPEED_FW_10G (0x2) +#define D3_LINK_SPEED_FW_25G (0x3L << 16) +#define NS_D3_LINK_SPEED_FW_25G (0x3) +#define D3_LINK_SPEED_FW_40G (0x4L << 16) +#define NS_D3_LINK_SPEED_FW_40G (0x4) +#define D3_LINK_SPEED_FW_50G (0x5L << 16) +#define NS_D3_LINK_SPEED_FW_50G (0x5) +#define D3_LINK_SPEED_FW_100G (0x6L << 16) +#define NS_D3_LINK_SPEED_FW_100G (0x6) +#define D3_LINK_SPEED_FW_200G (0x7L << 16) +#define NS_D3_LINK_SPEED_FW_200G (0x7) +#define D3_LINK_SPEED_FW_2_5G (0xEL << 16) +#define NS_D3_LINK_SPEED_FW_2_5G (0xE) +#define D3_LINK_SPEED_FW_100M (0xFL << 16) +#define NS_D3_LINK_SPEED_FW_100M (0xF) +/* nvm cfg 211 */ +#define D3_FLOW_CONTROL_FW_NUM 211 +#define D3_FLOW_CONTROL_FW_MASK 0x00700000 +#define D3_FLOW_CONTROL_FW_SHIFT 20 +#define D3_FLOW_CONTROL_FW_AUTO (0x0L << 20) +#define NS_D3_FLOW_CONTROL_FW_AUTO (0x0) +#define D3_FLOW_CONTROL_FW_TX (0x1L << 20) +#define NS_D3_FLOW_CONTROL_FW_TX (0x1) +#define D3_FLOW_CONTROL_FW_RX (0x2L << 20) +#define NS_D3_FLOW_CONTROL_FW_RX (0x2) +#define D3_FLOW_CONTROL_FW_BOTH (0x3L << 20) +#define NS_D3_FLOW_CONTROL_FW_BOTH (0x3) +#define D3_FLOW_CONTROL_FW_NONE (0x4L << 20) +#define NS_D3_FLOW_CONTROL_FW_NONE (0x4) +/* nvm 
cfg 213 */ +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM 213 +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_MASK 0x02000000 +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_SHIFT 25 +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_DISABLED (0x0L << 25) +#define NS_PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_DISABLED (0x0) +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_ENABLED (0x1L << 25) +#define NS_PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_ENABLED (0x1) +/* nvm cfg 357 - u32 mba_cfg2 */ +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM 357 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_MASK 0x0000FFFF +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_SHIFT 0 +/* nvm cfg 358 - u32 mba_cfg2 */ +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM 358 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_MASK 0x00010000 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_SHIFT 16 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_DISABLED (0x0L << 16) +#define NS_FUNC_CFG_PRE_BOOT_MBA_VLAN_DISABLED (0x0) +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED (0x1L << 16) +#define NS_FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED (0x1) + +struct tx_doorbell { + u32 key_idx; +#define TX_DOORBELL_IDX_MASK 0xffffffUL +#define TX_DOORBELL_IDX_SFT 0 +#define TX_DOORBELL_KEY_MASK 0xf0000000UL +#define TX_DOORBELL_KEY_SFT 28 + #define TX_DOORBELL_KEY_TX (0x0UL << 28) + #define TX_DOORBELL_KEY_LAST TX_DOORBELL_KEY_TX +}; + +struct rx_doorbell { + u32 key_idx; +#define RX_DOORBELL_IDX_MASK 0xffffffUL +#define RX_DOORBELL_IDX_SFT 0 +#define RX_DOORBELL_KEY_MASK 0xf0000000UL +#define RX_DOORBELL_KEY_SFT 28 + #define RX_DOORBELL_KEY_RX (0x1UL << 28) + #define RX_DOORBELL_KEY_LAST RX_DOORBELL_KEY_RX +}; + +struct cmpl_doorbell { + u32 key_mask_valid_idx; +#define CMPL_DOORBELL_IDX_MASK 0xffffffUL +#define CMPL_DOORBELL_IDX_SFT 0 +#define CMPL_DOORBELL_IDX_VALID 0x4000000UL +#define CMPL_DOORBELL_MASK 0x8000000UL +#define CMPL_DOORBELL_KEY_MASK 0xf0000000UL +#define CMPL_DOORBELL_KEY_SFT 28 + #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28) + #define CMPL_DOORBELL_KEY_LAST 
CMPL_DOORBELL_KEY_CMPL +}; + +/* dbc_dbc (size:64b/8B) */ +struct dbc_dbc { + __le32 index; + #define DBC_DBC_INDEX_MASK 0xffffffUL + #define DBC_DBC_INDEX_SFT 0 + __le32 type_path_xid; + #define DBC_DBC_XID_MASK 0xfffffUL + #define DBC_DBC_XID_SFT 0 + #define DBC_DBC_PATH_MASK 0x3000000UL + #define DBC_DBC_PATH_SFT 24 + #define DBC_DBC_PATH_ROCE (0x0UL << 24) + #define DBC_DBC_PATH_L2 (0x1UL << 24) + #define DBC_DBC_PATH_ENGINE (0x2UL << 24) + #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE + #define DBC_DBC_DEBUG_TRACE 0x8000000UL + #define DBC_DBC_TYPE_MASK 0xf0000000UL + #define DBC_DBC_TYPE_SFT 28 + #define DBC_DBC_TYPE_SQ (0x0UL << 28) + #define DBC_DBC_TYPE_RQ (0x1UL << 28) + #define DBC_DBC_TYPE_SRQ (0x2UL << 28) + #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_DBC_TYPE_CQ (0x4UL << 28) + #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) + #define DBC_DBC_TYPE_NQ (0xaUL << 28) + #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_DBC_TYPE_NULL (0xfUL << 28) + #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL +}; + +/******************************************************************************* + * Transmit info. 
+ *****************************************************************************/ +struct tx_bd_short { + u16 flags_type; +#define TX_BD_SHORT_TYPE_MASK 0x3fUL +#define TX_BD_SHORT_TYPE_SFT 0 +#define TX_BD_SHORT_TYPE_TX_BD_SHORT 0x0UL +#define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT +#define TX_BD_SHORT_FLAGS_MASK 0xffc0UL +#define TX_BD_SHORT_FLAGS_SFT 6 +#define TX_BD_SHORT_FLAGS_PACKET_END 0x40UL +#define TX_BD_SHORT_FLAGS_NO_CMPL 0x80UL +#define TX_BD_SHORT_FLAGS_BD_CNT_MASK 0x1f00UL +#define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8 +#define TX_BD_SHORT_FLAGS_LHINT_MASK 0x6000UL +#define TX_BD_SHORT_FLAGS_LHINT_SFT 13 +#define TX_BD_SHORT_FLAGS_LHINT_LT512 (0x0UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_LT1K (0x1UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_LT2K (0x2UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_GTE2K (0x3UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K +#define TX_BD_SHORT_FLAGS_COAL_NOW 0x8000UL + u16 len; + u32 opaque; + union dma_addr64_t dma; +}; + +struct tx_cmpl { + u16 flags_type; +#define TX_CMPL_TYPE_MASK 0x3fUL +#define TX_CMPL_TYPE_SFT 0 +#define TX_CMPL_TYPE_TX_L2 0x0UL +#define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2 +#define TX_CMPL_FLAGS_MASK 0xffc0UL +#define TX_CMPL_FLAGS_SFT 6 +#define TX_CMPL_FLAGS_ERROR 0x40UL +#define TX_CMPL_FLAGS_PUSH 0x80UL + u16 unused_0; + u32 opaque; + u16 errors_v; +#define TX_CMPL_V 0x1UL +#define TX_CMPL_ERRORS_MASK 0xfffeUL +#define TX_CMPL_ERRORS_SFT 1 +#define TX_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL +#define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 +#define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (0x0UL << 1) +#define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (0x2UL << 1) +#define TX_CMPL_ERRORS_BUFFER_ERROR_LAST TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT +#define TX_CMPL_ERRORS_ZERO_LENGTH_PKT 0x10UL +#define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH 0x20UL +#define TX_CMPL_ERRORS_DMA_ERROR 0x40UL +#define TX_CMPL_ERRORS_HINT_TOO_SHORT 0x80UL +#define TX_CMPL_ERRORS_POISON_TLP_ERROR 0x100UL + u16 
unused_1; + u32 unused_2; +}; + +struct tx_info { + void *bd_virt; + struct io_buffer *iob[MAX_TX_DESC_CNT]; + u16 prod_id; /* Tx producer index. */ + u16 cons_id; + u16 ring_cnt; + u32 cnt; /* Tx statistics. */ + u32 cnt_req; +}; + +struct cmpl_base { + u16 type; +#define CMPL_BASE_TYPE_MASK 0x3fUL +#define CMPL_BASE_TYPE_SFT 0 +#define CMPL_BASE_TYPE_TX_L2 0x0UL +#define CMPL_BASE_TYPE_RX_L2 0x11UL +#define CMPL_BASE_TYPE_RX_AGG 0x12UL +#define CMPL_BASE_TYPE_RX_TPA_START 0x13UL +#define CMPL_BASE_TYPE_RX_TPA_END 0x15UL +#define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL +#define CMPL_BASE_TYPE_HWRM_DONE 0x20UL +#define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL +#define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL +#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL +#define CMPL_BASE_TYPE_CQ_NOTIFICATION 0x30UL +#define CMPL_BASE_TYPE_SRQ_EVENT 0x32UL +#define CMPL_BASE_TYPE_DBQ_EVENT 0x34UL +#define CMPL_BASE_TYPE_QP_EVENT 0x38UL +#define CMPL_BASE_TYPE_FUNC_EVENT 0x3aUL +#define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT + u16 info1; + u32 info2; + u32 info3_v; +#define CMPL_BASE_V 0x1UL +#define CMPL_BASE_INFO3_MASK 0xfffffffeUL +#define CMPL_BASE_INFO3_SFT 1 + u32 info4; +}; + +struct cmp_info { + void *bd_virt; + u16 cons_id; + u16 ring_cnt; + u8 completion_bit; + u8 res[3]; +}; + +/* Completion Queue Notification */ +/* nq_cn (size:128b/16B) */ +struct nq_base { + u16 type; +/* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ +#define NQ_CN_TYPE_MASK 0x3fUL +#define NQ_CN_TYPE_SFT 0 +/* CQ Notification */ + #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL + #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION + u16 reserved16; +/* + * This is an application level ID used to identify the + * CQ. This field carries the lower 32b of the value. 
+ */ + u32 cq_handle_low; + u32 v; +/* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ +#define NQ_CN_V 0x1UL +/* + * This is an application level ID used to identify the + * CQ. This field carries the upper 32b of the value. + */ + u32 cq_handle_high; +}; + +struct nq_info { + void *bd_virt; + u16 cons_id; + u16 ring_cnt; + u8 completion_bit; + u8 res[3]; +}; + +struct rx_pkt_cmpl { + u16 flags_type; +#define RX_PKT_CMPL_TYPE_MASK 0x3fUL +#define RX_PKT_CMPL_TYPE_SFT 0 +#define RX_PKT_CMPL_TYPE_RX_L2 0x11UL +#define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2 +#define RX_PKT_CMPL_FLAGS_MASK 0xffc0UL +#define RX_PKT_CMPL_FLAGS_SFT 6 +#define RX_PKT_CMPL_FLAGS_ERROR 0x40UL +#define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK 0x380UL +#define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7 +#define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (0x0UL << 7) +#define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (0x1UL << 7) +#define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (0x2UL << 7) +#define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_CMPL_FLAGS_PLACEMENT_HDS +#define RX_PKT_CMPL_FLAGS_RSS_VALID 0x400UL +#define RX_PKT_CMPL_FLAGS_UNUSED 0x800UL +#define RX_PKT_CMPL_FLAGS_ITYPE_MASK 0xf000UL +#define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 +#define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_IP (0x1UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_TCP (0x2UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_UDP (0x3UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (0x4UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (0x5UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (0x7UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_LAST RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP + u16 len; + u32 opaque; + u8 agg_bufs_v1; +#define RX_PKT_CMPL_V1 0x1UL +#define 
RX_PKT_CMPL_AGG_BUFS_MASK 0x3eUL +#define RX_PKT_CMPL_AGG_BUFS_SFT 1 +#define RX_PKT_CMPL_UNUSED1_MASK 0xc0UL +#define RX_PKT_CMPL_UNUSED1_SFT 6 + u8 rss_hash_type; + u8 payload_offset; + u8 unused1; + u32 rss_hash; +}; + +struct rx_pkt_cmpl_hi { + u32 flags2; +#define RX_PKT_CMPL_FLAGS2_IP_CS_CALC 0x1UL +#define RX_PKT_CMPL_FLAGS2_L4_CS_CALC 0x2UL +#define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC 0x4UL +#define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC 0x8UL +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK 0xf0UL +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4 +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (0x0UL << 4) +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (0x1UL << 4) +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN +#define RX_PKT_CMPL_FLAGS2_IP_TYPE 0x100UL + u32 metadata; +#define RX_PKT_CMPL_METADATA_VID_MASK 0xfffUL +#define RX_PKT_CMPL_METADATA_VID_SFT 0 +#define RX_PKT_CMPL_METADATA_DE 0x1000UL +#define RX_PKT_CMPL_METADATA_PRI_MASK 0xe000UL +#define RX_PKT_CMPL_METADATA_PRI_SFT 13 +#define RX_PKT_CMPL_METADATA_TPID_MASK 0xffff0000UL +#define RX_PKT_CMPL_METADATA_TPID_SFT 16 + u16 errors_v2; +#define RX_PKT_CMPL_V2 0x1UL +#define RX_PKT_CMPL_ERRORS_MASK 0xfffeUL +#define RX_PKT_CMPL_ERRORS_SFT 1 +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT +#define RX_PKT_CMPL_ERRORS_IP_CS_ERROR 0x10UL +#define RX_PKT_CMPL_ERRORS_L4_CS_ERROR 0x20UL +#define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR 0x40UL +#define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR 0x80UL +#define RX_PKT_CMPL_ERRORS_CRC_ERROR 0x100UL +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK 0xe00UL +#define 
RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK 0xf000UL +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + u16 cfa_code; + u32 reorder; +#define RX_PKT_CMPL_REORDER_MASK 0xffffffUL +#define RX_PKT_CMPL_REORDER_SFT 0 +}; + +struct rx_prod_pkt_bd { + u16 flags_type; +#define RX_PROD_PKT_BD_TYPE_MASK 0x3fUL +#define RX_PROD_PKT_BD_TYPE_SFT 0 +#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT 0x4UL +#define RX_PROD_PKT_BD_TYPE_LAST RX_PROD_PKT_BD_TYPE_RX_PROD_PKT +#define RX_PROD_PKT_BD_FLAGS_MASK 0xffc0UL +#define RX_PROD_PKT_BD_FLAGS_SFT 6 +#define RX_PROD_PKT_BD_FLAGS_SOP_PAD 0x40UL +#define RX_PROD_PKT_BD_FLAGS_EOP_PAD 0x80UL +#define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK 0x300UL +#define 
RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 + u16 len; + u32 opaque; + union dma_addr64_t dma; +}; + +struct rx_info { + void *bd_virt; + struct io_buffer *iob[NUM_RX_BUFFERS]; + u16 iob_cnt; + u16 buf_cnt; /* Total Rx buffer descriptors. */ + u16 ring_cnt; + u16 cons_id; /* Last processed consumer index. */ +/* Receive statistics. */ + u32 cnt; + u32 good; + u32 drop_err; + u32 drop_lb; + u32 drop_vlan; +}; + +#define VALID_DRIVER_REG 0x0001 +#define VALID_STAT_CTX 0x0002 +#define VALID_RING_CQ 0x0004 +#define VALID_RING_TX 0x0008 +#define VALID_RING_RX 0x0010 +#define VALID_RING_GRP 0x0020 +#define VALID_VNIC_ID 0x0040 +#define VALID_RX_IOB 0x0080 +#define VALID_L2_FILTER 0x0100 +#define VALID_RING_NQ 0x0200 + +struct bnxt { +/* begin "general, frequently-used members" cacheline section */ +/* If the IRQ handler (which runs lockless) needs to be + * quiesced, the following bitmask state is used. The + * SYNC flag is set by non-IRQ context code to initiate + * the quiescence. + * + * When the IRQ handler notices that SYNC is set, it + * disables interrupts and returns. + * + * When all outstanding IRQ handlers have returned after + * the SYNC flag has been set, the setter can be assured + * that interrupts will no longer get run. + * + * In this way all SMP driver locks are never acquired + * in hw IRQ context, only sw IRQ context or lower. + */ + unsigned int irq_sync; + struct net_device *dev; + struct pci_device *pdev; + void *hwrm_addr_req; + void *hwrm_addr_resp; + void *hwrm_addr_dma; + dma_addr_t req_addr_mapping; + dma_addr_t resp_addr_mapping; + dma_addr_t dma_addr_mapping; + struct tx_info tx; /* Tx info. */ + struct rx_info rx; /* Rx info. */ + struct cmp_info cq; /* completion info. */ + struct nq_info nq; /* completion info. */ + u16 nq_ring_id; + u8 queue_id; + u8 thor; + u16 last_resp_code; + u16 seq_id; + u32 flag_hwrm; + u32 flags; +/* PCI info. 
*/ + u16 subsystem_vendor; + u16 subsystem_device; + u16 cmd_reg; + u8 pf_num; /* absolute PF number */ + u8 vf; + void *bar0; + void *bar1; + void *bar2; +/* Device info. */ + u16 chip_num; +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + u32 chip_id; + u32 hwrm_cmd_timeout; + u16 hwrm_spec_code; + u16 hwrm_max_req_len; + u16 hwrm_max_ext_req_len; + u8 mac_addr[ETH_ALEN]; /* HW MAC address */ + u16 fid; + u8 port_idx; + u8 ordinal_value; + u16 mtu; + u16 ring_grp_id; + u16 cq_ring_id; + u16 tx_ring_id; + u16 rx_ring_id; + u16 current_link_speed; + u16 link_status; + u16 wait_link_timeout; + u64 l2_filter_id; + u16 vnic_id; + u16 stat_ctx_id; + u16 vlan_id; + u16 vlan_tx; + u32 mba_cfg2; + u32 medium; + u16 support_speeds; + u32 link_set; + u8 media_detect; + u8 rsvd; + u16 max_vfs; + u16 vf_res_strategy; + u16 min_vnics; + u16 max_vnics; + u16 max_msix; + u16 min_hw_ring_grps; + u16 max_hw_ring_grps; + u16 min_tx_rings; + u16 max_tx_rings; + u16 min_rx_rings; + u16 max_rx_rings; + u16 min_cp_rings; + u16 max_cp_rings; + u16 min_rsscos_ctxs; + u16 max_rsscos_ctxs; + u16 min_l2_ctxs; + u16 max_l2_ctxs; + u16 min_stat_ctxs; + u16 max_stat_ctxs; + u16 num_cmpl_rings; + u16 num_tx_rings; + u16 num_rx_rings; + u16 num_stat_ctxs; + u16 num_hw_ring_grps; +}; + +/* defines required to rsolve checkpatch errors / warnings */ +#define test_if if +#define write32 writel +#define write64 writeq +#define pci_read_byte pci_read_config_byte +#define pci_read_word16 pci_read_config_word +#define pci_write_word pci_write_config_word +#define SHORT_CMD_SUPPORTED VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED +#define SHORT_CMD_REQUIRED VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED +#define CQ_DOORBELL_KEY_MASK(a) (\ + CMPL_DOORBELL_KEY_CMPL | \ + CMPL_DOORBELL_IDX_VALID | \ + CMPL_DOORBELL_MASK | \ + (u32)(a)) +#define CQ_DOORBELL_KEY_IDX(a) (\ + CMPL_DOORBELL_KEY_CMPL | \ + CMPL_DOORBELL_IDX_VALID | \ + (u32)(a)) +#define TX_BD_FLAGS (\ + TX_BD_SHORT_TYPE_TX_BD_SHORT |\ + 
TX_BD_SHORT_FLAGS_COAL_NOW |\ + TX_BD_SHORT_FLAGS_PACKET_END |\ + (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT)) +#define PORT_PHY_FLAGS (\ + BNXT_FLAG_NPAR_MODE | \ + BNXT_FLAG_MULTI_HOST) +#define RING_FREE(bp, rid, flag) bnxt_hwrm_ring_free(bp, rid, flag) +#define SET_LINK(p, m, s) ((p & (m >> s)) << s) +#define SET_MBA(p, m, s) ((p & (m >> s)) << s) +#define SPEED_DRV_MASK LINK_SPEED_DRV_MASK +#define SPEED_DRV_SHIFT LINK_SPEED_DRV_SHIFT +#define SPEED_FW_MASK LINK_SPEED_FW_MASK +#define SPEED_FW_SHIFT LINK_SPEED_FW_SHIFT +#define D3_SPEED_FW_MASK D3_LINK_SPEED_FW_MASK +#define D3_SPEED_FW_SHIFT D3_LINK_SPEED_FW_SHIFT +#define MEDIA_AUTO_DETECT_MASK PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_MASK +#define MEDIA_AUTO_DETECT_SHIFT PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_SHIFT +#define VLAN_MASK FUNC_CFG_PRE_BOOT_MBA_VLAN_MASK +#define VLAN_SHIFT FUNC_CFG_PRE_BOOT_MBA_VLAN_SHIFT +#define VLAN_VALUE_MASK FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_MASK +#define VLAN_VALUE_SHIFT FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_SHIFT +#define VF_CFG_ENABLE_FLAGS (\ + FUNC_VF_CFG_REQ_ENABLES_MTU | \ + FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN | \ + FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR | \ + FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR) + +/* Device ID's */ +#define PCI_VID_BCOM 0x14e4 +#define CHIP_NUM_57500 0x1750 + +#define DID_57508 0x1750 +#define DID_57508_MF 0x1802 +#define DID_57508_MF_RDMA 0x1805 +#define DID_57504 0x1751 +#define DID_57504_MF 0x1801 +#define DID_57504_MF_RDMA 0x1804 +#define DID_57502 0x1752 +#define DID_57502_MF 0x1800 +#define DID_57502_MF_RDMA 0x1803 +#define DID_57508_VF 0x1806 +#define DID_57508_VF_RDMA 0x1807 +#define DID_57508_VF_HV 0x1806 +#define DID_57508_VF_RDMA_HV 0x1807 +/* Stratus Device IDs */ +#define DID_57320_1 0x16F0 +#define DID_57320_2 0x16F1 +#define DID_57454_MHB 0x1604 +#define DID_57454_MHB_RDMA 0x1605 +#define DID_57454_VF_RDMA 0x1606 +#define DID_57454_VF 0x1609 +#define DID_57454 0x1614 +#define DID_58802 0xD802 +#define DID_58804 0xD804 + +#define 
DID_57417_RDMA_MF 0x16C0 +#define DID_57417_VF_RDMA 0x16c1 +#define DID_57301 0x16C8 +#define DID_57302 0x16C9 +#define DID_57304 0x16CA +#define DID_57417_MF 0x16CC +#define DID_58700 0x16CD +#define DID_57311 0x16CE +#define DID_57312 0x16CF +#define DID_57402 0x16D0 +#define DID_57404 0x16D1 +#define DID_57406 0x16D2 +#define DID_57402_MF 0x16D4 +#define DID_57407C 0x16D5 +#define DID_57412 0x16D6 +#define DID_57414 0x16D7 +#define DID_57416C 0x16D8 +#define DID_57417C 0x16D9 +#define DID_57402L 0x16DA +#define DID_57404L 0x16DB +#define DID_57417_VF 0x16dc +#define DID_57412_MF 0x16DE +#define DID_57314 0x16DF +#define DID_57317C 0x16E0 +#define DID_57417F 0x16E2 +#define DID_57416F 0x16E3 +#define DID_57317F 0x16E4 +#define DID_57404_MF 0x16E7 +#define DID_57406_MF 0x16E8 +#define DID_57407F 0x16E9 +#define DID_57407_MF 0x16EA +#define DID_57412_RDMA_MF 0x16EB +#define DID_57414_MF 0x16EC +#define DID_57414_RDMA_MF 0x16ED +#define DID_57416_MF 0x16EE +#define DID_57416_RDMA_MF 0x16EF + +static struct pci_device_id bnxt_nics[] = { + PCI_ROM(PCI_VID_BCOM, DID_57417_RDMA_MF, "14e4-16C0", "14e4-16C0", 0), + PCI_ROM(PCI_VID_BCOM, DID_57417_VF_RDMA, "14e4-16C1", "14e4-16C1", 0), + PCI_ROM(PCI_VID_BCOM, DID_57301, "14e4-16C8", "14e4-16C8", 0), + PCI_ROM(PCI_VID_BCOM, DID_57302, "14e4-16C9", "14e4-16C9", 0), + PCI_ROM(PCI_VID_BCOM, DID_57304, "14e4-16CA", "14e4-16CA", 0), + PCI_ROM(PCI_VID_BCOM, DID_57417_MF, "14e4-16CC", "14e4-16CC", 0), + PCI_ROM(PCI_VID_BCOM, DID_58700, "14e4-16CD", "14e4-16CD", 0), + PCI_ROM(PCI_VID_BCOM, DID_57311, "14e4-16CE", "14e4-16CE", 0), + PCI_ROM(PCI_VID_BCOM, DID_57312, "14e4-16CF", "14e4-16CF", 0), + PCI_ROM(PCI_VID_BCOM, DID_57402, "14e4-16D0", "14e4-16D0", 0), + PCI_ROM(PCI_VID_BCOM, DID_57404, "14e4-16D1", "14e4-16D1", 0), + PCI_ROM(PCI_VID_BCOM, DID_57406, "14e4-16D2", "14e4-16D2", 0), + PCI_ROM(PCI_VID_BCOM, DID_57402_MF, "14e4-16D4", "14e4-16D4", 0), + PCI_ROM(PCI_VID_BCOM, DID_57407C, "14e4-16D5", "14e4-16D5", 0), + 
PCI_ROM(PCI_VID_BCOM, DID_57412, "14e4-16D6", "14e4-16D6", 0), + PCI_ROM(PCI_VID_BCOM, DID_57414, "14e4-16D7", "14e4-16D7", 0), + PCI_ROM(PCI_VID_BCOM, DID_57416C, "14e4-16D8", "14e4-16D8", 0), + PCI_ROM(PCI_VID_BCOM, DID_57417C, "14e4-16D9", "14e4-16D9", 0), + PCI_ROM(PCI_VID_BCOM, DID_57402L, "14e4-16DA", "14e4-16DA", 0), + PCI_ROM(PCI_VID_BCOM, DID_57404L, "14e4-16DB", "14e4-16DB", 0), + PCI_ROM(PCI_VID_BCOM, DID_57417_VF, "14e4-16DC", "14e4-16DC", 0), + PCI_ROM(PCI_VID_BCOM, DID_57412_MF, "14e4-16DE", "14e4-16DE", 0), + PCI_ROM(PCI_VID_BCOM, DID_57314, "14e4-16DF", "14e4-16DF", 0), + PCI_ROM(PCI_VID_BCOM, DID_57317C, "14e4-16E0", "14e4-16E0", 0), + PCI_ROM(PCI_VID_BCOM, DID_57417F, "14e4-16E2", "14e4-16E2", 0), + PCI_ROM(PCI_VID_BCOM, DID_57416F, "14e4-16E3", "14e4-16E3", 0), + PCI_ROM(PCI_VID_BCOM, DID_57317F, "14e4-16E4", "14e4-16E4", 0), + PCI_ROM(PCI_VID_BCOM, DID_57404_MF, "14e4-16E7", "14e4-16E7", 0), + PCI_ROM(PCI_VID_BCOM, DID_57406_MF, "14e4-16E8", "14e4-16E8", 0), + PCI_ROM(PCI_VID_BCOM, DID_57407F, "14e4-16E9", "14e4-16E9", 0), + PCI_ROM(PCI_VID_BCOM, DID_57407_MF, "14e4-16EA", "14e4-16EA", 0), + PCI_ROM(PCI_VID_BCOM, DID_57412_RDMA_MF, "14e4-16EB", "14e4-16EB", 0), + PCI_ROM(PCI_VID_BCOM, DID_57414_MF, "14e4-16EC", "14e4-16EC", 0), + PCI_ROM(PCI_VID_BCOM, DID_57414_RDMA_MF, "14e4-16ED", "14e4-16ED", 0), + PCI_ROM(PCI_VID_BCOM, DID_57416_MF, "14e4-16EE", "14e4-16EE", 0), + PCI_ROM(PCI_VID_BCOM, DID_57416_RDMA_MF, "14e4-16EF", "14e4-16EF", 0), + + PCI_ROM(PCI_VID_BCOM, DID_57320_1, "14e4-16F0", "14e4-16F0", 0), + PCI_ROM(PCI_VID_BCOM, DID_57320_2, "14e4-16F1", "14e4-16F1", 0), + PCI_ROM(PCI_VID_BCOM, DID_57454_MHB, "14e4-1604", "14e4-1604", 0), + PCI_ROM(PCI_VID_BCOM, DID_57454_MHB_RDMA, "14e4-1605", "14e4-1605", 0), + PCI_ROM(PCI_VID_BCOM, DID_57454_VF_RDMA, "14e4-1606", "14e4-1606", 0), + PCI_ROM(PCI_VID_BCOM, DID_57454_VF, "14e4-1609", "14e4-1609", 0), + PCI_ROM(PCI_VID_BCOM, DID_57454, "14e4-1614", "14e4-1614", 0), + PCI_ROM(PCI_VID_BCOM, 
DID_58802, "14e4-D802", "14e4-D802", 0), + PCI_ROM(PCI_VID_BCOM, DID_58804, "14e4-D804", "14e4-D804", 0), + + PCI_ROM(PCI_VID_BCOM, DID_57508, "14e4-1750", "14e4-1750", 0), + PCI_ROM(PCI_VID_BCOM, DID_57508_MF, "14e4-1802", "14e4-1802", 0), + PCI_ROM(PCI_VID_BCOM, DID_57508_MF_RDMA, "14e4-1805", "14e4-1805", 0), + PCI_ROM(PCI_VID_BCOM, DID_57504, "14e4-1751", "14e4-1751", 0), + PCI_ROM(PCI_VID_BCOM, DID_57504_MF, "14e4-1801", "14e4-1801", 0), + PCI_ROM(PCI_VID_BCOM, DID_57504_MF_RDMA, "14e4-1804", "14e4-1804", 0), + PCI_ROM(PCI_VID_BCOM, DID_57502, "14e4-1752", "14e4-1752", 0), + PCI_ROM(PCI_VID_BCOM, DID_57502_MF, "14e4-1800", "14e4-1800", 0), + PCI_ROM(PCI_VID_BCOM, DID_57502_MF_RDMA, "14e4-1803", "14e4-1803", 0), + PCI_ROM(PCI_VID_BCOM, DID_57508_VF, "14e4-1806", "14e4-1806", 0), + PCI_ROM(PCI_VID_BCOM, DID_57508_VF_RDMA, "14e4-1807", "14e4-1807", 0), + PCI_ROM(PCI_VID_BCOM, DID_57508_VF_HV, "14e4-1808", "14e4-1808", 0), + PCI_ROM(PCI_VID_BCOM, + DID_57508_VF_RDMA_HV, "14e4-1809", "14e4-1809", 0), +}; + +static u16 bnxt_vf_nics[] = { + DID_57508_VF, + DID_57508_VF_RDMA, + DID_57508_VF_HV, + DID_57508_VF_RDMA_HV, + DID_57417_VF, + DID_57417_VF_RDMA, +}; diff --git a/src/drivers/net/bnxt/bnxt_dbg.h b/src/drivers/net/bnxt/bnxt_dbg.h new file mode 100644 index 00000000..188978ad --- /dev/null +++ b/src/drivers/net/bnxt/bnxt_dbg.h @@ -0,0 +1,677 @@ +/* + * Copyright © 2018 Broadcom. All Rights Reserved. + * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + + * This program is free software; you can redistribute it and/or modify it under + * the terms of version 2 of the GNU General Public License as published by the + * Free Software Foundation. + + * This program is distributed in the hope that it will be useful. 
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING + * ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR + * NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS + * ARE HELD TO BE LEGALLY INVALID. See the GNU General Public License for more + * details, a copy of which can be found in the file COPYING included with this + * package. + */ + +//#define DEBUG_DRV +//#define DEBUG_KEY +//#define DEBUG_PCI +//#define DEBUG_MEMORY +//#define DEBUG_LINK +//#define DEBUG_CHIP +//#define DEBUG_FAIL +//#define DEBUG_HWRM_CMDS +//#define DEBUG_HWRM_DUMP +//#define DEBUG_CQ +//#define DEBUG_CQ_DUMP +//#define DEBUG_TX +//#define DEBUG_TX_DUMP +//#define DEBUG_RX +//#define DEBUG_RX_DUMP + +#if \ + defined(DEBUG_DRV) || \ + defined(DEBUG_PCI) || \ + defined(DEBUG_CHIP) || \ + defined(DEBUG_MEMORY) || \ + defined(DEBUG_LINK) || \ + defined(DEBUG_FAIL) || \ + defined(DEBUG_HWRM_CMDS) || \ + defined(DEBUG_HWRM_DUMP) || \ + defined(DEBUG_CQ) || \ + defined(DEBUG_CQ_DUMP) || \ + defined(DEBUG_TX) || \ + defined(DEBUG_TX_DUMP) || \ + defined(DEBUG_RX) || \ + defined(DEBUG_RX_DUMP) +#define DEBUG_DEFAULT +#endif +#if defined(DEBUG_DEFAULT) +#define dbg_prn printf + +void pause_drv(void) +{ +#if defined(DEBUG_KEY) + dbg_prn(" Press a key..."); + getchar(); +#endif + dbg_prn("\n"); +} + +#define MAX_CHAR_SIZE(a) (u32)((1 << (a)) - 1) +#define DISP_U8 0x00 +#define DISP_U16 0x01 +#define DISP_U32 0x02 +#define DISP_U64 0x03 + +void dumpmemory1(u8 *buffer, u32 length, u8 flag) +{ + u32 jj = 0; + u8 i, c; + + dbg_prn("\n %p:", buffer); + for (jj = 0; jj < 16; jj++) { + if (!(jj & MAX_CHAR_SIZE(flag))) + dbg_prn(" "); + if (jj < length) + dbg_prn("%02x", buffer[jj]); + else + dbg_prn(" "); + if ((jj & 0xF) == 0xF) { + dbg_prn(" "); + for (i = 0; i < 16; i++) { + if (i < length) { + c = buffer[jj + i - 15]; + if (c >= 0x20 && c < 0x7F) + ; + else + c = '.'; + dbg_prn("%c", c); + } + } + } + } +} + +void 
dump_mem(u8 *buffer, u32 length, u8 flag) +{ + u32 length16, remlen, jj; + + length16 = length & 0xFFFFFFF0; + remlen = length & 0xF; + for (jj = 0; jj < length16; jj += 16) + dumpmemory1((u8 *)&buffer[jj], 16, flag); + if (remlen) + dumpmemory1((u8 *)&buffer[length16], remlen, flag); + if (length16 || remlen) + dbg_prn("\n"); +} +#else +#define dbg_prn(func) +#endif + +#if defined(DEBUG_PCI) +void dbg_pci(struct bnxt *bp, const char *func, u16 cmd_reg) +{ + struct pci_device *pdev = bp->pdev; + + dbg_prn("- %s()\n", func); + dbg_prn(" Bus:Dev:Func : %04X\n", pdev->busdevfn); + dbg_prn(" Vendor id : %04X\n", pdev->vendor); + dbg_prn(" Device id : %04X (%cF)\n", + pdev->device, (bp->vf) ? 'V' : 'P'); + dbg_prn(" Irq : %d\n", pdev->irq); + dbg_prn(" PCI Command Reg : %04X\n", cmd_reg); + dbg_prn(" Sub Vendor id : %04X\n", bp->subsystem_vendor); + dbg_prn(" Sub Device id : %04X\n", bp->subsystem_device); + dbg_prn(" PF Number : %X\n", bp->pf_num); + dbg_prn(" BAR (0) : %p %lx\n", + bp->bar0, pci_bar_start(pdev, PCI_BASE_ADDRESS_0)); + dbg_prn(" BAR (1) : %p %lx\n", + bp->bar1, pci_bar_start(pdev, PCI_BASE_ADDRESS_2)); + dbg_prn(" BAR (2) : %p %lx\n", + bp->bar2, pci_bar_start(pdev, PCI_BASE_ADDRESS_4)); + dbg_prn(" "); + pause_drv(); +} +#else +#define dbg_pci(bp, func, creg) +#endif + +#if defined(DEBUG_MEMORY) +void dbg_mem(struct bnxt *bp, const char *func) +{ + dbg_prn("- %s()\n", func); + dbg_prn(" bp Addr : %p", bp); + dbg_prn(" Len %4d", (u16)sizeof(struct bnxt)); + dbg_prn(" phy %lx\n", virt_to_bus(bp)); + dbg_prn(" bp->hwrm_req_addr : %p", bp->hwrm_addr_req); + dbg_prn(" Len %4d", (u16)REQ_BUFFER_SIZE); + dbg_prn(" phy %lx\n", bp->req_addr_mapping); + dbg_prn(" bp->hwrm_resp_addr : %p", bp->hwrm_addr_resp); + dbg_prn(" Len %4d", (u16)RESP_BUFFER_SIZE); + dbg_prn(" phy %lx\n", bp->resp_addr_mapping); + dbg_prn(" bp->dma_addr : %p", bp->hwrm_addr_dma); + dbg_prn(" Len %4d", (u16)DMA_BUFFER_SIZE); + dbg_prn(" phy %lx\n", bp->dma_addr_mapping); + dbg_prn(" 
bp->tx.bd_virt : %p", bp->tx.bd_virt); + dbg_prn(" Len %4d", (u16)TX_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->tx.bd_virt)); + dbg_prn(" bp->rx.bd_virt : %p", bp->rx.bd_virt); + dbg_prn(" Len %4d", (u16)RX_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->rx.bd_virt)); + dbg_prn(" bp->cq.bd_virt : %p", bp->cq.bd_virt); + dbg_prn(" Len %4d", (u16)CQ_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->cq.bd_virt)); + dbg_prn(" bp->nq.bd_virt : %p", bp->nq.bd_virt); + dbg_prn(" Len %4d", (u16)NQ_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->nq.bd_virt)); + dbg_prn(" "); + pause_drv(); +} +#else +#define dbg_mem(bp, func) (func = func) +#endif + +#if defined(DEBUG_CHIP) +void dbg_fw_ver(struct hwrm_ver_get_output *resp, u32 tmo) +{ + if (resp->hwrm_intf_maj_8b < 1) { + dbg_prn(" HWRM interface %d.%d.%d is older than 1.0.0.\n", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + dbg_prn(" Update FW with HWRM interface 1.0.0 or newer.\n"); + } + dbg_prn(" FW Version : %d.%d.%d.%d\n", + resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, + resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); + dbg_prn(" cmd timeout : %d\n", tmo); + if (resp->hwrm_intf_maj_8b >= 1) + dbg_prn(" hwrm_max_req_len : %d\n", resp->max_req_win_len); + dbg_prn(" hwrm_max_ext_req : %d\n", resp->max_ext_req_len); + dbg_prn(" chip_num : %x\n", resp->chip_num); + dbg_prn(" chip_id : %x\n", + (u32)(resp->chip_rev << 24) | + (u32)(resp->chip_metal << 16) | + (u32)(resp->chip_bond_id << 8) | + (u32)resp->chip_platform_type); + test_if((resp->dev_caps_cfg & SHORT_CMD_SUPPORTED) && + (resp->dev_caps_cfg & SHORT_CMD_REQUIRED)) + dbg_prn(" SHORT_CMD_SUPPORTED\n"); +} + +void dbg_func_resource_qcaps(struct bnxt *bp) +{ +// Ring Groups + dbg_prn(" min_hw_ring_grps : %d\n", bp->min_hw_ring_grps); + dbg_prn(" max_hw_ring_grps : %d\n", bp->max_hw_ring_grps); +// TX Rings + dbg_prn(" min_tx_rings : %d\n", bp->min_tx_rings); + dbg_prn(" max_tx_rings : %d\n", 
bp->max_tx_rings); +// RX Rings + dbg_prn(" min_rx_rings : %d\n", bp->min_rx_rings); + dbg_prn(" max_rx_rings : %d\n", bp->max_rx_rings); +// Completion Rings + dbg_prn(" min_cq_rings : %d\n", bp->min_cp_rings); + dbg_prn(" max_cq_rings : %d\n", bp->max_cp_rings); +// Statistic Contexts + dbg_prn(" min_stat_ctxs : %d\n", bp->min_stat_ctxs); + dbg_prn(" max_stat_ctxs : %d\n", bp->max_stat_ctxs); +} + +void dbg_func_qcaps(struct bnxt *bp) +{ + dbg_prn(" Port Number : %d\n", bp->port_idx); + dbg_prn(" fid : 0x%04x\n", bp->fid); + dbg_prn(" PF MAC : %02x:%02x:%02x:%02x:%02x:%02x\n", + bp->mac_addr[0], + bp->mac_addr[1], + bp->mac_addr[2], + bp->mac_addr[3], + bp->mac_addr[4], + bp->mac_addr[5]); +} + +void dbg_func_qcfg(struct bnxt *bp) +{ + dbg_prn(" ordinal_value : %d\n", bp->ordinal_value); + dbg_prn(" stat_ctx_id : %x\n", bp->stat_ctx_id); + if (bp->vf) { + dbg_func_qcaps(bp); + dbg_prn(" vlan_id : %d\n", bp->vlan_id); + } +} + +void prn_set_speed(u32 speed) +{ + u32 speed1 = ((speed & LINK_SPEED_DRV_MASK) >> LINK_SPEED_DRV_SHIFT); + + dbg_prn(" Set Link Speed : "); + switch (speed & LINK_SPEED_DRV_MASK) { + case LINK_SPEED_DRV_1G: + dbg_prn("1 GBPS"); + break; + case LINK_SPEED_DRV_10G: + dbg_prn("10 GBPS"); + break; + case LINK_SPEED_DRV_25G: + dbg_prn("25 GBPS"); + break; + case LINK_SPEED_DRV_40G: + dbg_prn("40 GBPS"); + break; + case LINK_SPEED_DRV_50G: + dbg_prn("50 GBPS"); + break; + case LINK_SPEED_DRV_100G: + dbg_prn("100 GBPS"); + break; + case LINK_SPEED_DRV_200G: + dbg_prn("200 GBPS"); + break; + case LINK_SPEED_DRV_AUTONEG: + dbg_prn("AUTONEG"); + break; + default: + dbg_prn("%x", speed1); + break; + } + dbg_prn("\n"); +} + +void dbg_chip_info(struct bnxt *bp) +{ + if (bp->thor) + dbg_prn(" NQ Ring Id : %d\n", bp->nq_ring_id); + else + dbg_prn(" Grp ID : %d\n", bp->ring_grp_id); + dbg_prn(" Stat Ctx ID : %d\n", bp->stat_ctx_id); + dbg_prn(" CQ Ring Id : %d\n", bp->cq_ring_id); + dbg_prn(" Tx Ring Id : %d\n", bp->tx_ring_id); + dbg_prn(" Rx ring Id : 
%d\n", bp->rx_ring_id); + dbg_prn(" "); + pause_drv(); +} + +void dbg_num_rings(struct bnxt *bp) +{ + dbg_prn(" num_cmpl_rings : %d\n", bp->num_cmpl_rings); + dbg_prn(" num_tx_rings : %d\n", bp->num_tx_rings); + dbg_prn(" num_rx_rings : %d\n", bp->num_rx_rings); + dbg_prn(" num_ring_grps : %d\n", bp->num_hw_ring_grps); + dbg_prn(" num_stat_ctxs : %d\n", bp->num_stat_ctxs); +} + +void dbg_flags(const char *func, u32 flags) +{ + dbg_prn("- %s()\n", func); + dbg_prn(" bp->flags : 0x%04x\n", flags); +} + +void dbg_bnxt_pause(void) +{ + dbg_prn(" "); + pause_drv(); +} +#else +#define dbg_fw_ver(resp, tmo) +#define dbg_func_resource_qcaps(bp) +#define dbg_func_qcaps(bp) +#define dbg_func_qcfg(bp) +#define prn_set_speed(speed) +#define dbg_chip_info(bp) +#define dbg_num_rings(bp) +#define dbg_flags(func, flags) +#define dbg_bnxt_pause() +#endif + +#if defined(DEBUG_HWRM_CMDS) || defined(DEBUG_FAIL) +void dump_hwrm_req(struct bnxt *bp, const char *func, u32 len, u32 tmo) +{ + dbg_prn("- %s(0x%04x) cmd_len %d cmd_tmo %d", + func, (u16)((struct input *)bp->hwrm_addr_req)->req_type, + len, tmo); +#if defined(DEBUG_HWRM_DUMP) + dump_mem((u8 *)bp->hwrm_addr_req, len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void debug_resp(struct bnxt *bp, const char *func, u32 resp_len, u16 err) +{ + dbg_prn("- %s(0x%04x) - ", + func, (u16)((struct input *)bp->hwrm_addr_req)->req_type); + if (err == STATUS_SUCCESS) + dbg_prn("Done"); + else if (err != STATUS_TIMEOUT) + dbg_prn("Fail err 0x%04x", err); + else + dbg_prn("timedout"); +#if defined(DEBUG_HWRM_DUMP) + if (err != STATUS_TIMEOUT) { + dump_mem((u8 *)bp->hwrm_addr_resp, resp_len, DISP_U8); + sleep(1); + } else + dbg_prn("\n"); +#else + resp_len = resp_len; + dbg_prn("\n"); +#endif +} + +void dbg_hw_cmd(struct bnxt *bp, + const char *func, u16 cmd_len, + u16 resp_len, u32 cmd_tmo, u16 err) +{ +#if !defined(DEBUG_HWRM_CMDS) + if (err) +#endif + { + dump_hwrm_req(bp, func, cmd_len, cmd_tmo); + debug_resp(bp, func, resp_len, err); + 
} +} +#else +#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err) (func = func) +#endif + +#if defined(DEBUG_HWRM_CMDS) +void dbg_short_cmd(u8 *req, const char *func, u32 len) +{ + struct hwrm_short_input *sreq; + + sreq = (struct hwrm_short_input *)req; + dbg_prn("- %s(0x%04x) short_cmd_len %d", + func, + sreq->req_type, + (int)len); +#if defined(DEBUG_HWRM_DUMP) + dump_mem((u8 *)sreq, len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} +#else +#define dbg_short_cmd(sreq, func, len) +#endif + +#if defined(DEBUG_RX) +void dump_rx_bd(struct rx_pkt_cmpl *rx_cmp, + struct rx_pkt_cmpl_hi *rx_cmp_hi, + u32 desc_idx) +{ + dbg_prn(" RX desc_idx %d PktLen %d\n", desc_idx, rx_cmp->len); + dbg_prn("- rx_cmp %lx", virt_to_bus(rx_cmp)); +#if defined(DEBUG_RX_DUMP) + dump_mem((u8 *)rx_cmp, (u32)sizeof(struct rx_pkt_cmpl), DISP_U8); +#else + dbg_prn("\n"); +#endif + dbg_prn("- rx_cmp_hi %lx", virt_to_bus(rx_cmp_hi)); +#if defined(DEBUG_RX_DUMP) + dump_mem((u8 *)rx_cmp_hi, (u32)sizeof(struct rx_pkt_cmpl_hi), DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dbg_rx_vlan(struct bnxt *bp, u32 meta, u16 f2, u16 rx_vid) +{ + dbg_prn(" Rx VLAN metadata %x flags2 %x\n", meta, f2); + dbg_prn(" Rx VLAN MBA %d TX %d RX %d\n", + bp->vlan_id, bp->vlan_tx, rx_vid); +} + +void dbg_alloc_rx_iob(struct io_buffer *iob, u16 id, u16 cid) +{ + dbg_prn(" Rx alloc_iob (%d) %p bd_virt (%d)\n", + id, iob->data, cid); +} + +void dbg_rx_cid(u16 idx, u16 cid) +{ + dbg_prn("- RX old cid %d new cid %d\n", idx, cid); +} + +void dbg_alloc_rx_iob_fail(u16 iob_idx, u16 cons_id) +{ + dbg_prn(" Rx alloc_iob (%d) ", iob_idx); + dbg_prn("failed for cons_id %d\n", cons_id); +} + +void dbg_rxp(u8 *iob, u16 rx_len, u8 drop) +{ + dbg_prn("- RX iob %lx Len %d ", virt_to_bus(iob), rx_len); + if (drop == 1) + dbg_prn("drop ErrPkt "); + else if (drop == 2) + dbg_prn("drop LoopBack "); + else if (drop == 3) + dbg_prn("drop VLAN"); +#if defined(DEBUG_RX_DUMP) + dump_mem(iob, (u32)rx_len, DISP_U8); +#else + 
dbg_prn("\n"); +#endif +} + +void dbg_rx_stat(struct bnxt *bp) +{ + dbg_prn("- RX Stat Total %d Good %d Drop err %d LB %d VLAN %d\n", + bp->rx.cnt, bp->rx.good, + bp->rx.drop_err, bp->rx.drop_lb, bp->rx.drop_vlan); +} +#else +#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx) +#define dbg_rx_vlan(bp, metadata, flags2, rx_vid) +#define dbg_alloc_rx_iob(iob, id, cid) +#define dbg_rx_cid(idx, cid) +#define dbg_alloc_rx_iob_fail(iob_idx, cons_id) +#define dbg_rxp(iob, rx_len, drop) +#define dbg_rx_stat(bp) +#endif + +#if defined(DEBUG_CQ) +static void dump_cq(struct cmpl_base *cmp, u16 cid) +{ + dbg_prn("- CQ Type "); + switch (cmp->type & CMPL_BASE_TYPE_MASK) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + dbg_prn("(ae)"); + break; + case CMPL_BASE_TYPE_STAT_EJECT: + dbg_prn("(se)"); + break; + case CMPL_BASE_TYPE_TX_L2: + dbg_prn("(tx)"); + break; + case CMPL_BASE_TYPE_RX_L2: + dbg_prn("(rx)"); + break; + default: + dbg_prn("%04x", (u16)(cmp->type & CMPL_BASE_TYPE_MASK)); + break; + } + dbg_prn(" cid %d", cid); +#if defined(DEBUG_CQ_DUMP) + dump_mem((u8 *)cmp, (u32)sizeof(struct cmpl_base), DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +static void dump_nq(struct nq_base *nqp, u16 cid) +{ + dbg_prn("- NQ Type %lx cid %d", (nqp->type & NQ_CN_TYPE_MASK), cid); +#if defined(DEBUG_CQ_DUMP) + dump_mem((u8 *)nqp, (u32)sizeof(struct nq_base), DISP_U8); +#else + dbg_prn("\n"); +#endif +} +#else +#define dump_cq(cq, id) +#define dump_nq(nq, id) +#endif + +#if defined(DEBUG_TX) +void dbg_tx_avail(struct bnxt *bp, u32 avail, u16 use) +{ + dbg_prn("- Tx BD %d Avail %d Use %d pid %d cid %d\n", + bp->tx.ring_cnt, + avail, use, + bp->tx.prod_id, + bp->tx.cons_id); +} + +void dbg_tx_vlan(struct bnxt *bp, char *src, u16 plen, u16 len) +{ + dbg_prn("- Tx VLAN PKT %d MBA %d", bp->vlan_tx, bp->vlan_id); + dbg_prn(" PKT %d", + BYTE_SWAP_S(*(u16 *)(&src[MAC_HDR_SIZE + 2]))); + dbg_prn(" Pro %x", + BYTE_SWAP_S(*(u16 *)(&src[MAC_HDR_SIZE]))); + dbg_prn(" old len %d new len %d\n", plen, len); +} + 
+void dbg_tx_pad(u16 plen, u16 len) +{ + if (len != plen) + dbg_prn("- Tx padded(0) old len %d new len %d\n", plen, len); +} + +void dump_tx_stat(struct bnxt *bp) +{ + dbg_prn(" TX stats cnt %d req_cnt %d", bp->tx.cnt, bp->tx.cnt_req); + dbg_prn(" prod_id %d cons_id %d\n", bp->tx.prod_id, bp->tx.cons_id); +} + +void dump_tx_pkt(u8 *pkt, u16 len, u16 idx) +{ + dbg_prn(" TX(%d) Addr %lx Size %d", idx, virt_to_bus(pkt), len); +#if defined(DEBUG_TX_DUMP) + dump_mem(pkt, (u32)len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dump_tx_bd(struct tx_bd_short *tx_bd, u16 len, int idx) +{ + dbg_prn(" Tx(%d) BD Addr %lx Size %d", idx, virt_to_bus(tx_bd), len); +#if defined(DEBUG_TX_DUMP) + dump_mem((u8 *)tx_bd, (u32)len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dbg_tx_done(u8 *pkt, u16 len, u16 idx) +{ + dbg_prn(" Tx(%d) Done pkt %lx Size %d\n", idx, virt_to_bus(pkt), len); +} +#else +#define dbg_tx_avail(bp, a, u) +#define dbg_tx_vlan(bp, src, plen, len) +#define dbg_tx_pad(plen, len) +#define dump_tx_stat(bp) +#define dump_tx_pkt(pkt, len, idx) +#define dump_tx_bd(prod_bd, len, idx) +#define dbg_tx_done(pkt, len, idx) +#endif + +#if defined(DEBUG_LINK) +static void dump_evt(u8 *cmp, u32 type, u16 cid, u8 ring) +{ + u32 size; + u8 c; + + if (ring) { + c = 'N'; + size = sizeof(struct nq_base); + } else { + c = 'C'; + size = sizeof(struct cmpl_base); + } + switch (type) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + break; + default: + return; + } + dbg_prn("- %cQ Type (ae) cid %d", c, cid); + dump_mem(cmp, size, DISP_U8); +} + +void dbg_link_info(struct bnxt *bp) +{ + dbg_prn(" Current Speed : "); + switch (bp->current_link_speed) { + case PORT_PHY_QCFG_RESP_LINK_SPEED_200GB: + dbg_prn("200 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_100GB: + dbg_prn("100 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_50GB: + dbg_prn("50 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_40GB: + dbg_prn("40 %s", str_gbps); + break; + 
case PORT_PHY_QCFG_RESP_LINK_SPEED_25GB: + dbg_prn("25 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_20GB: + dbg_prn("20 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_10GB: + dbg_prn("10 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB: + dbg_prn("2.5 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_2GB: + dbg_prn("2 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_1GB: + dbg_prn("1 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_100MB: + dbg_prn("100 %s", str_mbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_10MB: + dbg_prn("10 %s", str_mbps); + break; + default: + dbg_prn("%x", bp->current_link_speed); + } + dbg_prn("\n"); + dbg_prn(" media_detect : %x\n", bp->media_detect); +} + +void dbg_link_status(struct bnxt *bp) +{ + dbg_prn(" Port(%d) : Link", bp->port_idx); + if (bp->link_status == STATUS_LINK_ACTIVE) + dbg_prn("Up"); + else + dbg_prn("Down"); + dbg_prn("\n"); +} + +void dbg_link_state(struct bnxt *bp, u32 tmo) +{ + dbg_link_status(bp); + dbg_link_info(bp); + dbg_prn(" Link wait time : %d ms", tmo); + pause_drv(); +} +#else +#define dump_evt(cq, ty, id, ring) +#define dbg_link_status(bp) +#define dbg_link_state(bp, tmo) +#endif diff --git a/src/drivers/net/bnxt/bnxt_hsi.h b/src/drivers/net/bnxt/bnxt_hsi.h new file mode 100644 index 00000000..086acb8b --- /dev/null +++ b/src/drivers/net/bnxt/bnxt_hsi.h @@ -0,0 +1,10337 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2019 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * DO NOT MODIFY!!! This file is automatically generated. 
+ */ + +#ifndef _BNXT_HSI_H_ +#define _BNXT_HSI_H_ + +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +#define CMD_DISCR_TLV_ENCAP 0x8000UL +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP + +#define TLV_TYPE_HWRM_REQUEST 0x1UL +#define TLV_TYPE_HWRM_RESPONSE 0x2UL +#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL +#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL +#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL +#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL +#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL +#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL +#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL +#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE + +/* tlv (size:64b/8B) */ +struct tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 flags; + #define TLV_FLAGS_MORE 0x1UL + #define TLV_FLAGS_MORE_LAST 0x0UL + #define TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define TLV_FLAGS_REQUIRED 0x2UL + #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; +}; + +/* input (size:128b/16B) */ +struct input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* output (size:64b/8B) */ +struct output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +/* hwrm_short_input (size:128b/16B) */ +struct hwrm_short_input { + __le16 req_type; + __le16 signature; + #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL + #define SHORT_REQ_SIGNATURE_LAST 
SHORT_REQ_SIGNATURE_SHORT_CMD + __le16 unused_0; + __le16 size; + __le64 req_addr; +}; + +/* cmd_nums (size:64b/8B) */ +struct cmd_nums { + __le16 req_type; + #define HWRM_VER_GET 0x0UL + #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL + #define HWRM_FUNC_BUF_UNRGTR 0xeUL + #define HWRM_FUNC_VF_CFG 0xfUL + #define HWRM_RESERVED1 0x10UL + #define HWRM_FUNC_RESET 0x11UL + #define HWRM_FUNC_GETFID 0x12UL + #define HWRM_FUNC_VF_ALLOC 0x13UL + #define HWRM_FUNC_VF_FREE 0x14UL + #define HWRM_FUNC_QCAPS 0x15UL + #define HWRM_FUNC_QCFG 0x16UL + #define HWRM_FUNC_CFG 0x17UL + #define HWRM_FUNC_QSTATS 0x18UL + #define HWRM_FUNC_CLR_STATS 0x19UL + #define HWRM_FUNC_DRV_UNRGTR 0x1aUL + #define HWRM_FUNC_VF_RESC_FREE 0x1bUL + #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL + #define HWRM_FUNC_DRV_RGTR 0x1dUL + #define HWRM_FUNC_DRV_QVER 0x1eUL + #define HWRM_FUNC_BUF_RGTR 0x1fUL + #define HWRM_PORT_PHY_CFG 0x20UL + #define HWRM_PORT_MAC_CFG 0x21UL + #define HWRM_PORT_TS_QUERY 0x22UL + #define HWRM_PORT_QSTATS 0x23UL + #define HWRM_PORT_LPBK_QSTATS 0x24UL + #define HWRM_PORT_CLR_STATS 0x25UL + #define HWRM_PORT_LPBK_CLR_STATS 0x26UL + #define HWRM_PORT_PHY_QCFG 0x27UL + #define HWRM_PORT_MAC_QCFG 0x28UL + #define HWRM_PORT_MAC_PTP_QCFG 0x29UL + #define HWRM_PORT_PHY_QCAPS 0x2aUL + #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL + #define HWRM_PORT_PHY_I2C_READ 0x2cUL + #define HWRM_PORT_LED_CFG 0x2dUL + #define HWRM_PORT_LED_QCFG 0x2eUL + #define HWRM_PORT_LED_QCAPS 0x2fUL + #define HWRM_QUEUE_QPORTCFG 0x30UL + #define HWRM_QUEUE_QCFG 0x31UL + #define HWRM_QUEUE_CFG 0x32UL + #define HWRM_FUNC_VLAN_CFG 0x33UL + #define HWRM_FUNC_VLAN_QCFG 0x34UL + #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL + #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL + #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL + #define HWRM_QUEUE_PRI2COS_CFG 0x38UL + #define HWRM_QUEUE_COS2BW_QCFG 0x39UL + #define HWRM_QUEUE_COS2BW_CFG 0x3aUL + #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL + #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL + #define HWRM_QUEUE_DSCP2PRI_CFG 
0x3dUL + #define HWRM_VNIC_ALLOC 0x40UL + #define HWRM_VNIC_FREE 0x41UL + #define HWRM_VNIC_CFG 0x42UL + #define HWRM_VNIC_QCFG 0x43UL + #define HWRM_VNIC_TPA_CFG 0x44UL + #define HWRM_VNIC_TPA_QCFG 0x45UL + #define HWRM_VNIC_RSS_CFG 0x46UL + #define HWRM_VNIC_RSS_QCFG 0x47UL + #define HWRM_VNIC_PLCMODES_CFG 0x48UL + #define HWRM_VNIC_PLCMODES_QCFG 0x49UL + #define HWRM_VNIC_QCAPS 0x4aUL + #define HWRM_RING_ALLOC 0x50UL + #define HWRM_RING_FREE 0x51UL + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL + #define HWRM_RING_AGGINT_QCAPS 0x54UL + #define HWRM_RING_RESET 0x5eUL + #define HWRM_RING_GRP_ALLOC 0x60UL + #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RESERVED5 0x64UL + #define HWRM_RESERVED6 0x65UL + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL + #define HWRM_CFA_L2_FILTER_FREE 0x91UL + #define HWRM_CFA_L2_FILTER_CFG 0x92UL + #define HWRM_CFA_L2_SET_RX_MASK 0x93UL + #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL + #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL + #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL + #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL + #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL + #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL + #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL + #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL + #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL + #define HWRM_CFA_EM_FLOW_FREE 0x9dUL + #define HWRM_CFA_EM_FLOW_CFG 0x9eUL + #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL + #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL + #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL + #define HWRM_STAT_CTX_ENG_QUERY 0xafUL + #define HWRM_STAT_CTX_ALLOC 0xb0UL + #define HWRM_STAT_CTX_FREE 0xb1UL + #define HWRM_STAT_CTX_QUERY 0xb2UL + #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_PORT_QSTATS_EXT 0xb4UL + #define HWRM_FW_RESET 0xc0UL + #define HWRM_FW_QSTATUS 0xc1UL + #define HWRM_FW_HEALTH_CHECK 0xc2UL + #define HWRM_FW_SYNC 
0xc3UL + #define HWRM_FW_SET_TIME 0xc8UL + #define HWRM_FW_GET_TIME 0xc9UL + #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL + #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL + #define HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_EXEC_FWD_RESP 0xd0UL + #define HWRM_REJECT_FWD_RESP 0xd1UL + #define HWRM_FWD_RESP 0xd2UL + #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_OEM_CMD 0xd4UL + #define HWRM_TEMP_MONITOR_QUERY 0xe0UL + #define HWRM_WOL_FILTER_ALLOC 0xf0UL + #define HWRM_WOL_FILTER_FREE 0xf1UL + #define HWRM_WOL_FILTER_QCFG 0xf2UL + #define HWRM_WOL_REASON_QCFG 0xf3UL + #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL + #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL + #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL + #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL + #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_VFR_ALLOC 0xfdUL + #define HWRM_CFA_VFR_FREE 0xfeUL + #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL + #define HWRM_CFA_VF_PAIR_FREE 0x101UL + #define HWRM_CFA_VF_PAIR_INFO 0x102UL + #define HWRM_CFA_FLOW_ALLOC 0x103UL + #define HWRM_CFA_FLOW_FREE 0x104UL + #define HWRM_CFA_FLOW_FLUSH 0x105UL + #define HWRM_CFA_FLOW_STATS 0x106UL + #define HWRM_CFA_FLOW_INFO 0x107UL + #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL + #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL + #define HWRM_CFA_PAIR_ALLOC 0x10dUL + #define HWRM_CFA_PAIR_FREE 0x10eUL + #define HWRM_CFA_PAIR_INFO 0x10fUL + #define HWRM_FW_IPC_MSG 0x110UL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL + #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL + #define HWRM_CFA_FLOW_AGING_CFG 0x114UL + #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL + #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL + #define HWRM_ENGINE_CKV_HELLO 0x12dUL + #define HWRM_ENGINE_CKV_STATUS 0x12eUL + #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL + #define 
HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL + #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL + #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL + #define HWRM_ENGINE_CKV_FLUSH 0x133UL + #define HWRM_ENGINE_CKV_RNG_GET 0x134UL + #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL + #define HWRM_ENGINE_QG_QUERY 0x13dUL + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL + #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL + #define HWRM_ENGINE_QG_METER_QUERY 0x142UL + #define HWRM_ENGINE_QG_METER_BIND 0x143UL + #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL + #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL + #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL + #define HWRM_ENGINE_SG_QUERY 0x147UL + #define HWRM_ENGINE_SG_METER_QUERY 0x148UL + #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL + #define HWRM_ENGINE_SG_QG_BIND 0x14aUL + #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL + #define HWRM_ENGINE_CONFIG_QUERY 0x154UL + #define HWRM_ENGINE_STATS_CONFIG 0x155UL + #define HWRM_ENGINE_STATS_CLEAR 0x156UL + #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_RQ_ALLOC 0x15eUL + #define HWRM_ENGINE_RQ_FREE 0x15fUL + #define HWRM_ENGINE_CQ_ALLOC 0x160UL + #define HWRM_ENGINE_CQ_FREE 0x161UL + #define HWRM_ENGINE_NQ_ALLOC 0x162UL + #define HWRM_ENGINE_NQ_FREE 0x163UL + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL + #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL + #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL + #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL + #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL + #define HWRM_FUNC_VF_BW_CFG 0x195UL + #define HWRM_FUNC_VF_BW_QCFG 0x196UL + #define HWRM_SELFTEST_QLIST 0x200UL + #define HWRM_SELFTEST_EXEC 0x201UL + #define HWRM_SELFTEST_IRQ 0x202UL + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_PCIE_QSTATS 0x204UL + #define HWRM_DBG_READ_DIRECT 0xff10UL + #define 
HWRM_DBG_READ_INDIRECT 0xff11UL + #define HWRM_DBG_WRITE_DIRECT 0xff12UL + #define HWRM_DBG_WRITE_INDIRECT 0xff13UL + #define HWRM_DBG_DUMP 0xff14UL + #define HWRM_DBG_ERASE_NVM 0xff15UL + #define HWRM_DBG_CFG 0xff16UL + #define HWRM_DBG_COREDUMP_LIST 0xff17UL + #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL + #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL + #define HWRM_DBG_FW_CLI 0xff1aUL + #define HWRM_DBG_I2C_CMD 0xff1bUL + #define HWRM_DBG_RING_INFO_GET 0xff1cUL + #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL + #define HWRM_NVM_VALIDATE_OPTION 0xffefUL + #define HWRM_NVM_FLUSH 0xfff0UL + #define HWRM_NVM_GET_VARIABLE 0xfff1UL + #define HWRM_NVM_SET_VARIABLE 0xfff2UL + #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL + #define HWRM_NVM_MODIFY 0xfff4UL + #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL + #define HWRM_NVM_GET_DEV_INFO 0xfff6UL + #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL + #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL + #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL + #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL + #define HWRM_NVM_GET_DIR_INFO 0xfffbUL + #define HWRM_NVM_RAW_DUMP 0xfffcUL + #define HWRM_NVM_READ 0xfffdUL + #define HWRM_NVM_WRITE 0xfffeUL + #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + __le16 unused_0[3]; +}; + +/* ret_codes (size:64b/8B) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS 0x0UL + #define HWRM_ERR_CODE_FAIL 0x1UL + #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL + #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL + #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL + #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL + #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL + #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL + #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL + #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL + #define 
HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL + #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED + __le16 unused_0[3]; +}; + +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 cmd_err; + u8 valid; +}; + +#define HWRM_NA_SIGNATURE ((__le32)(-1)) +#define HWRM_MAX_REQ_LEN 128 +#define HWRM_MAX_RESP_LEN 280 +#define HW_HASH_INDEX_SIZE 0x80 +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 10 +#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_RSVD 18 +#define HWRM_VERSION_STR "1.10.0.18" + +/* hwrm_ver_get_input (size:192b/24B) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj_8b; + u8 hwrm_intf_min_8b; + u8 hwrm_intf_upd_8b; + u8 hwrm_intf_rsvd_8b; + u8 hwrm_fw_maj_8b; + u8 hwrm_fw_min_8b; + u8 hwrm_fw_bld_8b; + u8 hwrm_fw_rsvd_8b; + u8 mgmt_fw_maj_8b; + u8 mgmt_fw_min_8b; + u8 mgmt_fw_bld_8b; + u8 mgmt_fw_rsvd_8b; + u8 netctrl_fw_maj_8b; + u8 netctrl_fw_min_8b; + u8 netctrl_fw_bld_8b; + u8 netctrl_fw_rsvd_8b; + __le32 dev_caps_cfg; + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL + #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL + #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 
0x40UL + #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL + #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL + u8 roce_fw_maj_8b; + u8 roce_fw_min_8b; + u8 roce_fw_bld_8b; + u8 roce_fw_rsvd_8b; + char hwrm_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + u8 reserved2[16]; + char roce_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 flags; + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL + #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL + u8 unused_0[2]; + u8 always_1; + __le16 hwrm_intf_major; + __le16 hwrm_intf_minor; + __le16 hwrm_intf_build; + __le16 hwrm_intf_patch; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 max_ext_req_len; + u8 unused_1[5]; + u8 valid; +}; + +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + __le16 type; + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_FLAGS_MASK 0xffc0UL + #define EJECT_CMPL_FLAGS_SFT 6 + #define EJECT_CMPL_FLAGS_ERROR 0x40UL + __le16 len; + __le32 opaque; + __le16 v; + #define EJECT_CMPL_V 0x1UL + #define 
EJECT_CMPL_ERRORS_MASK 0xfffeUL + #define EJECT_CMPL_ERRORS_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + __le16 reserved16; + __le32 unused_2; +}; + +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + __le16 type; + #define CMPL_TYPE_MASK 0x3fUL + #define CMPL_TYPE_SFT 0 + #define CMPL_TYPE_HWRM_DONE 0x20UL + #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE + __le16 sequence_id; + __le32 unused_1; + __le32 v; + #define CMPL_V 0x1UL + __le32 unused_3; +}; + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ +struct hwrm_fwd_req_cmpl { + __le16 req_len_type; + #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL + #define FWD_REQ_CMPL_TYPE_SFT 0 + #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL + #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL + #define FWD_REQ_CMPL_REQ_LEN_SFT 6 + __le16 source_id; + __le32 unused0; + __le32 req_buf_addr_v[2]; + #define FWD_REQ_CMPL_V 0x1UL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +}; + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + __le16 type; + #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL + #define FWD_RESP_CMPL_TYPE_SFT 0 + #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL + #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + __le16 source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define FWD_RESP_CMPL_V 0x1UL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + 
__le16 type; + #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_V 0x1UL + #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* 
hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20 +}; + +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL + #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST 
ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE +}; + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST 
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST 
ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL +}; + +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE 0x20000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE 0x40000UL +}; + +/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ +struct hwrm_async_event_cmpl_reset_notify { + __le16 type; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define 
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 +}; + +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define 
ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define 
ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_flr { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 
timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define 
ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL +}; + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL +}; + +/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_llfc_pfc_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK 0x3UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC 0x2UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK 0x1cUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT 2 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK 0x1fffe0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT 5 +}; + +/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_default_vnic_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define 
ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10 +}; + +/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_flow_aged { + __le16 type; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST 
ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX +}; + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + __le16 type; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + __le32 event_data2; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL + #define 
ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + u8 func_reset_level; + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF + u8 unused_0; +}; + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + u8 unused_0[2]; +}; + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 
first_vf_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + __le16 mtu; + __le16 guest_vlan; + __le16 async_event_cr; + u8 dflt_mac_addr[6]; + __le32 flags; + #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL + #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL + #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL + #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL + #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL + #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL + #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL + #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 
num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 unused_0[4]; +}; + +/* hwrm_func_vf_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_qcaps_input (size:192b/24B) */ +struct hwrm_func_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcaps_output (size:640b/80B) */ +struct hwrm_func_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le32 flags; + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL 
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + u8 mac_address[6]; + __le16 max_rsscos_ctx; + __le16 max_cmpl_rings; + __le16 max_tx_rings; + __le16 max_rx_rings; + __le16 max_l2_ctxs; + __le16 max_vnics; + __le16 first_vf_id; + __le16 max_vfs; + __le16 max_stat_ctx; + __le32 max_encap_records; + __le32 max_decap_records; + __le32 max_tx_em_flows; + __le32 max_tx_wm_flows; + __le32 max_rx_em_flows; + __le32 max_rx_wm_flows; + __le32 max_mcast_filters; + __le32 max_flow_id; + __le32 max_hw_ring_grps; + __le16 max_sp_tx_rings; + u8 unused_0; + u8 valid; +}; + +/* hwrm_func_qcfg_input (size:192b/24B) */ +struct hwrm_func_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcfg_output (size:704b/88B) */ +struct hwrm_func_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le16 vlan; + __le16 flags; + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL + #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL + #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL + #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL + u8 mac_address[6]; + __le16 pci_id; + __le16 alloc_rsscos_ctx; + __le16 alloc_cmpl_rings; + __le16 alloc_tx_rings; + __le16 alloc_rx_rings; + __le16 alloc_l2_ctx; + __le16 alloc_vnics; + __le16 mtu; + __le16 mru; + __le16 stat_ctx_id; + u8 port_partition_type; + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL + #define 
FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN + u8 port_pf_cnt; + #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL + #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL + __le16 dflt_vnic_id; + __le16 max_mtu_configured; + __le32 min_bw; + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID + u8 evb_mode; + #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL + #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL + #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL + #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA + u8 options; + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4 + __le16 alloc_vfs; + __le32 alloc_mcast_filters; + __le32 alloc_hw_ring_grps; + __le16 alloc_sp_tx_rings; + __le16 alloc_stat_ctx; + __le16 alloc_msix; + __le16 registered_vfs; + u8 unused_1[3]; + u8 always_1; + __le32 reset_addr_poll; + u8 unused_2[3]; + u8 valid; +}; + +/* hwrm_func_cfg_input (size:704b/88B) */ +struct hwrm_func_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 num_msix; + 
__le32 flags; + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL + #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL + #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2 + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL + #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL + #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL + #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL + #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL + #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL + #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL + #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL + #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL + #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL + #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL + __le32 enables; + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define 
FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + __le16 mtu; + __le16 mru; + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 dflt_mac_addr[6]; + __le16 dflt_vlan; + __be32 dflt_ip_addr[4]; + __le32 min_bw; + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define 
FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + __le16 async_event_cr; + u8 vlan_antispoof_mode; + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN + u8 allowed_vlan_pris; + u8 evb_mode; + #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL + #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL + #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL + #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA + u8 options; + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 + __le16 num_mcast_filters; +}; 
+ +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_clr_stats_input (size:192b/24B) */ +struct hwrm_func_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct 
hwrm_func_drv_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL + #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + __le32 enables; + #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL + #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL + #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL + __le16 os_type; + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le32 timestamp; + u8 unused_1[4]; + __le32 vf_req_fwd[8]; + __le32 async_event_fwd[8]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; +}; + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct hwrm_func_drv_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + 
__le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL + #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL + __le16 vf_id; + __le16 req_buf_num_pages; + __le16 req_buf_page_size; + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G + __le16 req_buf_len; + __le16 resp_buf_len; + u8 unused_0[2]; + __le64 req_buf_page_addr0; + __le64 req_buf_page_addr1; + __le64 req_buf_page_addr2; + __le64 req_buf_page_addr3; + __le64 req_buf_page_addr4; + __le64 req_buf_page_addr5; + __le64 req_buf_page_addr6; + __le64 req_buf_page_addr7; + __le64 req_buf_page_addr8; + __le64 req_buf_page_addr9; + __le64 error_buf_addr; + __le64 resp_buf_addr; +}; + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 
enables; + #define FUNC_BUF_UNRGTR_REQ_ENABLES_VF_ID 0x1UL + __le16 vf_id; + u8 unused_0[2]; +}; + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 reserved; + __le16 fid; + u8 unused_0[2]; +}; + +/* hwrm_func_drv_qver_output (size:256b/32B) */ +struct hwrm_func_drv_qver_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 os_type; + #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_resource_qcaps_input (size:192b/24B) */ +struct hwrm_func_resource_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_resource_qcaps_output (size:448b/56B) */ +struct hwrm_func_resource_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 max_vfs; + __le16 max_msix; + __le16 
vf_reservation_strategy; + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 max_tx_scheduler_inputs; + __le16 flags; + #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_resource_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 max_msix; + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 flags; + #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL + u8 unused_0[2]; +}; + +/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */ +struct hwrm_func_vf_resource_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 reserved_rsscos_ctx; + __le16 reserved_cmpl_rings; + __le16 reserved_tx_rings; + __le16 reserved_rx_rings; + __le16 reserved_l2_ctxs; + __le16 reserved_vnics; + __le16 reserved_stat_ctx; + __le16 
reserved_hw_ring_grps; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +struct hwrm_func_backing_store_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 qp_max_entries; + __le16 qp_min_qp1_entries; + __le16 qp_max_l2_entries; + __le16 qp_entry_size; + __le16 srq_max_l2_entries; + __le32 srq_max_entries; + __le16 srq_entry_size; + __le16 cq_max_l2_entries; + __le32 cq_max_entries; + __le16 cq_entry_size; + __le16 vnic_max_vnic_entries; + __le16 vnic_max_ring_table_entries; + __le16 vnic_entry_size; + __le32 stat_max_entries; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le32 tqm_min_entries_per_ring; + __le32 tqm_max_entries_per_ring; + __le32 mrav_max_entries; + __le16 mrav_entry_size; + __le16 tim_entry_size; + __le32 tim_max_entries; + u8 unused_0[2]; + u8 tqm_entries_multiple; + u8 valid; +}; + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL + __le32 enables; + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 
0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4 + #define 
FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL + 
#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL + 
#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 
tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le32 cq_num_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le16 qp_entry_size; + __le16 srq_num_l2_entries; + __le16 srq_entry_size; + __le16 cq_num_l2_entries; + __le16 cq_entry_size; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le16 vnic_entry_size; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le16 mrav_entry_size; + __le16 tim_entry_size; +}; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */ +struct hwrm_func_backing_store_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_BACKING_STORE_QCFG_RESP_FLAGS_PREBOOT_MODE 0x1UL + u8 unused_0[4]; + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_QP 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_SRQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_CQ 0x4UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_VNIC 
0x8UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_STAT 0x10UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_MRAV 0x4000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TIM 0x8000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_SFT 0 + #define 
FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define 
FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G (0x5UL << 
4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LAST 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_0 0x0UL + #define 
FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le16 srq_num_l2_entries; + __le16 cq_num_l2_entries; + __le32 cq_num_entries; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_vlan_qcfg_input (size:192b/24B) */ +struct 
hwrm_func_vlan_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_vlan_qcfg_output (size:320b/40B) */ +struct hwrm_func_vlan_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 unused_0; + __le16 stag_vid; + u8 stag_pcp; + u8 unused_1; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_2; + __be16 ctag_tpid; + __le32 rsvd2; + __le32 rsvd3; + u8 unused_3[3]; + u8 valid; +}; + +/* hwrm_func_vlan_cfg_input (size:384b/48B) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; + u8 unused_1; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_2; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + u8 unused_3[4]; +}; + +/* hwrm_func_vlan_cfg_output (size:128b/16B) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ +struct hwrm_func_vf_vnic_ids_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[2]; + __le32 max_vnic_id_cnt; + __le64 vnic_id_tbl_addr; +}; + +/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ +struct hwrm_func_vf_vnic_ids_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id_cnt; + u8 unused_0[3]; + u8 
valid; +}; + +/* hwrm_func_vf_bw_cfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_CFG_REQ_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_CFG_REQ_VFN_VFID_SFT 0 + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_MASK 0xf000UL + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_SFT 12 + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_0 (0x0UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_6_66 (0x1UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_13_33 (0x2UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_20 (0x3UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_26_66 (0x4UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_33_33 (0x5UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_40 (0x6UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_46_66 (0x7UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_53_33 (0x8UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_60 (0x9UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_66_66 (0xaUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_73_33 (0xbUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_80 (0xcUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_86_66 (0xdUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_93_33 (0xeUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 (0xfUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_LAST FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 +}; + +/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_bw_qcfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_MASK 0xfffUL + 
#define FUNC_VF_BW_QCFG_REQ_VFN_VFID_SFT 0 +}; + +/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_SFT 0 + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_MASK 0xf000UL + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_SFT 12 + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_0 (0x0UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_6_66 (0x1UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_13_33 (0x2UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_20 (0x3UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_26_66 (0x4UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_33_33 (0x5UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_40 (0x6UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_46_66 (0x7UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_53_33 (0x8UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_60 (0x9UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_66_66 (0xaUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_73_33 (0xbUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_80 (0xcUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_86_66 (0xdUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_93_33 (0xeUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 (0xfUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_LAST FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_if_change_input (size:192b/24B) */ +struct hwrm_func_drv_if_change_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL + __le32 unused; +}; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { + __le16 error_code; + __le16 
req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_input (size:448b/56B) */ +struct hwrm_port_phy_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL + __le32 enables; + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + __le16 port_id; + __le16 force_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL + #define 
PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK + u8 auto_duplex; + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH + u8 auto_pause; + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + u8 unused_0; + __le16 auto_link_speed; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL + #define 
PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL + u8 wirespeed; + #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL + #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL + #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL + u8 unused_1; + __le32 preemphasis; + __le16 eee_link_speed_mask; + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define 
PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL + u8 unused_2[2]; + __le32 tx_lpi_timer; + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 + __le32 unused_3; +}; + +/* hwrm_port_phy_cfg_output (size:128b/16B) */ +struct hwrm_port_phy_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + u8 code; + #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL + #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL + #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY + u8 unused_0[7]; +}; + +/* hwrm_port_phy_qcfg_input (size:192b/24B) */ +struct hwrm_port_phy_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +struct hwrm_port_phy_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK + u8 unused_0; + __le16 link_speed; + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL + #define 
PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL + u8 pause; + #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL + __le16 support_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL + __le16 force_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL + #define 
PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK + u8 auto_pause; + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + __le16 auto_link_speed; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define 
PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL + #define 
PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST 
PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL + u8 eee_config_phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL + #define 
PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) + #define 
PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL + u8 option_flags; + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_port_mac_cfg_input (size:320b/40B) */ +struct hwrm_port_mac_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL + #define 
PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL + __le32 enables; + #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL + #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL + #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL + #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL + #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL + #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL + #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL + __le16 port_id; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 reserved1; + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + 
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 unused_0[3]; +}; + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ +struct hwrm_port_mac_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE + u8 unused_0; + u8 valid; +}; + +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_qcfg_output (size:192b/24B) */ +struct hwrm_port_mac_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 flags; + #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL + #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL + #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define 
PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 valid; +}; + +/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0[3]; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; 
+ __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + u8 unused_1[7]; + u8 valid; +}; + +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518b_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047b_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + __le64 
rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518b_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 
rx_stat_err; +}; + +/* hwrm_port_qstats_input (size:320b/40B) */ +struct hwrm_port_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_output (size:128b/16B) */ +struct hwrm_port_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[3]; + u8 valid; +}; + +/* tx_port_stats_ext (size:2048b/256B) */ +struct tx_port_stats_ext { + __le64 tx_bytes_cos0; + __le64 tx_bytes_cos1; + __le64 tx_bytes_cos2; + __le64 tx_bytes_cos3; + __le64 tx_bytes_cos4; + __le64 tx_bytes_cos5; + __le64 tx_bytes_cos6; + __le64 tx_bytes_cos7; + __le64 tx_packets_cos0; + __le64 tx_packets_cos1; + __le64 tx_packets_cos2; + __le64 tx_packets_cos3; + __le64 tx_packets_cos4; + __le64 tx_packets_cos5; + __le64 tx_packets_cos6; + __le64 tx_packets_cos7; + __le64 pfc_pri0_tx_duration_us; + __le64 pfc_pri0_tx_transitions; + __le64 pfc_pri1_tx_duration_us; + __le64 pfc_pri1_tx_transitions; + __le64 pfc_pri2_tx_duration_us; + __le64 pfc_pri2_tx_transitions; + __le64 pfc_pri3_tx_duration_us; + __le64 pfc_pri3_tx_transitions; + __le64 pfc_pri4_tx_duration_us; + __le64 pfc_pri4_tx_transitions; + __le64 pfc_pri5_tx_duration_us; + __le64 pfc_pri5_tx_transitions; + __le64 pfc_pri6_tx_duration_us; + __le64 pfc_pri6_tx_transitions; + __le64 pfc_pri7_tx_duration_us; + __le64 pfc_pri7_tx_transitions; +}; + +/* rx_port_stats_ext (size:2368b/296B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; + __le64 rx_bytes_cos0; + __le64 rx_bytes_cos1; + __le64 rx_bytes_cos2; + __le64 rx_bytes_cos3; + __le64 rx_bytes_cos4; + __le64 rx_bytes_cos5; + __le64 rx_bytes_cos6; + __le64 rx_bytes_cos7; + __le64 
rx_packets_cos0; + __le64 rx_packets_cos1; + __le64 rx_packets_cos2; + __le64 rx_packets_cos3; + __le64 rx_packets_cos4; + __le64 rx_packets_cos5; + __le64 rx_packets_cos6; + __le64 rx_packets_cos7; + __le64 pfc_pri0_rx_duration_us; + __le64 pfc_pri0_rx_transitions; + __le64 pfc_pri1_rx_duration_us; + __le64 pfc_pri1_rx_transitions; + __le64 pfc_pri2_rx_duration_us; + __le64 pfc_pri2_rx_transitions; + __le64 pfc_pri3_rx_duration_us; + __le64 pfc_pri3_rx_transitions; + __le64 pfc_pri4_rx_duration_us; + __le64 pfc_pri4_rx_transitions; + __le64 pfc_pri5_rx_duration_us; + __le64 pfc_pri5_rx_transitions; + __le64 pfc_pri6_rx_duration_us; + __le64 pfc_pri6_rx_transitions; + __le64 pfc_pri7_rx_duration_us; + __le64 pfc_pri7_rx_transitions; +}; + +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[2]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + __le16 total_active_cos_queues; + u8 flags; + #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL + u8 valid; +}; + +/* hwrm_port_lpbk_qstats_input (size:128b/16B) */ +struct hwrm_port_lpbk_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ +struct hwrm_port_lpbk_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 lpbk_ucast_frames; + __le64 lpbk_mcast_frames; + __le64 lpbk_bcast_frames; + __le64 lpbk_ucast_bytes; + __le64 lpbk_mcast_bytes; + __le64 lpbk_bcast_bytes; + __le64 tx_stat_discard; + __le64 tx_stat_error; + 
__le64 rx_stat_discard; + __le64 rx_stat_error; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_clr_stats_input (size:192b/24B) */ +struct hwrm_port_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 flags; + #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL + u8 unused_0[5]; +}; + +/* hwrm_port_clr_stats_output (size:128b/16B) */ +struct hwrm_port_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_ts_query_input (size:192b/24B) */ +struct hwrm_port_ts_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX + __le16 port_id; + u8 unused_0[2]; +}; + +/* hwrm_port_ts_query_output (size:192b/24B) */ +struct hwrm_port_ts_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ptp_msg_ts; + __le16 ptp_msg_seqid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ +struct hwrm_port_phy_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +struct hwrm_port_phy_qcaps_output { + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2 + u8 port_cnt; + #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4 + __le16 supported_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + __le16 supported_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL + #define 
PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + __le16 supported_speeds_eee_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL + __le32 tx_lpi_timer_low; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 + __le32 valid_tx_lpi_timer_high; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 + #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 +}; + +/* hwrm_port_phy_i2c_write_input (size:832b/104B) */ +struct hwrm_port_phy_i2c_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; 
+ u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; + __le32 data[16]; +}; + +/* hwrm_port_phy_i2c_write_output (size:128b/16B) */ +struct hwrm_port_phy_i2c_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_i2c_read_input (size:320b/40B) */ +struct hwrm_port_phy_i2c_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; +}; + +/* hwrm_port_phy_i2c_read_output (size:640b/80B) */ +struct hwrm_port_phy_i2c_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data[16]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_led_cfg_input (size:512b/64B) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + 
#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER + 
u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* hwrm_port_led_cfg_output (size:128b/16B) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_led_qcfg_input (size:192b/24B) */ +struct hwrm_port_led_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; 
+ __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_led_qcfg_output (size:448b/56B) */ +struct hwrm_port_led_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID + u8 led0_state; + #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID + u8 led1_state; + #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 
0x1UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID + u8 led2_state; + #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID + u8 led3_state; + #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT + u8 led3_color; + #define 
PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 unused_4[6]; + u8 valid; +}; + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_led_qcaps_output (size:384b/48B) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID + u8 led0_group_id; + u8 unused_0; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID + u8 
led1_group_id; + u8 unused_1; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID + u8 led2_group_id; + u8 unused_2; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID + u8 led3_group_id; + u8 unused_3; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define 
PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_4[3]; + u8 valid; +}; + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ +struct hwrm_queue_qportcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX + __le16 port_id; + u8 drv_qmap_cap; + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED + u8 unused_0; +}; + +/* hwrm_queue_qportcfg_output (size:256b/32B) */ +struct hwrm_queue_qportcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 max_configurable_queues; + u8 max_configurable_lossless_queues; + u8 queue_cfg_allowed; + u8 queue_cfg_info; + #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 queue_pfcenable_cfg_allowed; + u8 queue_pri2cos_cfg_allowed; + u8 queue_cos2bw_cfg_allowed; + u8 queue_id0; + u8 queue_id0_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + u8 queue_id1; + u8 queue_id1_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + u8 queue_id2; + u8 queue_id2_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + u8 queue_id3; + u8 queue_id3_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + u8 queue_id4; + u8 
queue_id4_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + u8 queue_id5; + u8 queue_id5_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + u8 queue_id6; + u8 queue_id6_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + u8 queue_id7; + u8 queue_id7_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL + #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + u8 valid; +}; + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX + __le32 queue_id; +}; + +/* hwrm_queue_qcfg_output (size:128b/16B) */ +struct hwrm_queue_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 queue_len; + u8 service_profile; + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN + u8 queue_cfg_info; + #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0; + u8 valid; +}; + +/* hwrm_queue_cfg_input (size:320b/40B) */ +struct hwrm_queue_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR + __le32 enables; + #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL + #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL 
+ __le32 queue_id; + __le32 dflt_len; + u8 service_profile; + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_queue_cfg_output (size:128b/16B) */ +struct hwrm_queue_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define 
QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + __le16 port_id; + u8 unused_0[2]; +}; + +/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL + u8 port_id; + u8 unused_0[3]; +}; + +/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 queue_cfg_info; + #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ +struct hwrm_queue_pri2cos_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define 
QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL + __le32 enables; + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL + u8 port_id; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */ +struct hwrm_queue_pri2cos_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */ +struct hwrm_queue_cos2bw_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ +struct hwrm_queue_cos2bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_id0; + u8 unused_0; + __le16 unused_1; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID 
(0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_2[4]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define 
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL 
<< 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + 
__le32 queue_id7_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_1[5]; +}; + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + 
__le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + u8 unused_1[4]; +}; + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + u8 unused_0[4]; +}; + +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0[3]; + u8 valid; +}; + +/* 
hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_cfg_input (size:320b/40B) */ +struct hwrm_vnic_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + __le16 default_rx_ring_id; + __le16 default_cmpl_ring_id; +}; + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define VNIC_QCFG_REQ_ENABLES_VF_ID_VALID 0x1UL + __le32 vnic_id; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* 
hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCFG_RESP_FLAGS_DEFAULT 0x1UL + #define VNIC_QCFG_RESP_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_QCFG_RESP_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_QCFG_RESP_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + u8 unused_0[4]; +}; + +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +struct hwrm_vnic_tpa_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define 
VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + __le16 vnic_id; + __le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX + u8 unused_0[2]; + __le32 max_agg_timer; + __le32 min_agg_len; +}; + +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vnic_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL + #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL + #define 
VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL + __le16 max_agg_segs; + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX + __le32 max_agg_timer; + __le32 min_agg_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + __le16 vnic_id; + u8 ring_table_pair_index; + u8 hash_mode_flags; + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + __le64 ring_grp_tbl_addr; + __le64 
hash_key_tbl_addr; + __le16 rss_ctx_idx; + u8 unused_1[6]; +}; + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_ctx_idx; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 hash_type; + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + u8 unused_0[4]; + __le32 hash_key[10]; + u8 hash_mode_flags; + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 unused_1[6]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ +struct hwrm_vnic_plcmodes_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + __le32 enables; + #define 
VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + u8 unused_0[6]; +}; + +/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ +struct hwrm_vnic_plcmodes_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_DFLT_VNIC 0x40UL + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_alloc_input (size:704b/88B) */ +struct hwrm_ring_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL + #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL + #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ + u8 unused_0; + __le16 flags; + #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + u8 unused_1[2]; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + __le16 rx_buf_size; + __le16 rx_ring_id; + __le16 nq_ring_id; + __le16 ring_arb_cfg; + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL + #define 
RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + __le16 unused_3; + __le32 reserved3; + __le32 stat_ctx_id; + __le32 reserved4; + __le32 max_bw; + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL + #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL + #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL + #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL + #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL + u8 unused_4[3]; + __le64 cq_handle; +}; + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_free_input (size:192b/24B) */ +struct hwrm_ring_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define 
RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_FREE_REQ_RING_TYPE_TX 0x1UL + #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL + #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cmpl_params; + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL + #define 
RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL + __le32 nq_params; + #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL + __le16 num_cmpl_dma_aggr_min; + __le16 num_cmpl_dma_aggr_max; + __le16 num_cmpl_dma_aggr_during_int_min; + __le16 num_cmpl_dma_aggr_during_int_max; + __le16 cmpl_aggr_dma_tmr_min; + __le16 cmpl_aggr_dma_tmr_max; + __le16 cmpl_aggr_dma_tmr_during_int_min; + __le16 cmpl_aggr_dma_tmr_during_int_max; + __le16 int_lat_tmr_min_min; + __le16 int_lat_tmr_min_max; + __le16 int_lat_tmr_max_min; + __le16 int_lat_tmr_max_max; + __le16 num_cmpl_aggr_int_min; + __le16 num_cmpl_aggr_int_max; + __le16 timer_units; + u8 unused_0[1]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + u8 unused_0[6]; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 
ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le16 enables; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL + u8 unused_0[4]; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; + __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + u8 unused_0[4]; +}; + +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + __le16 error_code; 
+ __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4 + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define 
CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + u8 l2_addr[6]; + u8 unused_0[2]; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_1[2]; + u8 t_l2_addr[6]; + u8 unused_2[2]; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG + u8 unused_3; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_4; + __le16 dst_id; + __le16 mirror_vnic_id; + 
u8 pri_hint; + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN + u8 unused_5; + __le32 unused_6; + __le64 l2_filter_id_hint; +}; + +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; +}; + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +struct hwrm_cfa_l2_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST 
CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + __le64 l2_filter_id; + __le32 dst_id; + __le32 new_mirror_vnic_id; +}; + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct hwrm_cfa_l2_set_rx_mask_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + u8 unused_0[4]; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + u8 unused_1[4]; +}; + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ +struct hwrm_cfa_l2_set_rx_mask_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 
target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 num_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 max_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 num_vlan_entries; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; 
+ u8 t_l3_addr_type; + u8 tunnel_type; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 tunnel_flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; +}; + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tunnel_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tunnel_filter_id; +}; + +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tunnel_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 
valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 flags; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_FLAGS_MODIFY_DST 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define 
CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_STT 0x7UL + #define 
CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dest_fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + u8 ver_hlen; + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + u8 tos; + __be16 ip_id; + __be16 flags_frag_offset; + u8 ttl; + u8 protocol; + __be32 src_ip_addr; + __be32 dest_ip_addr; +}; + +/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + __be32 ver_tc_flow_label; + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + __be16 payload_len; + u8 next_hdr; + u8 ttl; + __be32 src_ip_addr[4]; + __be32 dest_ip_addr[4]; +}; + +/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */ +struct hwrm_cfa_encap_data_vxlan { + u8 src_mac_addr[6]; + __le16 unused_0; + u8 dst_mac_addr[6]; + u8 
num_vlan_tags; + u8 unused_1; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + __le32 l3[10]; + #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL + #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 + __be16 src_port; + __be16 dst_port; + __be32 vni; + u8 hdr_rsvd0[3]; + u8 hdr_rsvd1; + u8 hdr_flags; + u8 unused[3]; +}; + +/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ +struct hwrm_cfa_encap_record_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + u8 encap_type; + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE + u8 unused_0[3]; + __le32 encap_data[20]; +}; + +/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 encap_record_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ +struct hwrm_cfa_encap_record_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + 
__le64 resp_addr; + __le32 encap_record_id; + u8 unused_0[4]; +}; + +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + __le32 enables; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL + __le64 l2_filter_id; + u8 src_macaddr[6]; + __be16 
ethertype; + u8 ip_addr_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 dst_id; + __le16 mirror_vnic_id; + u8 tunnel_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 pri_hint; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST 
CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST + __be32 src_ipaddr[4]; + __be32 src_ipaddr_mask[4]; + __be32 dst_ipaddr[4]; + __be32 dst_ipaddr_mask[4]; + __be16 src_port; + __be16 src_port_mask; + __be16 dst_port; + __be16 dst_port_mask; + __le64 ntuple_filter_id_hint; +}; + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ntuple_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + u8 code; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 ntuple_filter_id; +}; + +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 ntuple_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + 
#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_LAST CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_BYTE_CTR 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PKT_CTR 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DECAP 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_ENCAP 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DROP 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_METER 0x40UL + __le32 enables; + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_ID 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_MACADDR 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_OVLAN_VID 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IVLAN_VID 0x40UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ETHERTYPE 0x80UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_IPADDR 0x100UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_IPADDR 0x200UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x400UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x800UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_PORT 0x1000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_PORT 0x2000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_ID 0x4000UL + 
#define CFA_EM_FLOW_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x8000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ENCAP_RECORD_ID 0x10000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_METER_INSTANCE_ID 0x20000UL + __le64 l2_filter_id; + u8 tunnel_type; + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[3]; + __le32 tunnel_id; + u8 src_macaddr[6]; + __le16 meter_instance_id; + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_LAST CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID + u8 dst_macaddr[6]; + __le16 ovlan_vid; + __le16 ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_LAST CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP + u8 unused_1[2]; + __be32 
src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 mirror_vnic_id; + __le32 encap_record_id; + u8 unused_2[4]; +}; + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 em_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 em_filter_id; +}; + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */ +struct hwrm_cfa_em_flow_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 em_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define 
CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_LAST CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 + __le16 reserved1; + __le32 reserved2; + __le32 commit_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID + __le32 commit_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + 
#define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + 
#define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* 
hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_LAST CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2698 0x1UL + #define 
CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_LAST CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 + __le16 meter_profile_id; + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID + __le32 reserved; + __le32 commit_rate; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID + __le32 commit_burst; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL 
<< 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define 
CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + 
__le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_LAST CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ +struct hwrm_cfa_decap_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL + __le32 enables; + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + __be32 tunnel_id; + u8 tunnel_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + 
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0; + __le16 unused_1; + u8 src_macaddr[6]; + u8 unused_2[2]; + u8 dst_macaddr[6]; + __be16 ovlan_vid; + __be16 ivlan_vid; + __be16 t_ovlan_vid; + __be16 t_ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 unused_3; + __le32 unused_4; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 l2_ctxt_ref_id; +}; + +/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 decap_filter_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_decap_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 decap_filter_id; + u8 unused_0[4]; +}; + +/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL + __le16 src_fid; + __le32 tunnel_handle; + __le16 action_flags; + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL + __le16 dst_fid; + __be16 
l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + __le16 act_meter_id; + __le16 ref_flow_handle; + __be16 ethertype; + __be16 outer_vlan_tci; + __be16 dmac[3]; + __be16 inner_vlan_tci; + __be16 smac[3]; + u8 ip_dst_mask_len; + u8 ip_src_mask_len; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be16 l4_src_port; + __be16 l4_src_port_mask; + __be16 l4_dst_port; + __be16 l4_dst_port_mask; + __be32 nat_ip_address[4]; + __be16 l2_rewrite_dmac[3]; + __be16 nat_port; + __be16 l2_rewrite_smac[3]; + u8 ip_proto; + u8 tunnel_type; + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL +}; + +/* hwrm_cfa_flow_alloc_output (size:256b/32B) */ +struct hwrm_cfa_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flow_handle; + u8 unused_0[2]; + __le32 flow_id; + __le64 ext_flow_handle; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_free_input (size:256b/32B) */ +struct hwrm_cfa_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_free_output (size:256b/32B) */ +struct hwrm_cfa_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; 
+ __le16 resp_len; + __le64 packet; + __le64 byte; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_info_input (size:256b/32B) */ +struct hwrm_cfa_flow_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0 + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_info_output (size:448b/56B) */ +struct hwrm_cfa_flow_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + u8 profile; + __le16 src_fid; + __le16 dst_fid; + __le16 l2_ctxt_id; + __le64 em_info; + __le64 tcam_info; + __le64 vfp_tcam_info; + __le16 ar_id; + __le16 flow_handle; + __le32 tunnel_handle; + __le16 flow_timer; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_flow_flush_input (size:192b/24B) */ +struct hwrm_cfa_flow_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + u8 unused_0[4]; +}; + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_stats_input (size:640b/80B) */ +struct hwrm_cfa_flow_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_flows; + __le16 flow_handle_0; + __le16 flow_handle_1; + __le16 flow_handle_2; + __le16 flow_handle_3; + __le16 flow_handle_4; + __le16 flow_handle_5; + __le16 flow_handle_6; + __le16 flow_handle_7; + __le16 flow_handle_8; + __le16 flow_handle_9; + u8 unused_0[2]; + __le32 
flow_id_0; + __le32 flow_id_1; + __le32 flow_id_2; + __le32 flow_id_3; + __le32 flow_id_4; + __le32 flow_id_5; + __le32 flow_id_6; + __le32 flow_id_7; + __le32 flow_id_8; + __le32 flow_id_9; +}; + +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ +struct hwrm_cfa_flow_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet_0; + __le64 packet_1; + __le64 packet_2; + __le64 packet_3; + __le64 packet_4; + __le64 packet_5; + __le64 packet_6; + __le64 packet_7; + __le64 packet_8; + __le64 packet_9; + __le64 byte_0; + __le64 byte_1; + __le64 byte_2; + __le64 byte_3; + __le64 byte_4; + __le64 byte_5; + __le64 byte_6; + __le64 byte_7; + __le64 byte_8; + __le64 byte_9; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_timer_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_timer_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_cfg_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FLOW_TIMER 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FIN_TIMER 0x2UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_UDP_FLOW_TIMER 0x4UL + u8 flags; + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX + u8 unused_0; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; +}; + 
+/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */ +struct hwrm_cfa_flow_aging_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 max_tcp_flow_timer; + __le32 max_tcp_fin_timer; + __le32 max_udp_flow_timer; + __le32 max_aging_flows; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_a_id; + __le16 vf_b_id; + u8 
unused_0[4]; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_vf_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_VF_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + __le16 vf_pair_index; + u8 unused_0[2]; + char vf_pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */ +struct hwrm_cfa_vf_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_vf_pair_index; + __le16 vf_a_fid; + __le16 vf_a_index; + __le16 vf_b_fid; + __le16 vf_b_index; + u8 pair_state; + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + u8 unused_0[5]; + char pair_name[32]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_alloc_input (size:576b/72B) */ +struct hwrm_cfa_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 pair_mode; + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_PROXY 0x3UL + #define 
CFA_PAIR_ALLOC_REQ_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MOD 0x5UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL 0x6UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_LAST CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL + u8 unused_0; + __le16 vf_a_id; + u8 host_b_id; + u8 pf_b_id; + __le16 vf_b_id; + u8 port_id; + u8 pri; + __le16 new_pf_fid; + __le32 enables; + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_AB_VALID 0x1UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_BA_VALID 0x2UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_AB_VALID 0x4UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_BA_VALID 0x8UL + char pair_name[32]; + u8 q_ab; + u8 q_ba; + u8 fc_ab; + u8 fc_ba; + u8 unused_1[4]; +}; + +/* hwrm_cfa_pair_alloc_output (size:192b/24B) */ +struct hwrm_cfa_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_REPRE 0x2UL + __le16 pair_index; + u8 pair_pfid; + u8 pair_vfid; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_info_output (size:576b/72B) */ +struct hwrm_cfa_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_pair_index; + __le16 a_fid; + u8 
host_a_index; + u8 pf_a_index; + __le16 vf_a_index; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 b_fid; + u8 host_b_index; + u8 pf_b_index; + __le16 vf_b_index; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 pair_mode; + #define CFA_PAIR_INFO_RESP_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PROXY 0x3UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_LAST CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR + u8 pair_state; + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + char pair_name[32]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + u8 unused_0[4]; + char vfr_name[32]; +}; + +/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_vfr_free_input (size:384b/48B) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; +}; + +/* hwrm_cfa_vfr_free_output (size:128b/16B) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */ +struct hwrm_cfa_redirect_query_tunnel_type_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 unused_0[6]; +}; 
+ +/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */ +struct hwrm_cfa_redirect_query_tunnel_type_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tunnel_mask; + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NONTUNNEL 0x1UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN 0x2UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NVGRE 0x4UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2GRE 0x8UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPIP 0x10UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_GENEVE 0x20UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_MPLS 0x40UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_STT 0x80UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE 0x100UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_V4 0x200UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE_V1 0x400UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_ANYTUNNEL 0x800UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2_ETYPE 0x1000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE + u8 unused_0[7]; +}; + +/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; 
+ __le16 resp_len; + __le16 tunnel_dst_port_id; + __be16 tunnel_dst_port_val; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE + u8 unused_0; + __be16 tunnel_dst_port_val; + u8 unused_1[4]; +}; + +/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE + u8 unused_0; + __le16 tunnel_dst_port_id; + u8 unused_1[4]; +}; + +/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* ctx_hw_stats (size:1280b/160B) */ +struct ctx_hw_stats 
{ + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 tpa_pkts; + __le64 tpa_bytes; + __le64 tpa_events; + __le64 tpa_aborts; +}; + +/* ctx_eng_stats (size:512b/64B) */ +struct ctx_eng_stats { + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; +}; + +/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ +struct hwrm_stat_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + u8 stat_ctx_flags; + #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ +struct hwrm_stat_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ +struct hwrm_stat_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_free_output (size:128b/16B) */ +struct hwrm_stat_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_query_output 
(size:1408b/176B) */ +struct hwrm_stat_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_eng_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */ +struct hwrm_stat_ctx_eng_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ +struct hwrm_stat_ctx_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ +struct hwrm_stat_ctx_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pcie_stat_size; + u8 unused_0[6]; + __le64 pcie_stat_host_addr; +}; + +/* 
hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pcie_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; +}; + +/* hwrm_fw_reset_input (size:192b/24B) */ +struct hwrm_fw_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE + u8 host_idx; + u8 flags; + #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_fw_reset_output (size:128b/16B) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_qstatus_input (size:192b/24B) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP + u8 unused_0[7]; +}; + +/* hwrm_fw_qstatus_output (size:128b/16B) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_set_time_input (size:256b/32B) */ +struct hwrm_fw_set_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 year; + #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL + #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define 
FW_SET_TIME_REQ_ZONE_UTC 0x0UL + #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL + #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN + u8 unused_1[4]; +}; + +/* hwrm_fw_set_time_output (size:128b/16B) */ +struct hwrm_fw_set_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_get_time_input (size:128b/16B) */ +struct hwrm_fw_get_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_get_time_output (size:192b/24B) */ +struct hwrm_fw_get_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 year; + #define FW_GET_TIME_RESP_YEAR_UNKNOWN 0x0UL + #define FW_GET_TIME_RESP_YEAR_LAST FW_GET_TIME_RESP_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_GET_TIME_RESP_ZONE_UTC 0x0UL + #define FW_GET_TIME_RESP_ZONE_UNKNOWN 0xffffUL + #define FW_GET_TIME_RESP_ZONE_LAST FW_GET_TIME_RESP_ZONE_UNKNOWN + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_struct_hdr (size:128b/16B) */ +struct hwrm_struct_hdr { + __le16 struct_id; + #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL + #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL + #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL + #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2 + __le16 len; + u8 version; + u8 count; + __le16 subtype; + __le16 next_offset; + #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL + u8 unused_0[6]; +}; + +/* 
hwrm_struct_data_dcbx_ets (size:256b/32B) */ +struct hwrm_struct_data_dcbx_ets { + u8 destination; + #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION 0x1UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION 0x2UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_LAST STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION + u8 max_tcs; + __le16 unused1; + u8 pri0_to_tc_map; + u8 pri1_to_tc_map; + u8 pri2_to_tc_map; + u8 pri3_to_tc_map; + u8 pri4_to_tc_map; + u8 pri5_to_tc_map; + u8 pri6_to_tc_map; + u8 pri7_to_tc_map; + u8 tc0_to_bw_map; + u8 tc1_to_bw_map; + u8 tc2_to_bw_map; + u8 tc3_to_bw_map; + u8 tc4_to_bw_map; + u8 tc5_to_bw_map; + u8 tc6_to_bw_map; + u8 tc7_to_bw_map; + u8 tc0_to_tsa_map; + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP 0x0UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS 0x1UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS 0x2UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_LAST STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC + u8 tc1_to_tsa_map; + u8 tc2_to_tsa_map; + u8 tc3_to_tsa_map; + u8 tc4_to_tsa_map; + u8 tc5_to_tsa_map; + u8 tc6_to_tsa_map; + u8 tc7_to_tsa_map; + u8 unused_0[4]; +}; + +/* hwrm_struct_data_dcbx_pfc (size:64b/8B) */ +struct hwrm_struct_data_dcbx_pfc { + u8 pfc_priority_bitmap; + u8 max_pfc_tcs; + u8 mbc; + u8 unused_0[5]; +}; + +/* hwrm_struct_data_dcbx_app (size:64b/8B) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; + u8 protocol_selector; + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT + u8 priority; + u8 valid; + u8 unused_0[3]; +}; + +/* 
hwrm_struct_data_dcbx_feature_state (size:64b/8B) */ +struct hwrm_struct_data_dcbx_feature_state { + u8 dcbx_mode; + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE + u8 ets_state; + u8 pfc_state; + u8 app_state; + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 0x6UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS + u8 unused[3]; + u8 resets; + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP 0x4UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_LAST STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE +}; + +/* hwrm_struct_data_lldp (size:64b/8B) */ +struct hwrm_struct_data_lldp { + u8 admin_state; + #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_TX 0x1UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_RX 0x2UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE 0x3UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_LAST STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE + u8 port_description_state; + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_LAST STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE + u8 system_name_state; + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE 0x1UL + #define 
STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE + u8 system_desc_state; + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE + u8 system_cap_state; + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE + u8 mgmt_addr_state; + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_LAST STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE + u8 async_event_notification_state; + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_LAST STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE + u8 unused_0; +}; + +/* hwrm_struct_data_lldp_generic (size:2112b/264B) */ +struct hwrm_struct_data_lldp_generic { + u8 tlv_type; + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS 0x1UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT 0x2UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME 0x3UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME 0x5UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_LAST STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION + u8 subtype; + u8 length; + u8 unused1[5]; + __le32 tlv_value[64]; +}; + +/* hwrm_struct_data_lldp_device (size:1472b/184B) */ +struct hwrm_struct_data_lldp_device { + __le16 ttl; + u8 mgmt_addr_len; + u8 mgmt_addr_type; + u8 unused_3[4]; + __le32 mgmt_addr[8]; + __le32 system_caps; + u8 intf_num_type; + u8 mgmt_addr_oid_length; + 
u8 unused_4[2]; + __le32 intf_num; + u8 unused_5[4]; + __le32 mgmt_addr_oid[32]; +}; + +/* hwrm_struct_data_port_description (size:64b/8B) */ +struct hwrm_struct_data_port_description { + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_struct_data_rss_v2 (size:128b/16B) */ +struct hwrm_struct_data_rss_v2 { + __le16 flags; + #define STRUCT_DATA_RSS_V2_FLAGS_HASH_VALID 0x1UL + __le16 rss_ctx_id; + __le16 num_ring_groups; + __le16 hash_type; + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV4 0x1UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV4 0x2UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV4 0x4UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV6 0x8UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV6 0x10UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV6 0x20UL + __le64 hash_key_ring_group_ids; +}; + +/* hwrm_struct_data_power_information (size:192b/24B) */ +struct hwrm_struct_data_power_information { + __le32 bkup_power_info_ver; + __le32 platform_bkup_power_count; + __le32 load_milli_watt; + __le32 bkup_time_milli_seconds; + __le32 bkup_power_status; + __le32 bkup_power_charge_time; +}; + +/* hwrm_fw_set_structured_data_input (size:256b/32B) */ +struct hwrm_fw_set_structured_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + u8 hdr_cnt; + u8 unused_0[5]; +}; + +/* hwrm_fw_set_structured_data_output (size:128b/16B) */ +struct hwrm_fw_set_structured_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */ +struct hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST 
FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_get_structured_data_input (size:256b/32B) */ +struct hwrm_fw_get_structured_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 structure_id; + __le16 subtype; + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL + u8 count; + u8 unused_0; +}; + +/* hwrm_fw_get_structured_data_output (size:128b/16B) */ +struct hwrm_fw_get_structured_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hdr_cnt; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_ipc_msg_input (size:320b/40B) */ +struct hwrm_fw_ipc_msg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FW_IPC_MSG_REQ_ENABLES_COMMAND_ID 0x1UL + #define FW_IPC_MSG_REQ_ENABLES_SRC_PROCESSOR 0x2UL + #define 
FW_IPC_MSG_REQ_ENABLES_DATA_OFFSET 0x4UL + #define FW_IPC_MSG_REQ_ENABLES_LENGTH 0x8UL + __le16 command_id; + #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG 0x1UL + #define FW_IPC_MSG_REQ_COMMAND_ID_LAST FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG + u8 src_processor; + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_CFW 0x1UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_BONO 0x2UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_APE 0x3UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG 0x4UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_LAST FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG + u8 unused_0; + __le32 data_offset; + __le16 length; + u8 unused_1[2]; + __le64 opaque; +}; + +/* hwrm_fw_ipc_msg_output (size:128b/16B) */ +struct hwrm_fw_ipc_msg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_input (size:256b/32B) */ +struct hwrm_fw_ipc_mailbox_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + u8 unused; + u8 event_id; + u8 port_id; + __le32 event_data1; + __le32 event_data2; + u8 unused_0[4]; +}; + +/* hwrm_fw_ipc_mailbox_output (size:128b/16B) */ +struct hwrm_fw_ipc_mailbox_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_cmd_err (size:64b/8B) */ +struct hwrm_fw_ipc_mailbox_cmd_err { + u8 code; + #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_LAST FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_health_check_input (size:128b/16B) */ +struct hwrm_fw_health_check_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_health_check_output (size:128b/16B) */ +struct hwrm_fw_health_check_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_status; + 
#define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_BOOTED 0x1UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH 0x2UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_BOOTED 0x4UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH 0x8UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_BOOTED 0x10UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH 0x20UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SECOND_RT 0x40UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_sync_input (size:192b/24B) */ +struct hwrm_fw_sync_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 sync_action; + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI 0x1UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT 0x2UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT 0x4UL + #define FW_SYNC_REQ_SYNC_ACTION_ACTION 0x80000000UL + u8 unused_0[4]; +}; + +/* hwrm_fw_sync_output (size:128b/16B) */ +struct hwrm_fw_sync_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 sync_status; + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_MASK 0xffUL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SFT 0 + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SUCCESS 0x0UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_IN_PROGRESS 0x1UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_TIMEOUT 0x2UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL 0x3UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_LAST FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR 0x40000000UL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE 0x80000000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_exec_fwd_resp_input (size:1024b/128B) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; +}; + +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 
req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; +}; + +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ +struct hwrm_reject_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_resp_input (size:1024b/128B) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; + u8 unused_0; + u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* hwrm_fwd_resp_output (size:128b/16B) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ +struct hwrm_fwd_async_event_cmpl_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_async_event_target_id; + u8 unused_0[6]; + __le32 encap_async_event_cmpl[4]; +}; + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ +struct hwrm_fwd_async_event_cmpl_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_temp_monitor_query_input (size:128b/16B) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_temp_monitor_query_output (size:128b/16B) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0[6]; + u8 valid; +}; + 
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */ +struct hwrm_wol_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL + __le16 port_id; + u8 wol_type; + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID + u8 unused_0[5]; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_buf_size; + __le16 pattern_mask_size; + u8 unused_1[4]; + __le64 pattern_buf_addr; + __le64 pattern_mask_addr; +}; + +/* hwrm_wol_filter_alloc_output (size:128b/16B) */ +struct hwrm_wol_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_wol_filter_free_input (size:256b/32B) */ +struct hwrm_wol_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL + __le32 enables; + #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL + __le16 port_id; + u8 wol_filter_id; + u8 unused_0[5]; +}; + +/* hwrm_wol_filter_free_output (size:128b/16B) */ +struct hwrm_wol_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_wol_filter_qcfg_input (size:448b/56B) */ +struct hwrm_wol_filter_qcfg_input { + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 handle; + u8 unused_0[4]; + __le64 pattern_buf_addr; + __le16 pattern_buf_size; + u8 unused_1[6]; + __le64 pattern_mask_addr; + __le16 pattern_mask_size; + u8 unused_2[6]; +}; + +/* hwrm_wol_filter_qcfg_output (size:256b/32B) */ +struct hwrm_wol_filter_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_handle; + u8 wol_filter_id; + u8 wol_type; + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID + __le32 unused_0; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_size; + __le16 pattern_mask_size; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_wol_reason_qcfg_input (size:320b/40B) */ +struct hwrm_wol_reason_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 wol_pkt_buf_addr; + __le16 wol_pkt_buf_size; + u8 unused_1[6]; +}; + +/* hwrm_wol_reason_qcfg_output (size:128b/16B) */ +struct hwrm_wol_reason_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 wol_reason; + #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL + #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID + u8 wol_pkt_len; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_dbg_read_direct_input (size:256b/32B) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* hwrm_dbg_read_direct_output 
(size:128b/16B) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_write_direct_input (size:448b/56B) */ +struct hwrm_dbg_write_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 write_addr; + __le32 write_len32; + __le32 write_data[8]; +}; + +/* hwrm_dbg_write_direct_output (size:128b/16B) */ +struct hwrm_dbg_write_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_read_indirect_input (size:320b/40B) */ +struct hwrm_dbg_read_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_dest_addr_len; + u8 indirect_access_type; + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define 
DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; +}; + +/* hwrm_dbg_read_indirect_output (size:128b/16B) */ +struct hwrm_dbg_read_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_write_indirect_input (size:512b/64B) */ +struct hwrm_dbg_write_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 indirect_access_type; + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define 
DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; + u8 unused_1[4]; + __le32 write_data[8]; +}; + +/* hwrm_dbg_write_indirect_output (size:128b/16B) */ +struct hwrm_dbg_write_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_dump_input (size:320b/40B) */ +struct hwrm_dbg_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 handle; + u8 unused_0[4]; + __le64 host_dbg_dump_addr; + __le64 host_dbg_dump_addr_len; +}; + +/* hwrm_dbg_dump_output (size:192b/24B) */ +struct hwrm_dbg_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nexthandle; + __le32 dbg_data_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_erase_nvm_input (size:192b/24B) */ +struct hwrm_dbg_erase_nvm_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define DBG_ERASE_NVM_REQ_FLAGS_ERASE_ALL 0x1UL + u8 unused_0[6]; +}; + +/* hwrm_dbg_erase_nvm_output (size:128b/16B) */ +struct hwrm_dbg_erase_nvm_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_cfg_input (size:192b/24B) */ +struct hwrm_dbg_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define DBG_CFG_REQ_FLAGS_UART_LOG 0x1UL + #define DBG_CFG_REQ_FLAGS_UART_LOG_SECONDARY 0x2UL + u8 unused_0[4]; +}; + +/* hwrm_dbg_cfg_output (size:128b/16B) */ +struct hwrm_dbg_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* coredump_segment_record (size:128b/16B) */ +struct coredump_segment_record { + __le16 component_id; + __le16 segment_id; + __le16 max_instances; + u8 version_hi; + u8 version_low; + u8 seg_flags; + u8 unused_0[7]; +}; + +/* hwrm_dbg_coredump_list_input (size:256b/32B) */ +struct hwrm_dbg_coredump_list_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 seq_no; + u8 unused_0[2]; +}; + +/* hwrm_dbg_coredump_list_output (size:128b/16B) */ +struct hwrm_dbg_coredump_list_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 total_segments; + __le16 data_len; + u8 unused_1; + u8 valid; +}; + +/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */ +struct hwrm_dbg_coredump_initiate_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_0; + u8 seg_flags; + u8 unused_1[7]; +}; + +/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */ +struct hwrm_dbg_coredump_initiate_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* coredump_data_hdr (size:128b/16B) */ +struct coredump_data_hdr { + __le32 address; + __le32 flags_length; + __le32 
instance; + __le32 next_offset; +}; + +/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */ +struct hwrm_dbg_coredump_retrieve_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le32 unused_0; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_1; + u8 seg_flags; + u8 unused_2; + __le16 unused_3; + __le32 unused_4; + __le32 seq_no; + __le32 unused_5; +}; + +/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */ +struct hwrm_dbg_coredump_retrieve_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 data_len; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_dbg_i2c_cmd_input (size:320b/40B) */ +struct hwrm_dbg_i2c_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 read_size; + __le16 write_size; + u8 chnl_id; + u8 options; + #define DBG_I2C_CMD_REQ_OPTIONS_10_BIT_ADDRESSING 0x1UL + #define DBG_I2C_CMD_REQ_OPTIONS_FAST_MODE 0x2UL + __le16 slave_addr; + u8 xfer_mode; + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_READ 0x0UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE 0x1UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ 0x2UL + #define DBG_I2C_CMD_REQ_XFER_MODE_LAST DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ + u8 unused_1[7]; +}; + +/* hwrm_dbg_i2c_cmd_output (size:128b/16B) */ +struct hwrm_dbg_i2c_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_fw_cli_input (size:1024b/128B) */ +struct hwrm_dbg_fw_cli_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 cli_cmd_len; + u8 unused_0[2]; + u8 cli_cmd[96]; +}; + +/* 
hwrm_dbg_fw_cli_output (size:128b/16B) */ +struct hwrm_dbg_fw_cli_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cli_data_len; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_dbg_ring_info_get_input (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX + u8 unused_0[3]; + __le32 fw_ring_id; +}; + +/* hwrm_dbg_ring_info_get_output (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 producer_index; + __le32 consumer_index; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */ +struct hwrm_nvm_raw_write_blk_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le32 dest_addr; + __le32 len; +}; + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_raw_dump_input (size:256b/32B) */ +struct hwrm_nvm_raw_dump_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 offset; + __le32 len; +}; + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct hwrm_nvm_get_dir_entries_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; +}; + +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_write_input (size:384b/48B) */ +struct hwrm_nvm_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + __le32 dir_item_length; + __le32 unused_0; +}; + +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; + u8 valid; +}; + +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { + u8 code; + #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL + 
#define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; +}; + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT + u8 unused_0[3]; +}; + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + u8 unused_0[6]; +}; + +/* hwrm_nvm_erase_dir_entry_output 
(size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dev_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dev_info_output (size:256b/32B) */ +struct hwrm_nvm_get_dev_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_mod_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 unused_0[2]; +}; + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 install_type; + #define 
NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL + __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + u8 unused_0[2]; +}; + +/* hwrm_nvm_install_update_output (size:192b/24B) */ +struct hwrm_nvm_install_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 installed_items; + u8 result; + #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS + u8 problem_item; + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE + u8 reset_required; + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { + u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; +}; + +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + 
__le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { + u8 code; + #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FLUSH_CMD_ERR_CODE_FAIL 0x1UL + #define NVM_FLUSH_CMD_ERR_CODE_LAST NVM_FLUSH_CMD_ERR_CODE_FAIL + u8 unused_0[7]; +}; + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + u8 unused_0[7]; +}; + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct 
hwrm_nvm_set_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + u8 unused_0; +}; + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + u8 unused_0[7]; +}; + +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_0 0x0UL + 
#define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_LAST NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 unused_0[2]; +}; + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_VALIDATE_OPTION_RESP_RESULT_NOT_MATCH 0x0UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_MATCH 0x1UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_LAST NVM_VALIDATE_OPTION_RESP_RESULT_MATCH + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { + u8 code; + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_input (size:192b/24B) */ +struct hwrm_nvm_factory_defaults_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 mode; + #define NVM_FACTORY_DEFAULTS_REQ_MODE_RESTORE 0x0UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE 0x1UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_LAST NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_output (size:128b/16B) */ +struct hwrm_nvm_factory_defaults_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_OK 0x0UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_RESTORE_OK 0x1UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY 0x2UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_LAST NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */ +struct hwrm_nvm_factory_defaults_cmd_err { + u8 
code; + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG 0x1UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG 0x2UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG + u8 unused_0[7]; +}; + +/* hwrm_selftest_qlist_input (size:128b/16B) */ +struct hwrm_selftest_qlist_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_qlist_output (size:2240b/280B) */ +struct hwrm_selftest_qlist_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_tests; + u8 available_tests; + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 offline_tests; + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + __le16 test_timeout; + u8 unused_1[2]; + char test0_name[32]; + char test1_name[32]; + char test2_name[32]; + char test3_name[32]; + char test4_name[32]; + char test5_name[32]; + char test6_name[32]; + char test7_name[32]; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_selftest_exec_input (size:192b/24B) */ +struct hwrm_selftest_exec_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define 
SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[7]; +}; + +/* hwrm_selftest_exec_output (size:128b/16B) */ +struct hwrm_selftest_exec_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 requested_tests; + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 test_success; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_selftest_irq_input (size:128b/16B) */ +struct hwrm_selftest_irq_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_irq_output (size:128b/16B) */ +struct hwrm_selftest_irq_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_selftest_retrieve_serdes_data_input (size:256b/32B) */ +struct hwrm_selftest_retrieve_serdes_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le32 resp_data_offset; + __le16 
data_len; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0x7UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_EYE_PROJECTION 0x8UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 options; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_MASK 0xfUL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_HORIZONTAL (0x0UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL (0x1UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE 0x20UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LEFT_TOP (0x0UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM (0x1UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_MASK 0xc0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_SFT 6 +}; + +/* hwrm_selftest_retrieve_serdes_data_output (size:128b/16B) */ +struct hwrm_selftest_retrieve_serdes_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 copied_data_len; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_oem_cmd_input (size:1024b/128B) */ +struct hwrm_oem_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 IANA; + __le32 unused_0; + __le32 oem_data[26]; +}; + +/* hwrm_oem_cmd_output (size:1344b/168B) */ +struct 
hwrm_oem_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 IANA; + __le32 unused_0; + __le32 oem_data[36]; + u8 unused_1[7]; + u8 valid; +}; + +#endif /* _BNXT_HSI_H_ */ diff --git a/src/drivers/net/dm96xx.c b/src/drivers/net/dm96xx.c new file mode 100644 index 00000000..61b957be --- /dev/null +++ b/src/drivers/net/dm96xx.c @@ -0,0 +1,673 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <string.h> +#include <unistd.h> +#include <errno.h> +#include <ipxe/netdevice.h> +#include <ipxe/ethernet.h> +#include <ipxe/usb.h> +#include "dm96xx.h" + +/** @file + * + * Davicom DM96xx USB Ethernet driver + * + */ + +/****************************************************************************** + * + * Register operations + * + ****************************************************************************** + */ + +/** + * Reset device + * + * @v dm96xx DM96xx device + * @ret rc Return status code + */ +static int dm96xx_reset ( struct dm96xx_device *dm96xx ) { + int ncr; + int rc; + + /* Reset device */ + if ( ( rc = dm96xx_write_register ( dm96xx, DM96XX_NCR, + DM96XX_NCR_RST ) ) != 0 ) { + DBGC ( dm96xx, "DM96XX %p could not reset: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + /* Wait for reset to complete */ + udelay ( DM96XX_RESET_DELAY_US ); + + /* Check that reset has completed */ + ncr = dm96xx_read_register ( dm96xx, DM96XX_NCR ); + if ( ncr < 0 ) { + rc = ncr; + DBGC ( dm96xx, "DM96XX %p failed to reset: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + if ( ncr & DM96XX_NCR_RST ) { + DBGC ( dm96xx, "DM96XX %p failed to reset (NCR=%#02x)\n", + dm96xx, ncr ); + return -EIO; + } + + return 0; +} + +/** + * Read MAC address + * + * @v dm96xx DM96xx device + * @v mac MAC address to fill in + * @ret rc Return status code + */ +static int dm96xx_read_mac ( struct dm96xx_device *dm96xx, uint8_t *mac ) { + int rc; + + /* Read MAC address */ + if ( ( rc = dm96xx_read_registers ( dm96xx, DM96XX_PAR, mac, + ETH_ALEN ) ) != 0 ) { + DBGC ( dm96xx, "DM96XX %p could not read MAC address: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Write MAC address + * + * @v dm96xx DM96xx device + * @v mac MAC address + * @ret rc Return status code + */ +static int dm96xx_write_mac ( struct dm96xx_device *dm96xx, uint8_t *mac ) { + int rc; + + /* Write MAC address */ + if ( ( rc = dm96xx_write_registers ( dm96xx, DM96XX_PAR, mac, + ETH_ALEN ) ) != 0 )
{ + DBGC ( dm96xx, "DM96XX %p could not write MAC address: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Update link status based on network status register + * + * @v dm96xx DM96xx device + * @v nsr Network status register + */ +static void dm96xx_link_nsr ( struct dm96xx_device *dm96xx, unsigned int nsr ) { + struct net_device *netdev = dm96xx->netdev; + + if ( nsr & DM96XX_NSR_LINKST ) { + if ( ! netdev_link_ok ( netdev ) ) + netdev_link_up ( netdev ); + } else { + if ( netdev_link_ok ( netdev ) ) + netdev_link_down ( netdev ); + } +} + +/** + * Get link status + * + * @v dm96xx DM96xx device + * @ret rc Return status code + */ +static int dm96xx_check_link ( struct dm96xx_device *dm96xx ) { + int nsr; + int rc; + + /* Read network status register */ + nsr = dm96xx_read_register ( dm96xx, DM96XX_NSR ); + if ( nsr < 0 ) { + rc = nsr; + DBGC ( dm96xx, "DM96XX %p could not read network status: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + /* Update link status */ + dm96xx_link_nsr ( dm96xx, nsr ); + + return 0; +} + +/** + * Set DM9601-compatible RX header mode + * + * @v dm96xx DM96xx device + * @ret rc Return status code + */ +static int dm96xx_rx_mode ( struct dm96xx_device *dm96xx ) { + int chipr; + int mode_ctl; + int rc; + + /* Get chip revision */ + chipr = dm96xx_read_register ( dm96xx, DM96XX_CHIPR ); + if ( chipr < 0 ) { + rc = chipr; + DBGC ( dm96xx, "DM96XX %p could not read chip revision: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + /* Do nothing if device is a DM9601 anyway */ + if ( chipr == DM96XX_CHIPR_9601 ) + return 0; + + /* Read current mode control */ + mode_ctl = dm96xx_read_register ( dm96xx, DM96XX_MODE_CTL ); + if ( mode_ctl < 0 ) { + rc = mode_ctl; + DBGC ( dm96xx, "DM96XX %p could not read mode control: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + /* Write mode control */ + mode_ctl &= ~DM96XX_MODE_CTL_MODE; + if ( ( rc = dm96xx_write_register ( dm96xx, 
DM96XX_MODE_CTL, + mode_ctl ) ) != 0 ) { + DBGC ( dm96xx, "DM96XX %p could not write mode control: %s\n", + dm96xx, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void dm96xx_intr_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct dm96xx_device *dm96xx = container_of ( ep, struct dm96xx_device, + usbnet.intr ); + struct net_device *netdev = dm96xx->netdev; + struct dm96xx_interrupt *intr; + size_t len = iob_len ( iobuf ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) + goto done; + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( dm96xx, "DM96XX %p interrupt failed: %s\n", + dm96xx, strerror ( rc ) ); + DBGC_HDA ( dm96xx, 0, iobuf->data, iob_len ( iobuf ) ); + netdev_rx_err ( netdev, NULL, rc ); + goto done; + } + + /* Extract message header */ + if ( len < sizeof ( *intr ) ) { + DBGC ( dm96xx, "DM96XX %p underlength interrupt:\n", dm96xx ); + DBGC_HDA ( dm96xx, 0, iobuf->data, iob_len ( iobuf ) ); + netdev_rx_err ( netdev, NULL, -EINVAL ); + goto done; + } + intr = iobuf->data; + + /* Update link status */ + dm96xx_link_nsr ( dm96xx, intr->nsr ); + + done: + /* Free I/O buffer */ + free_iob ( iobuf ); +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations dm96xx_intr_operations = { + .complete = dm96xx_intr_complete, +}; + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void dm96xx_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct dm96xx_device *dm96xx = container_of ( ep, struct 
dm96xx_device, + usbnet.in ); + struct net_device *netdev = dm96xx->netdev; + struct dm96xx_rx_header *header; + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) { + free_iob ( iobuf ); + return; + } + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( dm96xx, "DM96XX %p bulk IN failed: %s\n", + dm96xx, strerror ( rc ) ); + goto err; + } + + /* Sanity check */ + if ( iob_len ( iobuf ) < ( sizeof ( *header ) + 4 /* CRC */ ) ) { + DBGC ( dm96xx, "DM96XX %p underlength bulk IN\n", dm96xx ); + DBGC_HDA ( dm96xx, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto err; + } + + /* Strip header and CRC */ + header = iobuf->data; + iob_pull ( iobuf, sizeof ( *header ) ); + iob_unput ( iobuf, 4 /* CRC */ ); + + /* Check status */ + if ( header->rsr & ~DM96XX_RSR_MF ) { + DBGC ( dm96xx, "DM96XX %p receive error %02x:\n", + dm96xx, header->rsr ); + DBGC_HDA ( dm96xx, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EIO; + goto err; + } + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + return; + + err: + /* Hand off to network stack */ + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations dm96xx_in_operations = { + .complete = dm96xx_in_complete, +}; + +/** + * Transmit packet + * + * @v dm96xx DM96xx device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int dm96xx_out_transmit ( struct dm96xx_device *dm96xx, + struct io_buffer *iobuf ) { + struct dm96xx_tx_header *header; + size_t len = iob_len ( iobuf ); + int rc; + + /* Prepend header */ + if ( ( rc = iob_ensure_headroom ( iobuf, sizeof ( *header ) ) ) != 0 ) + return rc; + header = iob_push ( iobuf, sizeof ( *header ) ); + header->len = cpu_to_le16 ( len ); + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &dm96xx->usbnet.out, iobuf, 0 ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Complete bulk OUT transfer + 
* + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void dm96xx_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct dm96xx_device *dm96xx = container_of ( ep, struct dm96xx_device, + usbnet.out ); + struct net_device *netdev = dm96xx->netdev; + + /* Report TX completion */ + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations dm96xx_out_operations = { + .complete = dm96xx_out_complete, +}; + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int dm96xx_open ( struct net_device *netdev ) { + struct dm96xx_device *dm96xx = netdev->priv; + unsigned int rcr; + int rc; + + /* Set DM9601-compatible RX header mode */ + if ( ( rc = dm96xx_rx_mode ( dm96xx ) ) != 0 ) + goto err_rx_mode; + + /* Write MAC address */ + if ( ( rc = dm96xx_write_mac ( dm96xx, netdev->ll_addr ) ) != 0 ) + goto err_write_mac; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &dm96xx->usbnet ) ) != 0 ) { + DBGC ( dm96xx, "DM96XX %p could not open: %s\n", + dm96xx, strerror ( rc ) ); + goto err_open; + } + + /* Set receive filters */ + rcr = ( DM96XX_RCR_ALL | DM96XX_RCR_RUNT | DM96XX_RCR_PRMSC | + DM96XX_RCR_RXEN ); + if ( ( rc = dm96xx_write_register ( dm96xx, DM96XX_RCR, rcr ) ) != 0 ) { + DBGC ( dm96xx, "DM96XX %p could not write receive filters: " + "%s\n", dm96xx, strerror ( rc ) ); + goto err_write_rcr; + } + + /* Update link status */ + if ( ( rc = dm96xx_check_link ( dm96xx ) ) != 0 ) + goto err_check_link; + + return 0; + + err_check_link: + err_write_rcr: + usbnet_close ( &dm96xx->usbnet ); + err_open: + err_write_mac: + err_rx_mode: + return rc; +} + +/** + * 
Close network device + * + * @v netdev Network device + */ +static void dm96xx_close ( struct net_device *netdev ) { + struct dm96xx_device *dm96xx = netdev->priv; + + /* Close USB network device */ + usbnet_close ( &dm96xx->usbnet ); + + /* Reset device */ + dm96xx_reset ( dm96xx ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int dm96xx_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct dm96xx_device *dm96xx = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = dm96xx_out_transmit ( dm96xx, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void dm96xx_poll ( struct net_device *netdev ) { + struct dm96xx_device *dm96xx = netdev->priv; + int rc; + + /* Poll USB bus */ + usb_poll ( dm96xx->bus ); + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &dm96xx->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); +} + +/** DM96xx network device operations */ +static struct net_device_operations dm96xx_operations = { + .open = dm96xx_open, + .close = dm96xx_close, + .transmit = dm96xx_transmit, + .poll = dm96xx_poll, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int dm96xx_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct net_device *netdev; + struct dm96xx_device *dm96xx; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *dm96xx ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &dm96xx_operations ); + netdev->dev = &func->dev; + dm96xx = netdev->priv; + memset ( dm96xx, 0, sizeof ( *dm96xx ) ); + dm96xx->usb = usb; + dm96xx->bus = usb->port->hub->bus; + dm96xx->netdev = netdev; + usbnet_init ( &dm96xx->usbnet, func, &dm96xx_intr_operations, + &dm96xx_in_operations, &dm96xx_out_operations ); + usb_refill_init ( &dm96xx->usbnet.intr, 0, 0, DM96XX_INTR_MAX_FILL ); + usb_refill_init ( &dm96xx->usbnet.in, 0, DM96XX_IN_MTU, + DM96XX_IN_MAX_FILL ); + DBGC ( dm96xx, "DM96XX %p on %s\n", dm96xx, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &dm96xx->usbnet, config ) ) != 0 ) { + DBGC ( dm96xx, "DM96XX %p could not describe: %s\n", + dm96xx, strerror ( rc ) ); + goto err_describe; + } + + /* Reset device */ + if ( ( rc = dm96xx_reset ( dm96xx ) ) != 0 ) + goto err_reset; + + /* Read MAC address */ + if ( ( rc = dm96xx_read_mac ( dm96xx, netdev->hw_addr ) ) != 0 ) + goto err_read_mac; + + /* Get initial link status */ + if ( ( rc = dm96xx_check_link ( dm96xx ) ) != 0 ) + goto err_check_link; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, netdev ); + return 0; + + unregister_netdev ( netdev ); + err_register: + err_check_link: + err_read_mac: + err_reset: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void dm96xx_remove ( struct usb_function *func ) { + struct net_device *netdev = usb_func_get_drvdata ( func ); + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** DM96xx device IDs */ +static struct usb_device_id dm96xx_ids[] = { + { + .name = "dm9601-corega", + .vendor = 0x07aa, + .product = 0x9601, + }, + { + .name = "dm9601", + .vendor = 0x0a46, + .product = 0x9601, + }, + { + .name = 
"zt6688", + .vendor = 0x0a46, + .product = 0x6688, + }, + { + .name = "st268", + .vendor = 0x0a46, + .product = 0x0268, + }, + { + .name = "adm8515", + .vendor = 0x0a46, + .product = 0x8515, + }, + { + .name = "dm9601-hirose", + .vendor = 0x0a47, + .product = 0x9601, + }, + { + .name = "dm9601-8101", + .vendor = 0x0fe6, + .product = 0x8101, + }, + { + .name = "dm9601-9700", + .vendor = 0x0fe6, + .product = 0x9700, + }, + { + .name = "dm9000e", + .vendor = 0x0a46, + .product = 0x9000, + }, + { + .name = "dm9620", + .vendor = 0x0a46, + .product = 0x9620, + }, + { + .name = "dm9621A", + .vendor = 0x0a46, + .product = 0x9621, + }, + { + .name = "dm9622", + .vendor = 0x0a46, + .product = 0x9622, + }, + { + .name = "dm962Oa", + .vendor = 0x0a46, + .product = 0x0269, + }, + { + .name = "dm9621a", + .vendor = 0x0a46, + .product = 0x1269, + }, +}; + +/** Davicom DM96xx driver */ +struct usb_driver dm96xx_driver __usb_driver = { + .ids = dm96xx_ids, + .id_count = ( sizeof ( dm96xx_ids ) / sizeof ( dm96xx_ids[0] ) ), + .class = USB_CLASS_ID ( USB_ANY_ID, USB_ANY_ID, USB_ANY_ID ), + .score = USB_SCORE_NORMAL, + .probe = dm96xx_probe, + .remove = dm96xx_remove, +}; diff --git a/src/drivers/net/dm96xx.h b/src/drivers/net/dm96xx.h new file mode 100644 index 00000000..43a1a4e3 --- /dev/null +++ b/src/drivers/net/dm96xx.h @@ -0,0 +1,194 @@ +#ifndef _DM96XX_H +#define _DM96XX_H + +/** @file + * + * Davicom DM96xx USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <stdint.h> +#include <ipxe/usb.h> +#include <ipxe/usbnet.h> + +/** Read register(s) */ +#define DM96XX_READ_REGISTER \ + ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x00 ) ) + +/** Write register(s) */ +#define DM96XX_WRITE_REGISTER \ + ( USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x01 ) ) + +/** Write single register */ +#define DM96XX_WRITE1_REGISTER \ + ( USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x03 ) ) + +/** Network control
register */ +#define DM96XX_NCR 0x00 +#define DM96XX_NCR_RST 0x01 /**< Software reset */ + +/** Network status register */ +#define DM96XX_NSR 0x01 +#define DM96XX_NSR_LINKST 0x40 /**< Link status */ + +/** Receive control register */ +#define DM96XX_RCR 0x05 +#define DM96XX_RCR_ALL 0x08 /**< Pass all multicast */ +#define DM96XX_RCR_RUNT 0x04 /**< Pass runt packet */ +#define DM96XX_RCR_PRMSC 0x02 /**< Promiscuous mode */ +#define DM96XX_RCR_RXEN 0x01 /**< RX enable */ + +/** Receive status register */ +#define DM96XX_RSR 0x06 +#define DM96XX_RSR_MF 0x40 /**< Multicast frame */ + +/** PHY address registers */ +#define DM96XX_PAR 0x10 + +/** Chip revision register */ +#define DM96XX_CHIPR 0x2c +#define DM96XX_CHIPR_9601 0x00 /**< DM9601 */ +#define DM96XX_CHIPR_9620 0x01 /**< DM9620 */ + +/** RX header control/status register (DM9620+ only) */ +#define DM96XX_MODE_CTL 0x91 +#define DM96XX_MODE_CTL_MODE 0x80 /**< 4-byte header mode */ + +/** DM96xx interrupt data */ +struct dm96xx_interrupt { + /** Network status register */ + uint8_t nsr; + /** Transmit status registers */ + uint8_t tsr[2]; + /** Receive status register */ + uint8_t rsr; + /** Receive overflow counter register */ + uint8_t rocr; + /** Receive packet counter */ + uint8_t rxc; + /** Transmit packet counter */ + uint8_t txc; + /** General purpose register */ + uint8_t gpr; +} __attribute__ (( packed )); + +/** DM96xx receive header */ +struct dm96xx_rx_header { + /** Packet status */ + uint8_t rsr; + /** Packet length (excluding this header, including CRC) */ + uint16_t len; +} __attribute__ (( packed )); + +/** DM96xx transmit header */ +struct dm96xx_tx_header { + /** Packet length (excluding this header) */ + uint16_t len; +} __attribute__ (( packed )); + +/** A DM96xx network device */ +struct dm96xx_device { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** Network device */ + struct net_device *netdev; + /** USB network device */ + struct usbnet_device 
usbnet; +}; + +/** + * Read registers + * + * @v dm96xx DM96xx device + * @v offset Register offset + * @v data Data buffer + * @v len Length of data + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +dm96xx_read_registers ( struct dm96xx_device *dm96xx, unsigned int offset, + void *data, size_t len ) { + + return usb_control ( dm96xx->usb, DM96XX_READ_REGISTER, 0, offset, + data, len ); +} + +/** + * Read register + * + * @v dm96xx DM96xx device + * @v offset Register offset + * @ret value Register value, or negative error + */ +static inline __attribute__ (( always_inline )) int +dm96xx_read_register ( struct dm96xx_device *dm96xx, unsigned int offset ) { + uint8_t value; + int rc; + + if ( ( rc = dm96xx_read_registers ( dm96xx, offset, &value, + sizeof ( value ) ) ) != 0 ) + return rc; + return value; +} + +/** + * Write registers + * + * @v dm96xx DM96xx device + * @v offset Register offset + * @v data Data buffer + * @v len Length of data + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +dm96xx_write_registers ( struct dm96xx_device *dm96xx, unsigned int offset, + void *data, size_t len ) { + + return usb_control ( dm96xx->usb, DM96XX_WRITE_REGISTER, 0, offset, + data, len ); +} + +/** + * Write register + * + * @v dm96xx DM96xx device + * @v offset Register offset + * @v value Register value + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +dm96xx_write_register ( struct dm96xx_device *dm96xx, unsigned int offset, + uint8_t value ) { + + return usb_control ( dm96xx->usb, DM96XX_WRITE1_REGISTER, value, + offset, NULL, 0 ); +} + +/** Reset delay (in microseconds) */ +#define DM96XX_RESET_DELAY_US 10 + +/** Interrupt maximum fill level + * + * This is a policy decision. + */ +#define DM96XX_INTR_MAX_FILL 2 + +/** Bulk IN maximum fill level + * + * This is a policy decision. 
+ */ +#define DM96XX_IN_MAX_FILL 8 + +/** Bulk IN buffer size */ +#define DM96XX_IN_MTU \ + ( 4 /* DM96xx header */ + ETH_FRAME_LEN + \ + 4 /* possible VLAN header */ + 4 /* CRC */ ) + +#endif /* _DM96XX_H */ diff --git a/src/drivers/net/ecm.c b/src/drivers/net/ecm.c new file mode 100644 index 00000000..847a45b8 --- /dev/null +++ b/src/drivers/net/ecm.c @@ -0,0 +1,522 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ecm.h" + +/** @file + * + * CDC-ECM USB Ethernet driver + * + */ + +/** Interrupt completion profiler */ +static struct profiler ecm_intr_profiler __profiler = + { .name = "ecm.intr" }; + +/** Bulk IN completion profiler */ +static struct profiler ecm_in_profiler __profiler = + { .name = "ecm.in" }; + +/** Bulk OUT profiler */ +static struct profiler ecm_out_profiler __profiler = + { .name = "ecm.out" }; + +/****************************************************************************** + * + * Ethernet functional descriptor + * + ****************************************************************************** + */ + +/** + * Locate Ethernet functional descriptor + * + * @v config Configuration descriptor + * @v interface Interface descriptor + * @ret desc Descriptor, or NULL if not found + */ +struct ecm_ethernet_descriptor * +ecm_ethernet_descriptor ( struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface ) { + struct ecm_ethernet_descriptor *desc; + + for_each_interface_descriptor ( desc, config, interface ) { + if ( ( desc->header.type == USB_CS_INTERFACE_DESCRIPTOR ) && + ( desc->subtype == CDC_SUBTYPE_ETHERNET ) ) + return desc; + } + return NULL; +} + +/** + * Get hardware MAC address + * + * @v usb USB device + * @v desc Ethernet functional descriptor + * @v hw_addr Hardware address to fill in + * @ret rc Return status code + */ +int ecm_fetch_mac ( struct usb_device *usb, + struct ecm_ethernet_descriptor *desc, uint8_t *hw_addr ) { + char buf[ base16_encoded_len ( ETH_ALEN ) + 1 /* NUL */ ]; + int len; + int rc; + + /* Fetch MAC address string */ + len = usb_get_string_descriptor ( usb, desc->mac, 0, buf, + sizeof ( buf ) ); + if ( len < 0 ) { + rc = len; + return rc; + } + + /* Sanity check */ + if ( len != ( ( int ) ( sizeof ( buf ) - 1 /* NUL */ ) ) ) { + DBGC ( usb, "USB %s has 
invalid ECM MAC \"%s\"\n", + usb->name, buf ); + return -EINVAL; + } + + /* Decode MAC address */ + len = base16_decode ( buf, hw_addr, ETH_ALEN ); + if ( len < 0 ) { + rc = len; + DBGC ( usb, "USB %s could not decode ECM MAC \"%s\": %s\n", + usb->name, buf, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * CDC-ECM communications interface + * + ****************************************************************************** + */ + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void ecm_intr_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct ecm_device *ecm = container_of ( ep, struct ecm_device, + usbnet.intr ); + struct net_device *netdev = ecm->netdev; + struct usb_setup_packet *message; + size_t len = iob_len ( iobuf ); + + /* Profile completions */ + profile_start ( &ecm_intr_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) + goto ignore; + + /* Drop packets with errors */ + if ( rc != 0 ) { + DBGC ( ecm, "ECM %p interrupt failed: %s\n", + ecm, strerror ( rc ) ); + DBGC_HDA ( ecm, 0, iobuf->data, iob_len ( iobuf ) ); + goto error; + } + + /* Extract message header */ + if ( len < sizeof ( *message ) ) { + DBGC ( ecm, "ECM %p underlength interrupt:\n", ecm ); + DBGC_HDA ( ecm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + message = iobuf->data; + + /* Parse message header */ + switch ( message->request ) { + + case cpu_to_le16 ( CDC_NETWORK_CONNECTION ) : + if ( message->value && ! netdev_link_ok ( netdev ) ) { + DBGC ( ecm, "ECM %p link up\n", ecm ); + netdev_link_up ( netdev ); + } else if ( netdev_link_ok ( netdev ) && ! 
message->value ) { + DBGC ( ecm, "ECM %p link down\n", ecm ); + netdev_link_down ( netdev ); + } + break; + + case cpu_to_le16 ( CDC_CONNECTION_SPEED_CHANGE ) : + /* Ignore */ + break; + + default: + DBGC ( ecm, "ECM %p unrecognised interrupt:\n", ecm ); + DBGC_HDA ( ecm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -ENOTSUP; + goto error; + } + + /* Free I/O buffer */ + free_iob ( iobuf ); + profile_stop ( &ecm_intr_profiler ); + + return; + + error: + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); + return; +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations ecm_intr_operations = { + .complete = ecm_intr_complete, +}; + +/****************************************************************************** + * + * CDC-ECM data interface + * + ****************************************************************************** + */ + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void ecm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct ecm_device *ecm = container_of ( ep, struct ecm_device, + usbnet.in ); + struct net_device *netdev = ecm->netdev; + + /* Profile receive completions */ + profile_start ( &ecm_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto ignore; + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( ecm, "ECM %p bulk IN failed: %s\n", + ecm, strerror ( rc ) ); + goto error; + } + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + + profile_stop ( &ecm_in_profiler ); + return; + + error: + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations ecm_in_operations = { + .complete = ecm_in_complete, +}; + +/** + * Transmit packet + * + * @v ecm CDC-ECM device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int ecm_out_transmit ( struct ecm_device *ecm, + struct io_buffer *iobuf ) { + int rc; + + /* Profile transmissions */ + profile_start ( &ecm_out_profiler ); + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &ecm->usbnet.out, iobuf, 1 ) ) != 0 ) + return rc; + + profile_stop ( &ecm_out_profiler ); + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void ecm_out_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct ecm_device *ecm = container_of ( ep, struct ecm_device, + usbnet.out ); + struct net_device *netdev = ecm->netdev; + + /* Report TX completion */ + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations ecm_out_operations = { + .complete = ecm_out_complete, +}; + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int ecm_open ( struct net_device *netdev ) { + struct ecm_device *ecm = netdev->priv; + struct usb_device 
*usb = ecm->usb; + unsigned int filter; + int rc; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &ecm->usbnet ) ) != 0 ) { + DBGC ( ecm, "ECM %p could not open: %s\n", + ecm, strerror ( rc ) ); + goto err_open; + } + + /* Set packet filter */ + filter = ( ECM_PACKET_TYPE_PROMISCUOUS | + ECM_PACKET_TYPE_ALL_MULTICAST | + ECM_PACKET_TYPE_DIRECTED | + ECM_PACKET_TYPE_BROADCAST ); + if ( ( rc = usb_control ( usb, ECM_SET_ETHERNET_PACKET_FILTER, + filter, ecm->usbnet.comms, NULL, 0 ) ) != 0 ){ + DBGC ( ecm, "ECM %p could not set packet filter: %s\n", + ecm, strerror ( rc ) ); + goto err_set_filter; + } + + return 0; + + err_set_filter: + usbnet_close ( &ecm->usbnet ); + err_open: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void ecm_close ( struct net_device *netdev ) { + struct ecm_device *ecm = netdev->priv; + + /* Close USB network device */ + usbnet_close ( &ecm->usbnet ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int ecm_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct ecm_device *ecm = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = ecm_out_transmit ( ecm, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void ecm_poll ( struct net_device *netdev ) { + struct ecm_device *ecm = netdev->priv; + int rc; + + /* Poll USB bus */ + usb_poll ( ecm->bus ); + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &ecm->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); +} + +/** CDC-ECM network device operations */ +static struct net_device_operations ecm_operations = { + .open = ecm_open, + .close = ecm_close, + .transmit = ecm_transmit, + .poll = ecm_poll, +}; + +/****************************************************************************** + * + * USB interface + * + 
****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int ecm_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct net_device *netdev; + struct ecm_device *ecm; + struct usb_interface_descriptor *comms; + struct ecm_ethernet_descriptor *ethernet; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *ecm ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &ecm_operations ); + netdev->dev = &func->dev; + ecm = netdev->priv; + memset ( ecm, 0, sizeof ( *ecm ) ); + ecm->usb = usb; + ecm->bus = usb->port->hub->bus; + ecm->netdev = netdev; + usbnet_init ( &ecm->usbnet, func, &ecm_intr_operations, + &ecm_in_operations, &ecm_out_operations ); + usb_refill_init ( &ecm->usbnet.intr, 0, 0, ECM_INTR_MAX_FILL ); + usb_refill_init ( &ecm->usbnet.in, 0, ECM_IN_MTU, ECM_IN_MAX_FILL ); + DBGC ( ecm, "ECM %p on %s\n", ecm, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &ecm->usbnet, config ) ) != 0 ) { + DBGC ( ecm, "ECM %p could not describe: %s\n", + ecm, strerror ( rc ) ); + goto err_describe; + } + + /* Locate Ethernet descriptor */ + comms = usb_interface_descriptor ( config, ecm->usbnet.comms, 0 ); + assert ( comms != NULL ); + ethernet = ecm_ethernet_descriptor ( config, comms ); + if ( ! 
ethernet ) { + DBGC ( ecm, "ECM %p has no Ethernet descriptor\n", ecm ); + rc = -EINVAL; + goto err_ethernet; + } + + /* Fetch MAC address */ + if ( ( rc = ecm_fetch_mac ( usb, ethernet, netdev->hw_addr ) ) != 0 ) { + DBGC ( ecm, "ECM %p could not fetch MAC address: %s\n", + ecm, strerror ( rc ) ); + goto err_fetch_mac; + } + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, ecm ); + return 0; + + unregister_netdev ( netdev ); + err_register: + err_fetch_mac: + err_ethernet: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void ecm_remove ( struct usb_function *func ) { + struct ecm_device *ecm = usb_func_get_drvdata ( func ); + struct net_device *netdev = ecm->netdev; + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** CDC-ECM device IDs */ +static struct usb_device_id ecm_ids[] = { + { + .name = "cdc-ecm", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** CDC-ECM driver */ +struct usb_driver ecm_driver __usb_driver = { + .ids = ecm_ids, + .id_count = ( sizeof ( ecm_ids ) / sizeof ( ecm_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_CDC, USB_SUBCLASS_CDC_ECM, 0 ), + .score = USB_SCORE_NORMAL, + .probe = ecm_probe, + .remove = ecm_remove, +}; diff --git a/src/drivers/net/ecm.h b/src/drivers/net/ecm.h new file mode 100644 index 00000000..83d324bd --- /dev/null +++ b/src/drivers/net/ecm.h @@ -0,0 +1,93 @@ +#ifndef _ECM_H +#define _ECM_H + +/** @file + * + * CDC-ECM USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** CDC-ECM subclass */ +#define USB_SUBCLASS_CDC_ECM 0x06 + +/** Set Ethernet packet filter */ +#define ECM_SET_ETHERNET_PACKET_FILTER \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x43 ) ) + +/** 
Ethernet packet types */ +enum ecm_ethernet_packet_filter { + /** Promiscuous mode */ + ECM_PACKET_TYPE_PROMISCUOUS = 0x0001, + /** All multicast packets */ + ECM_PACKET_TYPE_ALL_MULTICAST = 0x0002, + /** Unicast packets */ + ECM_PACKET_TYPE_DIRECTED = 0x0004, + /** Broadcast packets */ + ECM_PACKET_TYPE_BROADCAST = 0x0008, + /** Specified multicast packets */ + ECM_PACKET_TYPE_MULTICAST = 0x0010, +}; + +/** An Ethernet Functional Descriptor */ +struct ecm_ethernet_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Descriptor subtype */ + uint8_t subtype; + /** MAC address string */ + uint8_t mac; + /** Ethernet statistics bitmap */ + uint32_t statistics; + /** Maximum segment size */ + uint16_t mtu; + /** Multicast filter configuration */ + uint16_t mcast; + /** Number of wake-on-LAN filters */ + uint8_t wol; +} __attribute__ (( packed )); + +/** A CDC-ECM network device */ +struct ecm_device { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** Network device */ + struct net_device *netdev; + /** USB network device */ + struct usbnet_device usbnet; +}; + +/** Interrupt maximum fill level + * + * This is a policy decision. + */ +#define ECM_INTR_MAX_FILL 2 + +/** Bulk IN maximum fill level + * + * This is a policy decision. + */ +#define ECM_IN_MAX_FILL 8 + +/** Bulk IN buffer size + * + * This is a policy decision. 
+ */ +#define ECM_IN_MTU ( ETH_FRAME_LEN + 4 /* possible VLAN header */ ) + +extern struct ecm_ethernet_descriptor * +ecm_ethernet_descriptor ( struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface ); +extern int ecm_fetch_mac ( struct usb_device *usb, + struct ecm_ethernet_descriptor *desc, + uint8_t *hw_addr ); + +#endif /* _ECM_H */ diff --git a/src/drivers/net/efi/nii.c b/src/drivers/net/efi/nii.c new file mode 100644 index 00000000..e76e211c --- /dev/null +++ b/src/drivers/net/efi/nii.c @@ -0,0 +1,1371 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nii.h" + +/** @file + * + * NII driver + * + */ + +/* Error numbers generated by NII */ +#define EIO_INVALID_CDB __einfo_error ( EINFO_EIO_INVALID_CDB ) +#define EINFO_EIO_INVALID_CDB \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_INVALID_CDB, \ + "Invalid CDB" ) +#define EIO_INVALID_CPB __einfo_error ( EINFO_EIO_INVALID_CPB ) +#define EINFO_EIO_INVALID_CPB \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_INVALID_CPB, \ + "Invalid CPB" ) +#define EIO_BUSY __einfo_error ( EINFO_EIO_BUSY ) +#define EINFO_EIO_BUSY \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_BUSY, \ + "Busy" ) +#define EIO_QUEUE_FULL __einfo_error ( EINFO_EIO_QUEUE_FULL ) +#define EINFO_EIO_QUEUE_FULL \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_QUEUE_FULL, \ + "Queue full" ) +#define EIO_ALREADY_STARTED __einfo_error ( EINFO_EIO_ALREADY_STARTED ) +#define EINFO_EIO_ALREADY_STARTED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_ALREADY_STARTED, \ + "Already started" ) +#define EIO_NOT_STARTED __einfo_error ( EINFO_EIO_NOT_STARTED ) +#define EINFO_EIO_NOT_STARTED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_NOT_STARTED, \ + "Not started" ) +#define EIO_NOT_SHUTDOWN __einfo_error ( EINFO_EIO_NOT_SHUTDOWN ) +#define EINFO_EIO_NOT_SHUTDOWN \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_NOT_SHUTDOWN, \ + "Not shutdown" ) +#define EIO_ALREADY_INITIALIZED __einfo_error ( EINFO_EIO_ALREADY_INITIALIZED ) +#define EINFO_EIO_ALREADY_INITIALIZED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_ALREADY_INITIALIZED, \ + "Already initialized" ) +#define EIO_NOT_INITIALIZED __einfo_error ( EINFO_EIO_NOT_INITIALIZED ) +#define EINFO_EIO_NOT_INITIALIZED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_NOT_INITIALIZED, \ + "Not initialized" ) +#define EIO_DEVICE_FAILURE __einfo_error ( EINFO_EIO_DEVICE_FAILURE ) +#define 
EINFO_EIO_DEVICE_FAILURE \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_DEVICE_FAILURE, \ + "Device failure" ) +#define EIO_NVDATA_FAILURE __einfo_error ( EINFO_EIO_NVDATA_FAILURE ) +#define EINFO_EIO_NVDATA_FAILURE \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_NVDATA_FAILURE, \ + "Non-volatile data failure" ) +#define EIO_UNSUPPORTED __einfo_error ( EINFO_EIO_UNSUPPORTED ) +#define EINFO_EIO_UNSUPPORTED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_UNSUPPORTED, \ + "Unsupported" ) +#define EIO_BUFFER_FULL __einfo_error ( EINFO_EIO_BUFFER_FULL ) +#define EINFO_EIO_BUFFER_FULL \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_BUFFER_FULL, \ + "Buffer full" ) +#define EIO_INVALID_PARAMETER __einfo_error ( EINFO_EIO_INVALID_PARAMETER ) +#define EINFO_EIO_INVALID_PARAMETER \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_INVALID_PARAMETER, \ + "Invalid parameter" ) +#define EIO_INVALID_UNDI __einfo_error ( EINFO_EIO_INVALID_UNDI ) +#define EINFO_EIO_INVALID_UNDI \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_INVALID_UNDI, \ + "Invalid UNDI" ) +#define EIO_IPV4_NOT_SUPPORTED __einfo_error ( EINFO_EIO_IPV4_NOT_SUPPORTED ) +#define EINFO_EIO_IPV4_NOT_SUPPORTED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_IPV4_NOT_SUPPORTED, \ + "IPv4 not supported" ) +#define EIO_IPV6_NOT_SUPPORTED __einfo_error ( EINFO_EIO_IPV6_NOT_SUPPORTED ) +#define EINFO_EIO_IPV6_NOT_SUPPORTED \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_IPV6_NOT_SUPPORTED, \ + "IPv6 not supported" ) +#define EIO_NOT_ENOUGH_MEMORY __einfo_error ( EINFO_EIO_NOT_ENOUGH_MEMORY ) +#define EINFO_EIO_NOT_ENOUGH_MEMORY \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_NOT_ENOUGH_MEMORY, \ + "Not enough memory" ) +#define EIO_NO_DATA __einfo_error ( EINFO_EIO_NO_DATA ) +#define EINFO_EIO_NO_DATA \ + __einfo_uniqify ( EINFO_EIO, PXE_STATCODE_NO_DATA, \ + "No data" ) +#define EIO_STAT( stat ) \ + EUNIQ ( EINFO_EIO, -(stat), EIO_INVALID_CDB, EIO_INVALID_CPB, \ + EIO_BUSY, EIO_QUEUE_FULL, EIO_ALREADY_STARTED, \ + EIO_NOT_STARTED, 
EIO_NOT_SHUTDOWN, EIO_ALREADY_INITIALIZED, \ + EIO_NOT_INITIALIZED, EIO_DEVICE_FAILURE, EIO_NVDATA_FAILURE, \ + EIO_UNSUPPORTED, EIO_BUFFER_FULL, EIO_INVALID_PARAMETER, \ + EIO_INVALID_UNDI, EIO_IPV4_NOT_SUPPORTED, \ + EIO_IPV6_NOT_SUPPORTED, EIO_NOT_ENOUGH_MEMORY, EIO_NO_DATA ) + +/** Maximum PCI BAR + * + * This is defined in , but we + * can't #include that since it collides with . + */ +#define PCI_MAX_BAR 6 + +/** An NII memory mapping */ +struct nii_mapping { + /** List of mappings */ + struct list_head list; + /** Mapped address */ + UINT64 addr; + /** Mapping cookie created by PCI I/O protocol */ + VOID *mapping; +}; + +/** An NII NIC */ +struct nii_nic { + /** EFI device */ + struct efi_device *efidev; + /** Network interface identifier protocol */ + EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL *nii; + /** !PXE structure */ + PXE_SW_UNDI *undi; + /** Entry point */ + EFIAPI VOID ( * issue ) ( UINT64 cdb ); + /** Generic device */ + struct device dev; + + /** PCI device */ + EFI_HANDLE pci_device; + /** PCI I/O protocol */ + EFI_PCI_IO_PROTOCOL *pci_io; + /** Memory BAR */ + unsigned int mem_bar; + /** I/O BAR */ + unsigned int io_bar; + + /** Broadcast address */ + PXE_MAC_ADDR broadcast; + /** Maximum packet length */ + size_t mtu; + + /** Hardware transmit/receive buffer */ + userptr_t buffer; + /** Hardware transmit/receive buffer length */ + size_t buffer_len; + + /** Saved task priority level */ + EFI_TPL saved_tpl; + + /** Media status is supported */ + int media; + + /** Current transmit buffer */ + struct io_buffer *txbuf; + /** Current receive buffer */ + struct io_buffer *rxbuf; + + /** Mapping list */ + struct list_head mappings; +}; + +/** Maximum number of received packets per poll */ +#define NII_RX_QUOTA 4 + +/** + * Open PCI I/O protocol and identify BARs + * + * @v nii NII NIC + * @ret rc Return status code + */ +static int nii_pci_open ( struct nii_nic *nii ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE device = 
nii->efidev->device; + EFI_HANDLE pci_device; + union { + EFI_PCI_IO_PROTOCOL *pci_io; + void *interface; + } pci_io; + union { + EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR *acpi; + void *resource; + } desc; + int bar; + EFI_STATUS efirc; + int rc; + + /* Locate PCI I/O protocol */ + if ( ( rc = efi_locate_device ( device, &efi_pci_io_protocol_guid, + &pci_device ) ) != 0 ) { + DBGC ( nii, "NII %s could not locate PCI I/O protocol: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_locate; + } + nii->pci_device = pci_device; + + /* Open PCI I/O protocol */ + if ( ( efirc = bs->OpenProtocol ( pci_device, &efi_pci_io_protocol_guid, + &pci_io.interface, efi_image_handle, + device, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( nii, "NII %s could not open PCI I/O protocol: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_open; + } + nii->pci_io = pci_io.pci_io; + + /* Identify memory and I/O BARs */ + nii->mem_bar = PCI_MAX_BAR; + nii->io_bar = PCI_MAX_BAR; + for ( bar = ( PCI_MAX_BAR - 1 ) ; bar >= 0 ; bar-- ) { + efirc = nii->pci_io->GetBarAttributes ( nii->pci_io, bar, NULL, + &desc.resource ); + if ( efirc == EFI_UNSUPPORTED ) { + /* BAR not present; ignore */ + continue; + } + if ( efirc != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( nii, "NII %s could not get BAR %d attributes: " + "%s\n", nii->dev.name, bar, strerror ( rc ) ); + goto err_get_bar_attributes; + } + if ( desc.acpi->ResType == ACPI_ADDRESS_SPACE_TYPE_MEM ) { + nii->mem_bar = bar; + } else if ( desc.acpi->ResType == ACPI_ADDRESS_SPACE_TYPE_IO ) { + nii->io_bar = bar; + } + bs->FreePool ( desc.resource ); + } + DBGC ( nii, "NII %s has ", nii->dev.name ); + if ( nii->mem_bar < PCI_MAX_BAR ) { + DBGC ( nii, "memory BAR %d and ", nii->mem_bar ); + } else { + DBGC ( nii, "no memory BAR and " ); + } + if ( nii->io_bar < PCI_MAX_BAR ) { + DBGC ( nii, "I/O BAR %d\n", nii->io_bar ); + } else { + DBGC ( nii, "no I/O BAR\n" ); + } + + return 0; + + err_get_bar_attributes: + 
bs->CloseProtocol ( pci_device, &efi_pci_io_protocol_guid, + efi_image_handle, device ); + err_open: + err_locate: + return rc; +} + +/** + * Close PCI I/O protocol + * + * @v nii NII NIC + * @ret rc Return status code + */ +static void nii_pci_close ( struct nii_nic *nii ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct nii_mapping *map; + struct nii_mapping *tmp; + + /* Remove any stale mappings */ + list_for_each_entry_safe ( map, tmp, &nii->mappings, list ) { + DBGC ( nii, "NII %s removing stale mapping %#llx\n", + nii->dev.name, ( ( unsigned long long ) map->addr ) ); + nii->pci_io->Unmap ( nii->pci_io, map->mapping ); + list_del ( &map->list ); + free ( map ); + } + + /* Close protocols */ + bs->CloseProtocol ( nii->pci_device, &efi_pci_io_protocol_guid, + efi_image_handle, nii->efidev->device ); +} + +/** + * I/O callback + * + * @v unique_id NII NIC + * @v op Operations + * @v len Length of data + * @v addr Address + * @v data Data buffer + */ +static EFIAPI VOID nii_io ( UINT64 unique_id, UINT8 op, UINT8 len, UINT64 addr, + UINT64 data ) { + struct nii_nic *nii = ( ( void * ) ( intptr_t ) unique_id ); + EFI_PCI_IO_PROTOCOL_ACCESS *access; + EFI_PCI_IO_PROTOCOL_IO_MEM io; + EFI_PCI_IO_PROTOCOL_WIDTH width; + unsigned int bar; + EFI_STATUS efirc; + int rc; + + /* Determine accessor and BAR */ + if ( op & ( PXE_MEM_READ | PXE_MEM_WRITE ) ) { + access = &nii->pci_io->Mem; + bar = nii->mem_bar; + } else { + access = &nii->pci_io->Io; + bar = nii->io_bar; + } + + /* Determine operaton */ + io = ( ( op & ( PXE_IO_WRITE | PXE_MEM_WRITE ) ) ? 
+ access->Write : access->Read ); + + /* Determine width */ + width = ( fls ( len ) - 1 ); + + /* Issue operation */ + if ( ( efirc = io ( nii->pci_io, width, bar, addr, 1, + ( ( void * ) ( intptr_t ) data ) ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( nii, "NII %s I/O operation %#x failed: %s\n", + nii->dev.name, op, strerror ( rc ) ); + /* No way to report failure */ + return; + } +} + +/** + * Map callback + * + * @v unique_id NII NIC + * @v addr Address of memory to be mapped + * @v len Length of memory to be mapped + * @v dir Direction of data flow + * @v mapped Device mapped address to fill in + */ +static EFIAPI VOID nii_map ( UINT64 unique_id, UINT64 addr, UINT32 len, + UINT32 dir, UINT64 mapped ) { + struct nii_nic *nii = ( ( void * ) ( intptr_t ) unique_id ); + EFI_PHYSICAL_ADDRESS *phys = ( ( void * ) ( intptr_t ) mapped ); + EFI_PCI_IO_PROTOCOL_OPERATION op; + struct nii_mapping *map; + UINTN count = len; + EFI_STATUS efirc; + int rc; + + /* Return a zero mapped address on failure */ + *phys = 0; + + /* Determine PCI mapping operation */ + switch ( dir ) { + case TO_AND_FROM_DEVICE: + op = EfiPciIoOperationBusMasterCommonBuffer; + break; + case FROM_DEVICE: + op = EfiPciIoOperationBusMasterWrite; + break; + case TO_DEVICE: + op = EfiPciIoOperationBusMasterRead; + break; + default: + DBGC ( nii, "NII %s unsupported mapping direction %d\n", + nii->dev.name, dir ); + goto err_dir; + } + + /* Allocate a mapping record */ + map = zalloc ( sizeof ( *map ) ); + if ( ! 
map ) + goto err_alloc; + map->addr = addr; + + /* Create map */ + if ( ( efirc = nii->pci_io->Map ( nii->pci_io, op, + ( ( void * ) ( intptr_t ) addr ), + &count, phys, &map->mapping ) ) != 0){ + rc = -EEFI ( efirc ); + DBGC ( nii, "NII %s map operation failed: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_map; + } + + /* Add to list of mappings */ + list_add ( &map->list, &nii->mappings ); + DBGC2 ( nii, "NII %s mapped %#llx+%#x->%#llx\n", + nii->dev.name, ( ( unsigned long long ) addr ), + len, ( ( unsigned long long ) *phys ) ); + return; + + list_del ( &map->list ); + err_map: + free ( map ); + err_alloc: + err_dir: + return; +} + +/** + * Unmap callback + * + * @v unique_id NII NIC + * @v addr Address of mapped memory + * @v len Length of mapped memory + * @v dir Direction of data flow + * @v mapped Device mapped address + */ +static EFIAPI VOID nii_unmap ( UINT64 unique_id, UINT64 addr, UINT32 len, + UINT32 dir __unused, UINT64 mapped ) { + struct nii_nic *nii = ( ( void * ) ( intptr_t ) unique_id ); + struct nii_mapping *map; + + /* Locate mapping record */ + list_for_each_entry ( map, &nii->mappings, list ) { + if ( map->addr == addr ) { + nii->pci_io->Unmap ( nii->pci_io, map->mapping ); + list_del ( &map->list ); + free ( map ); + DBGC2 ( nii, "NII %s unmapped %#llx+%#x->%#llx\n", + nii->dev.name, ( ( unsigned long long ) addr ), + len, ( ( unsigned long long ) mapped ) ); + return; + } + } + + DBGC ( nii, "NII %s non-existent mapping %#llx+%#x->%#llx\n", + nii->dev.name, ( ( unsigned long long ) addr ), + len, ( ( unsigned long long ) mapped ) ); +} + +/** + * Sync callback + * + * @v unique_id NII NIC + * @v addr Address of mapped memory + * @v len Length of mapped memory + * @v dir Direction of data flow + * @v mapped Device mapped address + */ +static EFIAPI VOID nii_sync ( UINT64 unique_id __unused, UINT64 addr, + UINT32 len, UINT32 dir, UINT64 mapped ) { + const void *src; + void *dst; + + /* Do nothing if this is an identity mapping */ + 
if ( addr == mapped ) + return; + + /* Determine direction */ + if ( dir == FROM_DEVICE ) { + src = ( ( void * ) ( intptr_t ) mapped ); + dst = ( ( void * ) ( intptr_t ) addr ); + } else { + src = ( ( void * ) ( intptr_t ) addr ); + dst = ( ( void * ) ( intptr_t ) mapped ); + } + + /* Copy data */ + memcpy ( dst, src, len ); +} + +/** + * Delay callback + * + * @v unique_id NII NIC + * @v microseconds Delay in microseconds + */ +static EFIAPI VOID nii_delay ( UINT64 unique_id __unused, UINTN microseconds ) { + + udelay ( microseconds ); +} + +/** + * Block callback + * + * @v unique_id NII NIC + * @v acquire Acquire lock + */ +static EFIAPI VOID nii_block ( UINT64 unique_id, UINT32 acquire ) { + struct nii_nic *nii = ( ( void * ) ( intptr_t ) unique_id ); + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + /* This functionality (which is copied verbatim from the + * SnpDxe implementation of this function) appears to be + * totally brain-dead, since it produces no actual blocking + * behaviour. 
+ */ + if ( acquire ) { + nii->saved_tpl = bs->RaiseTPL ( TPL_NOTIFY ); + } else { + bs->RestoreTPL ( nii->saved_tpl ); + } +} + +/** + * Construct operation from opcode and flags + * + * @v opcode Opcode + * @v opflags Flags + * @ret op Operation + */ +#define NII_OP( opcode, opflags ) ( (opcode) | ( (opflags) << 16 ) ) + +/** + * Extract opcode from operation + * + * @v op Operation + * @ret opcode Opcode + */ +#define NII_OPCODE( op ) ( (op) & 0xffff ) + +/** + * Extract flags from operation + * + * @v op Operation + * @ret opflags Flags + */ +#define NII_OPFLAGS( op ) ( (op) >> 16 ) + +/** + * Issue command with parameter block and data block + * + * @v nii NII NIC + * @v op Operation + * @v cpb Command parameter block, or NULL + * @v cpb_len Command parameter block length + * @v db Data block, or NULL + * @v db_len Data block length + * @ret stat Status flags, or negative status code + */ +static int nii_issue_cpb_db ( struct nii_nic *nii, unsigned int op, void *cpb, + size_t cpb_len, void *db, size_t db_len ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + PXE_CDB cdb; + UINTN tpl; + + /* Prepare command descriptor block */ + memset ( &cdb, 0, sizeof ( cdb ) ); + cdb.OpCode = NII_OPCODE ( op ); + cdb.OpFlags = NII_OPFLAGS ( op ); + cdb.CPBaddr = ( ( intptr_t ) cpb ); + cdb.CPBsize = cpb_len; + cdb.DBaddr = ( ( intptr_t ) db ); + cdb.DBsize = db_len; + cdb.IFnum = nii->nii->IfNum; + + /* Raise task priority level */ + tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Issue command */ + DBGC2 ( nii, "NII %s issuing %02x:%04x ifnum %d%s%s\n", + nii->dev.name, cdb.OpCode, cdb.OpFlags, cdb.IFnum, + ( cpb ? " cpb" : "" ), ( db ? 
" db" : "" ) ); + if ( cpb ) + DBGC2_HD ( nii, cpb, cpb_len ); + if ( db ) + DBGC2_HD ( nii, db, db_len ); + nii->issue ( ( intptr_t ) &cdb ); + + /* Restore task priority level */ + bs->RestoreTPL ( tpl ); + + /* Check completion status */ + if ( cdb.StatCode != PXE_STATCODE_SUCCESS ) + return -cdb.StatCode; + + /* Return command-specific status flags */ + return ( cdb.StatFlags & ~PXE_STATFLAGS_STATUS_MASK ); +} + +/** + * Issue command with parameter block + * + * @v nii NII NIC + * @v op Operation + * @v cpb Command parameter block, or NULL + * @v cpb_len Command parameter block length + * @ret stat Status flags, or negative status code + */ +static int nii_issue_cpb ( struct nii_nic *nii, unsigned int op, void *cpb, + size_t cpb_len ) { + + return nii_issue_cpb_db ( nii, op, cpb, cpb_len, NULL, 0 ); +} + +/** + * Issue command with data block + * + * @v nii NII NIC + * @v op Operation + * @v db Data block, or NULL + * @v db_len Data block length + * @ret stat Status flags, or negative status code + */ +static int nii_issue_db ( struct nii_nic *nii, unsigned int op, void *db, + size_t db_len ) { + + return nii_issue_cpb_db ( nii, op, NULL, 0, db, db_len ); +} + +/** + * Issue command + * + * + * @v nii NII NIC + * @v op Operation + * @ret stat Status flags, or negative status code + */ +static int nii_issue ( struct nii_nic *nii, unsigned int op ) { + + return nii_issue_cpb_db ( nii, op, NULL, 0, NULL, 0 ); +} + +/** + * Start UNDI + * + * @v nii NII NIC + * @ret rc Return status code + */ +static int nii_start_undi ( struct nii_nic *nii ) { + PXE_CPB_START_31 cpb; + int stat; + int rc; + + /* Construct parameter block */ + memset ( &cpb, 0, sizeof ( cpb ) ); + cpb.Delay = ( ( intptr_t ) nii_delay ); + cpb.Block = ( ( intptr_t ) nii_block ); + cpb.Mem_IO = ( ( intptr_t ) nii_io ); + cpb.Map_Mem = ( ( intptr_t ) nii_map ); + cpb.UnMap_Mem = ( ( intptr_t ) nii_unmap ); + cpb.Sync_Mem = ( ( intptr_t ) nii_sync ); + cpb.Unique_ID = ( ( intptr_t ) nii ); + + /* 
Issue command */ + if ( ( stat = nii_issue_cpb ( nii, PXE_OPCODE_START, &cpb, + sizeof ( cpb ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not start: %s\n", + nii->dev.name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Stop UNDI + * + * @v nii NII NIC + */ +static void nii_stop_undi ( struct nii_nic *nii ) { + int stat; + int rc; + + /* Issue command */ + if ( ( stat = nii_issue ( nii, PXE_OPCODE_STOP ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not stop: %s\n", + nii->dev.name, strerror ( rc ) ); + /* Nothing we can do about it */ + return; + } +} + +/** + * Get initialisation information + * + * @v nii NII NIC + * @v netdev Network device to fill in + * @ret rc Return status code + */ +static int nii_get_init_info ( struct nii_nic *nii, + struct net_device *netdev ) { + PXE_DB_GET_INIT_INFO db; + int stat; + int rc; + + /* Issue command */ + if ( ( stat = nii_issue_db ( nii, PXE_OPCODE_GET_INIT_INFO, &db, + sizeof ( db ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not get initialisation info: %s\n", + nii->dev.name, strerror ( rc ) ); + return rc; + } + + /* Determine link layer protocol */ + switch ( db.IFtype ) { + case PXE_IFTYPE_ETHERNET : + netdev->ll_protocol = ðernet_protocol; + break; + default: + DBGC ( nii, "NII %s unknown interface type %#02x\n", + nii->dev.name, db.IFtype ); + return -ENOTSUP; + } + + /* Sanity checks */ + assert ( db.MediaHeaderLen == netdev->ll_protocol->ll_header_len ); + assert ( db.HWaddrLen == netdev->ll_protocol->hw_addr_len ); + assert ( db.HWaddrLen == netdev->ll_protocol->ll_addr_len ); + + /* Extract parameters */ + nii->buffer_len = db.MemoryRequired; + nii->mtu = ( db.FrameDataLen + db.MediaHeaderLen ); + netdev->max_pkt_len = nii->mtu; + nii->media = ( stat & PXE_STATFLAGS_GET_STATUS_NO_MEDIA_SUPPORTED ); + + return 0; +} + +/** + * Initialise UNDI + * + * @v nii NII NIC + * @v flags Flags + * @ret rc Return status code + */ +static 
int nii_initialise_flags ( struct nii_nic *nii, unsigned int flags ) { + PXE_CPB_INITIALIZE cpb; + PXE_DB_INITIALIZE db; + unsigned int op; + int stat; + int rc; + + /* Allocate memory buffer */ + nii->buffer = umalloc ( nii->buffer_len ); + if ( ! nii->buffer ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Construct parameter block */ + memset ( &cpb, 0, sizeof ( cpb ) ); + cpb.MemoryAddr = ( ( intptr_t ) nii->buffer ); + cpb.MemoryLength = nii->buffer_len; + + /* Construct data block */ + memset ( &db, 0, sizeof ( db ) ); + + /* Issue command */ + op = NII_OP ( PXE_OPCODE_INITIALIZE, flags ); + if ( ( stat = nii_issue_cpb_db ( nii, op, &cpb, sizeof ( cpb ), + &db, sizeof ( db ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not initialise: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_initialize; + } + + return 0; + + err_initialize: + ufree ( nii->buffer ); + err_alloc: + return rc; +} + +/** + * Initialise UNDI with cable detection + * + * @v nii NII NIC + * @ret rc Return status code + */ +static int nii_initialise_cable ( struct nii_nic *nii ) { + unsigned int flags; + + /* Initialise UNDI */ + flags = PXE_OPFLAGS_INITIALIZE_DETECT_CABLE; + return nii_initialise_flags ( nii, flags ); +} + +/** + * Initialise UNDI + * + * @v nii NII NIC + * @ret rc Return status code + */ +static int nii_initialise ( struct nii_nic *nii ) { + unsigned int flags; + + /* Initialise UNDI */ + flags = PXE_OPFLAGS_INITIALIZE_DO_NOT_DETECT_CABLE; + return nii_initialise_flags ( nii, flags ); +} + +/** + * Shut down UNDI + * + * @v nii NII NIC + */ +static void nii_shutdown ( struct nii_nic *nii ) { + int stat; + int rc; + + /* Issue command */ + if ( ( stat = nii_issue ( nii, PXE_OPCODE_SHUTDOWN ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not shut down: %s\n", + nii->dev.name, strerror ( rc ) ); + /* Leak memory to avoid corruption */ + return; + } + + /* Free buffer */ + ufree ( nii->buffer ); +} + +/** + * Get station addresses + 
* + * @v nii NII NIC + * @v netdev Network device to fill in + * @ret rc Return status code + */ +static int nii_get_station_address ( struct nii_nic *nii, + struct net_device *netdev ) { + PXE_DB_STATION_ADDRESS db; + unsigned int op; + int stat; + int rc; + + /* Initialise UNDI */ + if ( ( rc = nii_initialise ( nii ) ) != 0 ) + goto err_initialise; + + /* Issue command */ + op = NII_OP ( PXE_OPCODE_STATION_ADDRESS, + PXE_OPFLAGS_STATION_ADDRESS_READ ); + if ( ( stat = nii_issue_db ( nii, op, &db, sizeof ( db ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not get station address: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_station_address; + } + + /* Copy MAC addresses */ + memcpy ( netdev->ll_addr, db.StationAddr, + netdev->ll_protocol->ll_addr_len ); + memcpy ( netdev->hw_addr, db.PermanentAddr, + netdev->ll_protocol->hw_addr_len ); + memcpy ( nii->broadcast, db.BroadcastAddr, + sizeof ( nii->broadcast ) ); + + err_station_address: + nii_shutdown ( nii ); + err_initialise: + return rc; +} + +/** + * Set station address + * + * @v nii NII NIC + * @v netdev Network device + * @ret rc Return status code + */ +static int nii_set_station_address ( struct nii_nic *nii, + struct net_device *netdev ) { + uint32_t implementation = nii->undi->Implementation; + PXE_CPB_STATION_ADDRESS cpb; + unsigned int op; + int stat; + int rc; + + /* Fail if setting station address is unsupported */ + if ( ! 
( implementation & PXE_ROMID_IMP_STATION_ADDR_SETTABLE ) ) + return -ENOTSUP; + + /* Construct parameter block */ + memset ( &cpb, 0, sizeof ( cpb ) ); + memcpy ( cpb.StationAddr, netdev->ll_addr, + netdev->ll_protocol->ll_addr_len ); + + /* Issue command */ + op = NII_OP ( PXE_OPCODE_STATION_ADDRESS, + PXE_OPFLAGS_STATION_ADDRESS_WRITE ); + if ( ( stat = nii_issue_cpb ( nii, op, &cpb, sizeof ( cpb ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not set station address: %s\n", + nii->dev.name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Set receive filters + * + * @v nii NII NIC + * @ret rc Return status code + */ +static int nii_set_rx_filters ( struct nii_nic *nii ) { + uint32_t implementation = nii->undi->Implementation; + unsigned int flags; + unsigned int op; + int stat; + int rc; + + /* Construct receive filter set */ + flags = ( PXE_OPFLAGS_RECEIVE_FILTER_ENABLE | + PXE_OPFLAGS_RECEIVE_FILTER_UNICAST ); + if ( implementation & PXE_ROMID_IMP_BROADCAST_RX_SUPPORTED ) + flags |= PXE_OPFLAGS_RECEIVE_FILTER_BROADCAST; + if ( implementation & PXE_ROMID_IMP_PROMISCUOUS_RX_SUPPORTED ) + flags |= PXE_OPFLAGS_RECEIVE_FILTER_PROMISCUOUS; + if ( implementation & PXE_ROMID_IMP_PROMISCUOUS_MULTICAST_RX_SUPPORTED ) + flags |= PXE_OPFLAGS_RECEIVE_FILTER_ALL_MULTICAST; + + /* Issue command */ + op = NII_OP ( PXE_OPCODE_RECEIVE_FILTERS, flags ); + if ( ( stat = nii_issue ( nii, op ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not set receive filters %#04x: %s\n", + nii->dev.name, flags, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int nii_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct nii_nic *nii = netdev->priv; + PXE_CPB_TRANSMIT cpb; + unsigned int op; + int stat; + int rc; + + /* Defer the packet if there is already a transmission in progress */ + 
if ( nii->txbuf ) { + netdev_tx_defer ( netdev, iobuf ); + return 0; + } + + /* Construct parameter block */ + memset ( &cpb, 0, sizeof ( cpb ) ); + cpb.FrameAddr = virt_to_bus ( iobuf->data ); + cpb.DataLen = iob_len ( iobuf ); + cpb.MediaheaderLen = netdev->ll_protocol->ll_header_len; + + /* Transmit packet */ + op = NII_OP ( PXE_OPCODE_TRANSMIT, + ( PXE_OPFLAGS_TRANSMIT_WHOLE | + PXE_OPFLAGS_TRANSMIT_DONT_BLOCK ) ); + if ( ( stat = nii_issue_cpb ( nii, op, &cpb, sizeof ( cpb ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not transmit: %s\n", + nii->dev.name, strerror ( rc ) ); + return rc; + } + nii->txbuf = iobuf; + + return 0; +} + +/** + * Poll for completed packets + * + * @v netdev Network device + * @v stat Status flags + */ +static void nii_poll_tx ( struct net_device *netdev, unsigned int stat ) { + struct nii_nic *nii = netdev->priv; + struct io_buffer *iobuf; + + /* Do nothing unless we have a completion */ + if ( stat & PXE_STATFLAGS_GET_STATUS_NO_TXBUFS_WRITTEN ) + return; + + /* Sanity check */ + assert ( nii->txbuf != NULL ); + + /* Complete transmission */ + iobuf = nii->txbuf; + nii->txbuf = NULL; + netdev_tx_complete ( netdev, iobuf ); +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void nii_poll_rx ( struct net_device *netdev ) { + struct nii_nic *nii = netdev->priv; + PXE_CPB_RECEIVE cpb; + PXE_DB_RECEIVE db; + unsigned int quota; + int stat; + int rc; + + /* Retrieve up to NII_RX_QUOTA packets */ + for ( quota = NII_RX_QUOTA ; quota ; quota-- ) { + + /* Allocate buffer, if required */ + if ( ! nii->rxbuf ) { + nii->rxbuf = alloc_iob ( nii->mtu ); + if ( ! 
nii->rxbuf ) { + /* Leave for next poll */ + break; + } + } + + /* Construct parameter block */ + memset ( &cpb, 0, sizeof ( cpb ) ); + cpb.BufferAddr = virt_to_bus ( nii->rxbuf->data ); + cpb.BufferLen = iob_tailroom ( nii->rxbuf ); + + /* Issue command */ + if ( ( stat = nii_issue_cpb_db ( nii, PXE_OPCODE_RECEIVE, + &cpb, sizeof ( cpb ), + &db, sizeof ( db ) ) ) < 0 ) { + + /* PXE_STATCODE_NO_DATA is just the usual "no packet" + * status indicator; ignore it. + */ + if ( stat == -PXE_STATCODE_NO_DATA ) + break; + + /* Anything else is an error */ + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not receive: %s\n", + nii->dev.name, strerror ( rc ) ); + netdev_rx_err ( netdev, NULL, rc ); + break; + } + + /* Hand off to network stack */ + iob_put ( nii->rxbuf, db.FrameLen ); + netdev_rx ( netdev, nii->rxbuf ); + nii->rxbuf = NULL; + } +} + +/** + * Check for link state changes + * + * @v netdev Network device + * @v stat Status flags + */ +static void nii_poll_link ( struct net_device *netdev, unsigned int stat ) { + int no_media = ( stat & PXE_STATFLAGS_GET_STATUS_NO_MEDIA ); + + if ( no_media && netdev_link_ok ( netdev ) ) { + netdev_link_down ( netdev ); + } else if ( ( ! no_media ) && ( ! netdev_link_ok ( netdev ) ) ) { + netdev_link_up ( netdev ); + } +} + +/** + * Poll for completed packets + * + * @v netdev Network device + */ +static void nii_poll ( struct net_device *netdev ) { + struct nii_nic *nii = netdev->priv; + PXE_DB_GET_STATUS db; + unsigned int op; + int stat; + int rc; + + /* Construct data block */ + memset ( &db, 0, sizeof ( db ) ); + + /* Get status */ + op = NII_OP ( PXE_OPCODE_GET_STATUS, + ( PXE_OPFLAGS_GET_INTERRUPT_STATUS | + ( nii->txbuf ? PXE_OPFLAGS_GET_TRANSMITTED_BUFFERS : 0)| + ( nii->media ? 
PXE_OPFLAGS_GET_MEDIA_STATUS : 0 ) ) ); + if ( ( stat = nii_issue_db ( nii, op, &db, sizeof ( db ) ) ) < 0 ) { + rc = -EIO_STAT ( stat ); + DBGC ( nii, "NII %s could not get status: %s\n", + nii->dev.name, strerror ( rc ) ); + return; + } + + /* Process any TX completions */ + if ( nii->txbuf ) + nii_poll_tx ( netdev, stat ); + + /* Process any RX completions */ + nii_poll_rx ( netdev ); + + /* Check for link state changes */ + if ( nii->media ) + nii_poll_link ( netdev, stat ); +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int nii_open ( struct net_device *netdev ) { + struct nii_nic *nii = netdev->priv; + int rc; + + /* Initialise NIC + * + * We don't care about link state here, and would prefer to + * have the NIC initialise even if no cable is present, to + * match the behaviour of all other iPXE drivers. + * + * Some Emulex NII drivers have a bug which prevents packets + * from being sent or received unless we specifically ask it + * to detect cable presence during initialisation. + * + * Unfortunately, some other NII drivers (e.g. Mellanox) may + * time out and report failure if asked to detect cable + * presence during initialisation on links that are physically + * slow to reach link-up. + * + * Attempt to work around both of these problems by first + * attempting to initialise with cable presence detection, + * then falling back to initialising without cable presence + * detection. 
+ */ + if ( ( rc = nii_initialise_cable ( nii ) ) != 0 ) { + DBGC ( nii, "NII %s could not initialise with cable " + "detection: %s\n", nii->dev.name, strerror ( rc ) ); + if ( ( rc = nii_initialise ( nii ) ) != 0 ) { + DBGC ( nii, "NII %s could not initialise without " + "cable detection: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_initialise; + } + } + + /* Attempt to set station address */ + if ( ( rc = nii_set_station_address ( nii, netdev ) ) != 0 ) { + DBGC ( nii, "NII %s could not set station address: %s\n", + nii->dev.name, strerror ( rc ) ); + /* Treat as non-fatal */ + } + + /* Set receive filters */ + if ( ( rc = nii_set_rx_filters ( nii ) ) != 0 ) + goto err_set_rx_filters; + + return 0; + + err_set_rx_filters: + nii_shutdown ( nii ); + err_initialise: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void nii_close ( struct net_device *netdev ) { + struct nii_nic *nii = netdev->priv; + + /* Shut down NIC */ + nii_shutdown ( nii ); + + /* Discard transmit buffer, if applicable */ + if ( nii->txbuf ) { + netdev_tx_complete_err ( netdev, nii->txbuf, -ECANCELED ); + nii->txbuf = NULL; + } + + /* Discard receive buffer, if applicable */ + if ( nii->rxbuf ) { + free_iob ( nii->rxbuf ); + nii->rxbuf = NULL; + } +} + +/** NII network device operations */ +static struct net_device_operations nii_operations = { + .open = nii_open, + .close = nii_close, + .transmit = nii_transmit, + .poll = nii_poll, +}; + +/** + * Attach driver to device + * + * @v efidev EFI device + * @ret rc Return status code + */ +int nii_start ( struct efi_device *efidev ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE device = efidev->device; + struct net_device *netdev; + struct nii_nic *nii; + void *interface; + EFI_STATUS efirc; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_netdev ( sizeof ( *nii ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &nii_operations ); + nii = netdev->priv; + nii->efidev = efidev; + INIT_LIST_HEAD ( &nii->mappings ); + netdev->ll_broadcast = nii->broadcast; + efidev_set_drvdata ( efidev, netdev ); + + /* Populate underlying device information */ + efi_device_info ( device, "NII", &nii->dev ); + nii->dev.driver_name = "NII"; + nii->dev.parent = &efidev->dev; + list_add ( &nii->dev.siblings, &efidev->dev.children ); + INIT_LIST_HEAD ( &nii->dev.children ); + netdev->dev = &nii->dev; + + /* Open NII protocol */ + if ( ( efirc = bs->OpenProtocol ( device, &efi_nii31_protocol_guid, + &interface, efi_image_handle, device, + ( EFI_OPEN_PROTOCOL_BY_DRIVER | + EFI_OPEN_PROTOCOL_EXCLUSIVE )))!=0){ + rc = -EEFI ( efirc ); + DBGC ( nii, "NII %s cannot open NII protocol: %s\n", + nii->dev.name, strerror ( rc ) ); + DBGC_EFI_OPENERS ( device, device, &efi_nii31_protocol_guid ); + goto err_open_protocol; + } + nii->nii = interface; + + /* Locate UNDI and entry point */ + nii->undi = ( ( void * ) ( intptr_t ) nii->nii->Id ); + if ( ! 
nii->undi ) { + DBGC ( nii, "NII %s has no UNDI\n", nii->dev.name ); + rc = -ENODEV; + goto err_no_undi; + } + if ( nii->undi->Implementation & PXE_ROMID_IMP_HW_UNDI ) { + DBGC ( nii, "NII %s is a mythical hardware UNDI\n", + nii->dev.name ); + rc = -ENOTSUP; + goto err_hw_undi; + } + if ( nii->undi->Implementation & PXE_ROMID_IMP_SW_VIRT_ADDR ) { + nii->issue = ( ( void * ) ( intptr_t ) nii->undi->EntryPoint ); + } else { + nii->issue = ( ( ( void * ) nii->undi ) + + nii->undi->EntryPoint ); + } + DBGC ( nii, "NII %s using UNDI v%x.%x at %p entry %p impl %#08x\n", + nii->dev.name, nii->nii->MajorVer, nii->nii->MinorVer, + nii->undi, nii->issue, nii->undi->Implementation ); + + /* Open PCI I/O protocols and locate BARs */ + if ( ( rc = nii_pci_open ( nii ) ) != 0 ) + goto err_pci_open; + + /* Start UNDI */ + if ( ( rc = nii_start_undi ( nii ) ) != 0 ) + goto err_start_undi; + + /* Get initialisation information */ + if ( ( rc = nii_get_init_info ( nii, netdev ) ) != 0 ) + goto err_get_init_info; + + /* Get MAC addresses */ + if ( ( rc = nii_get_station_address ( nii, netdev ) ) != 0 ) + goto err_get_station_address; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + DBGC ( nii, "NII %s registered as %s for %s\n", nii->dev.name, + netdev->name, efi_handle_name ( device ) ); + + /* Set initial link state (if media detection is not supported) */ + if ( ! 
nii->media )
+ netdev_link_up ( netdev );
+
+ return 0;
+
+ unregister_netdev ( netdev );
+ err_register_netdev:
+ err_get_station_address:
+ err_get_init_info:
+ nii_stop_undi ( nii );
+ err_start_undi:
+ nii_pci_close ( nii );
+ err_pci_open:
+ err_hw_undi:
+ err_no_undi:
+ bs->CloseProtocol ( device, &efi_nii31_protocol_guid,
+ efi_image_handle, device );
+ err_open_protocol:
+ list_del ( &nii->dev.siblings );
+ netdev_nullify ( netdev );
+ netdev_put ( netdev );
+ err_alloc:
+ return rc;
+}
+
+/**
+ * Detach driver from device
+ *
+ * @v efidev EFI device
+ */
+void nii_stop ( struct efi_device *efidev ) {
+ EFI_BOOT_SERVICES *bs = efi_systab->BootServices;
+ struct net_device *netdev = efidev_get_drvdata ( efidev );
+ struct nii_nic *nii = netdev->priv;
+ EFI_HANDLE device = efidev->device;
+
+ /* Unregister network device */
+ unregister_netdev ( netdev );
+
+ /* Stop UNDI */
+ nii_stop_undi ( nii );
+
+ /* Close PCI I/O protocols */
+ nii_pci_close ( nii );
+
+ /* Close NII protocol */
+ bs->CloseProtocol ( device, &efi_nii31_protocol_guid,
+ efi_image_handle, device );
+
+ /* Free network device */
+ list_del ( &nii->dev.siblings );
+ netdev_nullify ( netdev );
+ netdev_put ( netdev );
+}
diff --git a/src/drivers/net/efi/nii.h b/src/drivers/net/efi/nii.h
new file mode 100644
index 00000000..c10be9db
--- /dev/null
+++ b/src/drivers/net/efi/nii.h
@@ -0,0 +1,17 @@
+#ifndef _NII_H
+#define _NII_H
+
+/** @file
+ *
+ * NII driver
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+struct efi_device;
+
+extern int nii_start ( struct efi_device *efidev );
+extern void nii_stop ( struct efi_device *efidev );
+
+#endif /* _NII_H */
diff --git a/src/drivers/net/ena.c b/src/drivers/net/ena.c
new file mode 100644
index 00000000..5c76eb6f
--- /dev/null
+++ b/src/drivers/net/ena.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <ipxe/netdevice.h>
+#include <ipxe/ethernet.h>
+#include <ipxe/if_ether.h>
+#include <ipxe/iobuf.h>
+#include <ipxe/malloc.h>
+#include <ipxe/pci.h>
+#include "ena.h"
+
+/** @file
+ *
+ * Amazon ENA network driver
+ *
+ */
+
+/**
+ * Get direction name (for debugging)
+ *
+ * @v direction Direction
+ * @ret name Direction name
+ */
+static const char * ena_direction ( unsigned int direction ) {
+
+ switch ( direction ) {
+ case ENA_SQ_TX: return "TX";
+ case ENA_SQ_RX: return "RX";
+ default: return "<UNKNOWN>";
+ }
+}
+
+/******************************************************************************
+ *
+ * Device reset
+ *
+ ******************************************************************************
+ */
+
+/**
+ * Reset hardware
+ *
+ * @v ena ENA device
+ * @ret rc Return status code
+ */
+static int ena_reset ( struct ena_nic *ena ) {
+ uint32_t stat;
+ unsigned int i;
+
+ /* Trigger reset */
+ writel ( ENA_CTRL_RESET, ( ena->regs + ENA_CTRL ) );
+
+ /* Wait for reset to complete */
+ for ( i = 0 ; i <
ENA_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if device is ready */ + stat = readl ( ena->regs + ENA_STAT ); + if ( stat & ENA_STAT_READY ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( ena, "ENA %p timed out waiting for reset (status %#08x)\n", + ena, stat ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Admin queue + * + ****************************************************************************** + */ + +/** + * Set queue base address + * + * @v ena ENA device + * @v offset Register offset + * @v address Base address + */ +static inline void ena_set_base ( struct ena_nic *ena, unsigned int offset, + void *base ) { + physaddr_t phys = virt_to_bus ( base ); + + /* Program base address registers */ + writel ( ( phys & 0xffffffffUL ), + ( ena->regs + offset + ENA_BASE_LO ) ); + if ( sizeof ( phys ) > sizeof ( uint32_t ) ) { + writel ( ( ( ( uint64_t ) phys ) >> 32 ), + ( ena->regs + offset + ENA_BASE_HI ) ); + } else { + writel ( 0, ( ena->regs + offset + ENA_BASE_HI ) ); + } +} + +/** + * Set queue capabilities + * + * @v ena ENA device + * @v offset Register offset + * @v count Number of entries + * @v size Size of each entry + */ +static inline __attribute__ (( always_inline )) void +ena_set_caps ( struct ena_nic *ena, unsigned int offset, unsigned int count, + size_t size ) { + + /* Program capabilities register */ + writel ( ENA_CAPS ( count, size ), ( ena->regs + offset ) ); +} + +/** + * Clear queue capabilities + * + * @v ena ENA device + * @v offset Register offset + */ +static inline __attribute__ (( always_inline )) void +ena_clear_caps ( struct ena_nic *ena, unsigned int offset ) { + + /* Clear capabilities register */ + writel ( 0, ( ena->regs + offset ) ); +} + +/** + * Create admin queues + * + * @v ena ENA device + * @ret rc Return status code + */ +static int ena_create_admin ( struct ena_nic *ena ) { + size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) ); + 
size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) ); + int rc; + + /* Allocate admin completion queue */ + ena->acq.rsp = malloc_dma ( acq_len, acq_len ); + if ( ! ena->acq.rsp ) { + rc = -ENOMEM; + goto err_alloc_acq; + } + memset ( ena->acq.rsp, 0, acq_len ); + + /* Allocate admin queue */ + ena->aq.req = malloc_dma ( aq_len, aq_len ); + if ( ! ena->aq.req ) { + rc = -ENOMEM; + goto err_alloc_aq; + } + memset ( ena->aq.req, 0, aq_len ); + + /* Program queue addresses and capabilities */ + ena_set_base ( ena, ENA_ACQ_BASE, ena->acq.rsp ); + ena_set_caps ( ena, ENA_ACQ_CAPS, ENA_ACQ_COUNT, + sizeof ( ena->acq.rsp[0] ) ); + ena_set_base ( ena, ENA_AQ_BASE, ena->aq.req ); + ena_set_caps ( ena, ENA_AQ_CAPS, ENA_AQ_COUNT, + sizeof ( ena->aq.req[0] ) ); + + DBGC ( ena, "ENA %p AQ [%08lx,%08lx) ACQ [%08lx,%08lx)\n", + ena, virt_to_phys ( ena->aq.req ), + ( virt_to_phys ( ena->aq.req ) + aq_len ), + virt_to_phys ( ena->acq.rsp ), + ( virt_to_phys ( ena->acq.rsp ) + acq_len ) ); + return 0; + + ena_clear_caps ( ena, ENA_AQ_CAPS ); + ena_clear_caps ( ena, ENA_ACQ_CAPS ); + free_dma ( ena->aq.req, aq_len ); + err_alloc_aq: + free_dma ( ena->acq.rsp, acq_len ); + err_alloc_acq: + return rc; +} + +/** + * Destroy admin queues + * + * @v ena ENA device + */ +static void ena_destroy_admin ( struct ena_nic *ena ) { + size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) ); + size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) ); + + /* Clear queue capabilities */ + ena_clear_caps ( ena, ENA_AQ_CAPS ); + ena_clear_caps ( ena, ENA_ACQ_CAPS ); + wmb(); + + /* Free queues */ + free_dma ( ena->aq.req, aq_len ); + free_dma ( ena->acq.rsp, acq_len ); + DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena ); +} + +/** + * Get next available admin queue request + * + * @v ena ENA device + * @ret req Admin queue request + */ +static union ena_aq_req * ena_admin_req ( struct ena_nic *ena ) { + union ena_aq_req *req; + unsigned int index; + + /* Get next request */ + 
index = ( ena->aq.prod % ENA_AQ_COUNT ); + req = &ena->aq.req[index]; + + /* Initialise request */ + memset ( ( ( ( void * ) req ) + sizeof ( req->header ) ), 0, + ( sizeof ( *req ) - sizeof ( req->header ) ) ); + req->header.id = ena->aq.prod; + + /* Increment producer counter */ + ena->aq.prod++; + + return req; +} + +/** + * Issue admin queue request + * + * @v ena ENA device + * @v req Admin queue request + * @v rsp Admin queue response to fill in + * @ret rc Return status code + */ +static int ena_admin ( struct ena_nic *ena, union ena_aq_req *req, + union ena_acq_rsp **rsp ) { + unsigned int index; + unsigned int i; + int rc; + + /* Locate response */ + index = ( ena->acq.cons % ENA_ACQ_COUNT ); + *rsp = &ena->acq.rsp[index]; + + /* Mark request as ready */ + req->header.flags ^= ENA_AQ_PHASE; + wmb(); + DBGC2 ( ena, "ENA %p admin request %#x:\n", + ena, le16_to_cpu ( req->header.id ) ); + DBGC2_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) ); + + /* Ring doorbell */ + writel ( ena->aq.prod, ( ena->regs + ENA_AQ_DB ) ); + + /* Wait for response */ + for ( i = 0 ; i < ENA_ADMIN_MAX_WAIT_MS ; i++ ) { + + /* Check for response */ + if ( ( (*rsp)->header.flags ^ ena->acq.phase ) & ENA_ACQ_PHASE){ + mdelay ( 1 ); + continue; + } + DBGC2 ( ena, "ENA %p admin response %#x:\n", + ena, le16_to_cpu ( (*rsp)->header.id ) ); + DBGC2_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp )); + + /* Increment consumer counter */ + ena->acq.cons++; + if ( ( ena->acq.cons % ENA_ACQ_COUNT ) == 0 ) + ena->acq.phase ^= ENA_ACQ_PHASE; + + /* Check command identifier */ + if ( (*rsp)->header.id != req->header.id ) { + DBGC ( ena, "ENA %p admin response %#x mismatch:\n", + ena, le16_to_cpu ( (*rsp)->header.id ) ); + rc = -EILSEQ; + goto err; + } + + /* Check status */ + if ( (*rsp)->header.status != 0 ) { + DBGC ( ena, "ENA %p admin response %#x status %d:\n", + ena, le16_to_cpu ( (*rsp)->header.id ), + (*rsp)->header.status ); + rc = -EIO; + goto err; + } + + /* Success 
*/ + return 0; + } + + rc = -ETIMEDOUT; + DBGC ( ena, "ENA %p timed out waiting for admin request %#x:\n", + ena, le16_to_cpu ( req->header.id ) ); + err: + DBGC_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) ); + DBGC_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ) ); + return rc; +} + +/** + * Create submission queue + * + * @v ena ENA device + * @v sq Submission queue + * @v cq Corresponding completion queue + * @ret rc Return status code + */ +static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq, + struct ena_cq *cq ) { + union ena_aq_req *req; + union ena_acq_rsp *rsp; + int rc; + + /* Allocate submission queue entries */ + sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN ); + if ( ! sq->sqe.raw ) { + rc = -ENOMEM; + goto err_alloc; + } + memset ( sq->sqe.raw, 0, sq->len ); + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_CREATE_SQ; + req->create_sq.direction = sq->direction; + req->create_sq.policy = cpu_to_le16 ( ENA_SQ_HOST_MEMORY | + ENA_SQ_CONTIGUOUS ); + req->create_sq.cq_id = cpu_to_le16 ( cq->id ); + req->create_sq.count = cpu_to_le16 ( sq->count ); + req->create_sq.address = cpu_to_le64 ( virt_to_bus ( sq->sqe.raw ) ); + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + goto err_admin; + + /* Parse response */ + sq->id = le16_to_cpu ( rsp->create_sq.id ); + sq->doorbell = le32_to_cpu ( rsp->create_sq.doorbell ); + + /* Reset producer counter and phase */ + sq->prod = 0; + sq->phase = ENA_SQE_PHASE; + + DBGC ( ena, "ENA %p %s SQ%d at [%08lx,%08lx) db +%04x CQ%d\n", + ena, ena_direction ( sq->direction ), sq->id, + virt_to_phys ( sq->sqe.raw ), + ( virt_to_phys ( sq->sqe.raw ) + sq->len ), + sq->doorbell, cq->id ); + return 0; + + err_admin: + free_dma ( sq->sqe.raw, sq->len ); + err_alloc: + return rc; +} + +/** + * Destroy submission queue + * + * @v ena ENA device + * @v sq Submission queue + * @ret rc Return status code + */ +static int ena_destroy_sq ( struct 
ena_nic *ena, struct ena_sq *sq ) { + union ena_aq_req *req; + union ena_acq_rsp *rsp; + int rc; + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_DESTROY_SQ; + req->destroy_sq.id = cpu_to_le16 ( sq->id ); + req->destroy_sq.direction = sq->direction; + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + return rc; + + /* Free submission queue entries */ + free_dma ( sq->sqe.raw, sq->len ); + + DBGC ( ena, "ENA %p %s SQ%d destroyed\n", + ena, ena_direction ( sq->direction ), sq->id ); + return 0; +} + +/** + * Create completion queue + * + * @v ena ENA device + * @v cq Completion queue + * @ret rc Return status code + */ +static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) { + union ena_aq_req *req; + union ena_acq_rsp *rsp; + int rc; + + /* Allocate completion queue entries */ + cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN ); + if ( ! cq->cqe.raw ) { + rc = -ENOMEM; + goto err_alloc; + } + memset ( cq->cqe.raw, 0, cq->len ); + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_CREATE_CQ; + req->create_cq.size = cq->size; + req->create_cq.count = cpu_to_le16 ( cq->requested ); + req->create_cq.address = cpu_to_le64 ( virt_to_bus ( cq->cqe.raw ) ); + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + goto err_admin; + + /* Parse response */ + cq->id = le16_to_cpu ( rsp->create_cq.id ); + cq->actual = le16_to_cpu ( rsp->create_cq.count ); + cq->doorbell = le32_to_cpu ( rsp->create_cq.doorbell ); + cq->mask = ( cq->actual - 1 ); + if ( cq->actual != cq->requested ) { + DBGC ( ena, "ENA %p CQ%d requested %d actual %d\n", + ena, cq->id, cq->requested, cq->actual ); + } + + /* Reset consumer counter and phase */ + cq->cons = 0; + cq->phase = ENA_CQE_PHASE; + + DBGC ( ena, "ENA %p CQ%d at [%08lx,%08lx) db +%04x\n", + ena, cq->id, virt_to_phys ( cq->cqe.raw ), + ( virt_to_phys ( cq->cqe.raw ) + cq->len ), cq->doorbell ); + return 0; + + 
err_admin: + free_dma ( cq->cqe.raw, cq->len ); + err_alloc: + return rc; +} + +/** + * Destroy completion queue + * + * @v ena ENA device + * @v cq Completion queue + * @ret rc Return status code + */ +static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) { + union ena_aq_req *req; + union ena_acq_rsp *rsp; + int rc; + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_DESTROY_CQ; + req->destroy_cq.id = cpu_to_le16 ( cq->id ); + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + return rc; + + /* Free completion queue entries */ + free_dma ( cq->cqe.raw, cq->len ); + + DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id ); + return 0; +} + +/** + * Create queue pair + * + * @v ena ENA device + * @v qp Queue pair + * @ret rc Return status code + */ +static int ena_create_qp ( struct ena_nic *ena, struct ena_qp *qp ) { + int rc; + + /* Create completion queue */ + if ( ( rc = ena_create_cq ( ena, &qp->cq ) ) != 0 ) + goto err_create_cq; + + /* Create submission queue */ + if ( ( rc = ena_create_sq ( ena, &qp->sq, &qp->cq ) ) != 0 ) + goto err_create_sq; + + return 0; + + ena_destroy_sq ( ena, &qp->sq ); + err_create_sq: + ena_destroy_cq ( ena, &qp->cq ); + err_create_cq: + return rc; +} + +/** + * Destroy queue pair + * + * @v ena ENA device + * @v qp Queue pair + * @ret rc Return status code + */ +static int ena_destroy_qp ( struct ena_nic *ena, struct ena_qp *qp ) { + + /* Destroy submission queue */ + ena_destroy_sq ( ena, &qp->sq ); + + /* Destroy completion queue */ + ena_destroy_cq ( ena, &qp->cq ); + + return 0; +} + +/** + * Get device attributes + * + * @v netdev Network device + * @ret rc Return status code + */ +static int ena_get_device_attributes ( struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + union ena_aq_req *req; + union ena_acq_rsp *rsp; + union ena_feature *feature; + int rc; + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode 
= ENA_GET_FEATURE; + req->get_feature.id = ENA_DEVICE_ATTRIBUTES; + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + return rc; + + /* Parse response */ + feature = &rsp->get_feature.feature; + memcpy ( netdev->hw_addr, feature->device.mac, ETH_ALEN ); + netdev->max_pkt_len = le32_to_cpu ( feature->device.mtu ); + netdev->mtu = ( netdev->max_pkt_len - ETH_HLEN ); + + DBGC ( ena, "ENA %p MAC %s MTU %zd\n", + ena, eth_ntoa ( netdev->hw_addr ), netdev->max_pkt_len ); + return 0; +} + +/** + * Get statistics (for debugging) + * + * @v ena ENA device + * @ret rc Return status code + */ +static int ena_get_stats ( struct ena_nic *ena ) { + union ena_aq_req *req; + union ena_acq_rsp *rsp; + struct ena_get_stats_rsp *stats; + int rc; + + /* Do nothing unless debug messages are enabled */ + if ( ! DBG_LOG ) + return 0; + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_GET_STATS; + req->get_stats.type = ENA_STATS_TYPE_BASIC; + req->get_stats.scope = ENA_STATS_SCOPE_ETH; + req->get_stats.device = ENA_DEVICE_MINE; + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + return rc; + + /* Parse response */ + stats = &rsp->get_stats; + DBGC ( ena, "ENA %p TX bytes %#llx packets %#llx\n", ena, + ( ( unsigned long long ) le64_to_cpu ( stats->tx_bytes ) ), + ( ( unsigned long long ) le64_to_cpu ( stats->tx_packets ) ) ); + DBGC ( ena, "ENA %p RX bytes %#llx packets %#llx drops %#llx\n", ena, + ( ( unsigned long long ) le64_to_cpu ( stats->rx_bytes ) ), + ( ( unsigned long long ) le64_to_cpu ( stats->rx_packets ) ), + ( ( unsigned long long ) le64_to_cpu ( stats->rx_drops ) ) ); + + return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Refill receive queue + * + * @v netdev Network device + */ +static void ena_refill_rx ( 
struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + struct io_buffer *iobuf; + struct ena_rx_sqe *sqe; + unsigned int index; + physaddr_t address; + size_t len = netdev->max_pkt_len; + unsigned int refilled = 0; + + /* Refill queue */ + while ( ( ena->rx.sq.prod - ena->rx.cq.cons ) < ENA_RX_COUNT ) { + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( len ); + if ( ! iobuf ) { + /* Wait for next refill */ + break; + } + + /* Get next submission queue entry */ + index = ( ena->rx.sq.prod % ENA_RX_COUNT ); + sqe = &ena->rx.sq.sqe.rx[index]; + + /* Construct submission queue entry */ + address = virt_to_bus ( iobuf->data ); + sqe->len = cpu_to_le16 ( len ); + sqe->id = cpu_to_le16 ( ena->rx.sq.prod ); + sqe->address = cpu_to_le64 ( address ); + wmb(); + sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL | + ena->rx.sq.phase ); + + /* Increment producer counter */ + ena->rx.sq.prod++; + if ( ( ena->rx.sq.prod % ENA_RX_COUNT ) == 0 ) + ena->rx.sq.phase ^= ENA_SQE_PHASE; + + /* Record I/O buffer */ + assert ( ena->rx_iobuf[index] == NULL ); + ena->rx_iobuf[index] = iobuf; + + DBGC2 ( ena, "ENA %p RX %d at [%08llx,%08llx)\n", ena, sqe->id, + ( ( unsigned long long ) address ), + ( ( unsigned long long ) address + len ) ); + refilled++; + } + + /* Ring doorbell, if applicable */ + if ( refilled ) { + wmb(); + writel ( ena->rx.sq.prod, ( ena->regs + ena->rx.sq.doorbell ) ); + } +} + +/** + * Discard unused receive I/O buffers + * + * @v ena ENA device + */ +static void ena_empty_rx ( struct ena_nic *ena ) { + unsigned int i; + + for ( i = 0 ; i < ENA_RX_COUNT ; i++ ) { + if ( ena->rx_iobuf[i] ) + free_iob ( ena->rx_iobuf[i] ); + ena->rx_iobuf[i] = NULL; + } +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int ena_open ( struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + int rc; + + /* Create transmit queue pair */ + if ( ( rc = ena_create_qp ( ena, &ena->tx ) ) != 0 ) + 
goto err_create_tx; + + /* Create receive queue pair */ + if ( ( rc = ena_create_qp ( ena, &ena->rx ) ) != 0 ) + goto err_create_rx; + + /* Refill receive queue */ + ena_refill_rx ( netdev ); + + return 0; + + ena_destroy_qp ( ena, &ena->rx ); + err_create_rx: + ena_destroy_qp ( ena, &ena->tx ); + err_create_tx: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void ena_close ( struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + + /* Dump statistics (for debugging) */ + ena_get_stats ( ena ); + + /* Destroy receive queue pair */ + ena_destroy_qp ( ena, &ena->rx ); + + /* Discard any unused receive buffers */ + ena_empty_rx ( ena ); + + /* Destroy transmit queue pair */ + ena_destroy_qp ( ena, &ena->tx ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int ena_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { + struct ena_nic *ena = netdev->priv; + struct ena_tx_sqe *sqe; + unsigned int index; + physaddr_t address; + size_t len; + + /* Get next submission queue entry */ + if ( ( ena->tx.sq.prod - ena->tx.cq.cons ) >= ENA_TX_COUNT ) { + DBGC ( ena, "ENA %p out of transmit descriptors\n", ena ); + return -ENOBUFS; + } + index = ( ena->tx.sq.prod % ENA_TX_COUNT ); + sqe = &ena->tx.sq.sqe.tx[index]; + + /* Construct submission queue entry */ + address = virt_to_bus ( iobuf->data ); + len = iob_len ( iobuf ); + sqe->len = cpu_to_le16 ( len ); + sqe->id = ena->tx.sq.prod; + sqe->address = cpu_to_le64 ( address ); + wmb(); + sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL | + ena->tx.sq.phase ); + wmb(); + + /* Increment producer counter */ + ena->tx.sq.prod++; + if ( ( ena->tx.sq.prod % ENA_TX_COUNT ) == 0 ) + ena->tx.sq.phase ^= ENA_SQE_PHASE; + + /* Ring doorbell */ + writel ( ena->tx.sq.prod, ( ena->regs + ena->tx.sq.doorbell ) ); + + DBGC2 ( ena, "ENA %p TX %d at [%08llx,%08llx)\n", ena, sqe->id, + ( 
( unsigned long long ) address ), + ( ( unsigned long long ) address + len ) ); + return 0; +} + +/** + * Poll for completed transmissions + * + * @v netdev Network device + */ +static void ena_poll_tx ( struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + struct ena_tx_cqe *cqe; + unsigned int index; + + /* Check for completed packets */ + while ( ena->tx.cq.cons != ena->tx.sq.prod ) { + + /* Get next completion queue entry */ + index = ( ena->tx.cq.cons & ena->tx.cq.mask ); + cqe = &ena->tx.cq.cqe.tx[index]; + + /* Stop if completion queue entry is empty */ + if ( ( cqe->flags ^ ena->tx.cq.phase ) & ENA_CQE_PHASE ) + return; + DBGC2 ( ena, "ENA %p TX %d complete\n", ena, + ( le16_to_cpu ( cqe->id ) >> 2 /* Don't ask */ ) ); + + /* Increment consumer counter */ + ena->tx.cq.cons++; + if ( ! ( ena->tx.cq.cons & ena->tx.cq.mask ) ) + ena->tx.cq.phase ^= ENA_CQE_PHASE; + + /* Complete transmit */ + netdev_tx_complete_next ( netdev ); + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void ena_poll_rx ( struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + struct ena_rx_cqe *cqe; + struct io_buffer *iobuf; + unsigned int index; + size_t len; + + /* Check for received packets */ + while ( ena->rx.cq.cons != ena->rx.sq.prod ) { + + /* Get next completion queue entry */ + index = ( ena->rx.cq.cons % ENA_RX_COUNT ); + cqe = &ena->rx.cq.cqe.rx[index]; + + /* Stop if completion queue entry is empty */ + if ( ( cqe->flags ^ ena->rx.cq.phase ) & ENA_CQE_PHASE ) + return; + + /* Increment consumer counter */ + ena->rx.cq.cons++; + if ( ! 
( ena->rx.cq.cons & ena->rx.cq.mask ) ) + ena->rx.cq.phase ^= ENA_CQE_PHASE; + + /* Populate I/O buffer */ + iobuf = ena->rx_iobuf[index]; + ena->rx_iobuf[index] = NULL; + len = le16_to_cpu ( cqe->len ); + iob_put ( iobuf, len ); + + /* Hand off to network stack */ + DBGC2 ( ena, "ENA %p RX %d complete (length %zd)\n", + ena, le16_to_cpu ( cqe->id ), len ); + netdev_rx ( netdev, iobuf ); + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void ena_poll ( struct net_device *netdev ) { + + /* Poll for transmit completions */ + ena_poll_tx ( netdev ); + + /* Poll for receive completions */ + ena_poll_rx ( netdev ); + + /* Refill receive ring */ + ena_refill_rx ( netdev ); +} + +/** ENA network device operations */ +static struct net_device_operations ena_operations = { + .open = ena_open, + .close = ena_close, + .transmit = ena_transmit, + .poll = ena_poll, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int ena_probe ( struct pci_device *pci ) { + struct net_device *netdev; + struct ena_nic *ena; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *ena ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &ena_operations ); + ena = netdev->priv; + pci_set_drvdata ( pci, netdev ); + netdev->dev = &pci->dev; + memset ( ena, 0, sizeof ( *ena ) ); + ena->acq.phase = ENA_ACQ_PHASE; + ena_cq_init ( &ena->tx.cq, ENA_TX_COUNT, + sizeof ( ena->tx.cq.cqe.tx[0] ) ); + ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT, + sizeof ( ena->tx.sq.sqe.tx[0] ) ); + ena_cq_init ( &ena->rx.cq, ENA_RX_COUNT, + sizeof ( ena->rx.cq.cqe.rx[0] ) ); + ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT, + sizeof ( ena->rx.sq.sqe.rx[0] ) ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + ena->regs = pci_ioremap ( pci, pci->membase, ENA_BAR_SIZE ); + if ( ! ena->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Reset the NIC */ + if ( ( rc = ena_reset ( ena ) ) != 0 ) + goto err_reset; + + /* Create admin queues */ + if ( ( rc = ena_create_admin ( ena ) ) != 0 ) + goto err_create_admin; + + /* Fetch MAC address */ + if ( ( rc = ena_get_device_attributes ( netdev ) ) != 0 ) + goto err_get_device_attributes; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Mark as link up, since we have no way to test link state on + * this hardware. 
+ */ + netdev_link_up ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_get_device_attributes: + ena_destroy_admin ( ena ); + err_create_admin: + ena_reset ( ena ); + err_reset: + iounmap ( ena->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void ena_remove ( struct pci_device *pci ) { + struct net_device *netdev = pci_get_drvdata ( pci ); + struct ena_nic *ena = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Destroy admin queues */ + ena_destroy_admin ( ena ); + + /* Reset card */ + ena_reset ( ena ); + + /* Free network device */ + iounmap ( ena->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** ENA PCI device IDs */ +static struct pci_device_id ena_nics[] = { + PCI_ROM ( 0x1d0f, 0xec20, "ena-vf", "ENA VF", 0 ), + PCI_ROM ( 0x1d0f, 0xec21, "ena-vf-llq", "ENA VF (LLQ)", 0 ), +}; + +/** ENA PCI driver */ +struct pci_driver ena_driver __pci_driver = { + .ids = ena_nics, + .id_count = ( sizeof ( ena_nics ) / sizeof ( ena_nics[0] ) ), + .probe = ena_probe, + .remove = ena_remove, +}; diff --git a/src/drivers/net/ena.h b/src/drivers/net/ena.h new file mode 100644 index 00000000..0496fc6b --- /dev/null +++ b/src/drivers/net/ena.h @@ -0,0 +1,588 @@ +#ifndef _ENA_H +#define _ENA_H + +/** @file + * + * Amazon ENA network driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** BAR size */ +#define ENA_BAR_SIZE 16384 + +/** Queue alignment */ +#define ENA_ALIGN 4096 + +/** Number of admin queue entries */ +#define ENA_AQ_COUNT 2 + +/** Number of admin completion queue entries */ +#define ENA_ACQ_COUNT 2 + +/** Number of transmit queue entries */ +#define ENA_TX_COUNT 16 + +/** Number of receive queue entries */ +#define ENA_RX_COUNT 16 + +/** Base address low register offset */ +#define ENA_BASE_LO 0x0 + +/** Base 
address high register offset */ +#define ENA_BASE_HI 0x4 + +/** Capability register value */ +#define ENA_CAPS( count, size ) ( ( (size) << 16 ) | ( (count) << 0 ) ) + +/** Admin queue base address register */ +#define ENA_AQ_BASE 0x10 + +/** Admin queue capabilities register */ +#define ENA_AQ_CAPS 0x18 + +/** Admin completion queue base address register */ +#define ENA_ACQ_BASE 0x20 + +/** Admin completion queue capabilities register */ +#define ENA_ACQ_CAPS 0x28 + +/** Admin queue doorbell register */ +#define ENA_AQ_DB 0x2c + +/** Maximum time to wait for admin requests */ +#define ENA_ADMIN_MAX_WAIT_MS 5000 + +/** Device control register */ +#define ENA_CTRL 0x54 +#define ENA_CTRL_RESET 0x00000001UL /**< Reset */ + +/** Maximum time to wait for reset */ +#define ENA_RESET_MAX_WAIT_MS 1000 + +/** Device status register */ +#define ENA_STAT 0x58 +#define ENA_STAT_READY 0x00000001UL /**< Ready */ + +/** Admin queue entry header */ +struct ena_aq_header { + /** Request identifier */ + uint8_t id; + /** Reserved */ + uint8_t reserved; + /** Opcode */ + uint8_t opcode; + /** Flags */ + uint8_t flags; +} __attribute__ (( packed )); + +/** Admin queue ownership phase flag */ +#define ENA_AQ_PHASE 0x01 + +/** Admin completion queue entry header */ +struct ena_acq_header { + /** Request identifier */ + uint8_t id; + /** Reserved */ + uint8_t reserved; + /** Status */ + uint8_t status; + /** Flags */ + uint8_t flags; + /** Extended status */ + uint16_t ext; + /** Consumer index */ + uint16_t cons; +} __attribute__ (( packed )); + +/** Admin completion queue ownership phase flag */ +#define ENA_ACQ_PHASE 0x01 + +/** Device attributes */ +#define ENA_DEVICE_ATTRIBUTES 1 + +/** Device attributes */ +struct ena_device_attributes { + /** Implementation */ + uint32_t implementation; + /** Device version */ + uint32_t version; + /** Supported features */ + uint32_t features; + /** Reserved */ + uint8_t reserved_a[4]; + /** Physical address width */ + uint32_t physical; + /** 
Virtual address width */ + uint32_t virtual; + /** MAC address */ + uint8_t mac[ETH_ALEN]; + /** Reserved */ + uint8_t reserved_b[2]; + /** Maximum MTU */ + uint32_t mtu; +} __attribute__ (( packed )); + +/** Feature */ +union ena_feature { + /** Device attributes */ + struct ena_device_attributes device; +}; + +/** Submission queue direction */ +enum ena_sq_direction { + /** Transmit */ + ENA_SQ_TX = 0x20, + /** Receive */ + ENA_SQ_RX = 0x40, +}; + +/** Create submission queue */ +#define ENA_CREATE_SQ 1 + +/** Create submission queue request */ +struct ena_create_sq_req { + /** Header */ + struct ena_aq_header header; + /** Direction */ + uint8_t direction; + /** Reserved */ + uint8_t reserved_a; + /** Policy */ + uint16_t policy; + /** Completion queue identifier */ + uint16_t cq_id; + /** Number of entries */ + uint16_t count; + /** Base address */ + uint64_t address; + /** Writeback address */ + uint64_t writeback; + /** Reserved */ + uint8_t reserved_b[8]; +} __attribute__ (( packed )); + +/** Submission queue policy */ +enum ena_sq_policy { + /** Use host memory */ + ENA_SQ_HOST_MEMORY = 0x0001, + /** Memory is contiguous */ + ENA_SQ_CONTIGUOUS = 0x0100, +}; + +/** Create submission queue response */ +struct ena_create_sq_rsp { + /** Header */ + struct ena_acq_header header; + /** Submission queue identifier */ + uint16_t id; + /** Reserved */ + uint8_t reserved[2]; + /** Doorbell register offset */ + uint32_t doorbell; + /** LLQ descriptor ring offset */ + uint32_t llq_desc; + /** LLQ header offset */ + uint32_t llq_data; +} __attribute__ (( packed )); + +/** Destroy submission queue */ +#define ENA_DESTROY_SQ 2 + +/** Destroy submission queue request */ +struct ena_destroy_sq_req { + /** Header */ + struct ena_aq_header header; + /** Submission queue identifier */ + uint16_t id; + /** Direction */ + uint8_t direction; + /** Reserved */ + uint8_t reserved; +} __attribute__ (( packed )); + +/** Destroy submission queue response */ +struct ena_destroy_sq_rsp 
{ + /** Header */ + struct ena_acq_header header; +} __attribute__ (( packed )); + +/** Create completion queue */ +#define ENA_CREATE_CQ 3 + +/** Create completion queue request */ +struct ena_create_cq_req { + /** Header */ + struct ena_aq_header header; + /** Interrupts enabled */ + uint8_t intr; + /** Entry size (in 32-bit words) */ + uint8_t size; + /** Number of entries */ + uint16_t count; + /** MSI-X vector */ + uint32_t vector; + /** Base address */ + uint64_t address; +} __attribute__ (( packed )); + +/** Create completion queue response */ +struct ena_create_cq_rsp { + /** Header */ + struct ena_acq_header header; + /** Completion queue identifier */ + uint16_t id; + /** Actual number of entries */ + uint16_t count; + /** NUMA node register offset */ + uint32_t node; + /** Doorbell register offset */ + uint32_t doorbell; + /** Interrupt unmask register offset */ + uint32_t intr; +} __attribute__ (( packed )); + +/** Destroy completion queue */ +#define ENA_DESTROY_CQ 4 + +/** Destroy completion queue request */ +struct ena_destroy_cq_req { + /** Header */ + struct ena_aq_header header; + /** Completion queue identifier */ + uint16_t id; + /** Reserved */ + uint8_t reserved[2]; +} __attribute__ (( packed )); + +/** Destroy completion queue response */ +struct ena_destroy_cq_rsp { + /** Header */ + struct ena_acq_header header; +} __attribute__ (( packed )); + +/** Get feature */ +#define ENA_GET_FEATURE 8 + +/** Get feature request */ +struct ena_get_feature_req { + /** Header */ + struct ena_aq_header header; + /** Length */ + uint32_t len; + /** Address */ + uint64_t address; + /** Flags */ + uint8_t flags; + /** Feature identifier */ + uint8_t id; + /** Reserved */ + uint8_t reserved[2]; +} __attribute__ (( packed )); + +/** Get feature response */ +struct ena_get_feature_rsp { + /** Header */ + struct ena_acq_header header; + /** Feature */ + union ena_feature feature; +} __attribute__ (( packed )); + +/** Get statistics */ +#define ENA_GET_STATS 11 + 
+/** Get statistics request */ +struct ena_get_stats_req { + /** Header */ + struct ena_aq_header header; + /** Reserved */ + uint8_t reserved_a[12]; + /** Type */ + uint8_t type; + /** Scope */ + uint8_t scope; + /** Reserved */ + uint8_t reserved_b[2]; + /** Queue ID */ + uint16_t queue; + /** Device ID */ + uint16_t device; +} __attribute__ (( packed )); + +/** Basic statistics */ +#define ENA_STATS_TYPE_BASIC 0 + +/** Ethernet statistics */ +#define ENA_STATS_SCOPE_ETH 1 + +/** My device */ +#define ENA_DEVICE_MINE 0xffff + +/** Get statistics response */ +struct ena_get_stats_rsp { + /** Header */ + struct ena_acq_header header; + /** Transmit byte count */ + uint64_t tx_bytes; + /** Transmit packet count */ + uint64_t tx_packets; + /** Receive byte count */ + uint64_t rx_bytes; + /** Receive packet count */ + uint64_t rx_packets; + /** Receive drop count */ + uint64_t rx_drops; +} __attribute__ (( packed )); + +/** Admin queue request */ +union ena_aq_req { + /** Header */ + struct ena_aq_header header; + /** Create submission queue */ + struct ena_create_sq_req create_sq; + /** Destroy submission queue */ + struct ena_destroy_sq_req destroy_sq; + /** Create completion queue */ + struct ena_create_cq_req create_cq; + /** Destroy completion queue */ + struct ena_destroy_cq_req destroy_cq; + /** Get feature */ + struct ena_get_feature_req get_feature; + /** Get statistics */ + struct ena_get_stats_req get_stats; + /** Padding */ + uint8_t pad[64]; +}; + +/** Admin completion queue response */ +union ena_acq_rsp { + /** Header */ + struct ena_acq_header header; + /** Create submission queue */ + struct ena_create_sq_rsp create_sq; + /** Destroy submission queue */ + struct ena_destroy_sq_rsp destroy_sq; + /** Create completion queue */ + struct ena_create_cq_rsp create_cq; + /** Destroy completion queue */ + struct ena_destroy_cq_rsp destroy_cq; + /** Get feature */ + struct ena_get_feature_rsp get_feature; + /** Get statistics */ + struct ena_get_stats_rsp 
get_stats; + /** Padding */ + uint8_t pad[64]; +}; + +/** Admin queue */ +struct ena_aq { + /** Requests */ + union ena_aq_req *req; + /** Producer counter */ + unsigned int prod; +}; + +/** Admin completion queue */ +struct ena_acq { + /** Responses */ + union ena_acq_rsp *rsp; + /** Consumer counter */ + unsigned int cons; + /** Phase */ + unsigned int phase; +}; + +/** Transmit submission queue entry */ +struct ena_tx_sqe { + /** Length */ + uint16_t len; + /** Reserved */ + uint8_t reserved_a; + /** Flags */ + uint8_t flags; + /** Reserved */ + uint8_t reserved_b[3]; + /** Request identifier */ + uint8_t id; + /** Address */ + uint64_t address; +} __attribute__ (( packed )); + +/** Receive submission queue entry */ +struct ena_rx_sqe { + /** Length */ + uint16_t len; + /** Reserved */ + uint8_t reserved_a; + /** Flags */ + uint8_t flags; + /** Request identifier */ + uint16_t id; + /** Reserved */ + uint8_t reserved_b[2]; + /** Address */ + uint64_t address; +} __attribute__ (( packed )); + +/** Submission queue ownership phase flag */ +#define ENA_SQE_PHASE 0x01 + +/** This is the first descriptor */ +#define ENA_SQE_FIRST 0x04 + +/** This is the last descriptor */ +#define ENA_SQE_LAST 0x08 + +/** Request completion */ +#define ENA_SQE_CPL 0x10 + +/** Transmit completion queue entry */ +struct ena_tx_cqe { + /** Request identifier */ + uint16_t id; + /** Status */ + uint8_t status; + /** Flags */ + uint8_t flags; + /** Reserved */ + uint8_t reserved[2]; + /** Consumer index */ + uint16_t cons; +} __attribute__ (( packed )); + +/** Receive completion queue entry */ +struct ena_rx_cqe { + /** Reserved */ + uint8_t reserved_a[3]; + /** Flags */ + uint8_t flags; + /** Length */ + uint16_t len; + /** Request identifier */ + uint16_t id; + /** Reserved */ + uint8_t reserved_b[8]; +} __attribute__ (( packed )); + +/** Completion queue ownership phase flag */ +#define ENA_CQE_PHASE 0x01 + +/** Submission queue */ +struct ena_sq { + /** Entries */ + union { + /** 
Transmit submission queue entries */ + struct ena_tx_sqe *tx; + /** Receive submission queue entries */ + struct ena_rx_sqe *rx; + /** Raw data */ + void *raw; + } sqe; + /** Doorbell register offset */ + unsigned int doorbell; + /** Total length of entries */ + size_t len; + /** Producer counter */ + unsigned int prod; + /** Phase */ + unsigned int phase; + /** Submission queue identifier */ + uint16_t id; + /** Direction */ + uint8_t direction; + /** Number of entries */ + uint8_t count; +}; + +/** + * Initialise submission queue + * + * @v sq Submission queue + * @v direction Direction + * @v count Number of entries + * @v size Size of each entry + */ +static inline __attribute__ (( always_inline )) void +ena_sq_init ( struct ena_sq *sq, unsigned int direction, unsigned int count, + size_t size ) { + + sq->len = ( count * size ); + sq->direction = direction; + sq->count = count; +} + +/** Completion queue */ +struct ena_cq { + /** Entries */ + union { + /** Transmit completion queue entries */ + struct ena_tx_cqe *tx; + /** Receive completion queue entries */ + struct ena_rx_cqe *rx; + /** Raw data */ + void *raw; + } cqe; + /** Doorbell register offset */ + unsigned int doorbell; + /** Total length of entries */ + size_t len; + /** Consumer counter */ + unsigned int cons; + /** Phase */ + unsigned int phase; + /** Completion queue identifier */ + uint16_t id; + /** Entry size (in 32-bit words) */ + uint8_t size; + /** Requested number of entries */ + uint8_t requested; + /** Actual number of entries */ + uint8_t actual; + /** Actual number of entries minus one */ + uint8_t mask; +}; + +/** + * Initialise completion queue + * + * @v cq Completion queue + * @v count Number of entries + * @v size Size of each entry + */ +static inline __attribute__ (( always_inline )) void +ena_cq_init ( struct ena_cq *cq, unsigned int count, size_t size ) { + + cq->len = ( count * size ); + cq->size = ( size / sizeof ( uint32_t ) ); + cq->requested = count; +} + +/** Queue pair 
*/ +struct ena_qp { + /** Submission queue */ + struct ena_sq sq; + /** Completion queue */ + struct ena_cq cq; +}; + +/** An ENA network card */ +struct ena_nic { + /** Registers */ + void *regs; + /** Admin queue */ + struct ena_aq aq; + /** Admin completion queue */ + struct ena_acq acq; + /** Transmit queue */ + struct ena_qp tx; + /** Receive queue */ + struct ena_qp rx; + /** Receive I/O buffers */ + struct io_buffer *rx_iobuf[ENA_RX_COUNT]; +}; + +#endif /* _ENA_H */ diff --git a/src/drivers/net/eoib.c b/src/drivers/net/eoib.c new file mode 100644 index 00000000..ba291295 --- /dev/null +++ b/src/drivers/net/eoib.c @@ -0,0 +1,893 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Ethernet over Infiniband + * + */ + +/** Number of EoIB send work queue entries */ +#define EOIB_NUM_SEND_WQES 8 + +/** Number of EoIB receive work queue entries */ +#define EOIB_NUM_RECV_WQES 4 + +/** Number of EoIB completion queue entries */ +#define EOIB_NUM_CQES 16 + +/** Link status for "broadcast join in progress" */ +#define EINPROGRESS_JOINING __einfo_error ( EINFO_EINPROGRESS_JOINING ) +#define EINFO_EINPROGRESS_JOINING __einfo_uniqify \ + ( EINFO_EINPROGRESS, 0x01, "Joining" ) + +/** Human-readable message for the link status */ +struct errortab eoib_errors[] __errortab = { + __einfo_errortab ( EINFO_EINPROGRESS_JOINING ), +}; + +/** List of EoIB devices */ +static LIST_HEAD ( eoib_devices ); + +static struct net_device_operations eoib_operations; + +/**************************************************************************** + * + * EoIB peer cache + * + **************************************************************************** + */ + +/** An EoIB peer cache entry */ +struct eoib_peer { + /** List of EoIB peer cache entries */ + struct list_head list; + /** Ethernet MAC */ + uint8_t mac[ETH_ALEN]; + /** Infiniband address vector */ + struct ib_address_vector av; +}; + +/** + * Find EoIB peer cache entry + * + * @v eoib EoIB device + * @v mac Ethernet MAC + * @ret peer EoIB peer, or NULL if not found + */ +static struct eoib_peer * eoib_find_peer ( struct eoib_device *eoib, + const uint8_t *mac ) { + struct eoib_peer *peer; + + /* Find peer cache entry */ + list_for_each_entry ( peer, &eoib->peers, list ) { + if ( memcmp ( mac, peer->mac, sizeof ( peer->mac ) ) == 0 ) { + /* Move peer to start of list */ + list_del ( &peer->list ); + list_add ( &peer->list, &eoib->peers ); + return peer; + } + } + + return NULL; +} + +/** + * Create EoIB peer cache entry 
+ * + * @v eoib EoIB device + * @v mac Ethernet MAC + * @ret peer EoIB peer, or NULL on error + */ +static struct eoib_peer * eoib_create_peer ( struct eoib_device *eoib, + const uint8_t *mac ) { + struct eoib_peer *peer; + + /* Allocate and initialise peer cache entry */ + peer = zalloc ( sizeof ( *peer ) ); + if ( peer ) { + memcpy ( peer->mac, mac, sizeof ( peer->mac ) ); + list_add ( &peer->list, &eoib->peers ); + } + return peer; +} + +/** + * Flush EoIB peer cache + * + * @v eoib EoIB device + */ +static void eoib_flush_peers ( struct eoib_device *eoib ) { + struct eoib_peer *peer; + struct eoib_peer *tmp; + + list_for_each_entry_safe ( peer, tmp, &eoib->peers, list ) { + list_del ( &peer->list ); + free ( peer ); + } +} + +/** + * Discard some entries from the peer cache + * + * @ret discarded Number of cached items discarded + */ +static unsigned int eoib_discard ( void ) { + struct net_device *netdev; + struct eoib_device *eoib; + struct eoib_peer *peer; + unsigned int discarded = 0; + + /* Try to discard one cache entry for each EoIB device */ + for_each_netdev ( netdev ) { + + /* Skip non-EoIB devices */ + if ( netdev->op != &eoib_operations ) + continue; + eoib = netdev->priv; + + /* Discard least recently used cache entry (if any) */ + list_for_each_entry_reverse ( peer, &eoib->peers, list ) { + list_del ( &peer->list ); + free ( peer ); + discarded++; + break; + } + } + + return discarded; +} + +/** EoIB cache discarder */ +struct cache_discarder eoib_discarder __cache_discarder ( CACHE_EXPENSIVE ) = { + .discard = eoib_discard, +}; + +/** + * Find destination address vector + * + * @v eoib EoIB device + * @v mac Ethernet MAC + * @ret av Address vector, or NULL to send as broadcast + */ +static struct ib_address_vector * eoib_tx_av ( struct eoib_device *eoib, + const uint8_t *mac ) { + struct ib_device *ibdev = eoib->ibdev; + struct eoib_peer *peer; + int rc; + + /* If this is a broadcast or multicast MAC address, then send + * this packet as a 
broadcast. + */ + if ( is_multicast_ether_addr ( mac ) ) { + DBGCP ( eoib, "EoIB %s %s TX multicast\n", + eoib->name, eth_ntoa ( mac ) ); + return NULL; + } + + /* If we have no peer cache entry, then create one and send + * this packet as a broadcast. + */ + peer = eoib_find_peer ( eoib, mac ); + if ( ! peer ) { + DBGC ( eoib, "EoIB %s %s TX unknown\n", + eoib->name, eth_ntoa ( mac ) ); + eoib_create_peer ( eoib, mac ); + return NULL; + } + + /* If we have not yet recorded a received GID and QPN for this + * peer cache entry, then send this packet as a broadcast. + */ + if ( ! peer->av.gid_present ) { + DBGCP ( eoib, "EoIB %s %s TX not yet recorded\n", + eoib->name, eth_ntoa ( mac ) ); + return NULL; + } + + /* If we have not yet resolved a path to this peer, then send + * this packet as a broadcast. + */ + if ( ( rc = ib_resolve_path ( ibdev, &peer->av ) ) != 0 ) { + DBGCP ( eoib, "EoIB %s %s TX not yet resolved\n", + eoib->name, eth_ntoa ( mac ) ); + return NULL; + } + + /* Force use of GRH even for local destinations */ + peer->av.gid_present = 1; + + /* We have a fully resolved peer: send this packet as a + * unicast. + */ + DBGCP ( eoib, "EoIB %s %s TX " IB_GID_FMT " QPN %#lx\n", eoib->name, + eth_ntoa ( mac ), IB_GID_ARGS ( &peer->av.gid ), peer->av.qpn ); + return &peer->av; +} + +/** + * Record source address vector + * + * @v eoib EoIB device + * @v mac Ethernet MAC + * @v lid Infiniband LID + */ +static void eoib_rx_av ( struct eoib_device *eoib, const uint8_t *mac, + const struct ib_address_vector *av ) { + const union ib_gid *gid = &av->gid; + unsigned long qpn = av->qpn; + struct eoib_peer *peer; + + /* Sanity checks */ + if ( ! av->gid_present ) { + DBGC ( eoib, "EoIB %s %s RX with no GID\n", + eoib->name, eth_ntoa ( mac ) ); + return; + } + + /* Find peer cache entry (if any) */ + peer = eoib_find_peer ( eoib, mac ); + if ( ! 
peer ) { + DBGCP ( eoib, "EoIB %s %s RX " IB_GID_FMT " (ignored)\n", + eoib->name, eth_ntoa ( mac ), IB_GID_ARGS ( gid ) ); + return; + } + + /* Some dubious EoIB implementations utilise an Ethernet-to- + * EoIB gateway that will send packets from the wrong QPN. + */ + if ( eoib_has_gateway ( eoib ) && + ( memcmp ( gid, &eoib->gateway.gid, sizeof ( *gid ) ) == 0 ) ) { + qpn = eoib->gateway.qpn; + } + + /* Do nothing if peer cache entry is complete and correct */ + if ( ( peer->av.lid == av->lid ) && ( peer->av.qpn == qpn ) ) { + DBGCP ( eoib, "EoIB %s %s RX unchanged\n", + eoib->name, eth_ntoa ( mac ) ); + return; + } + + /* Update peer cache entry */ + peer->av.qpn = qpn; + peer->av.qkey = eoib->broadcast.qkey; + peer->av.gid_present = 1; + memcpy ( &peer->av.gid, gid, sizeof ( peer->av.gid ) ); + DBGC ( eoib, "EoIB %s %s RX " IB_GID_FMT " QPN %#lx\n", eoib->name, + eth_ntoa ( mac ), IB_GID_ARGS ( &peer->av.gid ), peer->av.qpn ); +} + +/**************************************************************************** + * + * EoIB network device + * + **************************************************************************** + */ + +/** + * Transmit packet via EoIB network device + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int eoib_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct eoib_device *eoib = netdev->priv; + struct eoib_header *eoib_hdr; + struct ethhdr *ethhdr; + struct ib_address_vector *av; + size_t zlen; + + /* Sanity checks */ + assert ( iob_len ( iobuf ) >= sizeof ( *ethhdr ) ); + assert ( iob_headroom ( iobuf ) >= sizeof ( *eoib_hdr ) ); + + /* Look up destination address vector */ + ethhdr = iobuf->data; + av = eoib_tx_av ( eoib, ethhdr->h_dest ); + + /* Prepend EoIB header */ + eoib_hdr = iob_push ( iobuf, sizeof ( *eoib_hdr ) ); + eoib_hdr->magic = htons ( EOIB_MAGIC ); + eoib_hdr->reserved = 0; + + /* Pad buffer to minimum Ethernet frame size */ + zlen = ( sizeof ( 
*eoib_hdr ) + ETH_ZLEN ); + assert ( zlen <= IOB_ZLEN ); + if ( iob_len ( iobuf ) < zlen ) + iob_pad ( iobuf, zlen ); + + /* If we have no unicast address then send as a broadcast, + * with a duplicate sent to the gateway if applicable. + */ + if ( ! av ) { + av = &eoib->broadcast; + if ( eoib_has_gateway ( eoib ) ) + eoib->duplicate ( eoib, iobuf ); + } + + /* Post send work queue entry */ + return ib_post_send ( eoib->ibdev, eoib->qp, av, iobuf ); +} + +/** + * Handle EoIB send completion + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void eoib_complete_send ( struct ib_device *ibdev __unused, + struct ib_queue_pair *qp, + struct io_buffer *iobuf, int rc ) { + struct eoib_device *eoib = ib_qp_get_ownerdata ( qp ); + + netdev_tx_complete_err ( eoib->netdev, iobuf, rc ); +} + +/** + * Handle EoIB receive completion + * + * @v ibdev Infiniband device + * @v qp Queue pair + * @v dest Destination address vector, or NULL + * @v source Source address vector, or NULL + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void eoib_complete_recv ( struct ib_device *ibdev __unused, + struct ib_queue_pair *qp, + struct ib_address_vector *dest __unused, + struct ib_address_vector *source, + struct io_buffer *iobuf, int rc ) { + struct eoib_device *eoib = ib_qp_get_ownerdata ( qp ); + struct net_device *netdev = eoib->netdev; + struct eoib_header *eoib_hdr; + struct ethhdr *ethhdr; + + /* Record errors */ + if ( rc != 0 ) { + netdev_rx_err ( netdev, iobuf, rc ); + return; + } + + /* Sanity check */ + if ( iob_len ( iobuf ) < ( sizeof ( *eoib_hdr ) + sizeof ( *ethhdr ) )){ + DBGC ( eoib, "EoIB %s received packet too short to " + "contain EoIB and Ethernet headers\n", eoib->name ); + DBGC_HD ( eoib, iobuf->data, iob_len ( iobuf ) ); + netdev_rx_err ( netdev, iobuf, -EIO ); + return; + } + if ( ! 
source ) { + DBGC ( eoib, "EoIB %s received packet without address " + "vector\n", eoib->name ); + netdev_rx_err ( netdev, iobuf, -ENOTTY ); + return; + } + + /* Strip EoIB header */ + iob_pull ( iobuf, sizeof ( *eoib_hdr ) ); + + /* Update neighbour cache entry, if any */ + ethhdr = iobuf->data; + eoib_rx_av ( eoib, ethhdr->h_source, source ); + + /* Hand off to network layer */ + netdev_rx ( netdev, iobuf ); +} + +/** EoIB completion operations */ +static struct ib_completion_queue_operations eoib_cq_op = { + .complete_send = eoib_complete_send, + .complete_recv = eoib_complete_recv, +}; + +/** EoIB queue pair operations */ +static struct ib_queue_pair_operations eoib_qp_op = { + .alloc_iob = alloc_iob, +}; + +/** + * Poll EoIB network device + * + * @v netdev Network device + */ +static void eoib_poll ( struct net_device *netdev ) { + struct eoib_device *eoib = netdev->priv; + struct ib_device *ibdev = eoib->ibdev; + + /* Poll Infiniband device */ + ib_poll_eq ( ibdev ); + + /* Poll the retry timers (required for EoIB multicast join) */ + retry_poll(); +} + +/** + * Handle EoIB broadcast multicast group join completion + * + * @v membership Multicast group membership + * @v rc Status code + */ +static void eoib_join_complete ( struct ib_mc_membership *membership, int rc ) { + struct eoib_device *eoib = + container_of ( membership, struct eoib_device, membership ); + + /* Record join status as link status */ + netdev_link_err ( eoib->netdev, rc ); +} + +/** + * Join EoIB broadcast multicast group + * + * @v eoib EoIB device + * @ret rc Return status code + */ +static int eoib_join_broadcast_group ( struct eoib_device *eoib ) { + int rc; + + /* Join multicast group */ + if ( ( rc = ib_mcast_join ( eoib->ibdev, eoib->qp, + &eoib->membership, &eoib->broadcast, + eoib->mask, eoib_join_complete ) ) != 0 ) { + DBGC ( eoib, "EoIB %s could not join broadcast group: %s\n", + eoib->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Leave EoIB broadcast 
multicast group + * + * @v eoib EoIB device + */ +static void eoib_leave_broadcast_group ( struct eoib_device *eoib ) { + + /* Leave multicast group */ + ib_mcast_leave ( eoib->ibdev, eoib->qp, &eoib->membership ); +} + +/** + * Handle link status change + * + * @v eoib EoIB device + */ +static void eoib_link_state_changed ( struct eoib_device *eoib ) { + struct net_device *netdev = eoib->netdev; + struct ib_device *ibdev = eoib->ibdev; + int rc; + + /* Leave existing broadcast group */ + if ( eoib->qp ) + eoib_leave_broadcast_group ( eoib ); + + /* Update broadcast GID based on potentially-new partition key */ + eoib->broadcast.gid.words[2] = htons ( ibdev->pkey | IB_PKEY_FULL ); + + /* Set net device link state to reflect Infiniband link state */ + rc = ib_link_rc ( ibdev ); + netdev_link_err ( netdev, ( rc ? rc : -EINPROGRESS_JOINING ) ); + + /* Join new broadcast group */ + if ( ib_is_open ( ibdev ) && ib_link_ok ( ibdev ) && eoib->qp && + ( ( rc = eoib_join_broadcast_group ( eoib ) ) != 0 ) ) { + DBGC ( eoib, "EoIB %s could not rejoin broadcast group: " + "%s\n", eoib->name, strerror ( rc ) ); + netdev_link_err ( netdev, rc ); + return; + } +} + +/** + * Open EoIB network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int eoib_open ( struct net_device *netdev ) { + struct eoib_device *eoib = netdev->priv; + struct ib_device *ibdev = eoib->ibdev; + int rc; + + /* Open IB device */ + if ( ( rc = ib_open ( ibdev ) ) != 0 ) { + DBGC ( eoib, "EoIB %s could not open %s: %s\n", + eoib->name, ibdev->name, strerror ( rc ) ); + goto err_ib_open; + } + + /* Allocate completion queue */ + if ( ( rc = ib_create_cq ( ibdev, EOIB_NUM_CQES, &eoib_cq_op, + &eoib->cq ) ) != 0 ) { + DBGC ( eoib, "EoIB %s could not create completion queue: %s\n", + eoib->name, strerror ( rc ) ); + goto err_create_cq; + } + + /* Allocate queue pair */ + if ( ( rc = ib_create_qp ( ibdev, IB_QPT_UD, EOIB_NUM_SEND_WQES, + eoib->cq, EOIB_NUM_RECV_WQES, eoib->cq, + 
&eoib_qp_op, netdev->name, &eoib->qp ) )!=0){ + DBGC ( eoib, "EoIB %s could not create queue pair: %s\n", + eoib->name, strerror ( rc ) ); + goto err_create_qp; + } + ib_qp_set_ownerdata ( eoib->qp, eoib ); + + /* Fill receive rings */ + ib_refill_recv ( ibdev, eoib->qp ); + + /* Fake a link status change to join the broadcast group */ + eoib_link_state_changed ( eoib ); + + return 0; + + ib_destroy_qp ( ibdev, eoib->qp ); + eoib->qp = NULL; + err_create_qp: + ib_destroy_cq ( ibdev, eoib->cq ); + eoib->cq = NULL; + err_create_cq: + ib_close ( ibdev ); + err_ib_open: + return rc; +} + +/** + * Close EoIB network device + * + * @v netdev Network device + */ +static void eoib_close ( struct net_device *netdev ) { + struct eoib_device *eoib = netdev->priv; + struct ib_device *ibdev = eoib->ibdev; + + /* Flush peer cache */ + eoib_flush_peers ( eoib ); + + /* Leave broadcast group */ + eoib_leave_broadcast_group ( eoib ); + + /* Tear down the queues */ + ib_destroy_qp ( ibdev, eoib->qp ); + eoib->qp = NULL; + ib_destroy_cq ( ibdev, eoib->cq ); + eoib->cq = NULL; + + /* Close IB device */ + ib_close ( ibdev ); +} + +/** EoIB network device operations */ +static struct net_device_operations eoib_operations = { + .open = eoib_open, + .close = eoib_close, + .transmit = eoib_transmit, + .poll = eoib_poll, +}; + +/** + * Create EoIB device + * + * @v ibdev Infiniband device + * @v hw_addr Ethernet MAC + * @v broadcast Broadcast address vector + * @v name Interface name (or NULL to use default) + * @ret rc Return status code + */ +int eoib_create ( struct ib_device *ibdev, const uint8_t *hw_addr, + struct ib_address_vector *broadcast, const char *name ) { + struct net_device *netdev; + struct eoib_device *eoib; + int rc; + + /* Allocate network device */ + netdev = alloc_etherdev ( sizeof ( *eoib ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &eoib_operations ); + eoib = netdev->priv; + netdev->dev = ibdev->dev; + eoib->netdev = netdev; + eoib->ibdev = ibdev_get ( ibdev ); + memcpy ( &eoib->broadcast, broadcast, sizeof ( eoib->broadcast ) ); + INIT_LIST_HEAD ( &eoib->peers ); + + /* Set MAC address */ + memcpy ( netdev->hw_addr, hw_addr, ETH_ALEN ); + + /* Set interface name, if applicable */ + if ( name ) + snprintf ( netdev->name, sizeof ( netdev->name ), "%s", name ); + eoib->name = netdev->name; + + /* Add to list of EoIB devices */ + list_add_tail ( &eoib->list, &eoib_devices ); + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + DBGC ( eoib, "EoIB %s created for %s MAC %s\n", + eoib->name, ibdev->name, eth_ntoa ( hw_addr ) ); + DBGC ( eoib, "EoIB %s broadcast GID " IB_GID_FMT "\n", + eoib->name, IB_GID_ARGS ( &broadcast->gid ) ); + return 0; + + unregister_netdev ( netdev ); + err_register: + list_del ( &eoib->list ); + ibdev_put ( ibdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Find EoIB device + * + * @v ibdev Infiniband device + * @v hw_addr Original Ethernet MAC + * @ret eoib EoIB device + */ +struct eoib_device * eoib_find ( struct ib_device *ibdev, + const uint8_t *hw_addr ) { + struct eoib_device *eoib; + + list_for_each_entry ( eoib, &eoib_devices, list ) { + if ( ( eoib->ibdev == ibdev ) && + ( memcmp ( eoib->netdev->hw_addr, hw_addr, + ETH_ALEN ) == 0 ) ) + return eoib; + } + return NULL; +} + +/** + * Remove EoIB device + * + * @v eoib EoIB device + */ +void eoib_destroy ( struct eoib_device *eoib ) { + struct net_device *netdev = eoib->netdev; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Remove from list of network devices */ + list_del ( &eoib->list ); + + /* Drop reference to Infiniband device */ + ibdev_put ( eoib->ibdev ); + + /* Free network device */ + DBGC ( eoib, 
"EoIB %s destroyed\n", eoib->name ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** + * Probe EoIB device + * + * @v ibdev Infiniband device + * @ret rc Return status code + */ +static int eoib_probe ( struct ib_device *ibdev __unused ) { + + /* EoIB devices are not created automatically */ + return 0; +} + +/** + * Handle device or link status change + * + * @v ibdev Infiniband device + */ +static void eoib_notify ( struct ib_device *ibdev ) { + struct eoib_device *eoib; + + /* Handle link status change for any attached EoIB devices */ + list_for_each_entry ( eoib, &eoib_devices, list ) { + if ( eoib->ibdev != ibdev ) + continue; + eoib_link_state_changed ( eoib ); + } +} + +/** + * Remove EoIB device + * + * @v ibdev Infiniband device + */ +static void eoib_remove ( struct ib_device *ibdev ) { + struct eoib_device *eoib; + struct eoib_device *tmp; + + /* Remove any attached EoIB devices */ + list_for_each_entry_safe ( eoib, tmp, &eoib_devices, list ) { + if ( eoib->ibdev != ibdev ) + continue; + eoib_destroy ( eoib ); + } +} + +/** EoIB driver */ +struct ib_driver eoib_driver __ib_driver = { + .name = "EoIB", + .probe = eoib_probe, + .notify = eoib_notify, + .remove = eoib_remove, +}; + +/**************************************************************************** + * + * EoIB heartbeat packets + * + **************************************************************************** + */ + +/** + * Silently ignore incoming EoIB heartbeat packets + * + * @v iobuf I/O buffer + * @v netdev Network device + * @v ll_source Link-layer source address + * @v flags Packet flags + * @ret rc Return status code + */ +static int eoib_heartbeat_rx ( struct io_buffer *iobuf, + struct net_device *netdev __unused, + const void *ll_dest __unused, + const void *ll_source __unused, + unsigned int flags __unused ) { + free_iob ( iobuf ); + return 0; +} + +/** + * Transcribe EoIB heartbeat address + * + * @v net_addr EoIB heartbeat address + * @ret string "" + * + * This 
operation is meaningless for the EoIB heartbeat protocol. + */ +static const char * eoib_heartbeat_ntoa ( const void *net_addr __unused ) { + return ""; +} + +/** EoIB heartbeat network protocol */ +struct net_protocol eoib_heartbeat_protocol __net_protocol = { + .name = "EoIB", + .net_proto = htons ( EOIB_MAGIC ), + .rx = eoib_heartbeat_rx, + .ntoa = eoib_heartbeat_ntoa, +}; + +/**************************************************************************** + * + * EoIB gateway + * + **************************************************************************** + * + * Some dubious EoIB implementations require all broadcast traffic to + * be sent twice: once to the actual broadcast group, and once as a + * unicast to the EoIB-to-Ethernet gateway. This somewhat curious + * design arises since the EoIB-to-Ethernet gateway hardware lacks the + * ability to attach a queue pair to a multicast GID (or LID), and so + * cannot receive traffic sent to the broadcast group. + * + */ + +/** + * Transmit duplicate packet to the EoIB gateway + * + * @v eoib EoIB device + * @v original Original I/O buffer + */ +static void eoib_duplicate ( struct eoib_device *eoib, + struct io_buffer *original ) { + struct net_device *netdev = eoib->netdev; + struct ib_device *ibdev = eoib->ibdev; + struct ib_address_vector *av = &eoib->gateway; + size_t len = iob_len ( original ); + struct io_buffer *copy; + int rc; + + /* Create copy of I/O buffer */ + copy = alloc_iob ( len ); + if ( ! 
copy ) { + rc = -ENOMEM; + goto err_alloc; + } + memcpy ( iob_put ( copy, len ), original->data, len ); + + /* Append to network device's transmit queue */ + list_add_tail ( &copy->list, &original->list ); + + /* Resolve path to gateway */ + if ( ( rc = ib_resolve_path ( ibdev, av ) ) != 0 ) { + DBGC ( eoib, "EoIB %s no path to gateway: %s\n", + eoib->name, strerror ( rc ) ); + goto err_path; + } + + /* Force use of GRH even for local destinations */ + av->gid_present = 1; + + /* Post send work queue entry */ + if ( ( rc = ib_post_send ( eoib->ibdev, eoib->qp, av, copy ) ) != 0 ) + goto err_post_send; + + return; + + err_post_send: + err_path: + list_del ( &copy->list ); + err_alloc: + netdev_tx_err ( netdev, copy, rc ); +} + +/** + * Set EoIB gateway + * + * @v eoib EoIB device + * @v av Address vector, or NULL to clear gateway + */ +void eoib_set_gateway ( struct eoib_device *eoib, + struct ib_address_vector *av ) { + + if ( av ) { + DBGC ( eoib, "EoIB %s using gateway " IB_GID_FMT "\n", + eoib->name, IB_GID_ARGS ( &av->gid ) ); + memcpy ( &eoib->gateway, av, sizeof ( eoib->gateway ) ); + eoib->duplicate = eoib_duplicate; + } else { + DBGC ( eoib, "EoIB %s not using gateway\n", eoib->name ); + eoib->duplicate = NULL; + } +} diff --git a/src/drivers/net/exanic.c b/src/drivers/net/exanic.c new file mode 100644 index 00000000..8849da28 --- /dev/null +++ b/src/drivers/net/exanic.c @@ -0,0 +1,915 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "exanic.h" + +/** @file + * + * Exablaze ExaNIC driver + * + */ + +/* Disambiguate the various error causes */ +#define EIO_ABORTED __einfo_error ( EINFO_EIO_ABORTED ) +#define EINFO_EIO_ABORTED \ + __einfo_uniqify ( EINFO_EIO, 0x01, "Frame aborted" ) +#define EIO_CORRUPT __einfo_error ( EINFO_EIO_CORRUPT ) +#define EINFO_EIO_CORRUPT \ + __einfo_uniqify ( EINFO_EIO, 0x02, "CRC incorrect" ) +#define EIO_HWOVFL __einfo_error ( EINFO_EIO_HWOVFL ) +#define EINFO_EIO_HWOVFL \ + __einfo_uniqify ( EINFO_EIO, 0x03, "Hardware overflow" ) +#define EIO_STATUS( status ) \ + EUNIQ ( EINFO_EIO, ( (status) & EXANIC_STATUS_ERROR_MASK ), \ + EIO_ABORTED, EIO_CORRUPT, EIO_HWOVFL ) + +/** + * Write DMA base address register + * + * @v addr DMA base address + * @v reg Register + */ +static void exanic_write_base ( physaddr_t addr, void *reg ) { + uint32_t lo; + uint32_t hi; + + /* Write high and low registers, setting flags as appropriate */ + lo = addr; + if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) { + /* 64-bit build; may be a 32-bit or 64-bit address */ + hi = ( ( ( uint64_t ) addr ) >> 32 ); + if ( ! 
hi ) + lo |= EXANIC_DMA_32_BIT; + } else { + /* 32-bit build; always a 32-bit address */ + hi = 0; + lo |= EXANIC_DMA_32_BIT; + } + writel ( hi, ( reg + 0 ) ); + writel ( lo, ( reg + 4 ) ); +} + +/** + * Clear DMA base address register + * + * @v reg Register + */ +static inline void exanic_clear_base ( void *reg ) { + + /* Clear both high and low registers */ + writel ( 0, ( reg + 0 ) ); + writel ( 0, ( reg + 4 ) ); +} + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v exanic ExaNIC device + */ +static void exanic_reset ( struct exanic *exanic ) { + void *port_regs; + unsigned int i; + + /* Disable all possible ports */ + for ( i = 0 ; i < EXANIC_MAX_PORTS ; i++ ) { + port_regs = ( exanic->regs + EXANIC_PORT_REGS ( i ) ); + writel ( 0, ( port_regs + EXANIC_PORT_ENABLE ) ); + writel ( 0, ( port_regs + EXANIC_PORT_IRQ ) ); + exanic_clear_base ( port_regs + EXANIC_PORT_RX_BASE ); + } + + /* Disable transmit feedback */ + exanic_clear_base ( exanic->regs + EXANIC_TXF_BASE ); +} + +/****************************************************************************** + * + * MAC address + * + ****************************************************************************** + */ + +/** + * Read I2C line status + * + * @v basher Bit-bashing interface + * @v bit_id Bit number + * @ret zero Input is a logic 0 + * @ret non-zero Input is a logic 1 + */ +static int exanic_i2c_read_bit ( struct bit_basher *basher, + unsigned int bit_id ) { + struct exanic *exanic = + container_of ( basher, struct exanic, basher.basher ); + unsigned int shift; + uint32_t i2c; + + /* Identify bit */ + assert ( bit_id == I2C_BIT_SDA ); + shift = exanic->i2cfg.getsda; + + /* Read I2C register */ + DBG_DISABLE ( DBGLVL_IO ); + i2c = readl ( exanic->regs + EXANIC_I2C ); + DBG_ENABLE ( DBGLVL_IO ); + return ( ( i2c >> shift ) & 1 ); 
+} + +/** + * Write I2C line status + * + * @v basher Bit-bashing interface + * @v bit_id Bit number + * @v data Value to write + */ +static void exanic_i2c_write_bit ( struct bit_basher *basher, + unsigned int bit_id, unsigned long data ) { + struct exanic *exanic = + container_of ( basher, struct exanic, basher.basher ); + unsigned int shift; + uint32_t mask; + uint32_t i2c; + + /* Identify shift */ + assert ( ( bit_id == I2C_BIT_SCL ) || ( bit_id == I2C_BIT_SDA ) ); + shift = ( ( bit_id == I2C_BIT_SCL ) ? + exanic->i2cfg.setscl : exanic->i2cfg.setsda ); + mask = ( 1UL << shift ); + + /* Modify I2C register */ + DBG_DISABLE ( DBGLVL_IO ); + i2c = readl ( exanic->regs + EXANIC_I2C ); + i2c &= ~mask; + if ( ! data ) + i2c |= mask; + writel ( i2c, ( exanic->regs + EXANIC_I2C ) ); + DBG_ENABLE ( DBGLVL_IO ); +} + +/** I2C bit-bashing interface operations */ +static struct bit_basher_operations exanic_i2c_basher_ops = { + .read = exanic_i2c_read_bit, + .write = exanic_i2c_write_bit, +}; + +/** Possible I2C bus configurations */ +static struct exanic_i2c_config exanic_i2cfgs[] = { + /* X2/X10 */ + { .setscl = 7, .setsda = 4, .getsda = 12 }, + /* X4 */ + { .setscl = 7, .setsda = 5, .getsda = 13 }, +}; + +/** + * Initialise EEPROM + * + * @v exanic ExaNIC device + * @v i2cfg I2C bus configuration + * @ret rc Return status code + */ +static int exanic_try_init_eeprom ( struct exanic *exanic, + struct exanic_i2c_config *i2cfg ) { + int rc; + + /* Configure I2C bus */ + memcpy ( &exanic->i2cfg, i2cfg, sizeof ( exanic->i2cfg ) ); + + /* Initialise I2C bus */ + if ( ( rc = init_i2c_bit_basher ( &exanic->basher, + &exanic_i2c_basher_ops ) ) != 0 ) { + DBGC2 ( exanic, "EXANIC %p found no I2C bus via %d/%d/%d\n", + exanic, exanic->i2cfg.setscl, + exanic->i2cfg.setsda, exanic->i2cfg.getsda ); + return rc; + } + + /* Check for EEPROM presence */ + init_i2c_eeprom ( &exanic->eeprom, EXANIC_EEPROM_ADDRESS ); + if ( ( rc = i2c_check_presence ( &exanic->basher.i2c, + &exanic->eeprom ) 
) != 0 ) { + DBGC2 ( exanic, "EXANIC %p found no EEPROM via %d/%d/%d\n", + exanic, exanic->i2cfg.setscl, + exanic->i2cfg.setsda, exanic->i2cfg.getsda ); + return rc; + } + + DBGC ( exanic, "EXANIC %p found EEPROM via %d/%d/%d\n", + exanic, exanic->i2cfg.setscl, + exanic->i2cfg.setsda, exanic->i2cfg.getsda ); + return 0; +} + +/** + * Initialise EEPROM + * + * @v exanic ExaNIC device + * @ret rc Return status code + */ +static int exanic_init_eeprom ( struct exanic *exanic ) { + struct exanic_i2c_config *i2cfg; + unsigned int i; + int rc; + + /* Try all possible bus configurations */ + for ( i = 0 ; i < ( sizeof ( exanic_i2cfgs ) / + sizeof ( exanic_i2cfgs[0] ) ) ; i++ ) { + i2cfg = &exanic_i2cfgs[i]; + if ( ( rc = exanic_try_init_eeprom ( exanic, i2cfg ) ) == 0 ) + return 0; + } + + DBGC ( exanic, "EXANIC %p found no EEPROM\n", exanic ); + return -ENODEV; +} + +/** + * Fetch base MAC address + * + * @v exanic ExaNIC device + * @ret rc Return status code + */ +static int exanic_fetch_mac ( struct exanic *exanic ) { + struct i2c_interface *i2c = &exanic->basher.i2c; + int rc; + + /* Initialise EEPROM */ + if ( ( rc = exanic_init_eeprom ( exanic ) ) != 0 ) + return rc; + + /* Fetch base MAC address */ + if ( ( rc = i2c->read ( i2c, &exanic->eeprom, 0, exanic->mac, + sizeof ( exanic->mac ) ) ) != 0 ) { + DBGC ( exanic, "EXANIC %p could not read MAC address: %s\n", + exanic, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * Link state + * + ****************************************************************************** + */ + +/** + * Check link state + * + * @v netdev Network device + */ +static void exanic_check_link ( struct net_device *netdev ) { + struct exanic_port *port = netdev->priv; + uint32_t status; + uint32_t speed; + + /* Report port status changes */ + status = readl ( port->regs + EXANIC_PORT_STATUS ); + speed = readl ( port->regs + EXANIC_PORT_SPEED ); + if ( 
status != port->status ) { + DBGC ( port, "EXANIC %s port status %#08x speed %dMbps\n", + netdev->name, status, speed ); + if ( status & EXANIC_PORT_STATUS_LINK ) { + netdev_link_up ( netdev ); + } else { + netdev_link_down ( netdev ); + } + port->status = status; + } +} + +/** + * Check link state periodically + * + * @v timer Link state check timer + * @v over Failure indicator + */ +static void exanic_expired ( struct retry_timer *timer, int over __unused ) { + struct exanic_port *port = + container_of ( timer, struct exanic_port, timer ); + struct net_device *netdev = port->netdev; + static const uint32_t speeds[] = { + 100, 1000, 10000, 40000, 100000, + }; + unsigned int index; + + /* Restart timer */ + start_timer_fixed ( timer, EXANIC_LINK_INTERVAL ); + + /* Check link state */ + exanic_check_link ( netdev ); + + /* Do nothing further if link is already up */ + if ( netdev_link_ok ( netdev ) ) + return; + + /* Do nothing further unless we have a valid list of supported speeds */ + if ( ! port->speeds ) + return; + + /* Autonegotiation is not supported; try manually selecting + * the next supported link speed. + */ + do { + if ( ! port->speed ) + port->speed = ( 8 * sizeof ( port->speeds ) ); + port->speed--; + } while ( ! 
( ( 1UL << port->speed ) & port->speeds ) ); + index = ( port->speed - ( ffs ( EXANIC_CAPS_SPEED_MASK ) - 1 ) ); + assert ( index < ( sizeof ( speeds ) / sizeof ( speeds[0] ) ) ); + + /* Attempt the selected speed */ + DBGC ( netdev, "EXANIC %s attempting %dMbps\n", + netdev->name, speeds[index] ); + writel ( speeds[index], ( port->regs + EXANIC_PORT_SPEED ) ); +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int exanic_open ( struct net_device *netdev ) { + struct exanic_port *port = netdev->priv; + struct exanic_tx_chunk *tx; + unsigned int i; + + /* Reset transmit region contents */ + for ( i = 0 ; i < port->tx_count ; i++ ) { + tx = ( port->tx + ( i * sizeof ( *tx ) ) ); + writew ( port->txf_slot, &tx->desc.txf_slot ); + writeb ( EXANIC_TYPE_RAW, &tx->desc.type ); + writeb ( 0, &tx->desc.flags ); + writew ( 0, &tx->pad ); + } + + /* Reset receive region contents */ + memset_user ( port->rx, 0, 0xff, EXANIC_RX_LEN ); + + /* Reset transmit feedback region */ + *(port->txf) = 0; + + /* Reset counters */ + port->tx_prod = 0; + port->tx_cons = 0; + port->rx_cons = 0; + + /* Map receive region */ + exanic_write_base ( phys_to_bus ( user_to_phys ( port->rx, 0 ) ), + ( port->regs + EXANIC_PORT_RX_BASE ) ); + + /* Enable promiscuous mode */ + writel ( EXANIC_PORT_FLAGS_PROMISC, + ( port->regs + EXANIC_PORT_FLAGS ) ); + + /* Reset to default speed and clear cached status */ + writel ( port->default_speed, ( port->regs + EXANIC_PORT_SPEED ) ); + port->speed = 0; + port->status = 0; + + /* Enable port */ + wmb(); + writel ( EXANIC_PORT_ENABLE_ENABLED, + ( port->regs + EXANIC_PORT_ENABLE ) ); + + /* Start link state timer */ + start_timer_fixed ( &port->timer, EXANIC_LINK_INTERVAL ); + + return 0; +} + +/** + * 
Close network device + * + * @v netdev Network device + */ +static void exanic_close ( struct net_device *netdev ) { + struct exanic_port *port = netdev->priv; + + /* Stop link state timer */ + stop_timer ( &port->timer ); + + /* Disable port */ + writel ( 0, ( port->regs + EXANIC_PORT_ENABLE ) ); + wmb(); + + /* Clear receive region */ + exanic_clear_base ( port->regs + EXANIC_PORT_RX_BASE ); + + /* Discard any in-progress receive */ + if ( port->rx_iobuf ) { + netdev_rx_err ( netdev, port->rx_iobuf, -ECANCELED ); + port->rx_iobuf = NULL; + } +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int exanic_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct exanic_port *port = netdev->priv; + struct exanic_tx_chunk *tx; + unsigned int tx_fill; + unsigned int tx_index; + size_t offset; + size_t len; + uint8_t *src; + uint8_t *dst; + + /* Sanity check */ + len = iob_len ( iobuf ); + if ( len > sizeof ( tx->data ) ) { + DBGC ( port, "EXANIC %s transmit too large\n", netdev->name ); + return -ENOTSUP; + } + + /* Get next transmit descriptor */ + tx_fill = ( port->tx_prod - port->tx_cons ); + if ( tx_fill >= port->tx_count ) { + DBGC ( port, "EXANIC %s out of transmit descriptors\n", + netdev->name ); + return -ENOBUFS; + } + tx_index = ( port->tx_prod & ( port->tx_count - 1 ) ); + offset = ( tx_index * sizeof ( *tx ) ); + tx = ( port->tx + offset ); + DBGC2 ( port, "EXANIC %s TX %04x at [%05zx,%05zx)\n", + netdev->name, port->tx_prod, ( port->tx_offset + offset ), + ( port->tx_offset + offset + + offsetof ( typeof ( *tx ), data ) + len ) ); + port->tx_prod++; + + /* Populate transmit descriptor */ + writew ( port->tx_prod, &tx->desc.txf_id ); + writew ( ( sizeof ( tx->pad ) + len ), &tx->desc.len ); + + /* Copy data to transmit region. There is no DMA on the + * transmit data path. 
+ */ + src = iobuf->data; + dst = tx->data; + while ( len-- ) + writeb ( *(src++), dst++ ); + + /* Send transmit command */ + wmb(); + writel ( ( port->tx_offset + offset ), + ( port->regs + EXANIC_PORT_TX_COMMAND ) ); + + return 0; +} + +/** + * Poll for completed packets + * + * @v netdev Network device + */ +static void exanic_poll_tx ( struct net_device *netdev ) { + struct exanic_port *port = netdev->priv; + + /* Report any completed packets */ + while ( port->tx_cons != *(port->txf) ) { + DBGC2 ( port, "EXANIC %s TX %04x complete\n", + netdev->name, port->tx_cons ); + netdev_tx_complete_next ( netdev ); + port->tx_cons++; + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void exanic_poll_rx ( struct net_device *netdev ) { + struct exanic_port *port = netdev->priv; + struct exanic_rx_chunk *rx; + struct exanic_rx_descriptor desc; + uint8_t current; + uint8_t previous; + size_t offset; + size_t len; + + for ( ; ; port->rx_cons++ ) { + + /* Fetch descriptor */ + offset = ( ( port->rx_cons * sizeof ( *rx ) ) % EXANIC_RX_LEN ); + copy_from_user ( &desc, port->rx, + ( offset + offsetof ( typeof ( *rx ), desc ) ), + sizeof ( desc ) ); + + /* Calculate generation */ + current = ( port->rx_cons / ( EXANIC_RX_LEN / sizeof ( *rx ) )); + previous = ( current - 1 ); + + /* Do nothing if no chunk is ready */ + if ( desc.generation == previous ) + break; + + /* Allocate I/O buffer if needed */ + if ( ! port->rx_iobuf ) { + port->rx_iobuf = alloc_iob ( EXANIC_MAX_RX_LEN ); + if ( ! port->rx_iobuf ) { + /* Wait for next poll */ + break; + } + port->rx_rc = 0; + } + + /* Calculate chunk length */ + len = ( desc.len ? 
desc.len : sizeof ( rx->data ) ); + + /* Append data to I/O buffer */ + if ( len <= iob_tailroom ( port->rx_iobuf ) ) { + copy_from_user ( iob_put ( port->rx_iobuf, len ), + port->rx, + ( offset + offsetof ( typeof ( *rx ), + data ) ), len ); + } else { + DBGC ( port, "EXANIC %s RX too large\n", + netdev->name ); + port->rx_rc = -ERANGE; + } + + /* Check for overrun */ + rmb(); + copy_from_user ( &desc.generation, port->rx, + ( offset + offsetof ( typeof ( *rx ), + desc.generation ) ), + sizeof ( desc.generation ) ); + if ( desc.generation != current ) { + DBGC ( port, "EXANIC %s RX overrun\n", netdev->name ); + port->rx_rc = -ENOBUFS; + continue; + } + + /* Wait for end of packet */ + if ( ! desc.len ) + continue; + + /* Check for receive errors */ + if ( desc.status & EXANIC_STATUS_ERROR_MASK ) { + port->rx_rc = -EIO_STATUS ( desc.status ); + DBGC ( port, "EXANIC %s RX %04x error: %s\n", + netdev->name, port->rx_cons, + strerror ( port->rx_rc ) ); + } else { + DBGC2 ( port, "EXANIC %s RX %04x\n", + netdev->name, port->rx_cons ); + } + + /* Hand off to network stack */ + if ( port->rx_rc ) { + netdev_rx_err ( netdev, port->rx_iobuf, port->rx_rc ); + } else { + iob_unput ( port->rx_iobuf, 4 /* strip CRC */ ); + netdev_rx ( netdev, port->rx_iobuf ); + } + port->rx_iobuf = NULL; + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void exanic_poll ( struct net_device *netdev ) { + + /* Poll for completed packets */ + exanic_poll_tx ( netdev ); + + /* Poll for received packets */ + exanic_poll_rx ( netdev ); +} + +/** ExaNIC network device operations */ +static struct net_device_operations exanic_operations = { + .open = exanic_open, + .close = exanic_close, + .transmit = exanic_transmit, + .poll = exanic_poll, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** + * 
Probe port + * + * @v exanic ExaNIC device + * @v dev Parent device + * @v index Port number + * @ret rc Return status code + */ +static int exanic_probe_port ( struct exanic *exanic, struct device *dev, + unsigned int index ) { + struct net_device *netdev; + struct exanic_port *port; + void *port_regs; + uint32_t status; + size_t tx_len; + int rc; + + /* Do nothing if port is not physically present */ + port_regs = ( exanic->regs + EXANIC_PORT_REGS ( index ) ); + status = readl ( port_regs + EXANIC_PORT_STATUS ); + tx_len = readl ( port_regs + EXANIC_PORT_TX_LEN ); + if ( ( status & EXANIC_PORT_STATUS_ABSENT ) || ( tx_len == 0 ) ) { + rc = 0; + goto absent; + } + + /* Allocate network device */ + netdev = alloc_etherdev ( sizeof ( *port ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc_netdev; + } + netdev_init ( netdev, &exanic_operations ); + netdev->dev = dev; + port = netdev->priv; + memset ( port, 0, sizeof ( *port ) ); + exanic->port[index] = port; + port->netdev = netdev; + port->regs = port_regs; + timer_init ( &port->timer, exanic_expired, &netdev->refcnt ); + + /* Identify transmit region */ + port->tx_offset = readl ( port->regs + EXANIC_PORT_TX_OFFSET ); + if ( tx_len > EXANIC_MAX_TX_LEN ) + tx_len = EXANIC_MAX_TX_LEN; + assert ( ! ( tx_len & ( tx_len - 1 ) ) ); + port->tx = ( exanic->tx + port->tx_offset ); + port->tx_count = ( tx_len / sizeof ( struct exanic_tx_chunk ) ); + + /* Identify transmit feedback region */ + port->txf_slot = EXANIC_TXF_SLOT ( index ); + port->txf = ( exanic->txf + + ( port->txf_slot * sizeof ( *(port->txf) ) ) ); + + /* Allocate receive region (via umalloc()) */ + port->rx = umalloc ( EXANIC_RX_LEN ); + if ( ! 
port->rx ) { + rc = -ENOMEM; + goto err_alloc_rx; + } + + /* Set MAC address */ + memcpy ( netdev->hw_addr, exanic->mac, ETH_ALEN ); + netdev->hw_addr[ ETH_ALEN - 1 ] += index; + + /* Record default link speed and supported speeds */ + port->default_speed = readl ( port->regs + EXANIC_PORT_SPEED ); + port->speeds = ( exanic->caps & EXANIC_CAPS_SPEED_MASK ); + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + DBGC ( port, "EXANIC %s port %d TX [%#05zx,%#05zx) TXF %#02x RX " + "[%#lx,%#lx)\n", netdev->name, index, port->tx_offset, + ( port->tx_offset + tx_len ), port->txf_slot, + user_to_phys ( port->rx, 0 ), + user_to_phys ( port->rx, EXANIC_RX_LEN ) ); + + /* Set initial link state */ + exanic_check_link ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + ufree ( port->rx ); + err_alloc_rx: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc_netdev: + absent: + return rc; +} + +/** + * Remove port + * + * @v exanic ExaNIC device + * @v index Port number + */ +static void exanic_remove_port ( struct exanic *exanic, unsigned int index ) { + struct exanic_port *port; + + /* Do nothing if port is not physically present */ + port = exanic->port[index]; + if ( ! port ) + return; + + /* Unregister network device */ + unregister_netdev ( port->netdev ); + + /* Free receive region */ + ufree ( port->rx ); + + /* Free network device */ + netdev_nullify ( port->netdev ); + netdev_put ( port->netdev ); +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int exanic_probe ( struct pci_device *pci ) { + struct exanic *exanic; + unsigned long regs_bar_start; + unsigned long tx_bar_start; + size_t tx_bar_len; + int i; + int rc; + + /* Allocate and initialise structure */ + exanic = zalloc ( sizeof ( *exanic ) ); + if ( ! 
exanic ) { + rc = -ENOMEM; + goto err_alloc; + } + pci_set_drvdata ( pci, exanic ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + regs_bar_start = pci_bar_start ( pci, EXANIC_REGS_BAR ); + exanic->regs = pci_ioremap ( pci, regs_bar_start, EXANIC_REGS_LEN ); + if ( ! exanic->regs ) { + rc = -ENODEV; + goto err_ioremap_regs; + } + + /* Reset device */ + exanic_reset ( exanic ); + + /* Read capabilities */ + exanic->caps = readl ( exanic->regs + EXANIC_CAPS ); + + /* Power up PHYs */ + writel ( EXANIC_POWER_ON, ( exanic->regs + EXANIC_POWER ) ); + + /* Fetch base MAC address */ + if ( ( rc = exanic_fetch_mac ( exanic ) ) != 0 ) + goto err_fetch_mac; + DBGC ( exanic, "EXANIC %p capabilities %#08x base MAC %s\n", + exanic, exanic->caps, eth_ntoa ( exanic->mac ) ); + + /* Map transmit region */ + tx_bar_start = pci_bar_start ( pci, EXANIC_TX_BAR ); + tx_bar_len = pci_bar_size ( pci, EXANIC_TX_BAR ); + exanic->tx = pci_ioremap ( pci, tx_bar_start, tx_bar_len ); + if ( ! exanic->tx ) { + rc = -ENODEV; + goto err_ioremap_tx; + } + + /* Allocate transmit feedback region (shared between all ports) */ + exanic->txf = malloc_dma ( EXANIC_TXF_LEN, EXANIC_ALIGN ); + if ( ! 
exanic->txf ) { + rc = -ENOMEM; + goto err_alloc_txf; + } + memset ( exanic->txf, 0, EXANIC_TXF_LEN ); + exanic_write_base ( virt_to_bus ( exanic->txf ), + ( exanic->regs + EXANIC_TXF_BASE ) ); + + /* Allocate and initialise per-port network devices */ + for ( i = 0 ; i < EXANIC_MAX_PORTS ; i++ ) { + if ( ( rc = exanic_probe_port ( exanic, &pci->dev, i ) ) != 0 ) + goto err_probe_port; + } + + return 0; + + i = EXANIC_MAX_PORTS; + err_probe_port: + for ( i-- ; i >= 0 ; i-- ) + exanic_remove_port ( exanic, i ); + exanic_reset ( exanic ); + free_dma ( exanic->txf, EXANIC_TXF_LEN ); + err_alloc_txf: + iounmap ( exanic->tx ); + err_ioremap_tx: + iounmap ( exanic->regs ); + err_fetch_mac: + err_ioremap_regs: + free ( exanic ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void exanic_remove ( struct pci_device *pci ) { + struct exanic *exanic = pci_get_drvdata ( pci ); + unsigned int i; + + /* Remove all ports */ + for ( i = 0 ; i < EXANIC_MAX_PORTS ; i++ ) + exanic_remove_port ( exanic, i ); + + /* Reset device */ + exanic_reset ( exanic ); + + /* Free transmit feedback region */ + free_dma ( exanic->txf, EXANIC_TXF_LEN ); + + /* Unmap transmit region */ + iounmap ( exanic->tx ); + + /* Unmap registers */ + iounmap ( exanic->regs ); + + /* Free device */ + free ( exanic ); +} + +/** ExaNIC PCI device IDs */ +static struct pci_device_id exanic_ids[] = { + PCI_ROM ( 0x10ee, 0x2b00, "exanic-old", "ExaNIC (old)", 0 ), + PCI_ROM ( 0x1ce4, 0x0001, "exanic-x4", "ExaNIC X4", 0 ), + PCI_ROM ( 0x1ce4, 0x0002, "exanic-x2", "ExaNIC X2", 0 ), + PCI_ROM ( 0x1ce4, 0x0003, "exanic-x10", "ExaNIC X10", 0 ), + PCI_ROM ( 0x1ce4, 0x0004, "exanic-x10gm", "ExaNIC X10 GM", 0 ), + PCI_ROM ( 0x1ce4, 0x0005, "exanic-x40", "ExaNIC X40", 0 ), + PCI_ROM ( 0x1ce4, 0x0006, "exanic-x10hpt", "ExaNIC X10 HPT", 0 ), + PCI_ROM ( 0x1ce4, 0x0007, "exanic-x40g", "ExaNIC X40", 0 ), +}; + +/** ExaNIC PCI driver */ +struct pci_driver exanic_driver __pci_driver 
= { + .ids = exanic_ids, + .id_count = ( sizeof ( exanic_ids ) / sizeof ( exanic_ids[0] ) ), + .probe = exanic_probe, + .remove = exanic_remove, +}; diff --git a/src/drivers/net/exanic.h b/src/drivers/net/exanic.h new file mode 100644 index 00000000..041b9e21 --- /dev/null +++ b/src/drivers/net/exanic.h @@ -0,0 +1,261 @@ +#ifndef _EXANIC_H +#define _EXANIC_H + +/** @file + * + * Exablaze ExaNIC driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** Maximum number of ports */ +#define EXANIC_MAX_PORTS 8 + +/** Register BAR */ +#define EXANIC_REGS_BAR PCI_BASE_ADDRESS_0 + +/** Transmit region BAR */ +#define EXANIC_TX_BAR PCI_BASE_ADDRESS_2 + +/** Alignment for DMA regions */ +#define EXANIC_ALIGN 0x1000 + +/** Flag for 32-bit DMA addresses */ +#define EXANIC_DMA_32_BIT 0x00000001UL + +/** Register set length */ +#define EXANIC_REGS_LEN 0x2000 + +/** Transmit feedback region length */ +#define EXANIC_TXF_LEN 0x1000 + +/** Transmit feedback slot + * + * This is a policy decision. 
+ */ +#define EXANIC_TXF_SLOT( index ) ( 0x40 * (index) ) + +/** Receive region length */ +#define EXANIC_RX_LEN 0x200000 + +/** Transmit feedback base address register */ +#define EXANIC_TXF_BASE 0x0014 + +/** Capabilities register */ +#define EXANIC_CAPS 0x0038 +#define EXANIC_CAPS_100M 0x01000000UL /**< 100Mbps supported */ +#define EXANIC_CAPS_1G 0x02000000UL /**< 1Gbps supported */ +#define EXANIC_CAPS_10G 0x04000000UL /**< 10Gbps supported */ +#define EXANIC_CAPS_40G 0x08000000UL /**< 40Gbps supported */ +#define EXANIC_CAPS_100G 0x10000000UL /**< 100Gbps supported */ +#define EXANIC_CAPS_SPEED_MASK 0x1f000000UL /**< Supported speeds mask */ + +/** I2C GPIO register */ +#define EXANIC_I2C 0x012c + +/** Power control register */ +#define EXANIC_POWER 0x0138 +#define EXANIC_POWER_ON 0x000000f0UL /**< Power on PHYs */ + +/** Port register offset */ +#define EXANIC_PORT_REGS( index ) ( 0x0200 + ( 0x40 * (index) ) ) + +/** Port enable register */ +#define EXANIC_PORT_ENABLE 0x0000 +#define EXANIC_PORT_ENABLE_ENABLED 0x00000001UL /**< Port is enabled */ + +/** Port speed register */ +#define EXANIC_PORT_SPEED 0x0004 + +/** Port status register */ +#define EXANIC_PORT_STATUS 0x0008 +#define EXANIC_PORT_STATUS_LINK 0x00000008UL /**< Link is up */ +#define EXANIC_PORT_STATUS_ABSENT 0x80000000UL /**< Port is not present */ + +/** Port MAC address (second half) register */ +#define EXANIC_PORT_MAC 0x000c + +/** Port flags register */ +#define EXANIC_PORT_FLAGS 0x0010 +#define EXANIC_PORT_FLAGS_PROMISC 0x00000001UL /**< Promiscuous mode */ + +/** Port receive chunk base address register */ +#define EXANIC_PORT_RX_BASE 0x0014 + +/** Port transmit command register */ +#define EXANIC_PORT_TX_COMMAND 0x0020 + +/** Port transmit region offset register */ +#define EXANIC_PORT_TX_OFFSET 0x0024 + +/** Port transmit region length register */ +#define EXANIC_PORT_TX_LEN 0x0028 + +/** Port MAC address (first half) register */ +#define EXANIC_PORT_OUI 0x0030 + +/** Port interrupt 
configuration register */ +#define EXANIC_PORT_IRQ 0x0034 + +/** An ExaNIC transmit chunk descriptor */ +struct exanic_tx_descriptor { + /** Feedback ID */ + uint16_t txf_id; + /** Feedback slot */ + uint16_t txf_slot; + /** Payload length (including padding) */ + uint16_t len; + /** Payload type */ + uint8_t type; + /** Flags */ + uint8_t flags; +} __attribute__ (( packed )); + +/** An ExaNIC transmit chunk */ +struct exanic_tx_chunk { + /** Descriptor */ + struct exanic_tx_descriptor desc; + /** Padding */ + uint8_t pad[2]; + /** Payload data */ + uint8_t data[2038]; +} __attribute__ (( packed )); + +/** Raw Ethernet frame type */ +#define EXANIC_TYPE_RAW 0x01 + +/** An ExaNIC receive chunk descriptor */ +struct exanic_rx_descriptor { + /** Timestamp */ + uint32_t timestamp; + /** Status (valid only on final chunk) */ + uint8_t status; + /** Length (zero except on the final chunk) */ + uint8_t len; + /** Filter number */ + uint8_t filter; + /** Generation */ + uint8_t generation; +} __attribute__ (( packed )); + +/** An ExaNIC receive chunk */ +struct exanic_rx_chunk { + /** Payload data */ + uint8_t data[120]; + /** Descriptor */ + struct exanic_rx_descriptor desc; +} __attribute__ (( packed )); + +/** Receive status error mask */ +#define EXANIC_STATUS_ERROR_MASK 0x0f + +/** An ExaNIC I2C bus configuration */ +struct exanic_i2c_config { + /** GPIO bit for pulling SCL low */ + uint8_t setscl; + /** GPIO bit for pulling SDA low */ + uint8_t setsda; + /** GPIO bit for reading SDA */ + uint8_t getsda; +}; + +/** EEPROM address */ +#define EXANIC_EEPROM_ADDRESS 0x50 + +/** An ExaNIC port */ +struct exanic_port { + /** Network device */ + struct net_device *netdev; + /** Port registers */ + void *regs; + + /** Transmit region offset */ + size_t tx_offset; + /** Transmit region */ + void *tx; + /** Number of transmit descriptors */ + uint16_t tx_count; + /** Transmit producer counter */ + uint16_t tx_prod; + /** Transmit consumer counter */ + uint16_t tx_cons; + /** 
Transmit feedback slot */ + uint16_t txf_slot; + /** Transmit feedback region */ + uint16_t *txf; + + /** Receive region */ + userptr_t rx; + /** Receive consumer counter */ + unsigned int rx_cons; + /** Receive I/O buffer (if any) */ + struct io_buffer *rx_iobuf; + /** Receive status */ + int rx_rc; + + /** Port status */ + uint32_t status; + /** Default link speed (as raw register value) */ + uint32_t default_speed; + /** Speed capability bitmask */ + uint32_t speeds; + /** Current attempted link speed (as a capability bit index) */ + unsigned int speed; + /** Port status check timer */ + struct retry_timer timer; +}; + +/** An ExaNIC */ +struct exanic { + /** Registers */ + void *regs; + /** Transmit region */ + void *tx; + /** Transmit feedback region */ + void *txf; + + /** I2C bus configuration */ + struct exanic_i2c_config i2cfg; + /** I2C bit-bashing interface */ + struct i2c_bit_basher basher; + /** I2C serial EEPROM */ + struct i2c_device eeprom; + + /** Capabilities */ + uint32_t caps; + /** Base MAC address */ + uint8_t mac[ETH_ALEN]; + + /** Ports */ + struct exanic_port *port[EXANIC_MAX_PORTS]; +}; + +/** Maximum used length of transmit region + * + * This is a policy decision to avoid overflowing the 16-bit transmit + * producer and consumer counters. + */ +#define EXANIC_MAX_TX_LEN ( 256 * sizeof ( struct exanic_tx_chunk ) ) + +/** Maximum length of received packet + * + * This is a policy decision. + */ +#define EXANIC_MAX_RX_LEN ( ETH_FRAME_LEN + 4 /* VLAN */ + 4 /* CRC */ ) + +/** Interval between link state checks + * + * This is a policy decision. + */ +#define EXANIC_LINK_INTERVAL ( 1 * TICKS_PER_SEC ) + +#endif /* _EXANIC_H */ diff --git a/src/drivers/net/icplus.c b/src/drivers/net/icplus.c new file mode 100644 index 00000000..58092fad --- /dev/null +++ b/src/drivers/net/icplus.c @@ -0,0 +1,809 @@ +/* + * Copyright (C) 2018 Sylvie Barlow . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "icplus.h" + +/** @file + * + * IC+ network driver + * + */ + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v icp IC+ device + * @ret rc Return status code + */ +static int icplus_reset ( struct icplus_nic *icp ) { + uint32_t asicctrl; + unsigned int i; + + /* Trigger reset */ + writel ( ( ICP_ASICCTRL_GLOBALRESET | ICP_ASICCTRL_DMA | + ICP_ASICCTRL_FIFO | ICP_ASICCTRL_NETWORK | ICP_ASICCTRL_HOST | + ICP_ASICCTRL_AUTOINIT ), ( icp->regs + ICP_ASICCTRL ) ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < ICP_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if device is ready */ + asicctrl = readl ( icp->regs + ICP_ASICCTRL ); + if ( ! 
( asicctrl & ICP_ASICCTRL_RESETBUSY ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( icp, "ICPLUS %p timed out waiting for reset (asicctrl %#08x)\n", + icp, asicctrl ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * EEPROM interface + * + ****************************************************************************** + */ + +/** + * Read data from EEPROM + * + * @v nvs NVS device + * @v address Address from which to read + * @v data Data buffer + * @v len Length of data buffer + * @ret rc Return status code + */ +static int icplus_read_eeprom ( struct nvs_device *nvs, unsigned int address, + void *data, size_t len ) { + struct icplus_nic *icp = + container_of ( nvs, struct icplus_nic, eeprom ); + unsigned int i; + uint16_t eepromctrl; + uint16_t *data_word = data; + + /* Sanity check. We advertise a blocksize of one word, so + * should only ever receive single-word requests. + */ + assert ( len == sizeof ( *data_word ) ); + + /* Initiate read */ + writew ( ( ICP_EEPROMCTRL_OPCODE_READ | + ICP_EEPROMCTRL_ADDRESS ( address ) ), + ( icp->regs + ICP_EEPROMCTRL ) ); + + /* Wait for read to complete */ + for ( i = 0 ; i < ICP_EEPROM_MAX_WAIT_MS ; i++ ) { + + /* If read is not complete, delay 1ms and retry */ + eepromctrl = readw ( icp->regs + ICP_EEPROMCTRL ); + if ( eepromctrl & ICP_EEPROMCTRL_BUSY ) { + mdelay ( 1 ); + continue; + } + + /* Extract data */ + *data_word = cpu_to_le16 ( readw ( icp->regs + ICP_EEPROMDATA )); + return 0; + } + + DBGC ( icp, "ICPLUS %p timed out waiting for EEPROM read\n", icp ); + return -ETIMEDOUT; +} + +/** + * Write data to EEPROM + * + * @v nvs NVS device + * @v address Address to which to write + * @v data Data buffer + * @v len Length of data buffer + * @ret rc Return status code + */ +static int icplus_write_eeprom ( struct nvs_device *nvs, + unsigned int address __unused, + const void *data __unused, + size_t len __unused ) { + struct icplus_nic *icp = 
+ container_of ( nvs, struct icplus_nic, eeprom ); + + DBGC ( icp, "ICPLUS %p EEPROM write not supported\n", icp ); + return -ENOTSUP; +} + +/** + * Initialise EEPROM + * + * @v icp IC+ device + */ +static void icplus_init_eeprom ( struct icplus_nic *icp ) { + + /* The hardware supports only single-word reads */ + icp->eeprom.word_len_log2 = ICP_EEPROM_WORD_LEN_LOG2; + icp->eeprom.size = ICP_EEPROM_MIN_SIZE_WORDS; + icp->eeprom.block_size = 1; + icp->eeprom.read = icplus_read_eeprom; + icp->eeprom.write = icplus_write_eeprom; +} + +/****************************************************************************** + * + * MII interface + * + ****************************************************************************** + */ + +/** Pin mapping for MII bit-bashing interface */ +static const uint8_t icplus_mii_bits[] = { + [MII_BIT_MDC] = ICP_PHYCTRL_MGMTCLK, + [MII_BIT_MDIO] = ICP_PHYCTRL_MGMTDATA, + [MII_BIT_DRIVE] = ICP_PHYCTRL_MGMTDIR, +}; + +/** + * Read input bit + * + * @v basher Bit-bashing interface + * @v bit_id Bit number + * @ret zero Input is a logic 0 + * @ret non-zero Input is a logic 1 + */ +static int icplus_mii_read_bit ( struct bit_basher *basher, + unsigned int bit_id ) { + struct icplus_nic *icp = container_of ( basher, struct icplus_nic, + miibit.basher ); + uint8_t mask = icplus_mii_bits[bit_id]; + uint8_t reg; + + DBG_DISABLE ( DBGLVL_IO ); + reg = readb ( icp->regs + ICP_PHYCTRL ); + DBG_ENABLE ( DBGLVL_IO ); + return ( reg & mask ); +} + +/** + * Set/clear output bit + * + * @v basher Bit-bashing interface + * @v bit_id Bit number + * @v data Value to write + */ +static void icplus_mii_write_bit ( struct bit_basher *basher, + unsigned int bit_id, unsigned long data ) { + struct icplus_nic *icp = container_of ( basher, struct icplus_nic, + miibit.basher ); + uint8_t mask = icplus_mii_bits[bit_id]; + uint8_t reg; + + DBG_DISABLE ( DBGLVL_IO ); + reg = readb ( icp->regs + ICP_PHYCTRL ); + reg &= ~mask; + reg |= ( data & mask ); + writeb ( reg, 
icp->regs + ICP_PHYCTRL ); + readb ( icp->regs + ICP_PHYCTRL ); /* Ensure write reaches chip */ + DBG_ENABLE ( DBGLVL_IO ); +} + +/** MII bit-bashing interface */ +static struct bit_basher_operations icplus_basher_ops = { + .read = icplus_mii_read_bit, + .write = icplus_mii_write_bit, +}; + +/****************************************************************************** + * + * Link state + * + ****************************************************************************** + */ + +/** + * Configure PHY + * + * @v icp IC+ device + * @ret rc Return status code + */ +static int icplus_init_phy ( struct icplus_nic *icp ) { + uint32_t asicctrl; + int rc; + + /* Find PHY address */ + if ( ( rc = mii_find ( &icp->mii ) ) != 0 ) { + DBGC ( icp, "ICPLUS %p could not find PHY address: %s\n", + icp, strerror ( rc ) ); + return rc; + } + + /* Configure PHY to advertise 1000Mbps if applicable */ + asicctrl = readl ( icp->regs + ICP_ASICCTRL ); + if ( asicctrl & ICP_ASICCTRL_PHYSPEED1000 ) { + if ( ( rc = mii_write ( &icp->mii, MII_CTRL1000, + ADVERTISE_1000FULL ) ) != 0 ) { + DBGC ( icp, "ICPLUS %p could not advertise 1000Mbps: " + "%s\n", icp, strerror ( rc ) ); + return rc; + } + } + + /* Reset PHY */ + if ( ( rc = mii_reset ( &icp->mii ) ) != 0 ) { + DBGC ( icp, "ICPLUS %p could not reset PHY: %s\n", + icp, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Check link state + * + * @v netdev Network device + */ +static void icplus_check_link ( struct net_device *netdev ) { + struct icplus_nic *icp = netdev->priv; + uint8_t phyctrl; + + /* Read link status */ + phyctrl = readb ( icp->regs + ICP_PHYCTRL ); + DBGC ( icp, "ICPLUS %p PHY control is %02x\n", icp, phyctrl ); + + /* Update network device */ + if ( phyctrl & ICP_PHYCTRL_LINKSPEED ) { + netdev_link_up ( netdev ); + } else { + netdev_link_down ( netdev ); + } +} + +/****************************************************************************** + * + * Network device interface + * + 
****************************************************************************** + */ + +/** + * Set descriptor ring base address + * + * @v icp IC+ device + * @v offset Register offset + * @v address Base address + */ +static inline void icplus_set_base ( struct icplus_nic *icp, unsigned int offset, + void *base ) { + physaddr_t phys = virt_to_bus ( base ); + + /* Program base address registers */ + writel ( ( phys & 0xffffffffUL ), + ( icp->regs + offset + ICP_BASE_LO ) ); + if ( sizeof ( phys ) > sizeof ( uint32_t ) ) { + writel ( ( ( ( uint64_t ) phys ) >> 32 ), + ( icp->regs + offset + ICP_BASE_HI ) ); + } else { + writel ( 0, ( icp->regs + offset + ICP_BASE_HI ) ); + } +} + +/** + * Create descriptor ring + * + * @v icp IC+ device + * @v ring Descriptor ring + * @ret rc Return status code + */ +static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring ) { + size_t len = ( sizeof ( ring->entry[0] ) * ICP_NUM_DESC ); + int rc; + unsigned int i; + struct icplus_descriptor *desc; + struct icplus_descriptor *next; + + /* Allocate descriptor ring */ + ring->entry = malloc_dma ( len, ICP_ALIGN ); + if ( ! ring->entry ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Initialise descriptor ring */ + memset ( ring->entry, 0, len ); + for ( i = 0 ; i < ICP_NUM_DESC ; i++ ) { + desc = &ring->entry[i]; + next = &ring->entry[ ( i + 1 ) % ICP_NUM_DESC ]; + desc->next = cpu_to_le64 ( virt_to_bus ( next ) ); + desc->flags = ( ICP_TX_UNALIGN | ICP_TX_INDICATE ); + desc->control = ( ICP_TX_SOLE_FRAG | ICP_DONE ); + } + + /* Reset transmit producer & consumer counters */ + ring->prod = 0; + ring->cons = 0; + + DBGC ( icp, "ICP %p %s ring at [%#08lx,%#08lx)\n", + icp, ( ( ring->listptr == ICP_TFDLISTPTR ) ? 
"TX" : "RX" ), + virt_to_bus ( ring->entry ), + ( virt_to_bus ( ring->entry ) + len ) ); + return 0; + + free_dma ( ring->entry, len ); + ring->entry = NULL; + err_alloc: + return rc; +} + +/** + * Destroy descriptor ring + * + * @v icp IC+ device + * @v ring Descriptor ring + */ +static void icplus_destroy_ring ( struct icplus_nic *icp __unused, + struct icplus_ring *ring ) { + size_t len = ( sizeof ( ring->entry[0] ) * ICP_NUM_DESC ); + + /* Free descriptor ring */ + free_dma ( ring->entry, len ); + ring->entry = NULL; +} + +/** + * Refill receive descriptor ring + * + * @v icp IC+ device + */ +void icplus_refill_rx ( struct icplus_nic *icp ) { + struct icplus_descriptor *desc; + struct io_buffer *iobuf; + unsigned int rx_idx; + physaddr_t address; + unsigned int refilled = 0; + + /* Refill ring */ + while ( ( icp->rx.prod - icp->rx.cons ) < ICP_NUM_DESC ) { + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( ICP_RX_MAX_LEN ); + if ( ! iobuf ) { + /* Wait for next refill */ + break; + } + + /* Get next receive descriptor */ + rx_idx = ( icp->rx.prod++ % ICP_NUM_DESC ); + desc = &icp->rx.entry[rx_idx]; + + /* Populate receive descriptor */ + address = virt_to_bus ( iobuf->data ); + desc->data.address = cpu_to_le64 ( address ); + desc->data.len = cpu_to_le16 ( ICP_RX_MAX_LEN ); + wmb(); + desc->control = 0; + + /* Record I/O buffer */ + assert ( icp->rx_iobuf[rx_idx] == NULL ); + icp->rx_iobuf[rx_idx] = iobuf; + + DBGC2 ( icp, "ICP %p RX %d is [%llx,%llx)\n", icp, rx_idx, + ( ( unsigned long long ) address ), + ( ( unsigned long long ) address + ICP_RX_MAX_LEN ) ); + refilled++; + } + + /* Push descriptors to card, if applicable */ + if ( refilled ) { + wmb(); + writew ( ICP_DMACTRL_RXPOLLNOW, icp->regs + ICP_DMACTRL ); + } +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int icplus_open ( struct net_device *netdev ) { + struct icplus_nic *icp = netdev->priv; + int rc; + + /* Create transmit descriptor 
ring */ + if ( ( rc = icplus_create_ring ( icp, &icp->tx ) ) != 0 ) + goto err_create_tx; + + /* Create receive descriptor ring */ + if ( ( rc = icplus_create_ring ( icp, &icp->rx ) ) != 0 ) + goto err_create_rx; + + /* Program descriptor base address */ + icplus_set_base ( icp, icp->tx.listptr, icp->tx.entry ); + icplus_set_base ( icp, icp->rx.listptr, icp->rx.entry ); + + /* Enable receive mode */ + writew ( ( ICP_RXMODE_UNICAST | ICP_RXMODE_MULTICAST | + ICP_RXMODE_BROADCAST | ICP_RXMODE_ALLFRAMES ), + icp->regs + ICP_RXMODE ); + + /* Enable transmitter and receiver */ + writel ( ( ICP_MACCTRL_TXENABLE | ICP_MACCTRL_RXENABLE | + ICP_MACCTRL_DUPLEX ), icp->regs + ICP_MACCTRL ); + + /* Fill receive ring */ + icplus_refill_rx ( icp ); + + /* Check link state */ + icplus_check_link ( netdev ); + + return 0; + + icplus_reset ( icp ); + icplus_destroy_ring ( icp, &icp->rx ); + err_create_rx: + icplus_destroy_ring ( icp, &icp->tx ); + err_create_tx: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void icplus_close ( struct net_device *netdev ) { + struct icplus_nic *icp = netdev->priv; + unsigned int i; + + /* Perform global reset */ + icplus_reset ( icp ); + + /* Destroy receive descriptor ring */ + icplus_destroy_ring ( icp, &icp->rx ); + + /* Destroy transmit descriptor ring */ + icplus_destroy_ring ( icp, &icp->tx ); + + /* Discard any unused receive buffers */ + for ( i = 0 ; i < ICP_NUM_DESC ; i++ ) { + if ( icp->rx_iobuf[i] ) + free_iob ( icp->rx_iobuf[i] ); + icp->rx_iobuf[i] = NULL; + } +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int icplus_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct icplus_nic *icp = netdev->priv; + struct icplus_descriptor *desc; + unsigned int tx_idx; + physaddr_t address; + + /* Check if ring is full */ + if ( ( icp->tx.prod - icp->tx.cons ) >= ICP_NUM_DESC ) { + DBGC ( icp, 
"ICP %p out of transmit descriptors\n", icp ); + return -ENOBUFS; + } + + /* Find TX descriptor entry to use */ + tx_idx = ( icp->tx.prod++ % ICP_NUM_DESC ); + desc = &icp->tx.entry[tx_idx]; + + /* Fill in TX descriptor */ + address = virt_to_bus ( iobuf->data ); + desc->data.address = cpu_to_le64 ( address ); + desc->data.len = cpu_to_le16 ( iob_len ( iobuf ) ); + wmb(); + desc->control = ICP_TX_SOLE_FRAG; + wmb(); + + /* Ring doorbell */ + writew ( ICP_DMACTRL_TXPOLLNOW, icp->regs + ICP_DMACTRL ); + + DBGC2 ( icp, "ICP %p TX %d is [%llx,%llx)\n", icp, tx_idx, + ( ( unsigned long long ) address ), + ( ( unsigned long long ) address + iob_len ( iobuf ) ) ); + DBGC2_HDA ( icp, virt_to_phys ( desc ), desc, sizeof ( *desc ) ); + return 0; +} + +/** + * Poll for completed packets + * + * @v netdev Network device + */ +static void icplus_poll_tx ( struct net_device *netdev ) { + struct icplus_nic *icp = netdev->priv; + struct icplus_descriptor *desc; + unsigned int tx_idx; + + /* Check for completed packets */ + while ( icp->tx.cons != icp->tx.prod ) { + + /* Get next transmit descriptor */ + tx_idx = ( icp->tx.cons % ICP_NUM_DESC ); + desc = &icp->tx.entry[tx_idx]; + + /* Stop if descriptor is still in use */ + if ( ! ( desc->control & ICP_DONE ) ) + return; + + /* Complete TX descriptor */ + DBGC2 ( icp, "ICP %p TX %d complete\n", icp, tx_idx ); + netdev_tx_complete_next ( netdev ); + icp->tx.cons++; + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void icplus_poll_rx ( struct net_device *netdev ) { + struct icplus_nic *icp = netdev->priv; + struct icplus_descriptor *desc; + struct io_buffer *iobuf; + unsigned int rx_idx; + size_t len; + + /* Check for received packets */ + while ( icp->rx.cons != icp->rx.prod ) { + + /* Get next transmit descriptor */ + rx_idx = ( icp->rx.cons % ICP_NUM_DESC ); + desc = &icp->rx.entry[rx_idx]; + + /* Stop if descriptor is still in use */ + if ( ! 
( desc->control & ICP_DONE ) ) + return; + + /* Populate I/O buffer */ + iobuf = icp->rx_iobuf[rx_idx]; + icp->rx_iobuf[rx_idx] = NULL; + len = le16_to_cpu ( desc->len ); + iob_put ( iobuf, len ); + + /* Hand off to network stack */ + if ( desc->flags & ( ICP_RX_ERR_OVERRUN | ICP_RX_ERR_RUNT | + ICP_RX_ERR_ALIGN | ICP_RX_ERR_FCS | + ICP_RX_ERR_OVERSIZED | ICP_RX_ERR_LEN ) ) { + DBGC ( icp, "ICP %p RX %d error (length %zd, " + "flags %02x)\n", icp, rx_idx, len, desc->flags ); + netdev_rx_err ( netdev, iobuf, -EIO ); + } else { + DBGC2 ( icp, "ICP %p RX %d complete (length " + "%zd)\n", icp, rx_idx, len ); + netdev_rx ( netdev, iobuf ); + } + icp->rx.cons++; + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void icplus_poll ( struct net_device *netdev ) { + struct icplus_nic *icp = netdev->priv; + uint16_t intstatus; + uint32_t txstatus; + + /* Check for interrupts */ + intstatus = readw ( icp->regs + ICP_INTSTATUS ); + + /* Poll for TX completions, if applicable */ + if ( intstatus & ICP_INTSTATUS_TXCOMPLETE ) { + txstatus = readl ( icp->regs + ICP_TXSTATUS ); + if ( txstatus & ICP_TXSTATUS_ERROR ) + DBGC ( icp, "ICP %p TX error: %08x\n", icp, txstatus ); + icplus_poll_tx ( netdev ); + } + + /* Poll for RX completions, if applicable */ + if ( intstatus & ICP_INTSTATUS_RXDMACOMPLETE ) { + writew ( ICP_INTSTATUS_RXDMACOMPLETE, icp->regs + ICP_INTSTATUS ); + icplus_poll_rx ( netdev ); + } + + /* Check link state, if applicable */ + if ( intstatus & ICP_INTSTATUS_LINKEVENT ) { + writew ( ICP_INTSTATUS_LINKEVENT, icp->regs + ICP_INTSTATUS ); + icplus_check_link ( netdev ); + } + + /* Refill receive ring */ + icplus_refill_rx ( icp ); +} + +/** + * Enable or disable interrupts + * + * @v netdev Network device + * @v enable Interrupts should be enabled + */ +static void icplus_irq ( struct net_device *netdev, int enable ) { + struct icplus_nic *icp = netdev->priv; + + DBGC ( icp, "ICPLUS %p does not yet support 
interrupts\n", icp ); + ( void ) enable; +} + +/** IC+ network device operations */ +static struct net_device_operations icplus_operations = { + .open = icplus_open, + .close = icplus_close, + .transmit = icplus_transmit, + .poll = icplus_poll, + .irq = icplus_irq, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int icplus_probe ( struct pci_device *pci ) { + struct net_device *netdev; + struct icplus_nic *icp; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *icp ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &icplus_operations ); + icp = netdev->priv; + pci_set_drvdata ( pci, netdev ); + netdev->dev = &pci->dev; + memset ( icp, 0, sizeof ( *icp ) ); + icp->miibit.basher.op = &icplus_basher_ops; + init_mii_bit_basher ( &icp->miibit ); + mii_init ( &icp->mii, &icp->miibit.mdio, 0 ); + icp->tx.listptr = ICP_TFDLISTPTR; + icp->rx.listptr = ICP_RFDLISTPTR; + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + icp->regs = pci_ioremap ( pci, pci->membase, ICP_BAR_SIZE ); + if ( ! 
icp->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Reset the NIC */ + if ( ( rc = icplus_reset ( icp ) ) != 0 ) + goto err_reset; + + /* Initialise EEPROM */ + icplus_init_eeprom ( icp ); + + /* Read EEPROM MAC address */ + if ( ( rc = nvs_read ( &icp->eeprom, ICP_EEPROM_MAC, + netdev->hw_addr, ETH_ALEN ) ) != 0 ) { + DBGC ( icp, "ICPLUS %p could not read EEPROM MAC address: %s\n", + icp, strerror ( rc ) ); + goto err_eeprom; + } + + /* Configure PHY */ + if ( ( rc = icplus_init_phy ( icp ) ) != 0 ) + goto err_phy; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Set initial link state */ + icplus_check_link ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_phy: + err_eeprom: + icplus_reset ( icp ); + err_reset: + iounmap ( icp->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void icplus_remove ( struct pci_device *pci ) { + struct net_device *netdev = pci_get_drvdata ( pci ); + struct icplus_nic *icp = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Reset card */ + icplus_reset ( icp ); + + /* Free network device */ + iounmap ( icp->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** IC+ PCI device IDs */ +static struct pci_device_id icplus_nics[] = { + PCI_ROM ( 0x13f0, 0x1023, "ip1000a", "IP1000A", 0 ), +}; + +/** IC+ PCI driver */ +struct pci_driver icplus_driver __pci_driver = { + .ids = icplus_nics, + .id_count = ( sizeof ( icplus_nics ) / sizeof ( icplus_nics[0] ) ), + .probe = icplus_probe, + .remove = icplus_remove, +}; diff --git a/src/drivers/net/icplus.h b/src/drivers/net/icplus.h new file mode 100644 index 00000000..35fa422a --- /dev/null +++ b/src/drivers/net/icplus.h @@ -0,0 +1,206 @@ +#ifndef _ICPLUS_H +#define _ICPLUS_H + +/** @file + * + * IC+ 
network driver + * + */ + +#include +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** BAR size */ +#define ICP_BAR_SIZE 0x200 + +/** Alignment requirement */ +#define ICP_ALIGN 0x8 + +/** Base address low register offset */ +#define ICP_BASE_LO 0x0 + +/** Base address high register offset */ +#define ICP_BASE_HI 0x4 + +/** ASIC control register (double word) */ +#define ICP_ASICCTRL 0x30 +#define ICP_ASICCTRL_PHYSPEED1000 0x00000040UL /**< PHY speed 1000 */ +#define ICP_ASICCTRL_GLOBALRESET 0x00010000UL /**< Global reset */ +#define ICP_ASICCTRL_DMA 0x00080000UL /**< DMA */ +#define ICP_ASICCTRL_FIFO 0x00100000UL /**< FIFO */ +#define ICP_ASICCTRL_NETWORK 0x00200000UL /**< Network */ +#define ICP_ASICCTRL_HOST 0x00400000UL /**< Host */ +#define ICP_ASICCTRL_AUTOINIT 0x00800000UL /**< Auto init */ +#define ICP_ASICCTRL_RESETBUSY 0x04000000UL /**< Reset busy */ + +/** Maximum time to wait for reset */ +#define ICP_RESET_MAX_WAIT_MS 1000 + +/** DMA control register (word/double word) */ +#define ICP_DMACTRL 0x00 +#define ICP_DMACTRL_RXPOLLNOW 0x0010 /**< Receive poll now */ +#define ICP_DMACTRL_TXPOLLNOW 0x1000 /**< Transmit poll now */ + +/** EEPROM control register (word) */ +#define ICP_EEPROMCTRL 0x4a +#define ICP_EEPROMCTRL_ADDRESS( x ) ( (x) << 0 ) /**< Address */ +#define ICP_EEPROMCTRL_OPCODE( x ) ( (x) << 8 ) /**< Opcode */ +#define ICP_EEPROMCTRL_OPCODE_READ \ + ICP_EEPROMCTRL_OPCODE ( 2 ) /**< Read register */ +#define ICP_EEPROMCTRL_BUSY 0x8000 /**< EEPROM busy */ + +/** Maximum time to wait for reading EEPROM */ +#define ICP_EEPROM_MAX_WAIT_MS 1000 + +/** EEPROM word length */ +#define ICP_EEPROM_WORD_LEN_LOG2 1 + +/** Minimum EEPROM size, in words */ +#define ICP_EEPROM_MIN_SIZE_WORDS 0x20 + +/** Address of MAC address within EEPROM */ +#define ICP_EEPROM_MAC 0x10 + +/** EEPROM data register (word) */ +#define ICP_EEPROMDATA 0x48 + +/** Interrupt status register (word) */ +#define ICP_INTSTATUS 0x5e +#define ICP_INTSTATUS_TXCOMPLETE 0x0004 /**< 
TX complete */ +#define ICP_INTSTATUS_LINKEVENT 0x0100 /**< Link event */ +#define ICP_INTSTATUS_RXDMACOMPLETE 0x0400 /**< RX DMA complete */ + +/** MAC control register (double word) */ +#define ICP_MACCTRL 0x6c +#define ICP_MACCTRL_DUPLEX 0x00000020UL /**< Duplex select */ +#define ICP_MACCTRL_TXENABLE 0x01000000UL /**< TX enable */ +#define ICP_MACCTRL_TXDISABLE 0x02000000UL /**< TX disable */ +#define ICP_MACCTRL_RXENABLE 0x08000000UL /**< RX enable */ +#define ICP_MACCTRL_RXDISABLE 0x10000000UL /**< RX disable */ + +/** PHY control register (byte) */ +#define ICP_PHYCTRL 0x76 +#define ICP_PHYCTRL_MGMTCLK 0x01 /**< Management clock */ +#define ICP_PHYCTRL_MGMTDATA 0x02 /**< Management data */ +#define ICP_PHYCTRL_MGMTDIR 0x04 /**< Management direction */ +#define ICP_PHYCTRL_LINKSPEED 0xc0 /**< Link speed */ + +/** Receive mode register (word) */ +#define ICP_RXMODE 0x88 +#define ICP_RXMODE_UNICAST 0x0001 /**< Receive unicast */ +#define ICP_RXMODE_MULTICAST 0x0002 /**< Receive multicast */ +#define ICP_RXMODE_BROADCAST 0x0004 /**< Receive broadcast */ +#define ICP_RXMODE_ALLFRAMES 0x0008 /**< Receive all frames */ + +/** List pointer receive register */ +#define ICP_RFDLISTPTR 0x1c + +/** List pointer transmit register */ +#define ICP_TFDLISTPTR 0x10 + +/** Transmit status register */ +#define ICP_TXSTATUS 0x60 +#define ICP_TXSTATUS_ERROR 0x00000001UL /**< TX error */ + +/** Data fragment */ +union icplus_fragment { + /** Address of data */ + uint64_t address; + /** Length */ + struct { + /** Reserved */ + uint8_t reserved[6]; + /** Length of data */ + uint16_t len; + }; +}; + +/** Transmit or receive descriptor */ +struct icplus_descriptor { + /** Address of next descriptor */ + uint64_t next; + /** Actual length */ + uint16_t len; + /** Flags */ + uint8_t flags; + /** Control */ + uint8_t control; + /** VLAN */ + uint16_t vlan; + /** Reserved */ + uint16_t reserved_a; + /** Data buffer */ + union icplus_fragment data; + /** Reserved */ + uint8_t 
reserved_b[8]; +}; + +/** Descriptor complete */ +#define ICP_DONE 0x80 + +/** Transmit alignment disabled */ +#define ICP_TX_UNALIGN 0x01 + +/** Request transmit completion */ +#define ICP_TX_INDICATE 0x40 + +/** Sole transmit fragment */ +#define ICP_TX_SOLE_FRAG 0x01 + +/** Receive frame overrun error */ +#define ICP_RX_ERR_OVERRUN 0x01 + +/** Receive runt frame error */ +#define ICP_RX_ERR_RUNT 0x02 + +/** Receive alignment error */ +#define ICP_RX_ERR_ALIGN 0x04 + +/** Receive FCS error */ +#define ICP_RX_ERR_FCS 0x08 + +/** Receive oversized frame error */ +#define ICP_RX_ERR_OVERSIZED 0x10 + +/** Receive length error */ +#define ICP_RX_ERR_LEN 0x20 + +/** Descriptor ring */ +struct icplus_ring { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** Ring entries */ + struct icplus_descriptor *entry; + /** List pointer register */ + unsigned int listptr; +}; + +/** Number of descriptors */ +#define ICP_NUM_DESC 4 + +/** Maximum receive packet length */ +#define ICP_RX_MAX_LEN ETH_FRAME_LEN + +/** An IC+ network card */ +struct icplus_nic { + /** Registers */ + void *regs; + /** EEPROM */ + struct nvs_device eeprom; + /** MII bit bashing interface */ + struct mii_bit_basher miibit; + /** MII device */ + struct mii_device mii; + /** Transmit descriptor ring */ + struct icplus_ring tx; + /** Receive descriptor ring */ + struct icplus_ring rx; + /** Receive I/O buffers */ + struct io_buffer *rx_iobuf[ICP_NUM_DESC]; +}; + +#endif /* _ICPLUS_H */ diff --git a/src/drivers/net/intelvf.c b/src/drivers/net/intelvf.c new file mode 100644 index 00000000..0d48b417 --- /dev/null +++ b/src/drivers/net/intelvf.c @@ -0,0 +1,342 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include "intelvf.h" + +/** @file + * + * Intel 10/100/1000 virtual function network card driver + * + */ + +/****************************************************************************** + * + * Mailbox messages + * + ****************************************************************************** + */ + +/** + * Write message to mailbox + * + * @v intel Intel device + * @v msg Message + */ +static void intelvf_mbox_write ( struct intel_nic *intel, + const union intelvf_msg *msg ) { + const struct intelvf_msg_raw *raw = &msg->raw; + unsigned int i; + + /* Write message */ + DBGC2 ( intel, "INTEL %p sending message", intel ); + for ( i = 0 ; i < ( sizeof ( *msg ) / sizeof ( raw->dword[0] ) ) ; i++){ + DBGC2 ( intel, "%c%08x", ( i ? 
':' : ' ' ), raw->dword[i] ); + writel ( raw->dword[i], ( intel->regs + intel->mbox.mem + + ( i * sizeof ( raw->dword[0] ) ) ) ); + } + DBGC2 ( intel, "\n" ); +} + +/** + * Read message from mailbox + * + * @v intel Intel device + * @v msg Message + */ +static void intelvf_mbox_read ( struct intel_nic *intel, + union intelvf_msg *msg ) { + struct intelvf_msg_raw *raw = &msg->raw; + unsigned int i; + + /* Read message */ + DBGC2 ( intel, "INTEL %p received message", intel ); + for ( i = 0 ; i < ( sizeof ( *msg ) / sizeof ( raw->dword[0] ) ) ; i++){ + raw->dword[i] = readl ( intel->regs + intel->mbox.mem + + ( i * sizeof ( raw->dword[0] ) ) ); + DBGC2 ( intel, "%c%08x", ( i ? ':' : ' ' ), raw->dword[i] ); + } + DBGC2 ( intel, "\n" ); +} + +/** + * Poll mailbox + * + * @v intel Intel device + * @ret rc Return status code + * + * Note that polling the mailbox may fail if the underlying PF is + * reset. + */ +int intelvf_mbox_poll ( struct intel_nic *intel ) { + struct intel_mailbox *mbox = &intel->mbox; + union intelvf_msg msg; + uint32_t ctrl; + + /* Get mailbox status */ + ctrl = readl ( intel->regs + mbox->ctrl ); + + /* Fail if a reset is in progress */ + if ( ctrl & INTELVF_MBCTRL_RSTI ) + return -EPIPE; + + /* Acknowledge (and ignore) any received messages */ + if ( ctrl & INTELVF_MBCTRL_PFSTS ) { + intelvf_mbox_read ( intel, &msg ); + writel ( INTELVF_MBCTRL_ACK, intel->regs + mbox->ctrl ); + } + + return 0; +} + +/** + * Wait for PF reset to complete + * + * @v intel Intel device + * @ret rc Return status code + */ +int intelvf_mbox_wait ( struct intel_nic *intel ) { + unsigned int i; + int rc; + + /* Wait until a poll completes successfully */ + for ( i = 0 ; i < INTELVF_MBOX_MAX_WAIT_MS ; i++ ) { + + /* Check for successful poll */ + if ( ( rc = intelvf_mbox_poll ( intel ) ) == 0 ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( intel, "INTEL %p timed out waiting for reset\n", intel ); + return -ETIMEDOUT; +} + +/** + * Send/receive mailbox message 
+ * + * @v intel Intel device + * @v msg Message buffer + * @ret rc Return status code + */ +int intelvf_mbox_msg ( struct intel_nic *intel, union intelvf_msg *msg ) { + struct intel_mailbox *mbox = &intel->mbox; + uint32_t ctrl; + uint32_t seen = 0; + unsigned int i; + + /* Sanity check */ + assert ( ! ( msg->hdr & INTELVF_MSG_RESPONSE ) ); + + /* Handle mailbox */ + for ( i = 0 ; i < INTELVF_MBOX_MAX_WAIT_MS ; i++ ) { + + /* Attempt to claim mailbox, if we have not yet sent + * our message. + */ + if ( ! ( seen & INTELVF_MBCTRL_VFU ) ) + writel ( INTELVF_MBCTRL_VFU, intel->regs + mbox->ctrl ); + + /* Get mailbox status and record observed flags */ + ctrl = readl ( intel->regs + mbox->ctrl ); + seen |= ctrl; + + /* If a reset is in progress, clear VFU and abort */ + if ( ctrl & INTELVF_MBCTRL_RSTI ) { + writel ( 0, intel->regs + mbox->ctrl ); + return -EPIPE; + } + + /* Write message to mailbox, if applicable. This + * potentially overwrites a message sent by the PF (if + * the PF has simultaneously released PFU (thus + * allowing our VFU) and asserted PFSTS), but that + * doesn't really matter since there are no + * unsolicited PF->VF messages that require the actual + * message content to be observed. + */ + if ( ctrl & INTELVF_MBCTRL_VFU ) + intelvf_mbox_write ( intel, msg ); + + /* Read message from mailbox, if applicable. */ + if ( ( seen & INTELVF_MBCTRL_VFU ) && + ( seen & INTELVF_MBCTRL_PFACK ) && + ( ctrl & INTELVF_MBCTRL_PFSTS ) ) + intelvf_mbox_read ( intel, msg ); + + /* Acknowledge received message (if applicable), + * release VFU lock, and send message (if applicable). + */ + ctrl = ( ( ( ctrl & INTELVF_MBCTRL_PFSTS ) ? + INTELVF_MBCTRL_ACK : 0 ) | + ( ( ctrl & INTELVF_MBCTRL_VFU ) ? 
+ INTELVF_MBCTRL_REQ : 0 ) ); + writel ( ctrl, intel->regs + mbox->ctrl ); + + /* Exit successfully if we have received a response */ + if ( msg->hdr & INTELVF_MSG_RESPONSE ) { + + /* Sanity check */ + assert ( seen & INTELVF_MBCTRL_VFU ); + assert ( seen & INTELVF_MBCTRL_PFACK ); + assert ( seen & INTELVF_MBCTRL_PFSTS ); + + return 0; + } + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( intel, "INTEL %p timed out waiting for mailbox (seen %08x)\n", + intel, seen ); + return -ETIMEDOUT; +} + +/** + * Send reset message and get initial MAC address + * + * @v intel Intel device + * @v hw_addr Hardware address to fill in, or NULL + * @ret rc Return status code + */ +int intelvf_mbox_reset ( struct intel_nic *intel, uint8_t *hw_addr ) { + union intelvf_msg msg; + int rc; + + /* Send reset message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.hdr = INTELVF_MSG_TYPE_RESET; + if ( ( rc = intelvf_mbox_msg ( intel, &msg ) ) != 0 ) { + DBGC ( intel, "INTEL %p reset failed: %s\n", + intel, strerror ( rc ) ); + return rc; + } + + /* Check response */ + if ( ( msg.hdr & INTELVF_MSG_TYPE_MASK ) != INTELVF_MSG_TYPE_RESET ) { + DBGC ( intel, "INTEL %p reset unexpected response:\n", intel ); + DBGC_HDA ( intel, 0, &msg, sizeof ( msg ) ); + return -EPROTO; + } + + /* Fill in MAC address, if applicable */ + if ( hw_addr ) { + if ( msg.hdr & INTELVF_MSG_ACK ) { + memcpy ( hw_addr, msg.mac.mac, sizeof ( msg.mac.mac ) ); + DBGC ( intel, "INTEL %p reset assigned MAC address " + "%s\n", intel, eth_ntoa ( hw_addr ) ); + } else { + eth_random_addr ( hw_addr ); + DBGC ( intel, "INTEL %p reset generated MAC address " + "%s\n", intel, eth_ntoa ( hw_addr ) ); + } + } + + return 0; +} + +/** + * Send set MAC address message + * + * @v intel Intel device + * @v ll_addr Link-layer address + * @ret rc Return status code + */ +int intelvf_mbox_set_mac ( struct intel_nic *intel, const uint8_t *ll_addr ) { + union intelvf_msg msg; + int rc; + + /* Send set MAC address message */ + memset ( &msg, 0, 
sizeof ( msg ) ); + msg.hdr = INTELVF_MSG_TYPE_SET_MAC; + memcpy ( msg.mac.mac, ll_addr, sizeof ( msg.mac.mac ) ); + if ( ( rc = intelvf_mbox_msg ( intel, &msg ) ) != 0 ) { + DBGC ( intel, "INTEL %p set MAC address failed: %s\n", + intel, strerror ( rc ) ); + return rc; + } + + /* Check response */ + if ( ( msg.hdr & INTELVF_MSG_TYPE_MASK ) != INTELVF_MSG_TYPE_SET_MAC ) { + DBGC ( intel, "INTEL %p set MAC address unexpected response:\n", + intel ); + DBGC_HDA ( intel, 0, &msg, sizeof ( msg ) ); + return -EPROTO; + } + + /* Check that we were allowed to set the MAC address */ + if ( ! ( msg.hdr & INTELVF_MSG_ACK ) ) { + DBGC ( intel, "INTEL %p set MAC address refused\n", intel ); + return -EPERM; + } + + return 0; +} + +/** + * Send set MTU message + * + * @v intel Intel device + * @v mtu Maximum packet size + * @ret rc Return status code + */ +int intelvf_mbox_set_mtu ( struct intel_nic *intel, size_t mtu ) { + union intelvf_msg msg; + int rc; + + /* Send set MTU message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.hdr = INTELVF_MSG_TYPE_SET_MTU; + msg.mtu.mtu = mtu; + if ( ( rc = intelvf_mbox_msg ( intel, &msg ) ) != 0 ) { + DBGC ( intel, "INTEL %p set MTU failed: %s\n", + intel, strerror ( rc ) ); + return rc; + } + + /* Check response */ + if ( ( msg.hdr & INTELVF_MSG_TYPE_MASK ) != INTELVF_MSG_TYPE_SET_MTU ) { + DBGC ( intel, "INTEL %p set MTU unexpected response:\n", + intel ); + DBGC_HDA ( intel, 0, &msg, sizeof ( msg ) ); + return -EPROTO; + } + + /* Check that we were allowed to set the MTU */ + if ( ! 
( msg.hdr & INTELVF_MSG_ACK ) ) { + DBGC ( intel, "INTEL %p set MTU refused\n", intel ); + return -EPERM; + } + + return 0; +} diff --git a/src/drivers/net/intelvf.h b/src/drivers/net/intelvf.h new file mode 100644 index 00000000..ffb18e04 --- /dev/null +++ b/src/drivers/net/intelvf.h @@ -0,0 +1,158 @@ +#ifndef _INTELVF_H +#define _INTELVF_H + +/** @file + * + * Intel 10/100/1000 virtual function network card driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "intel.h" + +/** Intel VF BAR size */ +#define INTELVF_BAR_SIZE ( 16 * 1024 ) + +/** Mailbox Control Register */ +#define INTELVF_MBCTRL 0x0c40UL +#define INTELVF_MBCTRL_REQ 0x00000001UL /**< Request for PF ready */ +#define INTELVF_MBCTRL_ACK 0x00000002UL /**< PF message received */ +#define INTELVF_MBCTRL_VFU 0x00000004UL /**< Buffer taken by VF */ +#define INTELVF_MBCTRL_PFU 0x00000008UL /**< Buffer taken to PF */ +#define INTELVF_MBCTRL_PFSTS 0x00000010UL /**< PF wrote a message */ +#define INTELVF_MBCTRL_PFACK 0x00000020UL /**< PF acknowledged message */ +#define INTELVF_MBCTRL_RSTI 0x00000040UL /**< PF reset in progress */ +#define INTELVF_MBCTRL_RSTD 0x00000080UL /**< PF reset complete */ + +/** Mailbox Memory Register Base */ +#define INTELVF_MBMEM 0x0800UL + +/** Reset mailbox message */ +#define INTELVF_MSG_TYPE_RESET 0x00000001UL + +/** Set MAC address mailbox message */ +#define INTELVF_MSG_TYPE_SET_MAC 0x00000002UL + +/** Set MTU mailbox message */ +#define INTELVF_MSG_TYPE_SET_MTU 0x00000005UL + +/** Get queue configuration message */ +#define INTELVF_MSG_TYPE_GET_QUEUES 0x00000009UL + +/** Control ("ping") mailbox message */ +#define INTELVF_MSG_TYPE_CONTROL 0x00000100UL + +/** Message type mask */ +#define INTELVF_MSG_TYPE_MASK 0x0000ffffUL + +/** Message NACK flag */ +#define INTELVF_MSG_NACK 0x40000000UL + +/** Message ACK flag */ +#define INTELVF_MSG_ACK 0x80000000UL + +/** Message is a response */ +#define INTELVF_MSG_RESPONSE ( INTELVF_MSG_ACK | INTELVF_MSG_NACK ) + 
+/** MAC address mailbox message */ +struct intelvf_msg_mac { + /** Message header */ + uint32_t hdr; + /** MAC address */ + uint8_t mac[ETH_ALEN]; + /** Alignment padding */ + uint8_t reserved[ (-ETH_ALEN) & 0x3 ]; +} __attribute__ (( packed )); + +/** Version number mailbox message */ +struct intelvf_msg_version { + /** Message header */ + uint32_t hdr; + /** API version */ + uint32_t version; +} __attribute__ (( packed )); + +/** MTU mailbox message */ +struct intelvf_msg_mtu { + /** Message header */ + uint32_t hdr; + /** Maximum packet size */ + uint32_t mtu; +} __attribute__ (( packed )); + +/** Queue configuration mailbox message (API v1.1+ only) */ +struct intelvf_msg_queues { + /** Message header */ + uint32_t hdr; + /** Maximum number of transmit queues */ + uint32_t tx; + /** Maximum number of receive queues */ + uint32_t rx; + /** VLAN hand-waving thing + * + * This is labelled IXGBE_VF_TRANS_VLAN in the Linux driver. + * + * A comment in the Linux PF driver describes it as "notify VF + * of need for VLAN tag stripping, and correct queue". It + * will be filled with a non-zero value if the PF is enforcing + * the use of a single VLAN tag. It will also be filled with + * a non-zero value if the PF is using multiple traffic + * classes. + * + * The Linux VF driver seems to treat this field as being + * simply the number of traffic classes, and gives it no + * VLAN-related interpretation. + * + * If the PF is enforcing the use of a single VLAN tag for the + * VF, then the VLAN tag will be transparently inserted in + * transmitted packets (via the PFVMVIR register) but will + * still be visible in received packets. The Linux VF driver + * handles this unexpected VLAN tag by simply ignoring any + * unrecognised VLAN tags. + * + * We choose to strip and ignore the VLAN tag if this field + * has a non-zero value. 
+ */ + uint32_t vlan_thing; + /** Default queue */ + uint32_t dflt; +} __attribute__ (( packed )); + +/** Raw mailbox message */ +struct intelvf_msg_raw { + /** Raw dwords */ + uint32_t dword[0]; +} __attribute__ (( packed )); + +/** Mailbox message */ +union intelvf_msg { + /** Message header */ + uint32_t hdr; + /** MAC address message */ + struct intelvf_msg_mac mac; + /** Version number message */ + struct intelvf_msg_version version; + /** MTU message */ + struct intelvf_msg_mtu mtu; + /** Queue configuration message */ + struct intelvf_msg_queues queues; + /** Raw dwords */ + struct intelvf_msg_raw raw; +}; + +/** Maximum time to wait for mailbox message + * + * This is a policy decision. + */ +#define INTELVF_MBOX_MAX_WAIT_MS 500 + +extern int intelvf_mbox_msg ( struct intel_nic *intel, union intelvf_msg *msg ); +extern int intelvf_mbox_poll ( struct intel_nic *intel ); +extern int intelvf_mbox_wait ( struct intel_nic *intel ); +extern int intelvf_mbox_reset ( struct intel_nic *intel, uint8_t *hw_addr ); +extern int intelvf_mbox_set_mac ( struct intel_nic *intel, + const uint8_t *ll_addr ); +extern int intelvf_mbox_set_mtu ( struct intel_nic *intel, size_t mtu ); + +#endif /* _INTELVF_H */ diff --git a/src/drivers/net/intelxl.c b/src/drivers/net/intelxl.c new file mode 100644 index 00000000..08c90bc4 --- /dev/null +++ b/src/drivers/net/intelxl.c @@ -0,0 +1,1829 @@ +/* + * Copyright (C) 2018 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "intelxl.h" + +/** @file + * + * Intel 40 Gigabit Ethernet network card driver + * + */ + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_reset ( struct intelxl_nic *intelxl ) { + uint32_t pfgen_ctrl; + + /* Perform a global software reset */ + pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL ); + writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ), + intelxl->regs + INTELXL_PFGEN_CTRL ); + mdelay ( INTELXL_RESET_DELAY_MS ); + + return 0; +} + +/****************************************************************************** + * + * MAC address + * + ****************************************************************************** + */ + +/** + * Fetch initial MAC address and maximum frame size + * + * @v intelxl Intel device + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxl_fetch_mac ( struct intelxl_nic *intelxl, + struct net_device *netdev ) { + union intelxl_receive_address mac; + uint32_t prtgl_sal; + uint32_t prtgl_sah; + size_t mfs; + + /* Read NVM-loaded address */ + prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL ); + prtgl_sah = readl 
( intelxl->regs + INTELXL_PRTGL_SAH ); + mac.reg.low = cpu_to_le32 ( prtgl_sal ); + mac.reg.high = cpu_to_le32 ( prtgl_sah ); + + /* Check that address is valid */ + if ( ! is_valid_ether_addr ( mac.raw ) ) { + DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n", + intelxl, eth_ntoa ( mac.raw ) ); + return -ENOENT; + } + + /* Copy MAC address */ + DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n", + intelxl, eth_ntoa ( mac.raw ) ); + memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN ); + + /* Get maximum frame size */ + mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah ); + netdev->max_pkt_len = ( mfs - 4 /* CRC */ ); + + return 0; +} + +/****************************************************************************** + * + * MSI-X interrupts + * + ****************************************************************************** + */ + +/** + * Enable MSI-X dummy interrupt + * + * @v intelxl Intel device + * @v pci PCI device + * @ret rc Return status code + */ +int intelxl_msix_enable ( struct intelxl_nic *intelxl, + struct pci_device *pci ) { + int rc; + + /* Enable MSI-X capability */ + if ( ( rc = pci_msix_enable ( pci, &intelxl->msix ) ) != 0 ) { + DBGC ( intelxl, "INTELXL %p could not enable MSI-X: %s\n", + intelxl, strerror ( rc ) ); + return rc; + } + + /* Configure interrupt zero to write to dummy location */ + pci_msix_map ( &intelxl->msix, 0, virt_to_bus ( &intelxl->msg ), 0 ); + + /* Enable dummy interrupt zero */ + pci_msix_unmask ( &intelxl->msix, 0 ); + + return 0; +} + +/** + * Disable MSI-X dummy interrupt + * + * @v intelxl Intel device + * @v pci PCI device + */ +void intelxl_msix_disable ( struct intelxl_nic *intelxl, + struct pci_device *pci ) { + + /* Disable dummy interrupt zero */ + pci_msix_mask ( &intelxl->msix, 0 ); + + /* Disable MSI-X capability */ + pci_msix_disable ( pci, &intelxl->msix ); +} + +/****************************************************************************** + * + * Admin queue + * + 
****************************************************************************** + */ + +/** Admin queue register offsets */ +static const struct intelxl_admin_offsets intelxl_admin_offsets = { + .bal = INTELXL_ADMIN_BAL, + .bah = INTELXL_ADMIN_BAH, + .len = INTELXL_ADMIN_LEN, + .head = INTELXL_ADMIN_HEAD, + .tail = INTELXL_ADMIN_TAIL, +}; + +/** + * Allocate admin queue + * + * @v intelxl Intel device + * @v admin Admin queue + * @ret rc Return status code + */ +static int intelxl_alloc_admin ( struct intelxl_nic *intelxl, + struct intelxl_admin *admin ) { + size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC ); + size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); + + /* Allocate admin queue */ + admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN ); + if ( ! admin->buf ) + return -ENOMEM; + admin->desc = ( ( ( void * ) admin->buf ) + buf_len ); + + DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf " + "[%08llx,%08llx)\n", intelxl, + ( ( admin == &intelxl->command ) ? 
'T' : 'R' ), + ( ( unsigned long long ) virt_to_bus ( admin->desc ) ), + ( ( unsigned long long ) ( virt_to_bus ( admin->desc ) + len ) ), + ( ( unsigned long long ) virt_to_bus ( admin->buf ) ), + ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) + + buf_len ) ) ); + return 0; +} + +/** + * Enable admin queue + * + * @v intelxl Intel device + * @v admin Admin queue + */ +static void intelxl_enable_admin ( struct intelxl_nic *intelxl, + struct intelxl_admin *admin ) { + size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); + const struct intelxl_admin_offsets *regs = admin->regs; + void *admin_regs = ( intelxl->regs + admin->base ); + physaddr_t address; + + /* Initialise admin queue */ + memset ( admin->desc, 0, len ); + + /* Reset head and tail registers */ + writel ( 0, admin_regs + regs->head ); + writel ( 0, admin_regs + regs->tail ); + + /* Reset queue index */ + admin->index = 0; + + /* Program queue address */ + address = virt_to_bus ( admin->desc ); + writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal ); + if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) { + writel ( ( ( ( uint64_t ) address ) >> 32 ), + admin_regs + regs->bah ); + } else { + writel ( 0, admin_regs + regs->bah ); + } + + /* Program queue length and enable queue */ + writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) | + INTELXL_ADMIN_LEN_ENABLE ), + admin_regs + regs->len ); +} + +/** + * Disable admin queue + * + * @v intelxl Intel device + * @v admin Admin queue + */ +static void intelxl_disable_admin ( struct intelxl_nic *intelxl, + struct intelxl_admin *admin ) { + const struct intelxl_admin_offsets *regs = admin->regs; + void *admin_regs = ( intelxl->regs + admin->base ); + + /* Disable queue */ + writel ( 0, admin_regs + regs->len ); +} + +/** + * Free admin queue + * + * @v intelxl Intel device + * @v admin Admin queue + */ +static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused, + struct intelxl_admin *admin ) { + size_t buf_len 
= ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC ); + size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); + + /* Free queue */ + free_dma ( admin->buf, ( buf_len + len ) ); +} + +/** + * Get next admin command queue descriptor + * + * @v intelxl Intel device + * @ret cmd Command descriptor + */ +struct intelxl_admin_descriptor * +intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) { + struct intelxl_admin *admin = &intelxl->command; + struct intelxl_admin_descriptor *cmd; + + /* Get and initialise next descriptor */ + cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ]; + memset ( cmd, 0, sizeof ( *cmd ) ); + return cmd; +} + +/** + * Get next admin command queue data buffer + * + * @v intelxl Intel device + * @ret buf Data buffer + */ +union intelxl_admin_buffer * +intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) { + struct intelxl_admin *admin = &intelxl->command; + union intelxl_admin_buffer *buf; + + /* Get next data buffer */ + buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ]; + memset ( buf, 0, sizeof ( *buf ) ); + return buf; +} + +/** + * Initialise admin event queue descriptor + * + * @v intelxl Intel device + * @v index Event queue index + */ +static void intelxl_admin_event_init ( struct intelxl_nic *intelxl, + unsigned int index ) { + struct intelxl_admin *admin = &intelxl->event; + struct intelxl_admin_descriptor *evt; + union intelxl_admin_buffer *buf; + uint64_t address; + + /* Initialise descriptor */ + evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ]; + buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ]; + address = virt_to_bus ( buf ); + evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ); + evt->len = cpu_to_le16 ( sizeof ( *buf ) ); + evt->params.buffer.high = cpu_to_le32 ( address >> 32 ); + evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL ); +} + +/** + * Issue admin queue command + * + * @v intelxl Intel device + * @ret rc Return status code + */ +int 
intelxl_admin_command ( struct intelxl_nic *intelxl ) { + struct intelxl_admin *admin = &intelxl->command; + const struct intelxl_admin_offsets *regs = admin->regs; + void *admin_regs = ( intelxl->regs + admin->base ); + struct intelxl_admin_descriptor *cmd; + union intelxl_admin_buffer *buf; + uint64_t address; + uint32_t cookie; + unsigned int index; + unsigned int tail; + unsigned int i; + int rc; + + /* Get next queue entry */ + index = admin->index++; + tail = ( admin->index % INTELXL_ADMIN_NUM_DESC ); + cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ]; + buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ]; + DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x", + intelxl, index, le16_to_cpu ( cmd->opcode ) ); + if ( cmd->vopcode ) + DBGC2 ( intelxl, "/%#08x", le32_to_cpu ( cmd->vopcode ) ); + DBGC2 ( intelxl, ":\n" ); + + /* Sanity checks */ + assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) ); + assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) ); + assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) ); + assert ( cmd->ret == 0 ); + + /* Populate data buffer address if applicable */ + if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) { + address = virt_to_bus ( buf ); + cmd->params.buffer.high = cpu_to_le32 ( address >> 32 ); + cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL ); + } + + /* Populate cookie, if not being (ab)used for VF opcode */ + if ( ! 
cmd->vopcode ) + cmd->cookie = cpu_to_le32 ( index ); + + /* Record cookie */ + cookie = cmd->cookie; + + /* Post command descriptor */ + DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) ); + if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) { + DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf, + le16_to_cpu ( cmd->len ) ); + } + wmb(); + writel ( tail, admin_regs + regs->tail ); + + /* Wait for completion */ + for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) { + + /* If response is not complete, delay 1ms and retry */ + if ( ! ( cmd->flags & INTELXL_ADMIN_FL_DD ) ) { + mdelay ( 1 ); + continue; + } + DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n", + intelxl, index ); + DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, + sizeof ( *cmd ) ); + + /* Check for cookie mismatch */ + if ( cmd->cookie != cookie ) { + DBGC ( intelxl, "INTELXL %p admin command %#x bad " + "cookie %#x\n", intelxl, index, + le32_to_cpu ( cmd->cookie ) ); + rc = -EPROTO; + goto err; + } + + /* Check for errors */ + if ( cmd->ret != 0 ) { + DBGC ( intelxl, "INTELXL %p admin command %#x error " + "%d\n", intelxl, index, + le16_to_cpu ( cmd->ret ) ); + rc = -EIO; + goto err; + } + + /* Success */ + return 0; + } + + rc = -ETIMEDOUT; + DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n", + intelxl, index ); + err: + DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) ); + return rc; +} + +/** + * Get firmware version + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_version ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_version_params *version; + unsigned int api; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION ); + version = &cmd->params.version; + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + api 
= le16_to_cpu ( version->api.major ); + DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n", + intelxl, le16_to_cpu ( version->firmware.major ), + le16_to_cpu ( version->firmware.minor ), + api, le16_to_cpu ( version->api.minor ) ); + + /* Check for API compatibility */ + if ( api > INTELXL_ADMIN_API_MAJOR ) { + DBGC ( intelxl, "INTELXL %p unsupported API v%d\n", + intelxl, api ); + return -ENOTSUP; + } + + return 0; +} + +/** + * Report driver version + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_driver_params *driver; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) ); + driver = &cmd->params.driver; + driver->major = product_major_version; + driver->minor = product_minor_version; + buf = intelxl_admin_command_buffer ( intelxl ); + snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s", + ( product_name[0] ? 
product_name : product_short_name ) ); + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Shutdown admin queues + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_shutdown_params *shutdown; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN ); + shutdown = &cmd->params.shutdown; + shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING; + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Clear PXE mode + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_clear_pxe ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_clear_pxe_params *pxe; + uint32_t gllan_rctl_0; + int rc; + + /* Do nothing if device is already out of PXE mode */ + gllan_rctl_0 = readl ( intelxl->regs + INTELXL_GLLAN_RCTL_0 ); + if ( ! 
( gllan_rctl_0 & INTELXL_GLLAN_RCTL_0_PXE_MODE ) ) { + DBGC2 ( intelxl, "INTELXL %p already in non-PXE mode\n", + intelxl ); + return 0; + } + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_CLEAR_PXE ); + pxe = &cmd->params.pxe; + pxe->magic = INTELXL_ADMIN_CLEAR_PXE_MAGIC; + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Get switch configuration + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_switch_params *sw; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) ); + sw = &cmd->params.sw; + buf = intelxl_admin_command_buffer ( intelxl ); + + /* Get each configuration in turn */ + do { + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + /* Dump raw configuration */ + DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n", + intelxl, le16_to_cpu ( buf->sw.cfg.seid ) ); + DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) ); + + /* Parse response */ + if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) { + intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid ); + DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x " + "downlink %#04x conn %#02x\n", intelxl, + intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ), + le16_to_cpu ( buf->sw.cfg.downlink ), + buf->sw.cfg.connection ); + } + + } while ( sw->next ); + + /* Check that we found a VSI */ + if ( ! 
intelxl->vsi ) { + DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl ); + return -ENOENT; + } + + return 0; +} + +/** + * Get VSI parameters + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_vsi_params *vsi; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) ); + vsi = &cmd->params.vsi; + vsi->vsi = cpu_to_le16 ( intelxl->vsi ); + buf = intelxl_admin_command_buffer ( intelxl ); + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + /* Parse response */ + intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] ); + intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] ); + DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n", + intelxl, intelxl->vsi, intelxl->queue, intelxl->qset ); + + return 0; +} + +/** + * Set VSI promiscuous modes + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_promisc_params *promisc; + uint16_t flags; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC ); + flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST | + INTELXL_ADMIN_PROMISC_FL_MULTICAST | + INTELXL_ADMIN_PROMISC_FL_BROADCAST | + INTELXL_ADMIN_PROMISC_FL_VLAN ); + promisc = &cmd->params.promisc; + promisc->flags = cpu_to_le16 ( flags ); + promisc->valid = cpu_to_le16 ( flags ); + promisc->vsi = cpu_to_le16 ( intelxl->vsi ); + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + return 0; +} + +/** + * 
Restart autonegotiation + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_autoneg_params *autoneg; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG ); + autoneg = &cmd->params.autoneg; + autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART | + INTELXL_ADMIN_AUTONEG_FL_ENABLE ); + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Get link status + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxl_admin_link ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_link_params *link; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK ); + link = &cmd->params.link; + link->notify = INTELXL_ADMIN_LINK_NOTIFY; + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + return rc; + DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n", + intelxl, link->phy, link->speed, link->status ); + + /* Update network device */ + if ( link->status & INTELXL_ADMIN_LINK_UP ) { + netdev_link_up ( netdev ); + } else { + netdev_link_down ( netdev ); + } + + return 0; +} + +/** + * Handle virtual function event (when VF driver is not present) + * + * @v netdev Network device + * @v evt Admin queue event descriptor + * @v buf Admin queue event data buffer + */ +__weak void +intelxlvf_admin_event ( struct net_device *netdev __unused, + struct intelxl_admin_descriptor *evt __unused, + union intelxl_admin_buffer *buf __unused ) { + + /* Nothing to do */ +} + +/** + * Refill admin event queue + * + * @v intelxl Intel device + */ +static 
void intelxl_refill_admin ( struct intelxl_nic *intelxl ) { + struct intelxl_admin *admin = &intelxl->event; + const struct intelxl_admin_offsets *regs = admin->regs; + void *admin_regs = ( intelxl->regs + admin->base ); + unsigned int tail; + + /* Update tail pointer */ + tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) % + INTELXL_ADMIN_NUM_DESC ); + wmb(); + writel ( tail, admin_regs + regs->tail ); +} + +/** + * Poll admin event queue + * + * @v netdev Network device + */ +void intelxl_poll_admin ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin *admin = &intelxl->event; + struct intelxl_admin_descriptor *evt; + union intelxl_admin_buffer *buf; + + /* Check for events */ + while ( 1 ) { + + /* Get next event descriptor and data buffer */ + evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ]; + buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ]; + + /* Stop if descriptor is not yet completed */ + if ( ! ( evt->flags & INTELXL_ADMIN_FL_DD ) ) + return; + DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n", + intelxl, admin->index ); + DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt, + sizeof ( *evt ) ); + if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) { + DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf, + le16_to_cpu ( evt->len ) ); + } + + /* Handle event */ + switch ( evt->opcode ) { + case cpu_to_le16 ( INTELXL_ADMIN_LINK ): + intelxl_admin_link ( netdev ); + break; + case cpu_to_le16 ( INTELXL_ADMIN_SEND_TO_VF ): + intelxlvf_admin_event ( netdev, evt, buf ); + break; + default: + DBGC ( intelxl, "INTELXL %p admin event %#x " + "unrecognised opcode %#04x\n", intelxl, + admin->index, le16_to_cpu ( evt->opcode ) ); + break; + } + + /* Reset descriptor and refill queue */ + intelxl_admin_event_init ( intelxl, admin->index ); + admin->index++; + intelxl_refill_admin ( intelxl ); + } +} + +/** + * Open admin queues + * + * @v intelxl Intel device + * @ret rc Return status code + */ +int 
intelxl_open_admin ( struct intelxl_nic *intelxl ) { + int rc; + + /* Allocate admin event queue */ + if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->event ) ) != 0 ) + goto err_alloc_event; + + /* Allocate admin command queue */ + if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->command ) ) != 0 ) + goto err_alloc_command; + + /* (Re)open admin queues */ + intelxl_reopen_admin ( intelxl ); + + /* Get firmware version */ + if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 ) + goto err_version; + + /* Report driver version */ + if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 ) + goto err_driver; + + return 0; + + err_driver: + err_version: + intelxl_disable_admin ( intelxl, &intelxl->command ); + intelxl_disable_admin ( intelxl, &intelxl->event ); + intelxl_free_admin ( intelxl, &intelxl->command ); + err_alloc_command: + intelxl_free_admin ( intelxl, &intelxl->event ); + err_alloc_event: + return rc; +} + +/** + * Reopen admin queues (after virtual function reset) + * + * @v intelxl Intel device + */ +void intelxl_reopen_admin ( struct intelxl_nic *intelxl ) { + unsigned int i; + + /* Enable admin event queue */ + intelxl_enable_admin ( intelxl, &intelxl->event ); + + /* Enable admin command queue */ + intelxl_enable_admin ( intelxl, &intelxl->command ); + + /* Initialise all admin event queue descriptors */ + for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ ) + intelxl_admin_event_init ( intelxl, i ); + + /* Post all descriptors to event queue */ + intelxl_refill_admin ( intelxl ); +} + +/** + * Close admin queues + * + * @v intelxl Intel device + */ +void intelxl_close_admin ( struct intelxl_nic *intelxl ) { + + /* Shut down admin queues */ + intelxl_admin_shutdown ( intelxl ); + + /* Disable admin queues */ + intelxl_disable_admin ( intelxl, &intelxl->command ); + intelxl_disable_admin ( intelxl, &intelxl->event ); + + /* Free admin queues */ + intelxl_free_admin ( intelxl, &intelxl->command ); + intelxl_free_admin ( intelxl, &intelxl->event ); +} 

/******************************************************************************
 *
 * Descriptor rings
 *
 ******************************************************************************
 */

/**
 * Allocate descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
			 struct intelxl_ring *ring ) {
	physaddr_t address;
	int rc;

	/* Allocate descriptor ring (DMA-coherent, aligned) */
	ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
	if ( ! ring->desc.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	address = virt_to_bus ( ring->desc.raw );

	/* Initialise descriptor ring */
	memset ( ring->desc.raw, 0, ring->len );

	/* Reset tail pointer */
	writel ( 0, ( intelxl->regs + ring->tail ) );

	/* Reset counters */
	ring->prod = 0;
	ring->cons = 0;

	DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
	       intelxl, ( ring->reg + ring->tail ),
	       ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + ring->len ) );

	return 0;

	/* NOTE(review): this free_dma() is unreachable (no label
	 * precedes it); it appears to be kept for error-path symmetry
	 * with the allocation above.
	 */
	free_dma ( ring->desc.raw, ring->len );
 err_alloc:
	return rc;
}

/**
 * Free descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 */
void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
			 struct intelxl_ring *ring ) {

	/* Free descriptor ring */
	free_dma ( ring->desc.raw, ring->len );
	ring->desc.raw = NULL;
}

/**
 * Dump queue context (for debugging)
 *
 * @v intelxl		Intel device
 * @v op		Context operation
 * @v len		Size of context
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
	struct intelxl_context_line line;
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int index;
	unsigned int i;

	/* Do nothing unless debug output is enabled */
	if ( ! DBG_EXTRA )
		return;

	/* Dump context, one context line at a time */
	DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
	for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

		/* Start context read operation for this sub-line */
		queue = ( intelxl->base + intelxl->queue );
		pfcm_lanctxctl =
			( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			  INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			  INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
		writel ( pfcm_lanctxctl,
			 intelxl->regs + INTELXL_PFCM_LANCTXCTL );

		/* Wait for operation to complete (best-effort; a
		 * timeout here simply dumps whatever data is present)
		 */
		for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

			/* Check if operation is complete */
			pfcm_lanctxstat = readl ( intelxl->regs +
						  INTELXL_PFCM_LANCTXSTAT );
			if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
				break;

			/* Delay */
			mdelay ( 1 );
		}

		/* Read context data */
		for ( i = 0 ; i < ( sizeof ( line ) /
				    sizeof ( line.raw[0] ) ) ; i++ ) {
			line.raw[i] = readl ( intelxl->regs +
					      INTELXL_PFCM_LANCTXDATA ( i ) );
		}
		DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
			    &line, sizeof ( line ) );
	}
}

/**
 * Program queue context line
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
				  struct intelxl_context_line *line,
				  unsigned int index, uint32_t op ) {
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int i;

	/* Write context data (stored little-endian; le32_to_cpu
	 * converts to host order before writel's own conversion)
	 */
	for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
		writel ( le32_to_cpu ( line->raw[i] ),
			 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
	}

	/* Start context write operation */
	queue = ( intelxl->base + intelxl->queue );
	pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			   INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			   INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
	writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

	/* Wait for operation to complete */
	for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

		/* Check if operation is complete */
		pfcm_lanctxstat = readl ( intelxl->regs +
					  INTELXL_PFCM_LANCTXSTAT );
		if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
	       intelxl, pfcm_lanctxctl );
	return -ETIMEDOUT;
}

/**
 * Program queue context
 *
 * @v intelxl		Intel device
 * @v line		Queue context lines
 * @v len		Size of context
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context ( struct intelxl_nic *intelxl,
			     struct intelxl_context_line *line,
			     size_t len, uint32_t op ) {
	unsigned int index;
	int rc;

	DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
		intelxl, op, len );
	DBGC2_HDA ( intelxl, 0, line, len );

	/* Program one line at a time */
	for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
		if ( ( rc = intelxl_context_line ( intelxl, line++, index,
						   op ) ) != 0 )
			return rc;
	}

	return 0;
}

/**
 * Program transmit queue context
 *
 * @v intelxl		Intel device
 * @v address		Descriptor ring base address
 * @ret rc		Return status code
 */
static int intelxl_context_tx ( struct intelxl_nic *intelxl,
				physaddr_t address ) {
	union {
		struct intelxl_context_tx tx;
		struct intelxl_context_line line;
	} ctx;
	int rc;

	/* Initialise context */
	memset ( &ctx, 0, sizeof ( ctx ) );
	ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
	ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
	ctx.tx.count =
		cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
	ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );

	/* Program context */
	if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
				      INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
		return rc;

	return 0;
}

/**
 * Program receive queue context
 *
 * @v intelxl		Intel device
 * @v address		Descriptor ring base address
 * @ret rc		Return status code
 */
static int intelxl_context_rx ( struct intelxl_nic *intelxl,
				physaddr_t address ) {
	union {
		struct intelxl_context_rx rx;
		struct intelxl_context_line line;
	} ctx;
	uint64_t base_count;
	int rc;

	/* Initialise context */
	memset ( &ctx, 0, sizeof ( ctx ) );
	base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
	ctx.rx.base_count = cpu_to_le64 ( base_count );
	ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
	ctx.rx.flags = ( INTELXL_CTX_RX_FL_DSIZE | INTELXL_CTX_RX_FL_CRCSTRIP );
	ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );

	/* Program context */
	if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
				      INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
		return rc;

	return 0;
}

/**
 * Enable descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	uint32_t qxx_ena;

	/* Enable ring, then allow a fixed delay for the enable to
	 * take effect before checking the status bit
	 */
	writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
	udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
	qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
	if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
		DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
		       "%#08x\n", intelxl, ring->reg, qxx_ena );
		return -EIO;
	}

	return 0;
}

/**
 * Disable descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
				  struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	uint32_t qxx_ena;
	unsigned int i;

	/* Disable ring */
	writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );

	/* Wait for ring to be disabled */
	for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {

		/* Check if ring is disabled */
		qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
		if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
	       "%#08x\n", intelxl, ring->reg, qxx_ena );
	return -ETIMEDOUT;
}

/**
 * Create descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	physaddr_t address;
	int rc;

	/* Allocate descriptor ring */
	if ( ( rc = intelxl_alloc_ring ( intelxl, ring ) ) != 0 )
		goto err_alloc;

	/* Program queue context via the ring's context method
	 * (intelxl_context_tx or intelxl_context_rx)
	 */
	address = virt_to_bus ( ring->desc.raw );
	if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
		goto err_context;

	/* Enable ring */
	if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
		goto err_enable;

	return 0;

	/* NOTE(review): this disable call is unreachable (no label
	 * precedes it); kept for error-path symmetry.
	 */
	intelxl_disable_ring ( intelxl, ring );
 err_enable:
 err_context:
	intelxl_free_ring ( intelxl, ring );
 err_alloc:
	return rc;
}

/**
 * Destroy descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 */
static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
				   struct intelxl_ring *ring ) {
	int rc;

	/* Disable ring */
	if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
		/* Leak memory; there's nothing else we can do: the
		 * hardware may still be DMAing to/from the ring.
		 */
		return;
	}

	/* Free descriptor ring */
	intelxl_free_ring ( intelxl, ring );
}

/**
 * Refill receive descriptor ring
 *
 * @v intelxl		Intel device
 */
static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
	struct intelxl_rx_data_descriptor *rx;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	unsigned int rx_tail;
	physaddr_t address;
	unsigned int refilled = 0;

	/* Refill ring up to the fill watermark */
	while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( intelxl->mfs );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Get next receive descriptor */
		rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
		rx = &intelxl->rx.desc.rx[rx_idx].data;

		/* Populate receive descriptor */
		address = virt_to_bus ( iobuf->data );
		rx->address = cpu_to_le64 ( address );
		rx->flags = 0;

		/* Record I/O buffer */
		assert ( intelxl->rx_iobuf[rx_idx] == NULL );
		intelxl->rx_iobuf[rx_idx] = iobuf;

		DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
			rx_idx, ( ( unsigned long long ) address ),
			( ( unsigned long long ) address + intelxl->mfs ) );
		refilled++;
	}

	/* Push descriptors to card, if applicable; wmb() ensures the
	 * descriptor writes are visible before the tail update
	 */
	if ( refilled ) {
		wmb();
		rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
		writel ( rx_tail, ( intelxl->regs + intelxl->rx.tail ) );
	}
}

/**
 * Discard unused receive I/O buffers
 *
 * @v intelxl		Intel device
 */
void intelxl_empty_rx ( struct intelxl_nic *intelxl ) {
	unsigned int i;

	/* Discard any unused receive buffers */
	for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
		if ( intelxl->rx_iobuf[i] )
			free_iob ( intelxl->rx_iobuf[i] );
		intelxl->rx_iobuf[i] = NULL;
	}
}

/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */

/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	union intelxl_receive_address mac;
	unsigned int queue;
	uint32_t prtgl_sal;
	uint32_t prtgl_sah;
	int rc;

	/* Calculate maximum frame size, rounded up to the alignment */
	intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
			   INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );

	/* Program MAC address and maximum frame size */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
	prtgl_sal = le32_to_cpu ( mac.reg.low );
	prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
		      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
	writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
	writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

	/* Associate transmit queue to PF */
	writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
		   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

	/* Clear transmit pre queue disable */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

	/* Reset transmit queue head */
	writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

	/* Create receive descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
		goto err_create_rx;

	/* Create transmit descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
		goto err_create_tx;

	/* Fill receive ring */
	intelxl_refill_rx ( intelxl );

	/* Restart autonegotiation */
	intelxl_admin_autoneg ( intelxl );

	/* Update link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* NOTE(review): the pre-disable sequence below is unreachable
	 * (no label precedes it); kept for error-path symmetry with
	 * intelxl_close().
	 */
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
	intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
	intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
	return rc;
}

/**
 * Close network device
 *
 * @v netdev		Network device
 */
static void intelxl_close ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	unsigned int queue;

	/* Dump contexts (for debugging) */
	intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
			       sizeof ( struct intelxl_context_tx ) );
	intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
			       sizeof ( struct intelxl_context_rx ) );

	/* Pre-disable transmit queue, then allow hardware time to
	 * quiesce before tearing down the ring
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );

	/* Destroy transmit descriptor ring */
	intelxl_destroy_ring ( intelxl, &intelxl->tx );

	/* Destroy receive descriptor ring */
	intelxl_destroy_ring ( intelxl, &intelxl->rx );

	/* Discard any unused receive buffers */
	intelxl_empty_rx ( intelxl );
}

/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
	struct intelxl_nic *intelxl = netdev->priv;
	struct intelxl_tx_data_descriptor *tx;
	unsigned int tx_idx;
	unsigned int tx_tail;
	physaddr_t address;
	size_t len;

	/* Get next transmit descriptor */
	if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
		DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
		       intelxl );
		return -ENOBUFS;
	}
	tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
	/* Tail is computed after the producer increment: it points one
	 * past the descriptor just populated
	 */
	tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
	tx = &intelxl->tx.desc.tx[tx_idx].data;

	/* Populate transmit descriptor */
	address = virt_to_bus ( iobuf->data );
	len = iob_len ( iobuf );
	tx->address = cpu_to_le64 ( address );
	tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
	tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
				  INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
	wmb();

	/* Notify card that there are packets ready to transmit */
	writel ( tx_tail, ( intelxl->regs + intelxl->tx.tail ) );

	DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
		( ( unsigned long long ) address ),
		( ( unsigned long long ) address + len ) );
	return 0;
}

/**
 * Poll for completed packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll_tx ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	struct intelxl_tx_writeback_descriptor *tx_wb;
	unsigned int tx_idx;

	/* Check for completed packets */
	while ( intelxl->tx.cons != intelxl->tx.prod ) {

		/* Get next transmit descriptor */
		tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
		tx_wb = &intelxl->tx.desc.tx[tx_idx].wb;

		/* Stop if descriptor is still in use.
		 *
		 * NOTE(review): no byte-swap is applied here, unlike
		 * the RX DD check; presumably tx_wb->flags is a
		 * single byte — confirm against the descriptor
		 * definition.
		 */
		if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
			return;
		DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
			intelxl, tx_idx );

		/* Complete TX descriptor */
		netdev_tx_complete_next ( netdev );
		intelxl->tx.cons++;
	}
}

/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll_rx ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	struct intelxl_rx_writeback_descriptor *rx_wb;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	unsigned int tag;
	size_t len;

	/* Check for received packets */
	while ( intelxl->rx.cons != intelxl->rx.prod ) {

		/* Get next receive descriptor */
		rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
		rx_wb = &intelxl->rx.desc.rx[rx_idx].wb;

		/* Stop if descriptor is still in use */
		if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
			return;

		/* Populate I/O buffer */
		iobuf = intelxl->rx_iobuf[rx_idx];
		intelxl->rx_iobuf[rx_idx] = NULL;
		len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
		iob_put ( iobuf, len );

		/* Find VLAN device, if applicable */
		if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
			tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
		} else {
			tag = 0;
		}

		/* Hand off to network stack */
		if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
			DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
			       "flags %08x)\n", intelxl, rx_idx, len,
			       le32_to_cpu ( rx_wb->flags ) );
			vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
		} else {
			DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
				"%zd)\n", intelxl, rx_idx, len );
			vlan_netdev_rx ( netdev, tag, iobuf );
		}
		intelxl->rx.cons++;
	}
}

/**
 * Poll for completed and received packets
 *
 * @v netdev		Network device
 */
void intelxl_poll ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;

	/* Poll for completed packets */
	intelxl_poll_tx ( netdev );

	/* Poll for received packets */
	intelxl_poll_rx ( netdev );

	/* Poll for admin events */
	intelxl_poll_admin ( netdev );

	/* Refill RX ring */
	intelxl_refill_rx ( intelxl );

	/* Rearm interrupt, since otherwise receive descriptors will
	 * be written back only after a complete cacheline (four
	 * packets) have been received.
	 *
	 * There is unfortunately no efficient way to determine
	 * whether or not rearming the interrupt is necessary.  If we
	 * are running inside a hypervisor (e.g. using a VF or PF as a
	 * passed-through PCI device), then the MSI-X write is
	 * redirected by the hypervisor to the real host APIC and the
	 * host ISR then raises an interrupt within the guest.  We
	 * therefore cannot poll the nominal MSI-X target location to
	 * watch for the value being written.  We could read from the
	 * INT_DYN_CTL register, but this is even less efficient than
	 * just unconditionally rearming the interrupt.
	 */
	writel ( INTELXL_INT_DYN_CTL_INTENA, intelxl->regs + intelxl->intr );
}

/** Network device operations */
static struct net_device_operations intelxl_operations = {
	.open		= intelxl_open,
	.close		= intelxl_close,
	.transmit	= intelxl_transmit,
	.poll		= intelxl_poll,
};

/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	uint32_t pfgen_portnum;
	uint32_t pflan_qalloc;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxl_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	intelxl->pf = PCI_FUNC ( pci->busdevfn );
	intelxl->intr = INTELXL_PFINT_DYN_CTL0;
	intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
			     &intelxl_admin_offsets );
	intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
			     &intelxl_admin_offsets );
	intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			    sizeof ( intelxl->tx.desc.tx[0] ),
			    intelxl_context_tx );
	intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			    sizeof ( intelxl->rx.desc.rx[0] ),
			    intelxl_context_rx );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = pci_ioremap ( pci, pci->membase, INTELXL_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
		goto err_reset;

	/* Get port number and base queue number */
	pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
	intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
	pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
	intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
	DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
	       intelxl, intelxl->pf, intelxl->port, intelxl->base,
	       INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

	/* Fetch MAC address and maximum frame size */
	if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
		goto err_fetch_mac;

	/* Enable MSI-X dummy interrupt */
	if ( ( rc = intelxl_msix_enable ( intelxl, pci ) ) != 0 )
		goto err_msix;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Clear PXE mode */
	if ( ( rc = intelxl_admin_clear_pxe ( intelxl ) ) != 0 )
		goto err_admin_clear_pxe;

	/* Get switch configuration */
	if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
		goto err_admin_switch;

	/* Get VSI configuration */
	if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
		goto err_admin_vsi;

	/* Configure switch for promiscuous mode */
	if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
		goto err_admin_promisc;

	/* Configure queue register addresses (known only after the
	 * VSI query has determined the queue number)
	 */
	intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
	intelxl->tx.tail = ( intelxl->tx.reg + INTELXL_QXX_TAIL );
	intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );
	intelxl->rx.tail = ( intelxl->rx.reg + INTELXL_QXX_TAIL );

	/* Configure interrupt causes */
	writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
		   INTELXL_QINT_TQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
		   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
		   INTELXL_QINT_RQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
		   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
		 intelxl->regs + INTELXL_PFINT_LNKLST0 );
	writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
		 intelxl->regs + INTELXL_PFINT_ICR0_ENA );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Set initial link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* NOTE(review): this unregister call is unreachable (no label
	 * precedes it); kept for error-path symmetry.
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
 err_admin_clear_pxe:
	intelxl_close_admin ( intelxl );
 err_open_admin:
	intelxl_msix_disable ( intelxl, pci );
 err_msix:
 err_fetch_mac:
	intelxl_reset ( intelxl );
 err_reset:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void intelxl_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct intelxl_nic *intelxl = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Close admin queues */
	intelxl_close_admin ( intelxl );

	/* Disable MSI-X dummy interrupt */
	intelxl_msix_disable ( intelxl, pci );

	/* Reset the NIC */
	intelxl_reset ( intelxl );

	/* Free network device */
	iounmap ( intelxl->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}

/** PCI device IDs */
static struct pci_device_id intelxl_nics[] = {
	PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
	PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
	PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
	PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};

/** PCI driver */
struct pci_driver intelxl_driver __pci_driver = {
	.ids = intelxl_nics,
	.id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
	.probe =
intelxl_probe, + .remove = intelxl_remove, +}; diff --git a/src/drivers/net/intelxl.h b/src/drivers/net/intelxl.h new file mode 100644 index 00000000..80586cef --- /dev/null +++ b/src/drivers/net/intelxl.h @@ -0,0 +1,1091 @@ +#ifndef _INTELX_H +#define _INTELX_H + +/** @file + * + * Intel 40 Gigabit Ethernet network card driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +struct intelxl_nic; + +/** BAR size */ +#define INTELXL_BAR_SIZE 0x200000 + +/** Alignment + * + * No data structure requires greater than 256 byte alignment. + */ +#define INTELXL_ALIGN 256 + +/****************************************************************************** + * + * Admin queue + * + ****************************************************************************** + */ + +/** PF Admin Command Queue register block */ +#define INTELXL_ADMIN_CMD 0x080000 + +/** PF Admin Event Queue register block */ +#define INTELXL_ADMIN_EVT 0x080080 + +/** Admin Queue Base Address Low Register (offset) */ +#define INTELXL_ADMIN_BAL 0x000 + +/** Admin Queue Base Address High Register (offset) */ +#define INTELXL_ADMIN_BAH 0x100 + +/** Admin Queue Length Register (offset) */ +#define INTELXL_ADMIN_LEN 0x200 +#define INTELXL_ADMIN_LEN_LEN(x) ( (x) << 0 ) /**< Queue length */ +#define INTELXL_ADMIN_LEN_ENABLE 0x80000000UL /**< Queue enable */ + +/** Admin Queue Head Register (offset) */ +#define INTELXL_ADMIN_HEAD 0x300 + +/** Admin Queue Tail Register (offset) */ +#define INTELXL_ADMIN_TAIL 0x400 + +/** Admin queue register offsets + * + * The physical and virtual function register maps have no discernible + * relationship. 
+ */ +struct intelxl_admin_offsets { + /** Base Address Low Register offset */ + unsigned int bal; + /** Base Address High Register offset */ + unsigned int bah; + /** Length Register offset */ + unsigned int len; + /** Head Register offset */ + unsigned int head; + /** Tail Register offset */ + unsigned int tail; +}; + +/** Admin queue data buffer command parameters */ +struct intelxl_admin_buffer_params { + /** Reserved */ + uint8_t reserved[8]; + /** Buffer address high */ + uint32_t high; + /** Buffer address low */ + uint32_t low; +} __attribute__ (( packed )); + +/** Admin queue Get Version command */ +#define INTELXL_ADMIN_VERSION 0x0001 + +/** Admin queue version number */ +struct intelxl_admin_version { + /** Major version number */ + uint16_t major; + /** Minor version number */ + uint16_t minor; +} __attribute__ (( packed )); + +/** Admin queue Get Version command parameters */ +struct intelxl_admin_version_params { + /** ROM version */ + uint32_t rom; + /** Firmware build ID */ + uint32_t build; + /** Firmware version */ + struct intelxl_admin_version firmware; + /** API version */ + struct intelxl_admin_version api; +} __attribute__ (( packed )); + +/** Admin queue Driver Version command */ +#define INTELXL_ADMIN_DRIVER 0x0002 + +/** Admin queue Driver Version command parameters */ +struct intelxl_admin_driver_params { + /** Driver version */ + uint8_t major; + /** Minor version */ + uint8_t minor; + /** Build version */ + uint8_t build; + /** Sub-build version */ + uint8_t sub; + /** Reserved */ + uint8_t reserved[4]; + /** Data buffer address */ + uint64_t address; +} __attribute__ (( packed )); + +/** Admin queue Driver Version data buffer */ +struct intelxl_admin_driver_buffer { + /** Driver name */ + char name[32]; +} __attribute__ (( packed )); + +/** Admin queue Shutdown command */ +#define INTELXL_ADMIN_SHUTDOWN 0x0003 + +/** Admin queue Shutdown command parameters */ +struct intelxl_admin_shutdown_params { + /** Driver unloading */ + uint8_t 
unloading; + /** Reserved */ + uint8_t reserved[15]; +} __attribute__ (( packed )); + +/** Driver is unloading */ +#define INTELXL_ADMIN_SHUTDOWN_UNLOADING 0x01 + +/** Admin queue Clear PXE Mode command */ +#define INTELXL_ADMIN_CLEAR_PXE 0x0110 + +/** Admin queue Clear PXE Mode command parameters */ +struct intelxl_admin_clear_pxe_params { + /** Magic value */ + uint8_t magic; + /** Reserved */ + uint8_t reserved[15]; +} __attribute__ (( packed )); + +/** Clear PXE Mode magic value */ +#define INTELXL_ADMIN_CLEAR_PXE_MAGIC 0x02 + +/** Admin queue Get Switch Configuration command */ +#define INTELXL_ADMIN_SWITCH 0x0200 + +/** Switching element configuration */ +struct intelxl_admin_switch_config { + /** Switching element type */ + uint8_t type; + /** Revision */ + uint8_t revision; + /** Switching element ID */ + uint16_t seid; + /** Uplink switching element ID */ + uint16_t uplink; + /** Downlink switching element ID */ + uint16_t downlink; + /** Reserved */ + uint8_t reserved_b[3]; + /** Connection type */ + uint8_t connection; + /** Reserved */ + uint8_t reserved_c[2]; + /** Element specific information */ + uint16_t info; +} __attribute__ (( packed )); + +/** Virtual Station Inferface element type */ +#define INTELXL_ADMIN_SWITCH_TYPE_VSI 19 + +/** Admin queue Get Switch Configuration command parameters */ +struct intelxl_admin_switch_params { + /** Starting switching element identifier */ + uint16_t next; + /** Reserved */ + uint8_t reserved[6]; + /** Data buffer address */ + uint64_t address; +} __attribute__ (( packed )); + +/** Admin queue Get Switch Configuration data buffer */ +struct intelxl_admin_switch_buffer { + /** Number of switching elements reported */ + uint16_t count; + /** Total number of switching elements */ + uint16_t total; + /** Reserved */ + uint8_t reserved_a[12]; + /** Switch configuration */ + struct intelxl_admin_switch_config cfg; +} __attribute__ (( packed )); + +/** Admin queue Get VSI Parameters command */ +#define 
INTELXL_ADMIN_VSI 0x0212 + +/** Admin queue Get VSI Parameters command parameters */ +struct intelxl_admin_vsi_params { + /** VSI switching element ID */ + uint16_t vsi; + /** Reserved */ + uint8_t reserved[6]; + /** Data buffer address */ + uint64_t address; +} __attribute__ (( packed )); + +/** Admin queue Get VSI Parameters data buffer */ +struct intelxl_admin_vsi_buffer { + /** Reserved */ + uint8_t reserved_a[30]; + /** Queue numbers */ + uint16_t queue[16]; + /** Reserved */ + uint8_t reserved_b[34]; + /** Queue set handles for each traffic class */ + uint16_t qset[8]; + /** Reserved */ + uint8_t reserved_c[16]; +} __attribute__ (( packed )); + +/** Admin queue Set VSI Promiscuous Modes command */ +#define INTELXL_ADMIN_PROMISC 0x0254 + +/** Admin queue Set VSI Promiscuous Modes command parameters */ +struct intelxl_admin_promisc_params { + /** Flags */ + uint16_t flags; + /** Valid flags */ + uint16_t valid; + /** VSI switching element ID */ + uint16_t vsi; + /** Reserved */ + uint8_t reserved[10]; +} __attribute__ (( packed )); + +/** Promiscuous unicast mode */ +#define INTELXL_ADMIN_PROMISC_FL_UNICAST 0x0001 + +/** Promiscuous multicast mode */ +#define INTELXL_ADMIN_PROMISC_FL_MULTICAST 0x0002 + +/** Promiscuous broadcast mode */ +#define INTELXL_ADMIN_PROMISC_FL_BROADCAST 0x0004 + +/** Promiscuous VLAN mode */ +#define INTELXL_ADMIN_PROMISC_FL_VLAN 0x0010 + +/** Admin queue Restart Autonegotiation command */ +#define INTELXL_ADMIN_AUTONEG 0x0605 + +/** Admin queue Restart Autonegotiation command parameters */ +struct intelxl_admin_autoneg_params { + /** Flags */ + uint8_t flags; + /** Reserved */ + uint8_t reserved[15]; +} __attribute__ (( packed )); + +/** Restart autonegotiation */ +#define INTELXL_ADMIN_AUTONEG_FL_RESTART 0x02 + +/** Enable link */ +#define INTELXL_ADMIN_AUTONEG_FL_ENABLE 0x04 + +/** Admin queue Get Link Status command */ +#define INTELXL_ADMIN_LINK 0x0607 + +/** Admin queue Get Link Status command parameters */ +struct 
intelxl_admin_link_params { + /** Link status notification */ + uint8_t notify; + /** Reserved */ + uint8_t reserved_a; + /** PHY type */ + uint8_t phy; + /** Link speed */ + uint8_t speed; + /** Link status */ + uint8_t status; + /** Reserved */ + uint8_t reserved_b[11]; +} __attribute__ (( packed )); + +/** Notify driver of link status changes */ +#define INTELXL_ADMIN_LINK_NOTIFY 0x03 + +/** Link is up */ +#define INTELXL_ADMIN_LINK_UP 0x01 + +/** Admin queue Send Message to PF command */ +#define INTELXL_ADMIN_SEND_TO_PF 0x0801 + +/** Admin queue Send Message to VF command */ +#define INTELXL_ADMIN_SEND_TO_VF 0x0802 + +/** Admin Queue VF Reset opcode */ +#define INTELXL_ADMIN_VF_RESET 0x00000002 + +/** Admin Queue VF Get Resources opcode */ +#define INTELXL_ADMIN_VF_GET_RESOURCES 0x00000003 + +/** Admin Queue VF Get Resources data buffer */ +struct intelxl_admin_vf_get_resources_buffer { + /** Reserved */ + uint8_t reserved_a[20]; + /** VSI switching element ID */ + uint16_t vsi; + /** Reserved */ + uint8_t reserved_b[8]; + /** MAC address */ + uint8_t mac[ETH_ALEN]; +} __attribute__ (( packed )); + +/** Admin Queue VF Status Change Event opcode */ +#define INTELXL_ADMIN_VF_STATUS 0x00000011 + +/** Link status change event type */ +#define INTELXL_ADMIN_VF_STATUS_LINK 0x00000001 + +/** Link status change event data */ +struct intelxl_admin_vf_status_link { + /** Link speed */ + uint32_t speed; + /** Link status */ + uint8_t status; + /** Reserved */ + uint8_t reserved[3]; +} __attribute__ (( packed )); + +/** Admin Queue VF Status Change Event data buffer */ +struct intelxl_admin_vf_status_buffer { + /** Event type */ + uint32_t event; + /** Event data */ + union { + /** Link change event data */ + struct intelxl_admin_vf_status_link link; + } data; + /** Reserved */ + uint8_t reserved[4]; +} __attribute__ (( packed )); + +/** Admin Queue VF Configure Queues opcode */ +#define INTELXL_ADMIN_VF_CONFIGURE 0x00000006 + +/** Admin Queue VF Configure Queues data 
buffer */ +struct intelxl_admin_vf_configure_buffer { + /** VSI switching element ID */ + uint16_t vsi; + /** Number of queue pairs */ + uint16_t count; + /** Reserved */ + uint8_t reserved_a[4]; + /** Transmit queue */ + struct { + /** VSI switching element ID */ + uint16_t vsi; + /** Queue ID */ + uint16_t id; + /** Queue count */ + uint16_t count; + /** Reserved */ + uint8_t reserved_a[2]; + /** Base address */ + uint64_t base; + /** Reserved */ + uint8_t reserved_b[8]; + } __attribute__ (( packed )) tx; + /** Receive queue */ + struct { + /** VSI switching element ID */ + uint16_t vsi; + /** Queue ID */ + uint16_t id; + /** Queue count */ + uint32_t count; + /** Reserved */ + uint8_t reserved_a[4]; + /** Data buffer length */ + uint32_t len; + /** Maximum frame size */ + uint32_t mfs; + /** Reserved */ + uint8_t reserved_b[4]; + /** Base address */ + uint64_t base; + /** Reserved */ + uint8_t reserved_c[8]; + } __attribute__ (( packed )) rx; + /** Reserved + * + * This field exists only due to a bug in the PF driver's + * message validation logic, which causes it to miscalculate + * the expected message length. + */ + uint8_t reserved_b[64]; +} __attribute__ (( packed )); + +/** Admin Queue VF IRQ Map opcode */ +#define INTELXL_ADMIN_VF_IRQ_MAP 0x00000007 + +/** Admin Queue VF IRQ Map data buffer */ +struct intelxl_admin_vf_irq_map_buffer { + /** Number of interrupt vectors */ + uint16_t count; + /** VSI switching element ID */ + uint16_t vsi; + /** Interrupt vector ID */ + uint16_t vec; + /** Receive queue bitmap */ + uint16_t rxmap; + /** Transmit queue bitmap */ + uint16_t txmap; + /** Receive interrupt throttling index */ + uint16_t rxitr; + /** Transmit interrupt throttling index */ + uint16_t txitr; + /** Reserved + * + * This field exists only due to a bug in the PF driver's + * message validation logic, which causes it to miscalculate + * the expected message length. 
+ */ + uint8_t reserved[12]; +} __attribute__ (( packed )); + +/** Admin Queue VF Enable Queues opcode */ +#define INTELXL_ADMIN_VF_ENABLE 0x00000008 + +/** Admin Queue VF Disable Queues opcode */ +#define INTELXL_ADMIN_VF_DISABLE 0x00000009 + +/** Admin Queue VF Enable/Disable Queues data buffer */ +struct intelxl_admin_vf_queues_buffer { + /** VSI switching element ID */ + uint16_t vsi; + /** Reserved */ + uint8_t reserved[2]; + /** Receive queue bitmask */ + uint32_t rx; + /** Transmit queue bitmask */ + uint32_t tx; +} __attribute__ (( packed )); + +/** Admin Queue VF Configure Promiscuous Mode opcode */ +#define INTELXL_ADMIN_VF_PROMISC 0x0000000e + +/** Admin Queue VF Configure Promiscuous Mode data buffer */ +struct intelxl_admin_vf_promisc_buffer { + /** VSI switching element ID */ + uint16_t vsi; + /** Flags */ + uint16_t flags; +} __attribute__ (( packed )); + +/** Admin queue command parameters */ +union intelxl_admin_params { + /** Additional data buffer command parameters */ + struct intelxl_admin_buffer_params buffer; + /** Get Version command parameters */ + struct intelxl_admin_version_params version; + /** Driver Version command parameters */ + struct intelxl_admin_driver_params driver; + /** Shutdown command parameters */ + struct intelxl_admin_shutdown_params shutdown; + /** Clear PXE Mode command parameters */ + struct intelxl_admin_clear_pxe_params pxe; + /** Get Switch Configuration command parameters */ + struct intelxl_admin_switch_params sw; + /** Get VSI Parameters command parameters */ + struct intelxl_admin_vsi_params vsi; + /** Set VSI Promiscuous Modes command parameters */ + struct intelxl_admin_promisc_params promisc; + /** Restart Autonegotiation command parameters */ + struct intelxl_admin_autoneg_params autoneg; + /** Get Link Status command parameters */ + struct intelxl_admin_link_params link; +} __attribute__ (( packed )); + +/** Admin queue data buffer */ +union intelxl_admin_buffer { + /** Driver Version data buffer */ + 
struct intelxl_admin_driver_buffer driver; + /** Get Switch Configuration data buffer */ + struct intelxl_admin_switch_buffer sw; + /** Get VSI Parameters data buffer */ + struct intelxl_admin_vsi_buffer vsi; + /** VF Get Resources data buffer */ + struct intelxl_admin_vf_get_resources_buffer res; + /** VF Status Change Event data buffer */ + struct intelxl_admin_vf_status_buffer stat; + /** VF Configure Queues data buffer */ + struct intelxl_admin_vf_configure_buffer cfg; + /** VF Enable/Disable Queues data buffer */ + struct intelxl_admin_vf_queues_buffer queues; + /** VF Configure Promiscuous Mode data buffer */ + struct intelxl_admin_vf_promisc_buffer promisc; + /*** VF IRQ Map data buffer */ + struct intelxl_admin_vf_irq_map_buffer irq; + /** Alignment padding */ + uint8_t pad[INTELXL_ALIGN]; +} __attribute__ (( packed )); + +/** Admin queue descriptor */ +struct intelxl_admin_descriptor { + /** Flags */ + uint16_t flags; + /** Opcode */ + uint16_t opcode; + /** Data length */ + uint16_t len; + /** Return value */ + uint16_t ret; + /** Opaque cookie / VF opcode */ + union { + /** Cookie */ + uint32_t cookie; + /** VF opcode */ + uint32_t vopcode; + }; + /** VF return value */ + int32_t vret; + /** Parameters */ + union intelxl_admin_params params; +} __attribute__ (( packed )); + +/** Admin descriptor done */ +#define INTELXL_ADMIN_FL_DD 0x0001 + +/** Admin descriptor contains a completion */ +#define INTELXL_ADMIN_FL_CMP 0x0002 + +/** Admin descriptor completed in error */ +#define INTELXL_ADMIN_FL_ERR 0x0004 + +/** Admin descriptor uses data buffer for command parameters */ +#define INTELXL_ADMIN_FL_RD 0x0400 + +/** Admin descriptor uses data buffer */ +#define INTELXL_ADMIN_FL_BUF 0x1000 + +/** Admin queue */ +struct intelxl_admin { + /** Descriptors */ + struct intelxl_admin_descriptor *desc; + /** Data buffers */ + union intelxl_admin_buffer *buf; + /** Queue index */ + unsigned int index; + + /** Register block base */ + unsigned int base; + /** Register 
offsets */ + const struct intelxl_admin_offsets *regs; +}; + +/** + * Initialise admin queue + * + * @v admin Admin queue + * @v base Register block base + * @v regs Register offsets + */ +static inline __attribute__ (( always_inline )) void +intelxl_init_admin ( struct intelxl_admin *admin, unsigned int base, + const struct intelxl_admin_offsets *regs ) { + + admin->base = base; + admin->regs = regs; +} + +/** Number of admin queue descriptors */ +#define INTELXL_ADMIN_NUM_DESC 4 + +/** Maximum time to wait for an admin request to complete */ +#define INTELXL_ADMIN_MAX_WAIT_MS 100 + +/** Admin queue API major version */ +#define INTELXL_ADMIN_API_MAJOR 1 + +/****************************************************************************** + * + * Transmit and receive queue context + * + ****************************************************************************** + */ + +/** CMLAN Context Data Register */ +#define INTELXL_PFCM_LANCTXDATA(x) ( 0x10c100 + ( 0x80 * (x) ) ) + +/** CMLAN Context Control Register */ +#define INTELXL_PFCM_LANCTXCTL 0x10c300 +#define INTELXL_PFCM_LANCTXCTL_QUEUE_NUM(x) \ + ( (x) << 0 ) /**< Queue number */ +#define INTELXL_PFCM_LANCTXCTL_SUB_LINE(x) \ + ( (x) << 12 ) /**< Sub-line */ +#define INTELXL_PFCM_LANCTXCTL_TYPE(x) \ + ( (x) << 15 ) /**< Queue type */ +#define INTELXL_PFCM_LANCTXCTL_TYPE_RX \ + INTELXL_PFCM_LANCTXCTL_TYPE ( 0x0 ) /**< RX queue type */ +#define INTELXL_PFCM_LANCTXCTL_TYPE_TX \ + INTELXL_PFCM_LANCTXCTL_TYPE ( 0x1 ) /**< TX queue type */ +#define INTELXL_PFCM_LANCTXCTL_OP_CODE(x) \ + ( (x) << 17 ) /**< Op code */ +#define INTELXL_PFCM_LANCTXCTL_OP_CODE_READ \ + INTELXL_PFCM_LANCTXCTL_OP_CODE ( 0x0 ) /**< Read context */ +#define INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE \ + INTELXL_PFCM_LANCTXCTL_OP_CODE ( 0x1 ) /**< Write context */ + +/** CMLAN Context Status Register */ +#define INTELXL_PFCM_LANCTXSTAT 0x10c380 +#define INTELXL_PFCM_LANCTXSTAT_DONE 0x00000001UL /**< Complete */ + +/** Queue context line */ +struct 
intelxl_context_line { + /** Raw data */ + uint32_t raw[4]; +} __attribute__ (( packed )); + +/** Transmit queue context */ +struct intelxl_context_tx { + /** Head pointer */ + uint16_t head; + /** Flags */ + uint16_t flags; + /** Base address */ + uint64_t base; + /** Reserved */ + uint8_t reserved_a[8]; + /** Queue count */ + uint16_t count; + /** Reserved */ + uint8_t reserved_b[100]; + /** Queue set */ + uint16_t qset; + /** Reserved */ + uint8_t reserved_c[4]; +} __attribute__ (( packed )); + +/** New transmit queue context */ +#define INTELXL_CTX_TX_FL_NEW 0x4000 + +/** Transmit queue base address */ +#define INTELXL_CTX_TX_BASE( base ) ( (base) >> 7 ) + +/** Transmit queue count */ +#define INTELXL_CTX_TX_COUNT( count ) ( (count) << 1 ) + +/** Transmit queue set */ +#define INTELXL_CTX_TX_QSET( qset) ( (qset) << 4 ) + +/** Receive queue context */ +struct intelxl_context_rx { + /** Head pointer */ + uint16_t head; + /** Reserved */ + uint8_t reserved_a[2]; + /** Base address and queue count */ + uint64_t base_count; + /** Data buffer length */ + uint16_t len; + /** Flags */ + uint8_t flags; + /** Reserved */ + uint8_t reserved_b[7]; + /** Maximum frame size */ + uint16_t mfs; +} __attribute__ (( packed )); + +/** Receive queue base address and queue count */ +#define INTELXL_CTX_RX_BASE_COUNT( base, count ) \ + ( ( (base) >> 7 ) | ( ( ( uint64_t ) (count) ) << 57 ) ) + +/** Receive queue data buffer length */ +#define INTELXL_CTX_RX_LEN( len ) ( (len) >> 1 ) + +/** Use 32-byte receive descriptors */ +#define INTELXL_CTX_RX_FL_DSIZE 0x10 + +/** Strip CRC from received packets */ +#define INTELXL_CTX_RX_FL_CRCSTRIP 0x20 + +/** Receive queue maximum frame size */ +#define INTELXL_CTX_RX_MFS( mfs ) ( (mfs) >> 2 ) + +/** Maximum time to wait for a context operation to complete */ +#define INTELXL_CTX_MAX_WAIT_MS 100 + +/** Time to wait for a queue to become enabled */ +#define INTELXL_QUEUE_ENABLE_DELAY_US 20 + +/** Time to wait for a transmit queue to become 
pre-disabled */ +#define INTELXL_QUEUE_PRE_DISABLE_DELAY_US 400 + +/** Maximum time to wait for a queue to become disabled */ +#define INTELXL_QUEUE_DISABLE_MAX_WAIT_MS 1000 + +/****************************************************************************** + * + * Transmit and receive descriptors + * + ****************************************************************************** + */ + +/** Global Transmit Queue Head register */ +#define INTELXL_QTX_HEAD(x) ( 0x0e4000 + ( 0x4 * (x) ) ) + +/** Global Transmit Pre Queue Disable register */ +#define INTELXL_GLLAN_TXPRE_QDIS(x) ( 0x0e6500 + ( 0x4 * ( (x) / 0x80 ) ) ) +#define INTELXL_GLLAN_TXPRE_QDIS_QINDX(x) \ + ( (x) << 0 ) /**< Queue index */ +#define INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS \ + 0x40000000UL /**< Set disable */ +#define INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS \ + 0x80000000UL /**< Clear disable */ + +/** Global Transmit Queue register block */ +#define INTELXL_QTX(x) ( 0x100000 + ( 0x4 * (x) ) ) + +/** Global Receive Queue register block */ +#define INTELXL_QRX(x) ( 0x120000 + ( 0x4 * (x) ) ) + +/** Queue Enable Register (offset) */ +#define INTELXL_QXX_ENA 0x0000 +#define INTELXL_QXX_ENA_REQ 0x00000001UL /**< Enable request */ +#define INTELXL_QXX_ENA_STAT 0x00000004UL /**< Enabled status */ + +/** Queue Control Register (offset) */ +#define INTELXL_QXX_CTL 0x4000 +#define INTELXL_QXX_CTL_PFVF_Q(x) ( (x) << 0 ) /**< PF/VF queue */ +#define INTELXL_QXX_CTL_PFVF_Q_PF \ + INTELXL_QXX_CTL_PFVF_Q ( 0x2 ) /**< PF queue */ +#define INTELXL_QXX_CTL_PFVF_PF_INDX(x) ( (x) << 2 ) /**< PF index */ + +/** Queue Tail Pointer Register (offset) */ +#define INTELXL_QXX_TAIL 0x8000 + +/** Global RLAN Control 0 register */ +#define INTELXL_GLLAN_RCTL_0 0x12a500 +#define INTELXL_GLLAN_RCTL_0_PXE_MODE 0x00000001UL /**< PXE mode */ + +/** Transmit data descriptor */ +struct intelxl_tx_data_descriptor { + /** Buffer address */ + uint64_t address; + /** Flags */ + uint32_t flags; + /** Length */ + uint32_t len; +} __attribute__ (( 
packed )); + +/** Transmit data descriptor type */ +#define INTELXL_TX_DATA_DTYP 0x0 + +/** Transmit data descriptor end of packet */ +#define INTELXL_TX_DATA_EOP 0x10 + +/** Transmit data descriptor report status */ +#define INTELXL_TX_DATA_RS 0x20 + +/** Transmit data descriptor pretty please + * + * This bit is completely missing from older versions of the XL710 + * datasheet. Later versions describe it innocuously as "reserved, + * must be 1". Without this bit, everything will appear to work (up + * to and including the port "transmit good octets" counter), but no + * packet will actually be sent. + */ +#define INTELXL_TX_DATA_JFDI 0x40 + +/** Transmit data descriptor length */ +#define INTELXL_TX_DATA_LEN( len ) ( (len) << 2 ) + +/** Transmit writeback descriptor */ +struct intelxl_tx_writeback_descriptor { + /** Reserved */ + uint8_t reserved_a[8]; + /** Flags */ + uint8_t flags; + /** Reserved */ + uint8_t reserved_b[7]; +} __attribute__ (( packed )); + +/** Transmit writeback descriptor complete */ +#define INTELXL_TX_WB_FL_DD 0x01 + +/** Transmit descriptor */ +union intelxl_tx_descriptor { + /** Transmit data descriptor */ + struct intelxl_tx_data_descriptor data; + /** Transmit writeback descriptor */ + struct intelxl_tx_writeback_descriptor wb; +}; + +/** Receive data descriptor */ +struct intelxl_rx_data_descriptor { + /** Buffer address */ + uint64_t address; + /** Flags */ + uint32_t flags; + /** Reserved */ + uint8_t reserved[20]; +} __attribute__ (( packed )); + +/** Receive writeback descriptor */ +struct intelxl_rx_writeback_descriptor { + /** Reserved */ + uint8_t reserved_a[2]; + /** VLAN tag */ + uint16_t vlan; + /** Reserved */ + uint8_t reserved_b[4]; + /** Flags */ + uint32_t flags; + /** Length */ + uint32_t len; + /** Reserved */ + uint8_t reserved_c[16]; +} __attribute__ (( packed )); + +/** Receive writeback descriptor complete */ +#define INTELXL_RX_WB_FL_DD 0x00000001UL + +/** Receive writeback descriptor VLAN tag present */ +#define 
INTELXL_RX_WB_FL_VLAN 0x00000004UL + +/** Receive writeback descriptor error */ +#define INTELXL_RX_WB_FL_RXE 0x00080000UL + +/** Receive writeback descriptor length */ +#define INTELXL_RX_WB_LEN(len) ( ( (len) >> 6 ) & 0x3fff ) + +/** Packet descriptor */ +union intelxl_rx_descriptor { + /** Receive data descriptor */ + struct intelxl_rx_data_descriptor data; + /** Receive writeback descriptor */ + struct intelxl_rx_writeback_descriptor wb; +}; + +/** Descriptor ring */ +struct intelxl_ring { + /** Descriptors */ + union { + /** Transmit descriptors */ + union intelxl_tx_descriptor *tx; + /** Receive descriptors */ + union intelxl_rx_descriptor *rx; + /** Raw data */ + void *raw; + } desc; + /** Producer index */ + unsigned int prod; + /** Consumer index */ + unsigned int cons; + + /** Register block */ + unsigned int reg; + /** Tail register */ + unsigned int tail; + /** Length (in bytes) */ + size_t len; + /** Program queue context + * + * @v intelxl Intel device + * @v address Descriptor ring base address + */ + int ( * context ) ( struct intelxl_nic *intelxl, physaddr_t address ); +}; + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v count Number of descriptors + * @v len Length of a single descriptor + * @v context Method to program queue context + */ +static inline __attribute__ (( always_inline)) void +intelxl_init_ring ( struct intelxl_ring *ring, unsigned int count, size_t len, + int ( * context ) ( struct intelxl_nic *intelxl, + physaddr_t address ) ) { + + ring->len = ( count * len ); + ring->context = context; +} + +/** Number of transmit descriptors + * + * Chosen to exceed the receive ring fill level, in order to avoid + * running out of transmit descriptors when sending TCP ACKs. + */ +#define INTELXL_TX_NUM_DESC 64 + +/** Transmit descriptor ring maximum fill level */ +#define INTELXL_TX_FILL ( INTELXL_TX_NUM_DESC - 1 ) + +/** Number of receive descriptors + * + * Must be a multiple of 32. 
+ */ +#define INTELXL_RX_NUM_DESC 32 + +/** Receive descriptor ring fill level + * + * Must be a multiple of 8 and greater than 8. + */ +#define INTELXL_RX_FILL 16 + +/****************************************************************************** + * + * Top level + * + ****************************************************************************** + */ + +/** PF Interrupt Zero Dynamic Control Register */ +#define INTELXL_PFINT_DYN_CTL0 0x038480 +#define INTELXL_INT_DYN_CTL_INTENA 0x00000001UL /**< Enable */ +#define INTELXL_INT_DYN_CTL_CLEARPBA 0x00000002UL /**< Acknowledge */ +#define INTELXL_INT_DYN_CTL_INTENA_MASK 0x80000000UL /**< Ignore enable */ + +/** PF Interrupt Zero Linked List Register */ +#define INTELXL_PFINT_LNKLST0 0x038500 +#define INTELXL_PFINT_LNKLST0_FIRSTQ_INDX(x) \ + ( (x) << 0 ) /**< Queue index */ +#define INTELXL_PFINT_LNKLST0_FIRSTQ_INDX_NONE \ + INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( 0x7ff ) /**< End of list */ +#define INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE(x) \ + ( (x) << 11 ) /**< Queue type */ +#define INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX \ + INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE ( 0x0 ) /**< Receive queue */ +#define INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_TX \ + INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE ( 0x1 ) /**< Transmit queue */ + +/** PF Interrupt Zero Cause Enablement Register */ +#define INTELXL_PFINT_ICR0_ENA 0x038800 +#define INTELXL_PFINT_ICR0_ENA_ADMINQ 0x40000000UL /**< Admin event */ + +/** Receive Queue Interrupt Cause Control Register */ +#define INTELXL_QINT_RQCTL(x) ( 0x03a000 + ( 0x4 * (x) ) ) +#define INTELXL_QINT_RQCTL_NEXTQ_INDX(x) ( (x) << 16 ) /**< Queue index */ +#define INTELXL_QINT_RQCTL_NEXTQ_INDX_NONE \ + INTELXL_QINT_RQCTL_NEXTQ_INDX ( 0x7ff ) /**< End of list */ +#define INTELXL_QINT_RQCTL_NEXTQ_TYPE(x) ( (x) << 27 ) /**< Queue type */ +#define INTELXL_QINT_RQCTL_NEXTQ_TYPE_RX \ + INTELXL_QINT_RQCTL_NEXTQ_TYPE ( 0x0 ) /**< Receive queue */ +#define INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX \ + INTELXL_QINT_RQCTL_NEXTQ_TYPE ( 0x1 ) /**< 
Transmit queue */ +#define INTELXL_QINT_RQCTL_CAUSE_ENA 0x40000000UL /**< Enable */ + +/** Transmit Queue Interrupt Cause Control Register */ +#define INTELXL_QINT_TQCTL(x) ( 0x03c000 + ( 0x4 * (x) ) ) +#define INTELXL_QINT_TQCTL_NEXTQ_INDX(x) ( (x) << 16 ) /**< Queue index */ +#define INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE \ + INTELXL_QINT_TQCTL_NEXTQ_INDX ( 0x7ff ) /**< End of list */ +#define INTELXL_QINT_TQCTL_NEXTQ_TYPE(x) ( (x) << 27 ) /**< Queue type */ +#define INTELXL_QINT_TQCTL_NEXTQ_TYPE_RX \ + INTELXL_QINT_TQCTL_NEXTQ_TYPE ( 0x0 ) /**< Receive queue */ +#define INTELXL_QINT_TQCTL_NEXTQ_TYPE_TX \ + INTELXL_QINT_TQCTL_NEXTQ_TYPE ( 0x1 ) /**< Transmit queue */ +#define INTELXL_QINT_TQCTL_CAUSE_ENA 0x40000000UL /**< Enable */ + +/** PF Control Register */ +#define INTELXL_PFGEN_CTRL 0x092400 +#define INTELXL_PFGEN_CTRL_PFSWR 0x00000001UL /**< Software Reset */ + +/** Time to delay for device reset, in milliseconds */ +#define INTELXL_RESET_DELAY_MS 100 + +/** PF Queue Allocation Register */ +#define INTELXL_PFLAN_QALLOC 0x1c0400 +#define INTELXL_PFLAN_QALLOC_FIRSTQ(x) \ + ( ( (x) >> 0 ) & 0x7ff ) /**< First queue */ +#define INTELXL_PFLAN_QALLOC_LASTQ(x) \ + ( ( (x) >> 16 ) & 0x7ff ) /**< Last queue */ + +/** PF LAN Port Number Register */ +#define INTELXL_PFGEN_PORTNUM 0x1c0480 +#define INTELXL_PFGEN_PORTNUM_PORT_NUM(x) \ + ( ( (x) >> 0 ) & 0x3 ) /**< Port number */ + +/** Port MAC Address Low Register */ +#define INTELXL_PRTGL_SAL 0x1e2120 + +/** Port MAC Address High Register */ +#define INTELXL_PRTGL_SAH 0x1e2140 +#define INTELXL_PRTGL_SAH_MFS_GET(x) ( (x) >> 16 ) /**< Max frame size */ +#define INTELXL_PRTGL_SAH_MFS_SET(x) ( (x) << 16 ) /**< Max frame size */ + +/** Receive address */ +union intelxl_receive_address { + struct { + uint32_t low; + uint32_t high; + } __attribute__ (( packed )) reg; + uint8_t raw[ETH_ALEN]; +}; + +/** An Intel 40Gigabit network card */ +struct intelxl_nic { + /** Registers */ + void *regs; + /** Maximum frame size */ + size_t 
mfs; + + /** Physical function number */ + unsigned int pf; + /** Absolute queue number base */ + unsigned int base; + /** Port number */ + unsigned int port; + /** Queue number */ + unsigned int queue; + /** Virtual Station Interface switching element ID */ + unsigned int vsi; + /** Queue set handle */ + unsigned int qset; + /** Interrupt control register */ + unsigned int intr; + /** MSI-X capability */ + struct pci_msix msix; + /** MSI-X dummy interrupt target */ + uint32_t msg; + /** PCI Express capability offset */ + unsigned int exp; + + /** Admin command queue */ + struct intelxl_admin command; + /** Admin event queue */ + struct intelxl_admin event; + + /** Current VF opcode */ + unsigned int vopcode; + /** Current VF return value */ + int vret; + /** Current VF event data buffer */ + union intelxl_admin_buffer vbuf; + + /** Transmit descriptor ring */ + struct intelxl_ring tx; + /** Receive descriptor ring */ + struct intelxl_ring rx; + /** Receive I/O buffers */ + struct io_buffer *rx_iobuf[INTELXL_RX_NUM_DESC]; +}; + +extern int intelxl_msix_enable ( struct intelxl_nic *intelxl, + struct pci_device *pci ); +extern void intelxl_msix_disable ( struct intelxl_nic *intelxl, + struct pci_device *pci ); +extern struct intelxl_admin_descriptor * +intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ); +extern union intelxl_admin_buffer * +intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ); +extern int intelxl_admin_command ( struct intelxl_nic *intelxl ); +extern void intelxl_poll_admin ( struct net_device *netdev ); +extern int intelxl_open_admin ( struct intelxl_nic *intelxl ); +extern void intelxl_reopen_admin ( struct intelxl_nic *intelxl ); +extern void intelxl_close_admin ( struct intelxl_nic *intelxl ); +extern int intelxl_alloc_ring ( struct intelxl_nic *intelxl, + struct intelxl_ring *ring ); +extern void intelxl_free_ring ( struct intelxl_nic *intelxl, + struct intelxl_ring *ring ); +extern void intelxl_empty_rx ( struct 
intelxl_nic *intelxl ); +extern int intelxl_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ); +extern void intelxl_poll ( struct net_device *netdev ); + +extern void intelxlvf_admin_event ( struct net_device *netdev, + struct intelxl_admin_descriptor *evt, + union intelxl_admin_buffer *buf ); + +#endif /* _INTELXL_H */ diff --git a/src/drivers/net/intelxlvf.c b/src/drivers/net/intelxlvf.c new file mode 100644 index 00000000..83e484c8 --- /dev/null +++ b/src/drivers/net/intelxlvf.c @@ -0,0 +1,719 @@ +/* + * Copyright (C) 2019 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include "intelxlvf.h" + +/** @file + * + * Intel 40 Gigabit Ethernet virtual function network card driver + * + */ + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware via PCIe function-level reset + * + * @v intelxl Intel device + */ +static void intelxlvf_reset_flr ( struct intelxl_nic *intelxl, + struct pci_device *pci ) { + uint16_t control; + + /* Perform a PCIe function-level reset */ + pci_read_config_word ( pci, ( intelxl->exp + PCI_EXP_DEVCTL ), + &control ); + pci_write_config_word ( pci, ( intelxl->exp + PCI_EXP_DEVCTL ), + ( control | PCI_EXP_DEVCTL_FLR ) ); + mdelay ( INTELXL_RESET_DELAY_MS ); +} + +/** + * Wait for admin event queue to be torn down + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxlvf_reset_wait_teardown ( struct intelxl_nic *intelxl ) { + uint32_t admin_evt_len; + unsigned int i; + + /* Wait for admin event queue to be torn down */ + for ( i = 0 ; i < INTELXLVF_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check admin event queue length register */ + admin_evt_len = readl ( intelxl->regs + INTELXLVF_ADMIN + + INTELXLVF_ADMIN_EVT_LEN ); + if ( ! 
( admin_evt_len & INTELXL_ADMIN_LEN_ENABLE ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( intelxl, "INTELXL %p timed out waiting for teardown (%#08x)\n", + intelxl, admin_evt_len ); + return -ETIMEDOUT; +} + +/** + * Wait for virtual function to be marked as active + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxlvf_reset_wait_active ( struct intelxl_nic *intelxl ) { + uint32_t vfgen_rstat; + unsigned int vfr_state; + unsigned int i; + + /* Wait for virtual function to be marked as active */ + for ( i = 0 ; i < INTELXLVF_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check status as written by physical function driver */ + vfgen_rstat = readl ( intelxl->regs + INTELXLVF_VFGEN_RSTAT ); + vfr_state = INTELXLVF_VFGEN_RSTAT_VFR_STATE ( vfgen_rstat ); + if ( vfr_state == INTELXLVF_VFGEN_RSTAT_VFR_STATE_ACTIVE ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( intelxl, "INTELXL %p timed out waiting for activation " + "(%#08x)\n", intelxl, vfgen_rstat ); + return -ETIMEDOUT; +} + +/** + * Reset hardware via admin queue + * + * @v intelxl Intel device + * @ret rc Return status code + */ +static int intelxlvf_reset_admin ( struct intelxl_nic *intelxl ) { + struct intelxl_admin_descriptor *cmd; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SEND_TO_PF ); + cmd->vopcode = cpu_to_le32 ( INTELXL_ADMIN_VF_RESET ); + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + goto err_command; + + /* Wait for minimum reset time */ + mdelay ( INTELXL_RESET_DELAY_MS ); + + /* Wait for reset to take effect */ + if ( ( rc = intelxlvf_reset_wait_teardown ( intelxl ) ) != 0 ) + goto err_teardown; + + /* Wait for virtual function to become active */ + if ( ( rc = intelxlvf_reset_wait_active ( intelxl ) ) != 0 ) + goto err_active; + + err_active: + err_teardown: + intelxl_reopen_admin ( intelxl ); + err_command: + return rc; 
+} + +/****************************************************************************** + * + * Admin queue + * + ****************************************************************************** + */ + +/** Admin command queue register offsets */ +static const struct intelxl_admin_offsets intelxlvf_admin_command_offsets = { + .bal = INTELXLVF_ADMIN_CMD_BAL, + .bah = INTELXLVF_ADMIN_CMD_BAH, + .len = INTELXLVF_ADMIN_CMD_LEN, + .head = INTELXLVF_ADMIN_CMD_HEAD, + .tail = INTELXLVF_ADMIN_CMD_TAIL, +}; + +/** Admin event queue register offsets */ +static const struct intelxl_admin_offsets intelxlvf_admin_event_offsets = { + .bal = INTELXLVF_ADMIN_EVT_BAL, + .bah = INTELXLVF_ADMIN_EVT_BAH, + .len = INTELXLVF_ADMIN_EVT_LEN, + .head = INTELXLVF_ADMIN_EVT_HEAD, + .tail = INTELXLVF_ADMIN_EVT_TAIL, +}; + +/** + * Issue admin queue virtual function command + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxlvf_admin_command ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin *admin = &intelxl->command; + struct intelxl_admin_descriptor *cmd; + unsigned int i; + int rc; + + /* Populate descriptor */ + cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ]; + cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SEND_TO_PF ); + + /* Record opcode */ + intelxl->vopcode = le32_to_cpu ( cmd->vopcode ); + + /* Issue command */ + if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 ) + goto err_command; + + /* Wait for response */ + for ( i = 0 ; i < INTELXLVF_ADMIN_MAX_WAIT_MS ; i++ ) { + + /* Poll admin event queue */ + intelxl_poll_admin ( netdev ); + + /* If response has not arrived, delay 1ms and retry */ + if ( intelxl->vopcode ) { + mdelay ( 1 ); + continue; + } + + /* Check for errors */ + if ( intelxl->vret != 0 ) + return -EIO; + + return 0; + } + + rc = -ETIMEDOUT; + DBGC ( intelxl, "INTELXL %p timed out waiting for admin VF command " + "%#x\n", intelxl, intelxl->vopcode ); + err_command: + 
intelxl->vopcode = 0; + return rc; +} + +/** + * Handle link status event + * + * @v netdev Network device + * @v link Link status + */ +static void intelxlvf_admin_link ( struct net_device *netdev, + struct intelxl_admin_vf_status_link *link ) { + struct intelxl_nic *intelxl = netdev->priv; + + DBGC ( intelxl, "INTELXL %p link %#02x speed %#02x\n", intelxl, + link->status, link->speed ); + + /* Update network device */ + if ( link->status ) { + netdev_link_up ( netdev ); + } else { + netdev_link_down ( netdev ); + } +} + +/** + * Handle status change event + * + * @v netdev Network device + * @v stat Status change event + */ +static void +intelxlvf_admin_status ( struct net_device *netdev, + struct intelxl_admin_vf_status_buffer *stat ) { + struct intelxl_nic *intelxl = netdev->priv; + + /* Handle event */ + switch ( stat->event ) { + case cpu_to_le32 ( INTELXL_ADMIN_VF_STATUS_LINK ): + intelxlvf_admin_link ( netdev, &stat->data.link ); + break; + default: + DBGC ( intelxl, "INTELXL %p unrecognised status change " + "event %#x:\n", intelxl, le32_to_cpu ( stat->event ) ); + DBGC_HDA ( intelxl, 0, stat, sizeof ( *stat ) ); + break; + } +} + +/** + * Handle virtual function event + * + * @v netdev Network device + * @v evt Admin queue event descriptor + * @v buf Admin queue event data buffer + */ +void intelxlvf_admin_event ( struct net_device *netdev, + struct intelxl_admin_descriptor *evt, + union intelxl_admin_buffer *buf ) { + struct intelxl_nic *intelxl = netdev->priv; + unsigned int vopcode = le32_to_cpu ( evt->vopcode ); + + /* Record command response if applicable */ + if ( vopcode == intelxl->vopcode ) { + memcpy ( &intelxl->vbuf, buf, sizeof ( intelxl->vbuf ) ); + intelxl->vopcode = 0; + intelxl->vret = le32_to_cpu ( evt->vret ); + if ( intelxl->vret != 0 ) { + DBGC ( intelxl, "INTELXL %p admin VF command %#x " + "error %d\n", intelxl, vopcode, intelxl->vret ); + DBGC_HDA ( intelxl, virt_to_bus ( evt ), evt, + sizeof ( *evt ) ); + DBGC_HDA ( intelxl, 
virt_to_bus ( buf ), buf, + le16_to_cpu ( evt->len ) ); + } + return; + } + + /* Handle unsolicited events */ + switch ( vopcode ) { + case INTELXL_ADMIN_VF_STATUS: + intelxlvf_admin_status ( netdev, &buf->stat ); + break; + default: + DBGC ( intelxl, "INTELXL %p unrecognised VF event %#x:\n", + intelxl, vopcode ); + DBGC_HDA ( intelxl, 0, evt, sizeof ( *evt ) ); + DBGC_HDA ( intelxl, 0, buf, le16_to_cpu ( evt->len ) ); + break; + } +} + +/** + * Get resources + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxlvf_admin_get_resources ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin_descriptor *cmd; + struct intelxl_admin_vf_get_resources_buffer *res; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->vopcode = cpu_to_le32 ( INTELXL_ADMIN_VF_GET_RESOURCES ); + + /* Issue command */ + if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 ) + return rc; + + /* Parse response */ + res = &intelxl->vbuf.res; + intelxl->vsi = le16_to_cpu ( res->vsi ); + memcpy ( netdev->hw_addr, res->mac, ETH_ALEN ); + DBGC ( intelxl, "INTELXL %p VSI %#04x\n", intelxl, intelxl->vsi ); + + return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Configure queues + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxlvf_admin_configure ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin_descriptor *cmd; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->vopcode = cpu_to_le32 ( INTELXL_ADMIN_VF_CONFIGURE ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( 
buf->cfg ) ); + buf = intelxl_admin_command_buffer ( intelxl ); + buf->cfg.vsi = cpu_to_le16 ( intelxl->vsi ); + buf->cfg.count = cpu_to_le16 ( 1 ); + buf->cfg.tx.vsi = cpu_to_le16 ( intelxl->vsi ); + buf->cfg.tx.count = cpu_to_le16 ( INTELXL_TX_NUM_DESC ); + buf->cfg.tx.base = cpu_to_le64 ( virt_to_bus ( intelxl->tx.desc.raw ) ); + buf->cfg.rx.vsi = cpu_to_le16 ( intelxl->vsi ); + buf->cfg.rx.count = cpu_to_le32 ( INTELXL_RX_NUM_DESC ); + buf->cfg.rx.len = cpu_to_le32 ( intelxl->mfs ); + buf->cfg.rx.mfs = cpu_to_le32 ( intelxl->mfs ); + buf->cfg.rx.base = cpu_to_le64 ( virt_to_bus ( intelxl->rx.desc.raw ) ); + + /* Issue command */ + if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Configure IRQ mapping + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxlvf_admin_irq_map ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin_descriptor *cmd; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->vopcode = cpu_to_le32 ( INTELXL_ADMIN_VF_IRQ_MAP ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( buf->irq ) ); + buf = intelxl_admin_command_buffer ( intelxl ); + buf->irq.count = cpu_to_le16 ( 1 ); + buf->irq.vsi = cpu_to_le16 ( intelxl->vsi ); + buf->irq.rxmap = cpu_to_le16 ( 0x0001 ); + buf->irq.txmap = cpu_to_le16 ( 0x0001 ); + + /* Issue command */ + if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Enable/disable queues + * + * @v netdev Network device + * @v enable Enable queues + * @ret rc Return status code + */ +static int intelxlvf_admin_queues ( struct net_device *netdev, int enable ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin_descriptor *cmd; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor 
*/ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->vopcode = ( enable ? cpu_to_le32 ( INTELXL_ADMIN_VF_ENABLE ) : + cpu_to_le32 ( INTELXL_ADMIN_VF_DISABLE ) ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( buf->queues ) ); + buf = intelxl_admin_command_buffer ( intelxl ); + buf->queues.vsi = cpu_to_le16 ( intelxl->vsi ); + buf->queues.rx = cpu_to_le32 ( 1 ); + buf->queues.tx = cpu_to_le32 ( 1 ); + + /* Issue command */ + if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Configure promiscuous mode + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxlvf_admin_promisc ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + struct intelxl_admin_descriptor *cmd; + union intelxl_admin_buffer *buf; + int rc; + + /* Populate descriptor */ + cmd = intelxl_admin_command_descriptor ( intelxl ); + cmd->vopcode = cpu_to_le32 ( INTELXL_ADMIN_VF_PROMISC ); + cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF ); + cmd->len = cpu_to_le16 ( sizeof ( buf->promisc ) ); + buf = intelxl_admin_command_buffer ( intelxl ); + buf->promisc.vsi = cpu_to_le16 ( intelxl->vsi ); + buf->promisc.flags = cpu_to_le16 ( INTELXL_ADMIN_PROMISC_FL_UNICAST | + INTELXL_ADMIN_PROMISC_FL_MULTICAST ); + + /* Issue command */ + if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxlvf_open ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + int rc; + + /* Calculate maximum frame size */ + intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ + + INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) ); + + /* Allocate transmit descriptor ring */ + if ( ( rc = intelxl_alloc_ring ( intelxl, &intelxl->tx ) ) != 0 ) + goto err_alloc_tx; + + /* 
Allocate receive descriptor ring */ + if ( ( rc = intelxl_alloc_ring ( intelxl, &intelxl->rx ) ) != 0 ) + goto err_alloc_rx; + + /* Configure queues */ + if ( ( rc = intelxlvf_admin_configure ( netdev ) ) != 0 ) + goto err_configure; + + /* Configure IRQ map */ + if ( ( rc = intelxlvf_admin_irq_map ( netdev ) ) != 0 ) + goto err_irq_map; + + /* Enable queues */ + if ( ( rc = intelxlvf_admin_queues ( netdev, 1 ) ) != 0 ) + goto err_enable; + + /* Configure promiscuous mode */ + if ( ( rc = intelxlvf_admin_promisc ( netdev ) ) != 0 ) + goto err_promisc; + + return 0; + + err_promisc: + intelxlvf_admin_queues ( netdev, INTELXL_ADMIN_VF_DISABLE ); + err_enable: + err_irq_map: + err_configure: + intelxl_free_ring ( intelxl, &intelxl->rx ); + err_alloc_rx: + intelxl_free_ring ( intelxl, &intelxl->tx ); + err_alloc_tx: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void intelxlvf_close ( struct net_device *netdev ) { + struct intelxl_nic *intelxl = netdev->priv; + int rc; + + /* Disable queues */ + if ( ( rc = intelxlvf_admin_queues ( netdev, 0 ) ) != 0 ) { + /* Leak memory; there's nothing else we can do */ + return; + } + + /* Free receive descriptor ring */ + intelxl_free_ring ( intelxl, &intelxl->rx ); + + /* Free transmit descriptor ring */ + intelxl_free_ring ( intelxl, &intelxl->tx ); + + /* Discard any unused receive buffers */ + intelxl_empty_rx ( intelxl ); +} + +/** Network device operations */ +static struct net_device_operations intelxlvf_operations = { + .open = intelxlvf_open, + .close = intelxlvf_close, + .transmit = intelxl_transmit, + .poll = intelxl_poll, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int intelxlvf_probe ( struct pci_device *pci ) { + struct 
net_device *netdev; + struct intelxl_nic *intelxl; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *intelxl ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &intelxlvf_operations ); + intelxl = netdev->priv; + pci_set_drvdata ( pci, netdev ); + netdev->dev = &pci->dev; + memset ( intelxl, 0, sizeof ( *intelxl ) ); + intelxl->intr = INTELXLVF_VFINT_DYN_CTL0; + intelxl_init_admin ( &intelxl->command, INTELXLVF_ADMIN, + &intelxlvf_admin_command_offsets ); + intelxl_init_admin ( &intelxl->event, INTELXLVF_ADMIN, + &intelxlvf_admin_event_offsets ); + intelxlvf_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC, + sizeof ( intelxl->tx.desc.tx[0] ), + INTELXLVF_QTX_TAIL ); + intelxlvf_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC, + sizeof ( intelxl->rx.desc.rx[0] ), + INTELXLVF_QRX_TAIL ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + intelxl->regs = pci_ioremap ( pci, pci->membase, INTELXLVF_BAR_SIZE ); + if ( ! intelxl->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Locate PCI Express capability */ + intelxl->exp = pci_find_capability ( pci, PCI_CAP_ID_EXP ); + if ( ! 
intelxl->exp ) { + DBGC ( intelxl, "INTELXL %p missing PCIe capability\n", + intelxl ); + rc = -ENXIO; + goto err_exp; + } + + /* Reset the function via PCIe FLR */ + intelxlvf_reset_flr ( intelxl, pci ); + + /* Enable MSI-X dummy interrupt */ + if ( ( rc = intelxl_msix_enable ( intelxl, pci ) ) != 0 ) + goto err_msix; + + /* Open admin queues */ + if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 ) + goto err_open_admin; + + /* Reset the function via admin queue */ + if ( ( rc = intelxlvf_reset_admin ( intelxl ) ) != 0 ) + goto err_reset_admin; + + /* Get MAC address */ + if ( ( rc = intelxlvf_admin_get_resources ( netdev ) ) != 0 ) + goto err_get_resources; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_get_resources: + err_reset_admin: + intelxl_close_admin ( intelxl ); + err_open_admin: + intelxl_msix_disable ( intelxl, pci ); + err_msix: + intelxlvf_reset_flr ( intelxl, pci ); + err_exp: + iounmap ( intelxl->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void intelxlvf_remove ( struct pci_device *pci ) { + struct net_device *netdev = pci_get_drvdata ( pci ); + struct intelxl_nic *intelxl = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Reset the function via admin queue */ + intelxlvf_reset_admin ( intelxl ); + + /* Close admin queues */ + intelxl_close_admin ( intelxl ); + + /* Disable MSI-X dummy interrupt */ + intelxl_msix_disable ( intelxl, pci ); + + /* Reset the function via PCIe FLR */ + intelxlvf_reset_flr ( intelxl, pci ); + + /* Free network device */ + iounmap ( intelxl->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** PCI device IDs */ +static struct pci_device_id intelxlvf_nics[] = { + PCI_ROM ( 0x8086, 0x154c, 
"xl710-vf", "XL710 VF", 0 ), + PCI_ROM ( 0x8086, 0x1571, "xl710-vf-hv", "XL710 VF (Hyper-V)", 0 ), + PCI_ROM ( 0x8086, 0x1889, "xl710-vf-ad", "XL710 VF (adaptive)", 0 ), + PCI_ROM ( 0x8086, 0x37cd, "x722-vf", "X722 VF", 0 ), + PCI_ROM ( 0x8086, 0x37d9, "x722-vf-hv", "X722 VF (Hyper-V)", 0 ), +}; + +/** PCI driver */ +struct pci_driver intelxlvf_driver __pci_driver = { + .ids = intelxlvf_nics, + .id_count = ( sizeof ( intelxlvf_nics ) / + sizeof ( intelxlvf_nics[0] ) ), + .probe = intelxlvf_probe, + .remove = intelxlvf_remove, +}; diff --git a/src/drivers/net/intelxlvf.h b/src/drivers/net/intelxlvf.h new file mode 100644 index 00000000..ffcae567 --- /dev/null +++ b/src/drivers/net/intelxlvf.h @@ -0,0 +1,86 @@ +#ifndef _INTELXLVF_H +#define _INTELXLVF_H + +/** @file + * + * Intel 40 Gigabit Ethernet virtual function network card driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "intelxl.h" + +/** BAR size */ +#define INTELXLVF_BAR_SIZE 0x10000 + +/** Transmit Queue Tail Register */ +#define INTELXLVF_QTX_TAIL 0x00000 + +/** Receive Queue Tail Register */ +#define INTELXLVF_QRX_TAIL 0x02000 + +/** VF Interrupt Zero Dynamic Control Register */ +#define INTELXLVF_VFINT_DYN_CTL0 0x5c00 + +/** VF Admin Queue register block */ +#define INTELXLVF_ADMIN 0x6000 + +/** Admin Command Queue Base Address Low Register (offset) */ +#define INTELXLVF_ADMIN_CMD_BAL 0x1c00 + +/** Admin Command Queue Base Address High Register (offset) */ +#define INTELXLVF_ADMIN_CMD_BAH 0x1800 + +/** Admin Command Queue Length Register (offset) */ +#define INTELXLVF_ADMIN_CMD_LEN 0x0800 + +/** Admin Command Queue Head Register (offset) */ +#define INTELXLVF_ADMIN_CMD_HEAD 0x0400 + +/** Admin Command Queue Tail Register (offset) */ +#define INTELXLVF_ADMIN_CMD_TAIL 0x2400 + +/** Admin Event Queue Base Address Low Register (offset) */ +#define INTELXLVF_ADMIN_EVT_BAL 0x0c00 + +/** Admin Event Queue Base Address High Register (offset) */ +#define INTELXLVF_ADMIN_EVT_BAH 0x0000 + +/** 
Admin Event Queue Length Register (offset) */ +#define INTELXLVF_ADMIN_EVT_LEN 0x2000 + +/** Admin Event Queue Head Register (offset) */ +#define INTELXLVF_ADMIN_EVT_HEAD 0x1400 + +/** Admin Event Queue Tail Register (offset) */ +#define INTELXLVF_ADMIN_EVT_TAIL 0x1000 + +/** Maximum time to wait for a VF admin request to complete */ +#define INTELXLVF_ADMIN_MAX_WAIT_MS 2000 + +/** VF Reset Status Register */ +#define INTELXLVF_VFGEN_RSTAT 0x8800 +#define INTELXLVF_VFGEN_RSTAT_VFR_STATE(x) ( (x) & 0x3 ) +#define INTELXLVF_VFGEN_RSTAT_VFR_STATE_ACTIVE 0x2 + +/** Maximum time to wait for reset to complete */ +#define INTELXLVF_RESET_MAX_WAIT_MS 1000 + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v count Number of descriptors + * @v len Length of a single descriptor + * @v tail Tail register offset + */ +static inline __attribute__ (( always_inline)) void +intelxlvf_init_ring ( struct intelxl_ring *ring, unsigned int count, + size_t len, unsigned int tail ) { + + ring->len = ( count * len ); + ring->tail = tail; +} + +#endif /* _INTELXLVF_H */ diff --git a/src/drivers/net/intelxvf.c b/src/drivers/net/intelxvf.c new file mode 100644 index 00000000..fef3024e --- /dev/null +++ b/src/drivers/net/intelxvf.c @@ -0,0 +1,536 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include "intelx.h" +#include "intelxvf.h" + +/** @file + * + * Intel 10 Gigabit Ethernet virtual function network card driver + * + */ + +/****************************************************************************** + * + * Diagnostics + * + ****************************************************************************** + */ + +/** + * Dump statistics + * + * @v intel Intel device + */ +static __attribute__ (( unused )) void +intelxvf_stats ( struct intel_nic *intel ) { + + DBGC ( intel, "INTEL %p TX %d (%#x%08x) RX %d (%#x%08x) multi %d\n", + intel, readl ( intel->regs + INTELXVF_GPTC ), + readl ( intel->regs + INTELXVF_GOTCH ), + readl ( intel->regs + INTELXVF_GOTCL ), + readl ( intel->regs + INTELXVF_GPRC ), + readl ( intel->regs + INTELXVF_GORCH ), + readl ( intel->regs + INTELXVF_GORCL ), + readl ( intel->regs + INTELXVF_MPRC ) ); +} + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v intel Intel device + */ +static void intelxvf_reset ( struct intel_nic *intel ) { + + /* Perform a function-level reset */ + writel ( INTELXVF_CTRL_RST, intel->regs + INTELXVF_CTRL ); +} + +/****************************************************************************** + * + * Link state + * + 
****************************************************************************** + */ + +/** + * Check link state + * + * @v netdev Network device + */ +static void intelxvf_check_link ( struct net_device *netdev ) { + struct intel_nic *intel = netdev->priv; + uint32_t links; + + /* Read link status */ + links = readl ( intel->regs + INTELXVF_LINKS ); + DBGC ( intel, "INTEL %p link status is %08x\n", intel, links ); + + /* Update network device */ + if ( links & INTELXVF_LINKS_UP ) { + netdev_link_up ( netdev ); + } else { + netdev_link_down ( netdev ); + } +} + +/****************************************************************************** + * + * Mailbox messages + * + ****************************************************************************** + */ + +/** + * Send negotiate API version message + * + * @v intel Intel device + * @v version Requested version + * @ret rc Return status code + */ +static int intelxvf_mbox_version ( struct intel_nic *intel, + unsigned int version ) { + union intelvf_msg msg; + int rc; + + /* Send set MTU message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.hdr = INTELXVF_MSG_TYPE_VERSION; + msg.version.version = version; + if ( ( rc = intelvf_mbox_msg ( intel, &msg ) ) != 0 ) { + DBGC ( intel, "INTEL %p negotiate API version failed: %s\n", + intel, strerror ( rc ) ); + return rc; + } + + /* Check response */ + if ( ( msg.hdr & INTELVF_MSG_TYPE_MASK ) != INTELXVF_MSG_TYPE_VERSION ){ + DBGC ( intel, "INTEL %p negotiate API version unexpected " + "response:\n", intel ); + DBGC_HDA ( intel, 0, &msg, sizeof ( msg ) ); + return -EPROTO; + } + + /* Check that this version is supported */ + if ( ! 
( msg.hdr & INTELVF_MSG_ACK ) ) { + DBGC ( intel, "INTEL %p negotiate API version failed\n", + intel ); + return -EPERM; + } + + return 0; +} + +/** + * Get queue configuration + * + * @v intel Intel device + * @v vlan_thing VLAN hand-waving thing to fill in + * @ret rc Return status code + */ +static int intelxvf_mbox_queues ( struct intel_nic *intel, int *vlan_thing ) { + union intelvf_msg msg; + int rc; + + /* Send queue configuration message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.hdr = INTELVF_MSG_TYPE_GET_QUEUES; + if ( ( rc = intelvf_mbox_msg ( intel, &msg ) ) != 0 ) { + DBGC ( intel, "INTEL %p get queue configuration failed: %s\n", + intel, strerror ( rc ) ); + return rc; + } + + /* Check response */ + if ( ( msg.hdr & INTELVF_MSG_TYPE_MASK ) !=INTELVF_MSG_TYPE_GET_QUEUES){ + DBGC ( intel, "INTEL %p get queue configuration unexpected " + "response:\n", intel ); + DBGC_HDA ( intel, 0, &msg, sizeof ( msg ) ); + return -EPROTO; + } + + /* Check that we were allowed to get the queue configuration */ + if ( ! 
( msg.hdr & INTELVF_MSG_ACK ) ) { + DBGC ( intel, "INTEL %p get queue configuration refused\n", + intel ); + return -EPERM; + } + + /* Extract VLAN hand-waving thing */ + *vlan_thing = msg.queues.vlan_thing; + + return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int intelxvf_open ( struct net_device *netdev ) { + struct intel_nic *intel = netdev->priv; + uint32_t rxdctl; + uint32_t srrctl; + uint32_t dca_rxctrl; + unsigned int i; + int vlan_thing; + int rc; + + /* Reset the function */ + intelxvf_reset ( intel ); + + /* Notify PF that reset is complete */ + if ( ( rc = intelvf_mbox_reset ( intel, NULL ) ) != 0 ) { + DBGC ( intel, "INTEL %p could not reset: %s\n", + intel, strerror ( rc ) ); + goto err_mbox_reset; + } + + /* Negotiate API version 1.1. If we do not negotiate at least + * this version, then the RX datapath will remain disabled if + * the PF has jumbo frames enabled. + * + * Ignore failures, since the host may not actually support + * v1.1. 
+ */ + intelxvf_mbox_version ( intel, INTELXVF_MSG_VERSION_1_1 ); + + /* Set MAC address */ + if ( ( rc = intelvf_mbox_set_mac ( intel, netdev->ll_addr ) ) != 0 ) { + DBGC ( intel, "INTEL %p could not set MAC address: %s\n", + intel, strerror ( rc ) ); + goto err_mbox_set_mac; + } + + /* Set MTU */ + if ( ( rc = intelvf_mbox_set_mtu ( intel, netdev->max_pkt_len ) ) != 0){ + DBGC ( intel, "INTEL %p could not set MTU %zd: %s\n", + intel, netdev->max_pkt_len, strerror ( rc ) ); + goto err_mbox_set_mtu; + } + + /* Reset all descriptor rings */ + for ( i = 0 ; i < INTELXVF_NUM_RINGS ; i++ ) { + intel_reset_ring ( intel, INTELXVF_TD ( i ) ); + intel_reset_ring ( intel, INTELXVF_RD ( i ) ); + } + + /* Reset packet split receive type register */ + writel ( 0, intel->regs + INTELXVF_PSRTYPE ); + + /* Get queue configuration. Ignore failures, since the host + * may not support this message. + */ + vlan_thing = 0; + intelxvf_mbox_queues ( intel, &vlan_thing ); + if ( vlan_thing ) { + DBGC ( intel, "INTEL %p stripping VLAN tags (thing=%d)\n", + intel, vlan_thing ); + rxdctl = readl ( intel->regs + INTELXVF_RD(0) + INTEL_xDCTL ); + rxdctl |= INTELX_RXDCTL_VME; + writel ( rxdctl, intel->regs + INTELXVF_RD(0) + INTEL_xDCTL ); + } + + /* Create transmit descriptor ring */ + if ( ( rc = intel_create_ring ( intel, &intel->tx ) ) != 0 ) + goto err_create_tx; + + /* Create receive descriptor ring */ + if ( ( rc = intel_create_ring ( intel, &intel->rx ) ) != 0 ) + goto err_create_rx; + + /* Allocate interrupt vectors */ + writel ( ( INTELXVF_IVAR_RX0_DEFAULT | INTELXVF_IVAR_RX0_VALID | + INTELXVF_IVAR_TX0_DEFAULT | INTELXVF_IVAR_TX0_VALID ), + intel->regs + INTELXVF_IVAR ); + writel ( ( INTELXVF_IVARM_MBOX_DEFAULT | INTELXVF_IVARM_MBOX_VALID ), + intel->regs + INTELXVF_IVARM ); + + /* Configure receive buffer sizes and set receive descriptor type */ + srrctl = readl ( intel->regs + INTELXVF_SRRCTL ); + srrctl &= ~( INTELXVF_SRRCTL_BSIZE_MASK | + INTELXVF_SRRCTL_BHDRSIZE_MASK | + 
INTELXVF_SRRCTL_DESCTYPE_MASK ); + srrctl |= ( INTELXVF_SRRCTL_BSIZE_DEFAULT | + INTELXVF_SRRCTL_BHDRSIZE_DEFAULT | + INTELXVF_SRRCTL_DESCTYPE_DEFAULT | + INTELXVF_SRRCTL_DROP_EN ); + writel ( srrctl, intel->regs + INTELXVF_SRRCTL ); + + /* Clear "must-be-zero" bit for direct cache access (DCA). We + * leave DCA disabled anyway, but if we do not clear this bit + * then the received packets contain garbage data. + */ + dca_rxctrl = readl ( intel->regs + INTELXVF_DCA_RXCTRL ); + dca_rxctrl &= ~INTELXVF_DCA_RXCTRL_MUST_BE_ZERO; + writel ( dca_rxctrl, intel->regs + INTELXVF_DCA_RXCTRL ); + + /* Fill receive ring */ + intel_refill_rx ( intel ); + + /* Update link state */ + intelxvf_check_link ( netdev ); + + return 0; + + intel_destroy_ring ( intel, &intel->rx ); + err_create_rx: + intel_destroy_ring ( intel, &intel->tx ); + err_create_tx: + err_mbox_set_mtu: + err_mbox_set_mac: + err_mbox_reset: + intelxvf_reset ( intel ); + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void intelxvf_close ( struct net_device *netdev ) { + struct intel_nic *intel = netdev->priv; + + /* Destroy receive descriptor ring */ + intel_destroy_ring ( intel, &intel->rx ); + + /* Discard any unused receive buffers */ + intel_empty_rx ( intel ); + + /* Destroy transmit descriptor ring */ + intel_destroy_ring ( intel, &intel->tx ); + + /* Reset the function */ + intelxvf_reset ( intel ); +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void intelxvf_poll ( struct net_device *netdev ) { + struct intel_nic *intel = netdev->priv; + uint32_t eicr; + int rc; + + /* Check for and acknowledge interrupts */ + eicr = readl ( intel->regs + INTELXVF_EICR ); + if ( ! 
eicr ) + return; + + /* Poll for TX completions, if applicable */ + if ( eicr & INTELXVF_EIRQ_TX0 ) + intel_poll_tx ( netdev ); + + /* Poll for RX completions, if applicable */ + if ( eicr & INTELXVF_EIRQ_RX0 ) + intel_poll_rx ( netdev ); + + /* Poll for mailbox messages, if applicable */ + if ( eicr & INTELXVF_EIRQ_MBOX ) { + + /* Poll mailbox */ + if ( ( rc = intelvf_mbox_poll ( intel ) ) != 0 ) { + DBGC ( intel, "INTEL %p mailbox poll failed!\n", + intel ); + netdev_rx_err ( netdev, NULL, rc ); + } + + /* Update link state */ + intelxvf_check_link ( netdev ); + } + + /* Refill RX ring */ + intel_refill_rx ( intel ); +} + +/** + * Enable or disable interrupts + * + * @v netdev Network device + * @v enable Interrupts should be enabled + */ +static void intelxvf_irq ( struct net_device *netdev, int enable ) { + struct intel_nic *intel = netdev->priv; + uint32_t mask; + + mask = ( INTELXVF_EIRQ_MBOX | INTELXVF_EIRQ_TX0 | INTELXVF_EIRQ_RX0 ); + if ( enable ) { + writel ( mask, intel->regs + INTELXVF_EIMS ); + } else { + writel ( mask, intel->regs + INTELXVF_EIMC ); + } +} + +/** Network device operations */ +static struct net_device_operations intelxvf_operations = { + .open = intelxvf_open, + .close = intelxvf_close, + .transmit = intel_transmit, + .poll = intelxvf_poll, + .irq = intelxvf_irq, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int intelxvf_probe ( struct pci_device *pci ) { + struct net_device *netdev; + struct intel_nic *intel; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *intel ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &intelxvf_operations ); + intel = netdev->priv; + pci_set_drvdata ( pci, netdev ); + netdev->dev = &pci->dev; + memset ( intel, 0, sizeof ( *intel ) ); + intel_init_mbox ( &intel->mbox, INTELXVF_MBCTRL, INTELXVF_MBMEM ); + intel_init_ring ( &intel->tx, INTEL_NUM_TX_DESC, INTELXVF_TD(0), + intel_describe_tx_adv ); + intel_init_ring ( &intel->rx, INTEL_NUM_RX_DESC, INTELXVF_RD(0), + intel_describe_rx ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + intel->regs = pci_ioremap ( pci, pci->membase, INTELVF_BAR_SIZE ); + if ( ! intel->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Reset the function */ + intelxvf_reset ( intel ); + + /* Send reset message and fetch MAC address */ + if ( ( rc = intelvf_mbox_reset ( intel, netdev->hw_addr ) ) != 0 ) { + DBGC ( intel, "INTEL %p could not reset and fetch MAC: %s\n", + intel, strerror ( rc ) ); + goto err_mbox_reset; + } + + /* Reset the function (since we will not respond to Control + * ("ping") mailbox messages until the network device is opened. 
+ */ + intelxvf_reset ( intel ); + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Set initial link state */ + intelxvf_check_link ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_mbox_reset: + intelxvf_reset ( intel ); + iounmap ( intel->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void intelxvf_remove ( struct pci_device *pci ) { + struct net_device *netdev = pci_get_drvdata ( pci ); + struct intel_nic *intel = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Reset the NIC */ + intelxvf_reset ( intel ); + + /* Free network device */ + iounmap ( intel->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** PCI device IDs */ +static struct pci_device_id intelxvf_nics[] = { + PCI_ROM ( 0x8086, 0x10ed, "82599-vf", "82599 VF", 0 ), + PCI_ROM ( 0x8086, 0x1515, "x540-vf", "X540 VF", 0 ), + PCI_ROM ( 0x8086, 0x1565, "x550-vf", "X550 VF", 0 ), + PCI_ROM ( 0x8086, 0x15a8, "x552-vf", "X552 VF", 0 ), +}; + +/** PCI driver */ +struct pci_driver intelxvf_driver __pci_driver = { + .ids = intelxvf_nics, + .id_count = ( sizeof ( intelxvf_nics ) / sizeof ( intelxvf_nics[0] ) ), + .probe = intelxvf_probe, + .remove = intelxvf_remove, +}; diff --git a/src/drivers/net/intelxvf.h b/src/drivers/net/intelxvf.h new file mode 100644 index 00000000..4663272a --- /dev/null +++ b/src/drivers/net/intelxvf.h @@ -0,0 +1,114 @@ +#ifndef _INTELXVF_H +#define _INTELXVF_H + +/** @file + * + * Intel 10 Gigabit Ethernet virtual function network card driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "intelvf.h" + +/** Control Register */ +#define INTELXVF_CTRL 0x0000UL +#define INTELXVF_CTRL_RST 0x04000000UL /**< Function-level reset */ + +/** Link Status Register */ +#define INTELXVF_LINKS 
0x0010UL +#define INTELXVF_LINKS_UP 0x40000000UL /**< Link up */ + +/** Extended Interrupt Cause Read Register */ +#define INTELXVF_EICR 0x0100UL +#define INTELXVF_EIRQ_RX0 0x00000001UL /**< RX queue 0 (via IVAR) */ +#define INTELXVF_EIRQ_TX0 0x00000002UL /**< TX queue 0 (via IVAR) */ +#define INTELXVF_EIRQ_MBOX 0x00000004UL /**< Mailbox (via IVARM) */ + +/** Extended Interrupt Mask Set/Read Register */ +#define INTELXVF_EIMS 0x0108UL + +/** Extended Interrupt Mask Clear Register */ +#define INTELXVF_EIMC 0x010cUL + +/** Interrupt Vector Allocation Register */ +#define INTELXVF_IVAR 0x0120UL +#define INTELXVF_IVAR_RX0(bit) ( (bit) << 0 ) /**< RX queue 0 allocation */ +#define INTELXVF_IVAR_RX0_DEFAULT INTELXVF_IVAR_RX0 ( 0x00 ) +#define INTELXVF_IVAR_RX0_MASK INTELXVF_IVAR_RX0 ( 0x01 ) +#define INTELXVF_IVAR_RX0_VALID 0x00000080UL /**< RX queue 0 valid */ +#define INTELXVF_IVAR_TX0(bit) ( (bit) << 8 ) /**< TX queue 0 allocation */ +#define INTELXVF_IVAR_TX0_DEFAULT INTELXVF_IVAR_TX0 ( 0x01 ) +#define INTELXVF_IVAR_TX0_MASK INTELXVF_IVAR_TX0 ( 0x01 ) +#define INTELXVF_IVAR_TX0_VALID 0x00008000UL /**< TX queue 0 valid */ + +/** Interrupt Vector Allocation Miscellaneous Register */ +#define INTELXVF_IVARM 0x0140UL +#define INTELXVF_IVARM_MBOX(bit) ( (bit) << 0 ) /**< Mailbox allocation */ +#define INTELXVF_IVARM_MBOX_DEFAULT INTELXVF_IVARM_MBOX ( 0x02 ) +#define INTELXVF_IVARM_MBOX_MASK INTELXVF_IVARM_MBOX ( 0x03 ) +#define INTELXVF_IVARM_MBOX_VALID 0x00000080UL /**< Mailbox valid */ + +/** Mailbox Memory Register Base */ +#define INTELXVF_MBMEM 0x0200UL + +/** Mailbox Control Register */ +#define INTELXVF_MBCTRL 0x02fcUL + +/** Packet Split Receive Type */ +#define INTELXVF_PSRTYPE 0x0300UL + +/** Receive Descriptor register block */ +#define INTELXVF_RD(n) ( 0x1000UL + ( 0x40 * (n) ) ) + +/** RX DCA Control Register */ +#define INTELXVF_DCA_RXCTRL 0x100cUL +#define INTELXVF_DCA_RXCTRL_MUST_BE_ZERO 0x00001000UL /**< Must be zero */ + +/** Split Receive Control 
Register */ +#define INTELXVF_SRRCTL 0x1014UL +#define INTELXVF_SRRCTL_BSIZE(kb) ( (kb) << 0 ) /**< Receive buffer size */ +#define INTELXVF_SRRCTL_BSIZE_DEFAULT INTELXVF_SRRCTL_BSIZE ( 0x02 ) +#define INTELXVF_SRRCTL_BSIZE_MASK INTELXVF_SRRCTL_BSIZE ( 0x1f ) +#define INTELXVF_SRRCTL_BHDRSIZE(kb) ( (kb) << 8 ) /**< Header size */ +#define INTELXVF_SRRCTL_BHDRSIZE_DEFAULT INTELXVF_SRRCTL_BHDRSIZE ( 0x04 ) +#define INTELXVF_SRRCTL_BHDRSIZE_MASK INTELXVF_SRRCTL_BHDRSIZE ( 0x0f ) +#define INTELXVF_SRRCTL_DESCTYPE(typ) ( (typ) << 25 ) /**< Descriptor type */ +#define INTELXVF_SRRCTL_DESCTYPE_DEFAULT INTELXVF_SRRCTL_DESCTYPE ( 0x00 ) +#define INTELXVF_SRRCTL_DESCTYPE_MASK INTELXVF_SRRCTL_DESCTYPE ( 0x07 ) +#define INTELXVF_SRRCTL_DROP_EN 0x10000000UL + +/** Good Packets Received Count */ +#define INTELXVF_GPRC 0x101c + +/** Good Packets Received Count Low */ +#define INTELXVF_GORCL 0x1020 + +/** Good Packets Received Count High */ +#define INTELXVF_GORCH 0x1024 + +/* Multicast Packets Received Count */ +#define INTELXVF_MPRC 0x1034 + +/** Transmit Descriptor register block */ +#define INTELXVF_TD(n) ( 0x2000UL + ( 0x40 * (n) ) ) + +/** Good Packets Transmitted Count */ +#define INTELXVF_GPTC 0x201c + +/** Good Packets Transmitted Count Low */ +#define INTELXVF_GOTCL 0x2020 + +/** Good Packets Transmitted Count High */ +#define INTELXVF_GOTCH 0x2024 + +/** Negotiate API version mailbox message */ +#define INTELXVF_MSG_TYPE_VERSION 0x00000008UL + +/** API version 1.1 */ +#define INTELXVF_MSG_VERSION_1_1 0x00000002UL + +/** Number of queues */ +#define INTELXVF_NUM_RINGS 8 + +#endif /* _INTELXVF_H */ diff --git a/src/drivers/net/lan78xx.c b/src/drivers/net/lan78xx.c new file mode 100644 index 00000000..3f4f21b6 --- /dev/null +++ b/src/drivers/net/lan78xx.c @@ -0,0 +1,409 @@ +/* + * Copyright (C) 2017 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include "lan78xx.h" + +/** @file + * + * Microchip LAN78xx USB Ethernet driver + * + */ + +/****************************************************************************** + * + * MAC address + * + ****************************************************************************** + */ + +/** + * Fetch MAC address from EEPROM + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int lan78xx_eeprom_fetch_mac ( struct smscusb_device *smscusb ) { + uint32_t hw_cfg; + uint32_t orig_hw_cfg; + int rc; + + /* Read original HW_CFG value */ + if ( ( rc = smscusb_readl ( smscusb, LAN78XX_HW_CFG, &hw_cfg ) ) != 0 ) + goto err_read_hw_cfg; + orig_hw_cfg = hw_cfg; + + /* Temporarily disable LED0 and LED1 (which share physical + * pins with EEDO and EECLK respectively). 
+ */ + hw_cfg &= ~( LAN78XX_HW_CFG_LED0_EN | LAN78XX_HW_CFG_LED1_EN ); + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_HW_CFG, hw_cfg ) ) != 0 ) + goto err_write_hw_cfg; + + /* Fetch MAC address from EEPROM */ + if ( ( rc = smscusb_eeprom_fetch_mac ( smscusb, + LAN78XX_E2P_BASE ) ) != 0 ) + goto err_fetch_mac; + + err_fetch_mac: + smscusb_writel ( smscusb, LAN78XX_HW_CFG, orig_hw_cfg ); + err_write_hw_cfg: + err_read_hw_cfg: + return rc; +} + +/** + * Fetch MAC address + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int lan78xx_fetch_mac ( struct smscusb_device *smscusb ) { + struct net_device *netdev = smscusb->netdev; + int rc; + + /* Read MAC address from EEPROM, if present */ + if ( ( rc = lan78xx_eeprom_fetch_mac ( smscusb ) ) == 0 ) + return 0; + + /* Read MAC address from OTP, if present */ + if ( ( rc = smscusb_otp_fetch_mac ( smscusb, LAN78XX_OTP_BASE ) ) == 0 ) + return 0; + + /* Read MAC address from device tree, if present */ + if ( ( rc = smscusb_fdt_fetch_mac ( smscusb ) ) == 0 ) + return 0; + + /* Otherwise, generate a random MAC address */ + eth_random_addr ( netdev->hw_addr ); + DBGC ( smscusb, "LAN78XX %p using random MAC %s\n", + smscusb, eth_ntoa ( netdev->hw_addr ) ); + return 0; +} + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset device + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int lan78xx_reset ( struct smscusb_device *smscusb ) { + uint32_t hw_cfg; + unsigned int i; + int rc; + + /* Reset device */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_HW_CFG, + LAN78XX_HW_CFG_LRST ) ) != 0 ) + return rc; + + /* Wait for reset to complete */ + for ( i = 0 ; i < LAN78XX_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if reset has completed */ + if ( ( rc = smscusb_readl ( smscusb, LAN78XX_HW_CFG, + &hw_cfg ) ) != 0 ) + return rc; 
+ if ( ! ( hw_cfg & LAN78XX_HW_CFG_LRST ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( smscusb, "LAN78XX %p timed out waiting for reset\n", + smscusb ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int lan78xx_open ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + uint32_t usb_cfg0; + int rc; + + /* Clear stored interrupt status */ + smscusb->int_sts = 0; + + /* Configure bulk IN empty response */ + if ( ( rc = smscusb_readl ( smscusb, LAN78XX_USB_CFG0, + &usb_cfg0 ) ) != 0 ) + goto err_usb_cfg0_read; + usb_cfg0 |= LAN78XX_USB_CFG0_BIR; + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_USB_CFG0, + usb_cfg0 ) ) != 0 ) + goto err_usb_cfg0_write; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &smscusb->usbnet ) ) != 0 ) { + DBGC ( smscusb, "LAN78XX %p could not open: %s\n", + smscusb, strerror ( rc ) ); + goto err_open; + } + + /* Configure interrupt endpoint */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_INT_EP_CTL, + ( LAN78XX_INT_EP_CTL_RDFO_EN | + LAN78XX_INT_EP_CTL_PHY_EN ) ) ) != 0 ) + goto err_int_ep_ctl; + + /* Configure bulk IN delay */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_BULK_IN_DLY, + LAN78XX_BULK_IN_DLY_SET ( 0 ) ) ) != 0 ) + goto err_bulk_in_dly; + + /* Enable automatic speed and duplex detection */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_MAC_CR, + ( LAN78XX_MAC_CR_ADP | + LAN78XX_MAC_CR_ADD | + LAN78XX_MAC_CR_ASD ) ) ) != 0 ) + goto err_mac_cr; + + /* Configure receive filters */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_RFE_CTL, + ( LAN78XX_RFE_CTL_AB | + LAN78XX_RFE_CTL_AM | + LAN78XX_RFE_CTL_AU ) ) ) != 0 ) + goto err_rfe_ctl; + + /* Configure receive FIFO */ + if ( ( rc = 
smscusb_writel ( smscusb, LAN78XX_FCT_RX_CTL, + ( LAN78XX_FCT_RX_CTL_EN | + LAN78XX_FCT_RX_CTL_BAD ) ) ) != 0 ) + goto err_fct_rx_ctl; + + /* Configure transmit FIFO */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_FCT_TX_CTL, + LAN78XX_FCT_TX_CTL_EN ) ) != 0 ) + goto err_fct_tx_ctl; + + /* Configure receive datapath */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_MAC_RX, + ( LAN78XX_MAC_RX_MAX_SIZE_DEFAULT | + LAN78XX_MAC_RX_FCS | + LAN78XX_MAC_RX_EN ) ) ) != 0 ) + goto err_mac_rx; + + /* Configure transmit datapath */ + if ( ( rc = smscusb_writel ( smscusb, LAN78XX_MAC_TX, + LAN78XX_MAC_TX_EN ) ) != 0 ) + goto err_mac_tx; + + /* Set MAC address */ + if ( ( rc = smscusb_set_address ( smscusb, + LAN78XX_RX_ADDR_BASE ) ) != 0 ) + goto err_set_address; + + /* Set MAC address perfect filter */ + if ( ( rc = smscusb_set_filter ( smscusb, + LAN78XX_ADDR_FILT_BASE ) ) != 0 ) + goto err_set_filter; + + /* Enable PHY interrupts and update link status */ + if ( ( rc = smscusb_mii_open ( smscusb, LAN78XX_MII_PHY_INTR_MASK, + ( LAN78XX_PHY_INTR_ENABLE | + LAN78XX_PHY_INTR_LINK | + LAN78XX_PHY_INTR_ANEG_ERR | + LAN78XX_PHY_INTR_ANEG_DONE ) ) ) != 0 ) + goto err_mii_open; + + return 0; + + err_mii_open: + err_set_filter: + err_set_address: + err_mac_tx: + err_mac_rx: + err_fct_tx_ctl: + err_fct_rx_ctl: + err_rfe_ctl: + err_mac_cr: + err_bulk_in_dly: + err_int_ep_ctl: + usbnet_close ( &smscusb->usbnet ); + err_open: + err_usb_cfg0_write: + err_usb_cfg0_read: + lan78xx_reset ( smscusb ); + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void lan78xx_close ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + + /* Close USB network device */ + usbnet_close ( &smscusb->usbnet ); + + /* Dump statistics (for debugging) */ + if ( DBG_LOG ) + smsc75xx_dump_statistics ( smscusb ); + + /* Reset device */ + lan78xx_reset ( smscusb ); +} + +/** LAN78xx network device operations */ +static struct 
net_device_operations lan78xx_operations = { + .open = lan78xx_open, + .close = lan78xx_close, + .transmit = smsc75xx_transmit, + .poll = smsc75xx_poll, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int lan78xx_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct net_device *netdev; + struct smscusb_device *smscusb; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *smscusb ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &lan78xx_operations ); + netdev->dev = &func->dev; + smscusb = netdev->priv; + memset ( smscusb, 0, sizeof ( *smscusb ) ); + smscusb_init ( smscusb, netdev, func, &smsc75xx_in_operations ); + smscusb_mii_init ( smscusb, LAN78XX_MII_BASE, + LAN78XX_MII_PHY_INTR_SOURCE ); + usb_refill_init ( &smscusb->usbnet.in, 0, SMSC75XX_IN_MTU, + SMSC75XX_IN_MAX_FILL ); + DBGC ( smscusb, "LAN78XX %p on %s\n", smscusb, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &smscusb->usbnet, config ) ) != 0 ) { + DBGC ( smscusb, "LAN78XX %p could not describe: %s\n", + smscusb, strerror ( rc ) ); + goto err_describe; + } + + /* Reset device */ + if ( ( rc = lan78xx_reset ( smscusb ) ) != 0 ) + goto err_reset; + + /* Read MAC address */ + if ( ( rc = lan78xx_fetch_mac ( smscusb ) ) != 0 ) + goto err_fetch_mac; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, netdev ); + return 0; + + unregister_netdev ( netdev ); + err_register: + err_fetch_mac: + err_reset: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return 
rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void lan78xx_remove ( struct usb_function *func ) { + struct net_device *netdev = usb_func_get_drvdata ( func ); + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** LAN78xx device IDs */ +static struct usb_device_id lan78xx_ids[] = { + { + .name = "lan7800", + .vendor = 0x0424, + .product = 0x7800, + }, + { + .name = "lan7850", + .vendor = 0x0424, + .product = 0x7850, + }, +}; + +/** LAN78xx driver */ +struct usb_driver lan78xx_driver __usb_driver = { + .ids = lan78xx_ids, + .id_count = ( sizeof ( lan78xx_ids ) / sizeof ( lan78xx_ids[0] ) ), + .class = USB_CLASS_ID ( 0xff, 0x00, 0xff ), + .score = USB_SCORE_NORMAL, + .probe = lan78xx_probe, + .remove = lan78xx_remove, +}; diff --git a/src/drivers/net/lan78xx.h b/src/drivers/net/lan78xx.h new file mode 100644 index 00000000..39422aec --- /dev/null +++ b/src/drivers/net/lan78xx.h @@ -0,0 +1,103 @@ +#ifndef _LAN78XX_H +#define _LAN78XX_H + +/** @file + * + * Microchip LAN78xx USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "smscusb.h" +#include "smsc75xx.h" + +/** Hardware configuration register */ +#define LAN78XX_HW_CFG 0x0010 +#define LAN78XX_HW_CFG_LED1_EN 0x00200000UL /**< LED1 enable */ +#define LAN78XX_HW_CFG_LED0_EN 0x00100000UL /**< LED1 enable */ +#define LAN78XX_HW_CFG_LRST 0x00000002UL /**< Soft lite reset */ + +/** Interrupt endpoint control register */ +#define LAN78XX_INT_EP_CTL 0x0098 +#define LAN78XX_INT_EP_CTL_RDFO_EN 0x00400000UL /**< RX FIFO overflow */ +#define LAN78XX_INT_EP_CTL_PHY_EN 0x00020000UL /**< PHY interrupt */ + +/** Bulk IN delay register */ +#define LAN78XX_BULK_IN_DLY 0x0094 +#define LAN78XX_BULK_IN_DLY_SET(ticks) ( (ticks) << 0 ) /**< Delay / 16.7ns */ + +/** EEPROM register base */ +#define LAN78XX_E2P_BASE 0x0040 + +/** USB configuration register 0 */ +#define LAN78XX_USB_CFG0 0x0080 +#define LAN78XX_USB_CFG0_BIR 0x00000040UL 
/**< Bulk IN use NAK */ + +/** Receive filtering engine control register */ +#define LAN78XX_RFE_CTL 0x00b0 +#define LAN78XX_RFE_CTL_AB 0x00000400UL /**< Accept broadcast */ +#define LAN78XX_RFE_CTL_AM 0x00000200UL /**< Accept multicast */ +#define LAN78XX_RFE_CTL_AU 0x00000100UL /**< Accept unicast */ + +/** FIFO controller RX FIFO control register */ +#define LAN78XX_FCT_RX_CTL 0x00c0 +#define LAN78XX_FCT_RX_CTL_EN 0x80000000UL /**< FCT RX enable */ +#define LAN78XX_FCT_RX_CTL_BAD 0x02000000UL /**< Store bad frames */ + +/** FIFO controller TX FIFO control register */ +#define LAN78XX_FCT_TX_CTL 0x00c4 +#define LAN78XX_FCT_TX_CTL_EN 0x80000000UL /**< FCT TX enable */ + +/** MAC control register */ +#define LAN78XX_MAC_CR 0x0100 +#define LAN78XX_MAC_CR_ADP 0x00002000UL /**< Duplex polarity */ +#define LAN78XX_MAC_CR_ADD 0x00001000UL /**< Auto duplex */ +#define LAN78XX_MAC_CR_ASD 0x00000800UL /**< Auto speed */ + +/** MAC receive register */ +#define LAN78XX_MAC_RX 0x0104 +#define LAN78XX_MAC_RX_MAX_SIZE(mtu) ( (mtu) << 16 ) /**< Max frame size */ +#define LAN78XX_MAC_RX_MAX_SIZE_DEFAULT \ + LAN78XX_MAC_RX_MAX_SIZE ( ETH_FRAME_LEN + 4 /* VLAN */ + 4 /* CRC */ ) +#define LAN78XX_MAC_RX_FCS 0x00000010UL /**< FCS stripping */ +#define LAN78XX_MAC_RX_EN 0x00000001UL /**< RX enable */ + +/** MAC transmit register */ +#define LAN78XX_MAC_TX 0x0108 +#define LAN78XX_MAC_TX_EN 0x00000001UL /**< TX enable */ + +/** MAC receive address register base */ +#define LAN78XX_RX_ADDR_BASE 0x0118 + +/** MII register base */ +#define LAN78XX_MII_BASE 0x0120 + +/** PHY interrupt mask MII register */ +#define LAN78XX_MII_PHY_INTR_MASK 25 + +/** PHY interrupt source MII register */ +#define LAN78XX_MII_PHY_INTR_SOURCE 26 + +/** PHY interrupt: global enable */ +#define LAN78XX_PHY_INTR_ENABLE 0x8000 + +/** PHY interrupt: link state change */ +#define LAN78XX_PHY_INTR_LINK 0x2000 + +/** PHY interrupt: auto-negotiation failure */ +#define LAN78XX_PHY_INTR_ANEG_ERR 0x0800 + +/** PHY 
interrupt: auto-negotiation complete */ +#define LAN78XX_PHY_INTR_ANEG_DONE 0x0400 + +/** MAC address perfect filter register base */ +#define LAN78XX_ADDR_FILT_BASE 0x0400 + +/** OTP register base */ +#define LAN78XX_OTP_BASE 0x1000 + +/** Maximum time to wait for reset (in milliseconds) */ +#define LAN78XX_RESET_MAX_WAIT_MS 100 + +#endif /* _LAN78XX_H */ diff --git a/src/drivers/net/ncm.c b/src/drivers/net/ncm.c new file mode 100644 index 00000000..cc07a438 --- /dev/null +++ b/src/drivers/net/ncm.c @@ -0,0 +1,681 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ecm.h" +#include "ncm.h" + +/** @file + * + * CDC-NCM USB Ethernet driver + * + */ + +/** Interrupt completion profiler */ +static struct profiler ncm_intr_profiler __profiler = + { .name = "ncm.intr" }; + +/** Bulk IN completion profiler */ +static struct profiler ncm_in_profiler __profiler = + { .name = "ncm.in" }; + +/** Bulk IN per-datagram profiler */ +static struct profiler ncm_in_datagram_profiler __profiler = + { .name = "ncm.in_dgram" }; + +/** Bulk OUT profiler */ +static struct profiler ncm_out_profiler __profiler = + { .name = "ncm.out" }; + +/****************************************************************************** + * + * CDC-NCM communications interface + * + ****************************************************************************** + */ + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void ncm_intr_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct ncm_device *ncm = container_of ( ep, struct ncm_device, + usbnet.intr ); + struct net_device *netdev = ncm->netdev; + struct usb_setup_packet *message; + size_t len = iob_len ( iobuf ); + + /* Profile completions */ + profile_start ( &ncm_intr_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto ignore; + + /* Ignore packets with errors */ + if ( rc != 0 ) { + DBGC ( ncm, "NCM %p interrupt failed: %s\n", + ncm, strerror ( rc ) ); + DBGC_HDA ( ncm, 0, iobuf->data, iob_len ( iobuf ) ); + goto error; + } + + /* Extract message header */ + if ( len < sizeof ( *message ) ) { + DBGC ( ncm, "NCM %p underlength interrupt:\n", ncm ); + DBGC_HDA ( ncm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + message = iobuf->data; + + /* Parse message header */ + switch ( message->request ) { + + case cpu_to_le16 ( CDC_NETWORK_CONNECTION ) : + if ( message->value ) { + DBGC ( ncm, "NCM %p link up\n", ncm ); + netdev_link_up ( netdev ); + } else { + DBGC ( ncm, "NCM %p link down\n", ncm ); + netdev_link_down ( netdev ); + } + break; + + case cpu_to_le16 ( CDC_CONNECTION_SPEED_CHANGE ) : + /* Ignore */ + break; + + default: + DBGC ( ncm, "NCM %p unrecognised interrupt:\n", ncm ); + DBGC_HDA ( ncm, 0, iobuf->data, iob_len ( iobuf ) ); + goto error; + } + + /* Free I/O buffer */ + free_iob ( iobuf ); + profile_stop ( &ncm_intr_profiler ); + + return; + + error: + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); + return; +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations ncm_intr_operations = { + .complete = ncm_intr_complete, +}; + +/****************************************************************************** + * + * CDC-NCM data interface + * + ****************************************************************************** + */ + +/** + * Prefill bulk IN endpoint + * + * @v ncm CDC-NCM device + * @ret rc Return status code + */ +static int ncm_in_prefill ( struct ncm_device *ncm ) { + struct usb_bus *bus = ncm->bus; + size_t mtu; + unsigned int count; + int rc; + + /* Some devices have a very small number of internal buffers, + * and rely on being able to pack multiple packets into each + * buffer. We therefore want to use large buffers if + * possible. 
However, large allocations have a reasonable + * chance of failure, especially if this is not the first or + * only device to be opened. + * + * We therefore attempt to find a usable buffer size, starting + * large and working downwards until allocation succeeds. + * Smaller buffers will still work, albeit with a higher + * chance of packet loss and so lower overall throughput. + */ + for ( mtu = ncm->mtu ; mtu >= NCM_MIN_NTB_INPUT_SIZE ; mtu >>= 1 ) { + + /* Attempt allocation at this MTU */ + if ( mtu > NCM_MAX_NTB_INPUT_SIZE ) + continue; + if ( mtu > bus->mtu ) + continue; + count = ( NCM_IN_MIN_SIZE / mtu ); + if ( count < NCM_IN_MIN_COUNT ) + count = NCM_IN_MIN_COUNT; + if ( ( count * mtu ) > NCM_IN_MAX_SIZE ) + continue; + usb_refill_init ( &ncm->usbnet.in, 0, mtu, count ); + if ( ( rc = usb_prefill ( &ncm->usbnet.in ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not prefill %dx %zd-byte " + "buffers for bulk IN\n", ncm, count, mtu ); + continue; + } + + DBGC ( ncm, "NCM %p using %dx %zd-byte buffers for bulk IN\n", + ncm, count, mtu ); + return 0; + } + + DBGC ( ncm, "NCM %p could not prefill bulk IN endpoint\n", ncm ); + return -ENOMEM; +} + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void ncm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct ncm_device *ncm = container_of ( ep, struct ncm_device, + usbnet.in ); + struct net_device *netdev = ncm->netdev; + struct ncm_transfer_header *nth; + struct ncm_datagram_pointer *ndp; + struct ncm_datagram_descriptor *desc; + struct io_buffer *pkt; + unsigned int remaining; + size_t ndp_offset; + size_t ndp_len; + size_t pkt_offset; + size_t pkt_len; + size_t headroom; + size_t len; + + /* Profile overall bulk IN completion */ + profile_start ( &ncm_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto ignore; + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( ncm, "NCM %p bulk IN failed: %s\n", + ncm, strerror ( rc ) ); + goto error; + } + + /* Locate transfer header */ + len = iob_len ( iobuf ); + if ( sizeof ( *nth ) > len ) { + DBGC ( ncm, "NCM %p packet too short for NTH:\n", ncm ); + rc = -EINVAL; + goto error; + } + nth = iobuf->data; + + /* Locate datagram pointer */ + ndp_offset = le16_to_cpu ( nth->offset ); + if ( ( ndp_offset + sizeof ( *ndp ) ) > len ) { + DBGC ( ncm, "NCM %p packet too short for NDP:\n", ncm ); + rc = -EINVAL; + goto error; + } + ndp = ( iobuf->data + ndp_offset ); + ndp_len = le16_to_cpu ( ndp->header_len ); + if ( ndp_len < offsetof ( typeof ( *ndp ), desc ) ) { + DBGC ( ncm, "NCM %p NDP header length too short:\n", ncm ); + rc = -EINVAL; + goto error; + } + if ( ( ndp_offset + ndp_len ) > len ) { + DBGC ( ncm, "NCM %p packet too short for NDP:\n", ncm ); + rc = -EINVAL; + goto error; + } + + /* Process datagrams */ + remaining = ( ( ndp_len - offsetof ( typeof ( *ndp ), desc ) ) / + sizeof ( ndp->desc[0] ) ); + for ( desc = ndp->desc ; remaining && desc->offset ; remaining-- ) { + + /* Profile individual datagrams */ + profile_start ( &ncm_in_datagram_profiler ); + + /* Locate datagram */ + pkt_offset = le16_to_cpu ( desc->offset ); + pkt_len = le16_to_cpu ( desc->len ); + if ( pkt_len < ETH_HLEN ) { + DBGC ( ncm, "NCM %p underlength datagram:\n", ncm ); + rc = -EINVAL; + goto error; + } + if ( ( pkt_offset + pkt_len ) > len ) { + DBGC ( ncm, "NCM %p datagram exceeds packet:\n", ncm ); + rc = -EINVAL; + goto error; + } + + /* Move to next descriptor */ + desc++; + + /* Copy data to a new I/O buffer. Our USB buffers may + * be very large and so we choose to recycle the + * buffers directly rather than attempt reallocation + * while the device is running. We therefore copy the + * data to a new I/O buffer even if this is the only + * (or last) packet within the buffer. 
+ * + * We reserve enough space at the start of each buffer + * to allow for our own transmission header, to + * support protocols such as ARP which may modify the + * received packet and reuse the same I/O buffer for + * transmission. + */ + headroom = ( sizeof ( struct ncm_ntb_header ) + ncm->padding ); + pkt = alloc_iob ( headroom + pkt_len ); + if ( ! pkt ) { + /* Record error and continue */ + netdev_rx_err ( netdev, NULL, -ENOMEM ); + continue; + } + iob_reserve ( pkt, headroom ); + memcpy ( iob_put ( pkt, pkt_len ), + ( iobuf->data + pkt_offset ), pkt_len ); + + /* Strip CRC, if present */ + if ( ndp->magic & cpu_to_le32 ( NCM_DATAGRAM_POINTER_MAGIC_CRC)) + iob_unput ( pkt, 4 /* CRC32 */ ); + + /* Hand off to network stack */ + netdev_rx ( netdev, pkt ); + profile_stop ( &ncm_in_datagram_profiler ); + } + + /* Recycle I/O buffer */ + usb_recycle ( &ncm->usbnet.in, iobuf ); + profile_stop ( &ncm_in_profiler ); + + return; + + error: + /* Record error against network device */ + DBGC_HDA ( ncm, 0, iobuf->data, iob_len ( iobuf ) ); + netdev_rx_err ( netdev, NULL, rc ); + ignore: + usb_recycle ( &ncm->usbnet.in, iobuf ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations ncm_in_operations = { + .complete = ncm_in_complete, +}; + +/** + * Transmit packet + * + * @v ncm CDC-NCM device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int ncm_out_transmit ( struct ncm_device *ncm, + struct io_buffer *iobuf ) { + struct ncm_ntb_header *header; + size_t len = iob_len ( iobuf ); + size_t header_len = ( sizeof ( *header ) + ncm->padding ); + int rc; + + /* Profile transmissions */ + profile_start ( &ncm_out_profiler ); + + /* Prepend header */ + if ( ( rc = iob_ensure_headroom ( iobuf, header_len ) ) != 0 ) + return rc; + header = iob_push ( iobuf, header_len ); + + /* Populate header */ + header->nth.magic = cpu_to_le32 ( NCM_TRANSFER_HEADER_MAGIC ); + header->nth.header_len = cpu_to_le16 ( sizeof ( header->nth ) 
); + header->nth.sequence = cpu_to_le16 ( ncm->sequence ); + header->nth.len = cpu_to_le16 ( iob_len ( iobuf ) ); + header->nth.offset = + cpu_to_le16 ( offsetof ( typeof ( *header ), ndp ) ); + header->ndp.magic = cpu_to_le32 ( NCM_DATAGRAM_POINTER_MAGIC ); + header->ndp.header_len = cpu_to_le16 ( sizeof ( header->ndp ) + + sizeof ( header->desc ) ); + header->ndp.offset = cpu_to_le16 ( 0 ); + header->desc[0].offset = cpu_to_le16 ( header_len ); + header->desc[0].len = cpu_to_le16 ( len ); + memset ( &header->desc[1], 0, sizeof ( header->desc[1] ) ); + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &ncm->usbnet.out, iobuf, 0 ) ) != 0 ) + return rc; + + /* Increment sequence number */ + ncm->sequence++; + + profile_stop ( &ncm_out_profiler ); + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void ncm_out_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int rc ) { + struct ncm_device *ncm = container_of ( ep, struct ncm_device, + usbnet.out ); + struct net_device *netdev = ncm->netdev; + + /* Report TX completion */ + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations ncm_out_operations = { + .complete = ncm_out_complete, +}; + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int ncm_open ( struct net_device *netdev ) { + struct ncm_device *ncm = netdev->priv; + struct usb_device *usb = ncm->usb; + struct ncm_set_ntb_input_size size; + int rc; + + /* Reset sequence number */ + ncm->sequence = 0; + + /* Prefill I/O buffers */ + if ( ( rc = ncm_in_prefill ( ncm ) ) != 0 ) + goto err_prefill; + + /* Set maximum 
input size */ + memset ( &size, 0, sizeof ( size ) ); + size.mtu = cpu_to_le32 ( ncm->usbnet.in.len ); + if ( ( rc = usb_control ( usb, NCM_SET_NTB_INPUT_SIZE, 0, + ncm->usbnet.comms, &size, + sizeof ( size ) ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not set input size to %zd: %s\n", + ncm, ncm->usbnet.in.len, strerror ( rc ) ); + goto err_set_ntb_input_size; + } + + /* Set MAC address */ + if ( ( rc = usb_control ( usb, NCM_SET_NET_ADDRESS, 0, + ncm->usbnet.comms, netdev->ll_addr, + netdev->ll_protocol->ll_addr_len ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not set MAC address: %s\n", + ncm, strerror ( rc ) ); + /* Ignore error and continue */ + } + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &ncm->usbnet ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not open: %s\n", + ncm, strerror ( rc ) ); + goto err_open; + } + + return 0; + + usbnet_close ( &ncm->usbnet ); + err_open: + err_set_ntb_input_size: + usb_flush ( &ncm->usbnet.in ); + err_prefill: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void ncm_close ( struct net_device *netdev ) { + struct ncm_device *ncm = netdev->priv; + + /* Close USB network device */ + usbnet_close ( &ncm->usbnet ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int ncm_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct ncm_device *ncm = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = ncm_out_transmit ( ncm, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void ncm_poll ( struct net_device *netdev ) { + struct ncm_device *ncm = netdev->priv; + int rc; + + /* Poll USB bus */ + usb_poll ( ncm->bus ); + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &ncm->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); + +} + +/** CDC-NCM network device operations */ 
+static struct net_device_operations ncm_operations = { + .open = ncm_open, + .close = ncm_close, + .transmit = ncm_transmit, + .poll = ncm_poll, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int ncm_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct net_device *netdev; + struct ncm_device *ncm; + struct usb_interface_descriptor *comms; + struct ecm_ethernet_descriptor *ethernet; + struct ncm_ntb_parameters params; + unsigned int remainder; + unsigned int divisor; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *ncm ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &ncm_operations ); + netdev->dev = &func->dev; + ncm = netdev->priv; + memset ( ncm, 0, sizeof ( *ncm ) ); + ncm->usb = usb; + ncm->bus = usb->port->hub->bus; + ncm->netdev = netdev; + usbnet_init ( &ncm->usbnet, func, &ncm_intr_operations, + &ncm_in_operations, &ncm_out_operations ); + usb_refill_init ( &ncm->usbnet.intr, 0, 0, NCM_INTR_COUNT ); + DBGC ( ncm, "NCM %p on %s\n", ncm, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &ncm->usbnet, config ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not describe: %s\n", + ncm, strerror ( rc ) ); + goto err_describe; + } + + /* Locate Ethernet descriptor */ + comms = usb_interface_descriptor ( config, ncm->usbnet.comms, 0 ); + assert ( comms != NULL ); + ethernet = ecm_ethernet_descriptor ( config, comms ); + if ( ! 
ethernet ) { + DBGC ( ncm, "NCM %p has no Ethernet descriptor\n", ncm ); + rc = -EINVAL; + goto err_ethernet; + } + + /* Fetch MAC address */ + if ( ( rc = ecm_fetch_mac ( usb, ethernet, netdev->hw_addr ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not fetch MAC address: %s\n", + ncm, strerror ( rc ) ); + goto err_fetch_mac; + } + + /* Get NTB parameters */ + if ( ( rc = usb_control ( usb, NCM_GET_NTB_PARAMETERS, 0, + ncm->usbnet.comms, ¶ms, + sizeof ( params ) ) ) != 0 ) { + DBGC ( ncm, "NCM %p could not get NTB parameters: %s\n", + ncm, strerror ( rc ) ); + goto err_ntb_parameters; + } + + /* Get maximum supported input size */ + ncm->mtu = le32_to_cpu ( params.in.mtu ); + DBGC2 ( ncm, "NCM %p maximum IN size is %zd bytes\n", ncm, ncm->mtu ); + + /* Calculate transmit padding */ + divisor = ( params.out.divisor ? + le16_to_cpu ( params.out.divisor ) : 1 ); + remainder = le16_to_cpu ( params.out.remainder ); + ncm->padding = ( ( remainder - sizeof ( struct ncm_ntb_header ) - + ETH_HLEN ) & ( divisor - 1 ) ); + DBGC2 ( ncm, "NCM %p using %zd-byte transmit padding\n", + ncm, ncm->padding ); + assert ( ( ( sizeof ( struct ncm_ntb_header ) + ncm->padding + + ETH_HLEN ) % divisor ) == remainder ); + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, ncm ); + return 0; + + unregister_netdev ( netdev ); + err_register: + err_ntb_parameters: + err_fetch_mac: + err_ethernet: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void ncm_remove ( struct usb_function *func ) { + struct ncm_device *ncm = usb_func_get_drvdata ( func ); + struct net_device *netdev = ncm->netdev; + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** CDC-NCM device IDs */ +static struct usb_device_id ncm_ids[] = { + { + .name = "cdc-ncm", + .vendor = USB_ANY_ID, + 
.product = USB_ANY_ID, + }, +}; + +/** CDC-NCM driver */ +struct usb_driver ncm_driver __usb_driver = { + .ids = ncm_ids, + .id_count = ( sizeof ( ncm_ids ) / sizeof ( ncm_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_CDC, USB_SUBCLASS_CDC_NCM, 0 ), + .score = USB_SCORE_NORMAL, + .probe = ncm_probe, + .remove = ncm_remove, +}; diff --git a/src/drivers/net/ncm.h b/src/drivers/net/ncm.h new file mode 100644 index 00000000..6b0d21cd --- /dev/null +++ b/src/drivers/net/ncm.h @@ -0,0 +1,178 @@ +#ifndef _NCM_H +#define _NCM_H + +/** @file + * + * CDC-NCM USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include "ecm.h" + +/** CDC-NCM subclass */ +#define USB_SUBCLASS_CDC_NCM 0x0d + +/** Get NTB parameters */ +#define NCM_GET_NTB_PARAMETERS \ + ( USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x80 ) ) + +/** NTB datagram parameters */ +struct ncm_ntb_datagram_parameters { + /** Maximum size */ + uint32_t mtu; + /** Alignment divisor */ + uint16_t divisor; + /** Alignment remainder */ + uint16_t remainder; + /** Alignment modulus */ + uint16_t modulus; +} __attribute__ (( packed )); + +/** NTB parameters */ +struct ncm_ntb_parameters { + /** Length */ + uint16_t len; + /** Supported formats */ + uint16_t formats; + /** IN datagram parameters */ + struct ncm_ntb_datagram_parameters in; + /** Reserved */ + uint16_t reserved; + /** OUT datagram parameters */ + struct ncm_ntb_datagram_parameters out; + /** Maximum number of datagrams per OUT NTB */ + uint16_t max; +} __attribute__ (( packed )); + +/** Set MAC address */ +#define NCM_SET_NET_ADDRESS \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x82 ) ) + +/** Set NTB input size */ +#define NCM_SET_NTB_INPUT_SIZE \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x86 ) ) + +/** Set NTB input size */ +struct ncm_set_ntb_input_size { + /** Maximum size */ + uint32_t 
mtu; +} __attribute__ (( packed )); + +/** Minimum allowed NTB input size */ +#define NCM_MIN_NTB_INPUT_SIZE 2048 + +/** Maximum allowed NTB input size (16-bit) */ +#define NCM_MAX_NTB_INPUT_SIZE 65536 + +/** CDC-NCM transfer header (16-bit) */ +struct ncm_transfer_header { + /** Signature */ + uint32_t magic; + /** Header length */ + uint16_t header_len; + /** Sequence number */ + uint16_t sequence; + /** Total length */ + uint16_t len; + /** Offset of first datagram pointer */ + uint16_t offset; +} __attribute__ (( packed )); + +/** CDC-NCM transfer header magic */ +#define NCM_TRANSFER_HEADER_MAGIC 0x484d434eUL + +/** CDC-NCM datagram descriptor (16-bit) */ +struct ncm_datagram_descriptor { + /** Starting offset */ + uint16_t offset; + /** Length */ + uint16_t len; +} __attribute__ (( packed )); + +/** CDC-NCM datagram pointer (16-bit) */ +struct ncm_datagram_pointer { + /** Signature */ + uint32_t magic; + /** Header length */ + uint16_t header_len; + /** Offset of next datagram pointer */ + uint16_t offset; + /** Datagram descriptors + * + * Must be terminated by an empty descriptor. + */ + struct ncm_datagram_descriptor desc[0]; +} __attribute__ (( packed )); + +/** CDC-NCM datagram pointer magic */ +#define NCM_DATAGRAM_POINTER_MAGIC 0x304d434eUL + +/** CDC-NCM datagram pointer CRC present flag */ +#define NCM_DATAGRAM_POINTER_MAGIC_CRC 0x01000000UL + +/** NTB constructed for transmitted packets (excluding padding) + * + * This is a policy decision. 
+ */ +struct ncm_ntb_header { + /** Transfer header */ + struct ncm_transfer_header nth; + /** Datagram pointer */ + struct ncm_datagram_pointer ndp; + /** Datagram descriptors */ + struct ncm_datagram_descriptor desc[2]; +} __attribute__ (( packed )); + +/** A CDC-NCM network device */ +struct ncm_device { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** Network device */ + struct net_device *netdev; + /** USB network device */ + struct usbnet_device usbnet; + + /** Maximum supported NTB input size */ + size_t mtu; + /** Transmitted packet sequence number */ + uint16_t sequence; + /** Alignment padding required on transmitted packets */ + size_t padding; +}; + +/** Bulk IN ring minimum buffer count + * + * This is a policy decision. + */ +#define NCM_IN_MIN_COUNT 3 + +/** Bulk IN ring minimum total buffer size + * + * This is a policy decision. + */ +#define NCM_IN_MIN_SIZE 16384 + +/** Bulk IN ring maximum total buffer size + * + * This is a policy decision. + */ +#define NCM_IN_MAX_SIZE 131072 + +/** Interrupt ring buffer count + * + * This is a policy decision. + */ +#define NCM_INTR_COUNT 2 + +#endif /* _NCM_H */ diff --git a/src/drivers/net/netfront.c b/src/drivers/net/netfront.c new file mode 100644 index 00000000..b6205542 --- /dev/null +++ b/src/drivers/net/netfront.c @@ -0,0 +1,953 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "netfront.h" + +/** @file + * + * Xen netfront driver + * + */ + +/* Disambiguate the various error causes */ +#define EIO_NETIF_RSP_ERROR \ + __einfo_error ( EINFO_EIO_NETIF_RSP_ERROR ) +#define EINFO_EIO_NETIF_RSP_ERROR \ + __einfo_uniqify ( EINFO_EIO, -NETIF_RSP_ERROR, \ + "Unspecified network error" ) +#define EIO_NETIF_RSP_DROPPED \ + __einfo_error ( EINFO_EIO_NETIF_RSP_DROPPED ) +#define EINFO_EIO_NETIF_RSP_DROPPED \ + __einfo_uniqify ( EINFO_EIO, -NETIF_RSP_DROPPED, \ + "Packet dropped" ) +#define EIO_NETIF_RSP( status ) \ + EUNIQ ( EINFO_EIO, -(status), \ + EIO_NETIF_RSP_ERROR, EIO_NETIF_RSP_DROPPED ) + +/****************************************************************************** + * + * XenStore interface + * + ****************************************************************************** + */ + +/** + * Reset device + * + * @v netfront Netfront device + * @ret rc Return status code + */ +static int netfront_reset ( struct netfront_nic *netfront ) { + struct xen_device *xendev = netfront->xendev; + int state; + int rc; + + /* Get current backend state */ + if ( ( state = xenbus_backend_state ( xendev ) ) < 0 ) { + rc = state; + DBGC ( netfront, "NETFRONT %s could not read backend state: " + "%s\n", xendev->key, strerror ( rc ) ); + return rc; + } + + /* If the backend is not already in InitWait, then mark + * frontend as Closed to shut 
down the backend. + */ + if ( state != XenbusStateInitWait ) { + + /* Set state to Closed */ + xenbus_set_state ( xendev, XenbusStateClosed ); + + /* Wait for backend to reach Closed */ + if ( ( rc = xenbus_backend_wait ( xendev, + XenbusStateClosed ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s backend did not reach " + "Closed: %s\n", xendev->key, strerror ( rc ) ); + return rc; + } + } + + /* Reset state to Initialising */ + xenbus_set_state ( xendev, XenbusStateInitialising ); + + /* Wait for backend to reach InitWait */ + if ( ( rc = xenbus_backend_wait ( xendev, XenbusStateInitWait ) ) != 0){ + DBGC ( netfront, "NETFRONT %s backend did not reach InitWait: " + "%s\n", xendev->key, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Fetch MAC address + * + * @v netfront Netfront device + * @v hw_addr Hardware address to fill in + * @ret rc Return status code + */ +static int netfront_read_mac ( struct netfront_nic *netfront, void *hw_addr ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + char *mac; + int len; + int rc; + + /* Fetch MAC address */ + if ( ( rc = xenstore_read ( xen, &mac, xendev->key, "mac", NULL ) )!=0){ + DBGC ( netfront, "NETFRONT %s could not read MAC address: %s\n", + xendev->key, strerror ( rc ) ); + goto err_xenstore_read; + } + DBGC2 ( netfront, "NETFRONT %s has MAC address \"%s\"\n", + xendev->key, mac ); + + /* Decode MAC address */ + len = hex_decode ( ':', mac, hw_addr, ETH_ALEN ); + if ( len < 0 ) { + rc = len; + DBGC ( netfront, "NETFRONT %s could not decode MAC address " + "\"%s\": %s\n", xendev->key, mac, strerror ( rc ) ); + goto err_decode; + } + + /* Success */ + rc = 0; + + err_decode: + free ( mac ); + err_xenstore_read: + return rc; +} + +/** + * Write XenStore numeric value + * + * @v netfront Netfront device + * @v subkey Subkey + * @v num Numeric value + * @ret rc Return status code + */ +static int netfront_write_num ( struct netfront_nic *netfront, + const char 
*subkey, unsigned long num ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + int rc; + + /* Write value */ + if ( ( rc = xenstore_write_num ( xen, num, xendev->key, subkey, + NULL ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not set %s=\"%ld\": %s\n", + xendev->key, subkey, num, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Write XenStore flag value + * + * @v netfront Netfront device + * @v subkey Subkey + * @v num Numeric value + * @ret rc Return status code + */ +static int netfront_write_flag ( struct netfront_nic *netfront, + const char *subkey ) { + + return netfront_write_num ( netfront, subkey, 1 ); +} + +/** + * Delete XenStore value + * + * @v netfront Netfront device + * @v subkey Subkey + * @ret rc Return status code + */ +static int netfront_rm ( struct netfront_nic *netfront, const char *subkey ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + int rc; + + /* Remove value */ + if ( ( rc = xenstore_rm ( xen, xendev->key, subkey, NULL ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not delete %s: %s\n", + xendev->key, subkey, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * Events + * + ****************************************************************************** + */ + +/** + * Create event channel + * + * @v netfront Netfront device + * @ret rc Return status code + */ +static int netfront_create_event ( struct netfront_nic *netfront ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + struct evtchn_alloc_unbound alloc_unbound; + struct evtchn_close close; + int xenrc; + int rc; + + /* Allocate event */ + alloc_unbound.dom = DOMID_SELF; + alloc_unbound.remote_dom = xendev->backend_id; + if ( ( xenrc = xenevent_alloc_unbound ( xen, &alloc_unbound ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( 
netfront, "NETFRONT %s could not allocate event: %s\n", + xendev->key, strerror ( rc ) ); + goto err_alloc_unbound; + } + netfront->event.port = alloc_unbound.port; + + /* Publish event channel */ + if ( ( rc = netfront_write_num ( netfront, "event-channel", + netfront->event.port ) ) != 0 ) + goto err_write_num; + + DBGC ( netfront, "NETFRONT %s event-channel=\"%d\"\n", + xendev->key, netfront->event.port ); + return 0; + + netfront_rm ( netfront, "event-channel" ); + err_write_num: + close.port = netfront->event.port; + xenevent_close ( xen, &close ); + err_alloc_unbound: + return rc; +} + +/** + * Send event + * + * @v netfront Netfront device + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +netfront_send_event ( struct netfront_nic *netfront ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + int xenrc; + int rc; + + /* Send event */ + if ( ( xenrc = xenevent_send ( xen, &netfront->event ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( netfront, "NETFRONT %s could not send event: %s\n", + xendev->key, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Destroy event channel + * + * @v netfront Netfront device + */ +static void netfront_destroy_event ( struct netfront_nic *netfront ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + struct evtchn_close close; + + /* Unpublish event channel */ + netfront_rm ( netfront, "event-channel" ); + + /* Close event channel */ + close.port = netfront->event.port; + xenevent_close ( xen, &close ); +} + +/****************************************************************************** + * + * Descriptor rings + * + ****************************************************************************** + */ + +/** + * Create descriptor ring + * + * @v netfront Netfront device + * @v ring Descriptor ring + * @ret rc Return status code + */ +static int netfront_create_ring ( struct netfront_nic 
*netfront, + struct netfront_ring *ring ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + unsigned int i; + int rc; + + /* Initialise buffer ID ring */ + for ( i = 0 ; i < ring->count ; i++ ) { + ring->ids[i] = i; + assert ( ring->iobufs[i] == NULL ); + } + ring->id_prod = 0; + ring->id_cons = 0; + + /* Allocate and initialise shared ring */ + ring->sring.raw = malloc_dma ( PAGE_SIZE, PAGE_SIZE ); + if ( ! ring->sring.raw ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Grant access to shared ring */ + if ( ( rc = xengrant_permit_access ( xen, ring->ref, xendev->backend_id, + 0, ring->sring.raw ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not permit access to " + "%#08lx: %s\n", xendev->key, + virt_to_phys ( ring->sring.raw ), strerror ( rc ) ); + goto err_permit_access; + } + + /* Publish shared ring reference */ + if ( ( rc = netfront_write_num ( netfront, ring->ref_key, + ring->ref ) ) != 0 ) + goto err_write_num; + + DBGC ( netfront, "NETFRONT %s %s=\"%d\" [%08lx,%08lx)\n", + xendev->key, ring->ref_key, ring->ref, + virt_to_phys ( ring->sring.raw ), + ( virt_to_phys ( ring->sring.raw ) + PAGE_SIZE ) ); + return 0; + + netfront_rm ( netfront, ring->ref_key ); + err_write_num: + xengrant_invalidate ( xen, ring->ref ); + err_permit_access: + free_dma ( ring->sring.raw, PAGE_SIZE ); + err_alloc: + return rc; +} + +/** + * Add buffer to descriptor ring + * + * @v netfront Netfront device + * @v ring Descriptor ring + * @v iobuf I/O buffer + * @v id Buffer ID to fill in + * @v ref Grant reference to fill in + * @ret rc Return status code + * + * The caller is responsible for ensuring that there is space in the + * ring. 
+ */ +static int netfront_push ( struct netfront_nic *netfront, + struct netfront_ring *ring, struct io_buffer *iobuf, + uint16_t *id, grant_ref_t *ref ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + unsigned int next_id; + unsigned int next_ref; + int rc; + + /* Sanity check */ + assert ( ! netfront_ring_is_full ( ring ) ); + + /* Allocate buffer ID */ + next_id = ring->ids[ ring->id_prod & ( ring->count - 1 ) ]; + next_ref = ring->refs[next_id]; + + /* Grant access to I/O buffer page. I/O buffers are naturally + * aligned, so we never need to worry about crossing a page + * boundary. + */ + if ( ( rc = xengrant_permit_access ( xen, next_ref, xendev->backend_id, + 0, iobuf->data ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not permit access to " + "%#08lx: %s\n", xendev->key, + virt_to_phys ( iobuf->data ), strerror ( rc ) ); + return rc; + } + + /* Store I/O buffer */ + assert ( ring->iobufs[next_id] == NULL ); + ring->iobufs[next_id] = iobuf; + + /* Consume buffer ID */ + ring->id_prod++; + + /* Return buffer ID and grant reference */ + *id = next_id; + *ref = next_ref; + + return 0; +} + +/** + * Remove buffer from descriptor ring + * + * @v netfront Netfront device + * @v ring Descriptor ring + * @v id Buffer ID + * @ret iobuf I/O buffer + */ +static struct io_buffer * netfront_pull ( struct netfront_nic *netfront, + struct netfront_ring *ring, + unsigned int id ) { + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + struct io_buffer *iobuf; + + /* Sanity check */ + assert ( id < ring->count ); + + /* Revoke access from I/O buffer page */ + xengrant_invalidate ( xen, ring->refs[id] ); + + /* Retrieve I/O buffer */ + iobuf = ring->iobufs[id]; + assert ( iobuf != NULL ); + ring->iobufs[id] = NULL; + + /* Free buffer ID */ + ring->ids[ ( ring->id_cons++ ) & ( ring->count - 1 ) ] = id; + + return iobuf; +} + +/** + * Destroy descriptor ring + * + * @v netfront 
Netfront device + * @v ring Descriptor ring + * @v discard Method used to discard outstanding buffer, or NULL + */ +static void netfront_destroy_ring ( struct netfront_nic *netfront, + struct netfront_ring *ring, + void ( * discard ) ( struct io_buffer * ) ){ + struct xen_device *xendev = netfront->xendev; + struct xen_hypervisor *xen = xendev->xen; + struct io_buffer *iobuf; + unsigned int id; + + /* Flush any outstanding buffers */ + while ( ! netfront_ring_is_empty ( ring ) ) { + id = ring->ids[ ring->id_cons & ( ring->count - 1 ) ]; + iobuf = netfront_pull ( netfront, ring, id ); + if ( discard ) + discard ( iobuf ); + } + + /* Unpublish shared ring reference */ + netfront_rm ( netfront, ring->ref_key ); + + /* Revoke access from shared ring */ + xengrant_invalidate ( xen, ring->ref ); + + /* Free page */ + free_dma ( ring->sring.raw, PAGE_SIZE ); + ring->sring.raw = NULL; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Refill receive descriptor ring + * + * @v netdev Network device + */ +static void netfront_refill_rx ( struct net_device *netdev ) { + struct netfront_nic *netfront = netdev->priv; + struct xen_device *xendev = netfront->xendev; + struct io_buffer *iobuf; + struct netif_rx_request *request; + unsigned int refilled = 0; + int notify; + int rc; + + /* Refill ring */ + while ( netfront_ring_fill ( &netfront->rx ) < NETFRONT_RX_FILL ) { + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( PAGE_SIZE ); + if ( ! 
iobuf ) { + /* Wait for next refill */ + break; + } + + /* Add to descriptor ring */ + request = RING_GET_REQUEST ( &netfront->rx_fring, + netfront->rx_fring.req_prod_pvt ); + if ( ( rc = netfront_push ( netfront, &netfront->rx, + iobuf, &request->id, + &request->gref ) ) != 0 ) { + netdev_rx_err ( netdev, iobuf, rc ); + break; + } + DBGC2 ( netfront, "NETFRONT %s RX id %d ref %d is %#08lx+%zx\n", + xendev->key, request->id, request->gref, + virt_to_phys ( iobuf->data ), iob_tailroom ( iobuf ) ); + + /* Move to next descriptor */ + netfront->rx_fring.req_prod_pvt++; + refilled++; + + } + + /* Push new descriptors and notify backend if applicable */ + if ( refilled ) { + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY ( &netfront->rx_fring, + notify ); + if ( notify ) + netfront_send_event ( netfront ); + } +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int netfront_open ( struct net_device *netdev ) { + struct netfront_nic *netfront = netdev->priv; + struct xen_device *xendev = netfront->xendev; + int rc; + + /* Ensure device is in a suitable initial state */ + if ( ( rc = netfront_reset ( netfront ) ) != 0 ) + goto err_reset; + + /* Create transmit descriptor ring */ + if ( ( rc = netfront_create_ring ( netfront, &netfront->tx ) ) != 0 ) + goto err_create_tx; + SHARED_RING_INIT ( netfront->tx_sring ); + FRONT_RING_INIT ( &netfront->tx_fring, netfront->tx_sring, PAGE_SIZE ); + assert ( RING_SIZE ( &netfront->tx_fring ) >= netfront->tx.count ); + + /* Create receive descriptor ring */ + if ( ( rc = netfront_create_ring ( netfront, &netfront->rx ) ) != 0 ) + goto err_create_rx; + SHARED_RING_INIT ( netfront->rx_sring ); + FRONT_RING_INIT ( &netfront->rx_fring, netfront->rx_sring, PAGE_SIZE ); + assert ( RING_SIZE ( &netfront->rx_fring ) >= netfront->rx.count ); + + /* Create event channel */ + if ( ( rc = netfront_create_event ( netfront ) ) != 0 ) + goto err_create_event; + + /* "Request" the rx-copy feature. 
Current versions of + * xen_netback.ko will fail silently if this parameter is not + * present. + */ + if ( ( rc = netfront_write_flag ( netfront, "request-rx-copy" ) ) != 0 ) + goto err_request_rx_copy; + + /* Disable checksum offload, since we will always do the work anyway */ + if ( ( rc = netfront_write_flag ( netfront, + "feature-no-csum-offload" ) ) != 0 ) + goto err_feature_no_csum_offload; + + /* Inform backend that we will send notifications for RX requests */ + if ( ( rc = netfront_write_flag ( netfront, + "feature-rx-notify" ) ) != 0 ) + goto err_feature_rx_notify; + + /* Set state to Connected */ + if ( ( rc = xenbus_set_state ( xendev, XenbusStateConnected ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not set state=\"%d\": %s\n", + xendev->key, XenbusStateConnected, strerror ( rc ) ); + goto err_set_state; + } + + /* Wait for backend to connect */ + if ( ( rc = xenbus_backend_wait ( xendev, XenbusStateConnected ) ) !=0){ + DBGC ( netfront, "NETFRONT %s could not connect to backend: " + "%s\n", xendev->key, strerror ( rc ) ); + goto err_backend_wait; + } + + /* Refill receive descriptor ring */ + netfront_refill_rx ( netdev ); + + /* Set link up */ + netdev_link_up ( netdev ); + + return 0; + + err_backend_wait: + netfront_reset ( netfront ); + err_set_state: + netfront_rm ( netfront, "feature-rx-notify" ); + err_feature_rx_notify: + netfront_rm ( netfront, "feature-no-csum-offload" ); + err_feature_no_csum_offload: + netfront_rm ( netfront, "request-rx-copy" ); + err_request_rx_copy: + netfront_destroy_event ( netfront ); + err_create_event: + netfront_destroy_ring ( netfront, &netfront->rx, NULL ); + err_create_rx: + netfront_destroy_ring ( netfront, &netfront->tx, NULL ); + err_create_tx: + err_reset: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void netfront_close ( struct net_device *netdev ) { + struct netfront_nic *netfront = netdev->priv; + struct xen_device *xendev = netfront->xendev; + int 
rc; + + /* Reset device, thereby ensuring that grant references are no + * longer in use, etc. + */ + if ( ( rc = netfront_reset ( netfront ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not disconnect from " + "backend: %s\n", xendev->key, strerror ( rc ) ); + /* Things will probably go _very_ badly wrong if this + * happens, since it means the backend may still write + * to the outstanding RX buffers that we are about to + * free. The best we can do is report the error via + * the link status, but there's a good chance the + * machine will crash soon. + */ + netdev_link_err ( netdev, rc ); + } else { + netdev_link_down ( netdev ); + } + + /* Delete flags */ + netfront_rm ( netfront, "feature-rx-notify" ); + netfront_rm ( netfront, "feature-no-csum-offload" ); + netfront_rm ( netfront, "request-rx-copy" ); + + /* Destroy event channel */ + netfront_destroy_event ( netfront ); + + /* Destroy receive descriptor ring, freeing any outstanding + * I/O buffers. + */ + netfront_destroy_ring ( netfront, &netfront->rx, free_iob ); + + /* Destroy transmit descriptor ring. Leave any outstanding + * I/O buffers to be freed by netdev_tx_flush(). 
+ */ + netfront_destroy_ring ( netfront, &netfront->tx, NULL ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int netfront_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct netfront_nic *netfront = netdev->priv; + struct xen_device *xendev = netfront->xendev; + struct netif_tx_request *request; + int notify; + int rc; + + /* Check that we have space in the ring */ + if ( netfront_ring_is_full ( &netfront->tx ) ) { + DBGC ( netfront, "NETFRONT %s out of transmit descriptors\n", + xendev->key ); + return -ENOBUFS; + } + + /* Add to descriptor ring */ + request = RING_GET_REQUEST ( &netfront->tx_fring, + netfront->tx_fring.req_prod_pvt ); + if ( ( rc = netfront_push ( netfront, &netfront->tx, iobuf, + &request->id, &request->gref ) ) != 0 ) { + return rc; + } + request->offset = ( virt_to_phys ( iobuf->data ) & ( PAGE_SIZE - 1 ) ); + request->flags = NETTXF_data_validated; + request->size = iob_len ( iobuf ); + DBGC2 ( netfront, "NETFRONT %s TX id %d ref %d is %#08lx+%zx\n", + xendev->key, request->id, request->gref, + virt_to_phys ( iobuf->data ), iob_len ( iobuf ) ); + + /* Consume descriptor */ + netfront->tx_fring.req_prod_pvt++; + + /* Push new descriptor and notify backend if applicable */ + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY ( &netfront->tx_fring, notify ); + if ( notify ) + netfront_send_event ( netfront ); + + return 0; +} + +/** + * Poll for completed packets + * + * @v netdev Network device + */ +static void netfront_poll_tx ( struct net_device *netdev ) { + struct netfront_nic *netfront = netdev->priv; + struct xen_device *xendev = netfront->xendev; + struct netif_tx_response *response; + struct io_buffer *iobuf; + unsigned int status; + int rc; + + /* Consume any unconsumed responses */ + while ( RING_HAS_UNCONSUMED_RESPONSES ( &netfront->tx_fring ) ) { + + /* Get next response */ + response = RING_GET_RESPONSE ( &netfront->tx_fring, + 
netfront->tx_fring.rsp_cons++ ); + + /* Retrieve from descriptor ring */ + iobuf = netfront_pull ( netfront, &netfront->tx, response->id ); + status = response->status; + if ( status == NETIF_RSP_OKAY ) { + DBGC2 ( netfront, "NETFRONT %s TX id %d complete\n", + xendev->key, response->id ); + netdev_tx_complete ( netdev, iobuf ); + } else { + rc = -EIO_NETIF_RSP ( status ); + DBGC2 ( netfront, "NETFRONT %s TX id %d error %d: %s\n", + xendev->key, response->id, status, + strerror ( rc ) ); + netdev_tx_complete_err ( netdev, iobuf, rc ); + } + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void netfront_poll_rx ( struct net_device *netdev ) { + struct netfront_nic *netfront = netdev->priv; + struct xen_device *xendev = netfront->xendev; + struct netif_rx_response *response; + struct io_buffer *iobuf; + int status; + size_t len; + int rc; + + /* Consume any unconsumed responses */ + while ( RING_HAS_UNCONSUMED_RESPONSES ( &netfront->rx_fring ) ) { + + /* Get next response */ + response = RING_GET_RESPONSE ( &netfront->rx_fring, + netfront->rx_fring.rsp_cons++ ); + + /* Retrieve from descriptor ring */ + iobuf = netfront_pull ( netfront, &netfront->rx, response->id ); + status = response->status; + if ( status >= 0 ) { + len = status; + iob_reserve ( iobuf, response->offset ); + iob_put ( iobuf, len ); + DBGC2 ( netfront, "NETFRONT %s RX id %d complete " + "%#08lx+%zx\n", xendev->key, response->id, + virt_to_phys ( iobuf->data ), len ); + netdev_rx ( netdev, iobuf ); + } else { + rc = -EIO_NETIF_RSP ( status ); + DBGC2 ( netfront, "NETFRONT %s RX id %d error %d: %s\n", + xendev->key, response->id, status, + strerror ( rc ) ); + netdev_rx_err ( netdev, iobuf, rc ); + } + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void netfront_poll ( struct net_device *netdev ) { + + /* Poll for TX completions */ + netfront_poll_tx ( netdev ); + + /* Poll for RX completions */ + 
netfront_poll_rx ( netdev ); + + /* Refill RX descriptor ring */ + netfront_refill_rx ( netdev ); +} + +/** Network device operations */ +static struct net_device_operations netfront_operations = { + .open = netfront_open, + .close = netfront_close, + .transmit = netfront_transmit, + .poll = netfront_poll, +}; + +/****************************************************************************** + * + * Xen device bus interface + * + ****************************************************************************** + */ + +/** + * Probe Xen device + * + * @v xendev Xen device + * @ret rc Return status code + */ +static int netfront_probe ( struct xen_device *xendev ) { + struct xen_hypervisor *xen = xendev->xen; + struct net_device *netdev; + struct netfront_nic *netfront; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *netfront ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &netfront_operations ); + netdev->dev = &xendev->dev; + netfront = netdev->priv; + netfront->xendev = xendev; + DBGC ( netfront, "NETFRONT %s backend=\"%s\" in domain %ld\n", + xendev->key, xendev->backend, xendev->backend_id ); + + /* Allocate grant references and initialise descriptor rings */ + if ( ( rc = xengrant_alloc ( xen, netfront->refs, + NETFRONT_REF_COUNT ) ) != 0 ) { + DBGC ( netfront, "NETFRONT %s could not allocate grant " + "references: %s\n", xendev->key, strerror ( rc ) ); + goto err_grant_alloc; + } + netfront_init_ring ( &netfront->tx, "tx-ring-ref", + netfront->refs[NETFRONT_REF_TX_RING], + NETFRONT_NUM_TX_DESC, netfront->tx_iobufs, + &netfront->refs[NETFRONT_REF_TX_BASE], + netfront->tx_ids ); + netfront_init_ring ( &netfront->rx, "rx-ring-ref", + netfront->refs[NETFRONT_REF_RX_RING], + NETFRONT_NUM_RX_DESC, netfront->rx_iobufs, + &netfront->refs[NETFRONT_REF_RX_BASE], + netfront->rx_ids ); + + /* Fetch MAC address */ + if ( ( rc = netfront_read_mac ( netfront, netdev->hw_addr ) ) != 0 ) + goto 
err_read_mac; + + /* Reset device. Ignore failures; allow the device to be + * registered so that reset errors can be observed by the user + * when attempting to open the device. + */ + netfront_reset ( netfront ); + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Set initial link state */ + netdev_link_down ( netdev ); + + xen_set_drvdata ( xendev, netdev ); + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_read_mac: + xengrant_free ( xen, netfront->refs, NETFRONT_REF_COUNT ); + err_grant_alloc: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove Xen device + * + * @v xendev Xen device + */ +static void netfront_remove ( struct xen_device *xendev ) { + struct net_device *netdev = xen_get_drvdata ( xendev ); + struct netfront_nic *netfront = netdev->priv; + struct xen_hypervisor *xen = xendev->xen; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Free resources */ + xengrant_free ( xen, netfront->refs, NETFRONT_REF_COUNT ); + + /* Free network device */ + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** Xen netfront driver */ +struct xen_driver netfront_driver __xen_driver = { + .name = "netfront", + .type = "vif", + .probe = netfront_probe, + .remove = netfront_remove, +}; diff --git a/src/drivers/net/netfront.h b/src/drivers/net/netfront.h new file mode 100644 index 00000000..c95ed264 --- /dev/null +++ b/src/drivers/net/netfront.h @@ -0,0 +1,178 @@ +#ifndef _NETFRONT_H +#define _NETFRONT_H + +/** @file + * + * Xen netfront driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** Number of transmit ring entries */ +#define NETFRONT_NUM_TX_DESC 16 + +/** Number of receive ring entries */ +#define NETFRONT_NUM_RX_DESC 32 + +/** Receive ring fill level + * + * The xen-netback driver from kernels 3.18 to 4.2 inclusive have a + * bug (CA-163395) which prevents 
packet reception if fewer than 18 + * receive descriptors are available. This was fixed in upstream + * kernel commit d5d4852 ("xen-netback: require fewer guest Rx slots + * when not using GSO"). + * + * We provide 18 receive descriptors to avoid unpleasant silent + * failures on these kernel versions. + */ +#define NETFRONT_RX_FILL 18 + +/** Grant reference indices */ +enum netfront_ref_index { + /** Transmit ring grant reference index */ + NETFRONT_REF_TX_RING = 0, + /** Transmit descriptor grant reference base index */ + NETFRONT_REF_TX_BASE, + /** Receive ring grant reference index */ + NETFRONT_REF_RX_RING = ( NETFRONT_REF_TX_BASE + NETFRONT_NUM_TX_DESC ), + /** Receive descriptor grant reference base index */ + NETFRONT_REF_RX_BASE, + /** Total number of grant references required */ + NETFRONT_REF_COUNT = ( NETFRONT_REF_RX_BASE + NETFRONT_NUM_RX_DESC ) +}; + +/** A netfront descriptor ring */ +struct netfront_ring { + /** Shared ring */ + union { + /** Transmit shared ring */ + netif_tx_sring_t *tx; + /** Receive shared ring */ + netif_rx_sring_t *rx; + /** Raw pointer */ + void *raw; + } sring; + /** Shared ring grant reference key */ + const char *ref_key; + /** Shared ring grant reference */ + grant_ref_t ref; + + /** Maximum number of used descriptors */ + size_t count; + /** I/O buffers, indexed by buffer ID */ + struct io_buffer **iobufs; + /** I/O buffer grant references, indexed by buffer ID */ + grant_ref_t *refs; + + /** Buffer ID ring */ + uint8_t *ids; + /** Buffer ID ring producer counter */ + unsigned int id_prod; + /** Buffer ID ring consumer counter */ + unsigned int id_cons; +}; + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v ref_key Shared ring grant reference key + * @v ref Shared ring grant reference + * @v count Maxium number of used descriptors + * @v iobufs I/O buffers + * @v refs I/O buffer grant references + * @v ids Buffer IDs + */ +static inline __attribute__ (( always_inline )) void +netfront_init_ring 
( struct netfront_ring *ring, const char *ref_key, + grant_ref_t ref, unsigned int count, + struct io_buffer **iobufs, grant_ref_t *refs, + uint8_t *ids ) { + + ring->ref_key = ref_key; + ring->ref = ref; + ring->count = count; + ring->iobufs = iobufs; + ring->refs = refs; + ring->ids = ids; +} + +/** + * Calculate descriptor ring fill level + * + * @v ring Descriptor ring + * @v fill Fill level + */ +static inline __attribute__ (( always_inline )) unsigned int +netfront_ring_fill ( struct netfront_ring *ring ) { + unsigned int fill_level; + + fill_level = ( ring->id_prod - ring->id_cons ); + assert ( fill_level <= ring->count ); + return fill_level; +} + +/** + * Check whether or not descriptor ring is full + * + * @v ring Descriptor ring + * @v is_full Ring is full + */ +static inline __attribute__ (( always_inline )) int +netfront_ring_is_full ( struct netfront_ring *ring ) { + + return ( netfront_ring_fill ( ring ) >= ring->count ); +} + +/** + * Check whether or not descriptor ring is empty + * + * @v ring Descriptor ring + * @v is_empty Ring is empty + */ +static inline __attribute__ (( always_inline )) int +netfront_ring_is_empty ( struct netfront_ring *ring ) { + + return ( netfront_ring_fill ( ring ) == 0 ); +} + +/** A netfront NIC */ +struct netfront_nic { + /** Xen device */ + struct xen_device *xendev; + /** Grant references */ + grant_ref_t refs[NETFRONT_REF_COUNT]; + + /** Transmit ring */ + struct netfront_ring tx; + /** Transmit front ring */ + netif_tx_front_ring_t tx_fring; + /** Transmit I/O buffers */ + struct io_buffer *tx_iobufs[NETFRONT_NUM_TX_DESC]; + /** Transmit I/O buffer IDs */ + uint8_t tx_ids[NETFRONT_NUM_TX_DESC]; + + /** Receive ring */ + struct netfront_ring rx; + /** Receive front ring */ + netif_rx_front_ring_t rx_fring; + /** Receive I/O buffers */ + struct io_buffer *rx_iobufs[NETFRONT_NUM_RX_DESC]; + /** Receive I/O buffer IDs */ + uint8_t rx_ids[NETFRONT_NUM_RX_DESC]; + + /** Event channel */ + struct evtchn_send event; +}; + 
+/** Transmit shared ring field */ +#define tx_sring tx.sring.tx + +/** Receive shared ring field */ +#define rx_sring rx.sring.rx + +#endif /* _NETFRONT_H */ diff --git a/src/drivers/net/netvsc.c b/src/drivers/net/netvsc.c new file mode 100644 index 00000000..5be52fb8 --- /dev/null +++ b/src/drivers/net/netvsc.c @@ -0,0 +1,895 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Hyper-V network virtual service client + * + * The network virtual service client (NetVSC) connects to the network + * virtual service provider (NetVSP) via the Hyper-V virtual machine + * bus (VMBus). It provides a transport layer for RNDIS packets. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "netvsc.h" + +/** + * Send control message and wait for completion + * + * @v netvsc NetVSC device + * @v xrid Relative transaction ID + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +static int netvsc_control ( struct netvsc_device *netvsc, unsigned int xrid, + const void *data, size_t len ) { + uint64_t xid = ( NETVSC_BASE_XID + xrid ); + unsigned int i; + int rc; + + /* Send control message */ + if ( ( rc = vmbus_send_control ( netvsc->vmdev, xid, data, len ) ) !=0){ + DBGC ( netvsc, "NETVSC %s could not send control message: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + /* Record transaction ID */ + netvsc->wait_xrid = xrid; + + /* Wait for operation to complete */ + for ( i = 0 ; i < NETVSC_MAX_WAIT_MS ; i++ ) { + + /* Check for completion */ + if ( ! netvsc->wait_xrid ) + return netvsc->wait_rc; + + /* Poll VMBus device */ + vmbus_poll ( netvsc->vmdev ); + + /* Delay for 1ms */ + mdelay ( 1 ); + } + + DBGC ( netvsc, "NETVSC %s timed out waiting for XRID %d\n", + netvsc->name, xrid ); + vmbus_dump_channel ( netvsc->vmdev ); + return -ETIMEDOUT; +} + +/** + * Handle generic completion + * + * @v netvsc NetVSC device + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +static int netvsc_completed ( struct netvsc_device *netvsc __unused, + const void *data __unused, size_t len __unused ) { + return 0; +} + +/** + * Initialise communication + * + * @v netvsc NetVSC device + * @ret rc Return status code + */ +static int netvsc_initialise ( struct netvsc_device *netvsc ) { + struct netvsc_init_message msg; + int rc; + + /* Construct message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.header.type = cpu_to_le32 ( NETVSC_INIT_MSG ); + msg.min = cpu_to_le32 ( NETVSC_VERSION_1 ); + msg.max = cpu_to_le32 ( NETVSC_VERSION_1 ); + + /* Send message and wait for completion */ + if ( ( rc = netvsc_control ( netvsc, 
NETVSC_INIT_XRID, &msg, + sizeof ( msg ) ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not initialise: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Handle initialisation completion + * + * @v netvsc NetVSC device + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +static int +netvsc_initialised ( struct netvsc_device *netvsc, const void *data, + size_t len ) { + const struct netvsc_init_completion *cmplt = data; + + /* Check completion */ + if ( len < sizeof ( *cmplt ) ) { + DBGC ( netvsc, "NETVSC %s underlength initialisation " + "completion (%zd bytes)\n", netvsc->name, len ); + return -EINVAL; + } + if ( cmplt->header.type != cpu_to_le32 ( NETVSC_INIT_CMPLT ) ) { + DBGC ( netvsc, "NETVSC %s unexpected initialisation completion " + "type %d\n", netvsc->name, + le32_to_cpu ( cmplt->header.type ) ); + return -EPROTO; + } + if ( cmplt->status != cpu_to_le32 ( NETVSC_OK ) ) { + DBGC ( netvsc, "NETVSC %s initialisation failure status %d\n", + netvsc->name, le32_to_cpu ( cmplt->status ) ); + return -EPROTO; + } + + return 0; +} + +/** + * Set NDIS version + * + * @v netvsc NetVSC device + * @ret rc Return status code + */ +static int netvsc_ndis_version ( struct netvsc_device *netvsc ) { + struct netvsc_ndis_version_message msg; + int rc; + + /* Construct message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.header.type = cpu_to_le32 ( NETVSC_NDIS_VERSION_MSG ); + msg.major = cpu_to_le32 ( NETVSC_NDIS_MAJOR ); + msg.minor = cpu_to_le32 ( NETVSC_NDIS_MINOR ); + + /* Send message and wait for completion */ + if ( ( rc = netvsc_control ( netvsc, NETVSC_NDIS_VERSION_XRID, + &msg, sizeof ( msg ) ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not set NDIS version: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Establish data buffer + * + * @v netvsc NetVSC device + * @v buffer Data buffer + * @ret rc Return status code + */ +static int netvsc_establish_buffer ( 
struct netvsc_device *netvsc, + struct netvsc_buffer *buffer ) { + struct netvsc_establish_buffer_message msg; + int rc; + + /* Construct message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.header.type = cpu_to_le32 ( buffer->establish_type ); + msg.gpadl = cpu_to_le32 ( buffer->gpadl ); + msg.pageset = buffer->pages.pageset; /* Already protocol-endian */ + + /* Send message and wait for completion */ + if ( ( rc = netvsc_control ( netvsc, buffer->establish_xrid, &msg, + sizeof ( msg ) ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not establish buffer: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Handle establish receive data buffer completion + * + * @v netvsc NetVSC device + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +static int netvsc_rx_established_buffer ( struct netvsc_device *netvsc, + const void *data, size_t len ) { + const struct netvsc_rx_establish_buffer_completion *cmplt = data; + + /* Check completion */ + if ( len < sizeof ( *cmplt ) ) { + DBGC ( netvsc, "NETVSC %s underlength buffer completion (%zd " + "bytes)\n", netvsc->name, len ); + return -EINVAL; + } + if ( cmplt->header.type != cpu_to_le32 ( NETVSC_RX_ESTABLISH_CMPLT ) ) { + DBGC ( netvsc, "NETVSC %s unexpected buffer completion type " + "%d\n", netvsc->name, le32_to_cpu ( cmplt->header.type)); + return -EPROTO; + } + if ( cmplt->status != cpu_to_le32 ( NETVSC_OK ) ) { + DBGC ( netvsc, "NETVSC %s buffer failure status %d\n", + netvsc->name, le32_to_cpu ( cmplt->status ) ); + return -EPROTO; + } + + return 0; +} + +/** + * Revoke data buffer + * + * @v netvsc NetVSC device + * @v buffer Data buffer + * @ret rc Return status code + */ +static int netvsc_revoke_buffer ( struct netvsc_device *netvsc, + struct netvsc_buffer *buffer ) { + struct netvsc_revoke_buffer_message msg; + int rc; + + /* If the buffer's GPADL is obsolete (i.e. 
was created before + * the most recent Hyper-V reset), then we will never receive + * a response to the revoke message. Since the GPADL is + * already destroyed as far as the hypervisor is concerned, no + * further action is required. + */ + if ( netvsc_is_obsolete ( netvsc ) ) + return 0; + + /* Construct message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.header.type = cpu_to_le32 ( buffer->revoke_type ); + msg.pageset = buffer->pages.pageset; /* Already protocol-endian */ + + /* Send message and wait for completion */ + if ( ( rc = netvsc_control ( netvsc, buffer->revoke_xrid, + &msg, sizeof ( msg ) ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not revoke buffer: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Handle received control packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +static int netvsc_recv_control ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ) { + struct rndis_device *rndis = vmbus_get_drvdata ( vmdev ); + struct netvsc_device *netvsc = rndis->priv; + + DBGC ( netvsc, "NETVSC %s received unsupported control packet " + "(%08llx):\n", netvsc->name, xid ); + DBGC_HDA ( netvsc, 0, data, len ); + return -ENOTSUP; +} + +/** + * Handle received data packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @v list List of I/O buffers + * @ret rc Return status code + */ +static int netvsc_recv_data ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len, + struct list_head *list ) { + struct rndis_device *rndis = vmbus_get_drvdata ( vmdev ); + struct netvsc_device *netvsc = rndis->priv; + const struct netvsc_rndis_message *msg = data; + struct io_buffer *iobuf; + struct io_buffer *tmp; + int rc; + + /* Sanity check */ + if ( len < sizeof ( *msg ) ) { + DBGC ( netvsc, "NETVSC %s received underlength RNDIS packet " + "(%zd 
bytes)\n", netvsc->name, len ); + rc = -EINVAL; + goto err_sanity; + } + if ( msg->header.type != cpu_to_le32 ( NETVSC_RNDIS_MSG ) ) { + DBGC ( netvsc, "NETVSC %s received unexpected RNDIS packet " + "type %d\n", netvsc->name, + le32_to_cpu ( msg->header.type ) ); + rc = -EINVAL; + goto err_sanity; + } + + /* Send completion back to host */ + if ( ( rc = vmbus_send_completion ( vmdev, xid, NULL, 0 ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not send completion: %s\n", + netvsc->name, strerror ( rc ) ); + goto err_completion; + } + + /* Hand off to RNDIS */ + list_for_each_entry_safe ( iobuf, tmp, list, list ) { + list_del ( &iobuf->list ); + rndis_rx ( rndis, iob_disown ( iobuf ) ); + } + + return 0; + + err_completion: + err_sanity: + list_for_each_entry_safe ( iobuf, tmp, list, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } + return rc; +} + +/** + * Handle received completion packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ +static int netvsc_recv_completion ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ) { + struct rndis_device *rndis = vmbus_get_drvdata ( vmdev ); + struct netvsc_device *netvsc = rndis->priv; + struct io_buffer *iobuf; + int ( * completion ) ( struct netvsc_device *netvsc, + const void *data, size_t len ); + unsigned int xrid = ( xid - NETVSC_BASE_XID ); + unsigned int tx_id; + int rc; + + /* Handle transmit completion, if applicable */ + tx_id = ( xrid - NETVSC_TX_BASE_XRID ); + if ( ( tx_id < NETVSC_TX_NUM_DESC ) && + ( ( iobuf = netvsc->tx.iobufs[tx_id] ) != NULL ) ) { + + /* Free buffer ID */ + netvsc->tx.iobufs[tx_id] = NULL; + netvsc->tx.ids[ ( netvsc->tx.id_cons++ ) & + ( netvsc->tx.count - 1 ) ] = tx_id; + + /* Hand back to RNDIS */ + rndis_tx_complete ( rndis, iobuf ); + return 0; + } + + /* Otherwise determine completion handler */ + if ( xrid == NETVSC_INIT_XRID ) { + completion = 
netvsc_initialised; + } else if ( xrid == NETVSC_RX_ESTABLISH_XRID ) { + completion = netvsc_rx_established_buffer; + } else if ( ( netvsc->wait_xrid != 0 ) && + ( xrid == netvsc->wait_xrid ) ) { + completion = netvsc_completed; + } else { + DBGC ( netvsc, "NETVSC %s received unexpected completion " + "(%08llx)\n", netvsc->name, xid ); + return -EPIPE; + } + + /* Hand off to completion handler */ + rc = completion ( netvsc, data, len ); + + /* Record completion handler result if applicable */ + if ( xrid == netvsc->wait_xrid ) { + netvsc->wait_xrid = 0; + netvsc->wait_rc = rc; + } + + return rc; +} + +/** + * Handle received cancellation packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @ret rc Return status code + */ +static int netvsc_recv_cancellation ( struct vmbus_device *vmdev, + uint64_t xid ) { + struct rndis_device *rndis = vmbus_get_drvdata ( vmdev ); + struct netvsc_device *netvsc = rndis->priv; + + DBGC ( netvsc, "NETVSC %s received unsupported cancellation packet " + "(%08llx):\n", netvsc->name, xid ); + return -ENOTSUP; +} + +/** VMBus channel operations */ +static struct vmbus_channel_operations netvsc_channel_operations = { + .recv_control = netvsc_recv_control, + .recv_data = netvsc_recv_data, + .recv_completion = netvsc_recv_completion, + .recv_cancellation = netvsc_recv_cancellation, +}; + +/** + * Poll for completed and received packets + * + * @v rndis RNDIS device + */ +static void netvsc_poll ( struct rndis_device *rndis ) { + struct netvsc_device *netvsc = rndis->priv; + struct vmbus_device *vmdev = netvsc->vmdev; + + /* Poll VMBus device */ + while ( vmbus_has_data ( vmdev ) ) + vmbus_poll ( vmdev ); +} + +/** + * Transmit packet + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + * + * If this method returns success then the RNDIS device must + * eventually report completion via rndis_tx_complete(). 
+ */ +static int netvsc_transmit ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct netvsc_device *netvsc = rndis->priv; + struct rndis_header *header = iobuf->data; + struct netvsc_rndis_message msg; + unsigned int tx_id; + unsigned int xrid; + uint64_t xid; + int rc; + + /* If the device is obsolete (i.e. was opened before the most + * recent Hyper-V reset), then we will never receive transmit + * completions. Fail transmissions immediately to minimise + * the delay in closing and reopening the device. + */ + if ( netvsc_is_obsolete ( netvsc ) ) + return -EPIPE; + + /* Sanity check */ + assert ( iob_len ( iobuf ) >= sizeof ( *header ) ); + assert ( iob_len ( iobuf ) == le32_to_cpu ( header->len ) ); + + /* Check that we have space in the transmit ring */ + if ( netvsc_ring_is_full ( &netvsc->tx ) ) + return rndis_tx_defer ( rndis, iobuf ); + + /* Allocate buffer ID and calculate transaction ID */ + tx_id = netvsc->tx.ids[ netvsc->tx.id_prod & ( netvsc->tx.count - 1 ) ]; + assert ( netvsc->tx.iobufs[tx_id] == NULL ); + xrid = ( NETVSC_TX_BASE_XRID + tx_id ); + xid = ( NETVSC_BASE_XID + xrid ); + + /* Construct message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.header.type = cpu_to_le32 ( NETVSC_RNDIS_MSG ); + msg.channel = ( ( header->type == cpu_to_le32 ( RNDIS_PACKET_MSG ) ) ? 
+ NETVSC_RNDIS_DATA : NETVSC_RNDIS_CONTROL ); + msg.buffer = cpu_to_le32 ( NETVSC_RNDIS_NO_BUFFER ); + + /* Send message */ + if ( ( rc = vmbus_send_data ( netvsc->vmdev, xid, &msg, sizeof ( msg ), + iobuf ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not send RNDIS message: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + /* Store I/O buffer and consume buffer ID */ + netvsc->tx.iobufs[tx_id] = iobuf; + netvsc->tx.id_prod++; + + return 0; +} + +/** + * Cancel transmission + * + * @v netvsc NetVSC device + * @v iobuf I/O buffer + * @v tx_id Transmission ID + */ +static void netvsc_cancel_transmit ( struct netvsc_device *netvsc, + struct io_buffer *iobuf, + unsigned int tx_id ) { + unsigned int xrid; + uint64_t xid; + + /* Send cancellation */ + xrid = ( NETVSC_TX_BASE_XRID + tx_id ); + xid = ( NETVSC_BASE_XID + xrid ); + DBGC ( netvsc, "NETVSC %s cancelling transmission %#x\n", + netvsc->name, tx_id ); + vmbus_send_cancellation ( netvsc->vmdev, xid ); + + /* Report back to RNDIS */ + rndis_tx_complete_err ( netvsc->rndis, iobuf, -ECANCELED ); +} + +/** + * Create descriptor ring + * + * @v netvsc NetVSC device + * @v ring Descriptor ring + * @ret rc Return status code + */ +static int netvsc_create_ring ( struct netvsc_device *netvsc __unused, + struct netvsc_ring *ring ) { + unsigned int i; + + /* Initialise buffer ID ring */ + for ( i = 0 ; i < ring->count ; i++ ) { + ring->ids[i] = i; + assert ( ring->iobufs[i] == NULL ); + } + ring->id_prod = 0; + ring->id_cons = 0; + + return 0; +} + +/** + * Destroy descriptor ring + * + * @v netvsc NetVSC device + * @v ring Descriptor ring + * @v discard Method used to discard outstanding buffer, or NULL + */ +static void netvsc_destroy_ring ( struct netvsc_device *netvsc, + struct netvsc_ring *ring, + void ( * discard ) ( struct netvsc_device *, + struct io_buffer *, + unsigned int ) ) { + struct io_buffer *iobuf; + unsigned int i; + + /* Flush any outstanding buffers */ + for ( i = 0 ; i < ring->count ; i++ 
) { + iobuf = ring->iobufs[i]; + if ( ! iobuf ) + continue; + ring->iobufs[i] = NULL; + ring->ids[ ( ring->id_cons++ ) & ( ring->count - 1 ) ] = i; + if ( discard ) + discard ( netvsc, iobuf, i ); + } + + /* Sanity check */ + assert ( netvsc_ring_is_empty ( ring ) ); +} + +/** + * Copy data from data buffer + * + * @v pages Transfer page set + * @v data Data buffer + * @v offset Offset within page set + * @v len Length within page set + * @ret rc Return status code + */ +static int netvsc_buffer_copy ( struct vmbus_xfer_pages *pages, void *data, + size_t offset, size_t len ) { + struct netvsc_buffer *buffer = + container_of ( pages, struct netvsc_buffer, pages ); + + /* Sanity check */ + if ( ( offset > buffer->len ) || ( len > ( buffer->len - offset ) ) ) + return -ERANGE; + + /* Copy data from buffer */ + copy_from_user ( data, buffer->data, offset, len ); + + return 0; +} + +/** Transfer page set operations */ +static struct vmbus_xfer_pages_operations netvsc_xfer_pages_operations = { + .copy = netvsc_buffer_copy, +}; + +/** + * Create data buffer + * + * @v netvsc NetVSC device + * @v buffer Data buffer + * @ret rc Return status code + */ +static int netvsc_create_buffer ( struct netvsc_device *netvsc, + struct netvsc_buffer *buffer ) { + struct vmbus_device *vmdev = netvsc->vmdev; + int gpadl; + int rc; + + /* Allocate receive buffer */ + buffer->data = umalloc ( buffer->len ); + if ( ! 
buffer->data ) { + DBGC ( netvsc, "NETVSC %s could not allocate %zd-byte buffer\n", + netvsc->name, buffer->len ); + rc = -ENOMEM; + goto err_alloc; + } + + /* Establish GPA descriptor list */ + gpadl = vmbus_establish_gpadl ( vmdev, buffer->data, buffer->len ); + if ( gpadl < 0 ) { + rc = gpadl; + DBGC ( netvsc, "NETVSC %s could not establish GPADL: %s\n", + netvsc->name, strerror ( rc ) ); + goto err_establish_gpadl; + } + buffer->gpadl = gpadl; + + /* Register transfer page set */ + if ( ( rc = vmbus_register_pages ( vmdev, &buffer->pages ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not register transfer pages: " + "%s\n", netvsc->name, strerror ( rc ) ); + goto err_register_pages; + } + + return 0; + + vmbus_unregister_pages ( vmdev, &buffer->pages ); + err_register_pages: + vmbus_gpadl_teardown ( vmdev, gpadl ); + err_establish_gpadl: + ufree ( buffer->data ); + err_alloc: + return rc; +} + +/** + * Destroy data buffer + * + * @v netvsc NetVSC device + * @v buffer Data buffer + */ +static void netvsc_destroy_buffer ( struct netvsc_device *netvsc, + struct netvsc_buffer *buffer ) { + struct vmbus_device *vmdev = netvsc->vmdev; + int rc; + + /* Unregister transfer pages */ + vmbus_unregister_pages ( vmdev, &buffer->pages ); + + /* Tear down GPA descriptor list */ + if ( ( rc = vmbus_gpadl_teardown ( vmdev, buffer->gpadl ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not tear down GPADL: %s\n", + netvsc->name, strerror ( rc ) ); + /* Death is imminent. The host may well continue to + * write to the data buffer. The best we can do is + * leak memory for now and hope that the host doesn't + * write to this region after we load an OS. 
+ */ + return; + } + + /* Free buffer */ + ufree ( buffer->data ); +} + +/** + * Open device + * + * @v rndis RNDIS device + * @ret rc Return status code + */ +static int netvsc_open ( struct rndis_device *rndis ) { + struct netvsc_device *netvsc = rndis->priv; + int rc; + + /* Initialise receive buffer */ + if ( ( rc = netvsc_create_buffer ( netvsc, &netvsc->rx ) ) != 0 ) + goto err_create_rx; + + /* Open channel */ + if ( ( rc = vmbus_open ( netvsc->vmdev, &netvsc_channel_operations, + PAGE_SIZE, PAGE_SIZE, NETVSC_MTU ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not open VMBus: %s\n", + netvsc->name, strerror ( rc ) ); + goto err_vmbus_open; + } + + /* Initialise communication with NetVSP */ + if ( ( rc = netvsc_initialise ( netvsc ) ) != 0 ) + goto err_initialise; + if ( ( rc = netvsc_ndis_version ( netvsc ) ) != 0 ) + goto err_ndis_version; + + /* Initialise transmit ring */ + if ( ( rc = netvsc_create_ring ( netvsc, &netvsc->tx ) ) != 0 ) + goto err_create_tx; + + /* Establish receive buffer */ + if ( ( rc = netvsc_establish_buffer ( netvsc, &netvsc->rx ) ) != 0 ) + goto err_establish_rx; + + return 0; + + netvsc_revoke_buffer ( netvsc, &netvsc->rx ); + err_establish_rx: + netvsc_destroy_ring ( netvsc, &netvsc->tx, NULL ); + err_create_tx: + err_ndis_version: + err_initialise: + vmbus_close ( netvsc->vmdev ); + err_vmbus_open: + netvsc_destroy_buffer ( netvsc, &netvsc->rx ); + err_create_rx: + return rc; +} + +/** + * Close device + * + * @v rndis RNDIS device + */ +static void netvsc_close ( struct rndis_device *rndis ) { + struct netvsc_device *netvsc = rndis->priv; + + /* Revoke receive buffer */ + netvsc_revoke_buffer ( netvsc, &netvsc->rx ); + + /* Destroy transmit ring */ + netvsc_destroy_ring ( netvsc, &netvsc->tx, netvsc_cancel_transmit ); + + /* Close channel */ + vmbus_close ( netvsc->vmdev ); + + /* Destroy receive buffer */ + netvsc_destroy_buffer ( netvsc, &netvsc->rx ); +} + +/** RNDIS operations */ +static struct rndis_operations 
netvsc_operations = { + .open = netvsc_open, + .close = netvsc_close, + .transmit = netvsc_transmit, + .poll = netvsc_poll, +}; + +/** + * Probe device + * + * @v vmdev VMBus device + * @ret rc Return status code + */ +static int netvsc_probe ( struct vmbus_device *vmdev ) { + struct netvsc_device *netvsc; + struct rndis_device *rndis; + int rc; + + /* Allocate and initialise structure */ + rndis = alloc_rndis ( sizeof ( *netvsc ) ); + if ( ! rndis ) { + rc = -ENOMEM; + goto err_alloc; + } + rndis_init ( rndis, &netvsc_operations ); + rndis->netdev->dev = &vmdev->dev; + netvsc = rndis->priv; + netvsc->vmdev = vmdev; + netvsc->rndis = rndis; + netvsc->name = vmdev->dev.name; + netvsc_init_ring ( &netvsc->tx, NETVSC_TX_NUM_DESC, + netvsc->tx_iobufs, netvsc->tx_ids ); + netvsc_init_buffer ( &netvsc->rx, NETVSC_RX_BUF_PAGESET, + &netvsc_xfer_pages_operations, + NETVSC_RX_ESTABLISH_MSG, NETVSC_RX_ESTABLISH_XRID, + NETVSC_RX_REVOKE_MSG, NETVSC_RX_REVOKE_XRID, + NETVSC_RX_BUF_LEN ); + vmbus_set_drvdata ( vmdev, rndis ); + + /* Register RNDIS device */ + if ( ( rc = register_rndis ( rndis ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not register: %s\n", + netvsc->name, strerror ( rc ) ); + goto err_register; + } + + return 0; + + unregister_rndis ( rndis ); + err_register: + free_rndis ( rndis ); + err_alloc: + return rc; +} + +/** + * Reset device + * + * @v vmdev VMBus device + * @ret rc Return status code + */ +static int netvsc_reset ( struct vmbus_device *vmdev ) { + struct rndis_device *rndis = vmbus_get_drvdata ( vmdev ); + struct netvsc_device *netvsc = rndis->priv; + struct net_device *netdev = rndis->netdev; + int rc; + + /* A closed device holds no NetVSC (or RNDIS) state, so there + * is nothing to reset. + */ + if ( ! 
netdev_is_open ( netdev ) ) + return 0; + + /* Close and reopen device to reset any stale state */ + netdev_close ( netdev ); + if ( ( rc = netdev_open ( netdev ) ) != 0 ) { + DBGC ( netvsc, "NETVSC %s could not reopen: %s\n", + netvsc->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Remove device + * + * @v vmdev VMBus device + */ +static void netvsc_remove ( struct vmbus_device *vmdev ) { + struct rndis_device *rndis = vmbus_get_drvdata ( vmdev ); + + /* Unregister RNDIS device */ + unregister_rndis ( rndis ); + + /* Free RNDIS device */ + free_rndis ( rndis ); +} + +/** NetVSC driver */ +struct vmbus_driver netvsc_driver __vmbus_driver = { + .name = "netvsc", + .type = VMBUS_TYPE ( 0xf8615163, 0xdf3e, 0x46c5, 0x913f, + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e ), + .probe = netvsc_probe, + .reset = netvsc_reset, + .remove = netvsc_remove, +}; diff --git a/src/drivers/net/netvsc.h b/src/drivers/net/netvsc.h new file mode 100644 index 00000000..93192357 --- /dev/null +++ b/src/drivers/net/netvsc.h @@ -0,0 +1,380 @@ +#ifndef _NETVSC_H +#define _NETVSC_H + +/** @file + * + * Hyper-V network virtual service client + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** Maximum supported NetVSC message length */ +#define NETVSC_MTU 512 + +/** Maximum time to wait for a transaction to complete + * + * This is a policy decision. + */ +#define NETVSC_MAX_WAIT_MS 1000 + +/** Number of transmit ring entries + * + * Must be a power of two. This is a policy decision. This value + * must be sufficiently small to guarantee that we never run out of + * space in the VMBus outbound ring buffer. + */ +#define NETVSC_TX_NUM_DESC 32 + +/** RX data buffer page set ID + * + * This is a policy decision. + */ +#define NETVSC_RX_BUF_PAGESET 0xbead + +/** RX data buffer length + * + * This is a policy decision. + */ +#define NETVSC_RX_BUF_LEN ( 16 * PAGE_SIZE ) + +/** Base transaction ID + * + * This is a policy decision. 
+ */ +#define NETVSC_BASE_XID 0x18ae0000UL + +/** Relative transaction IDs */ +enum netvsc_xrid { + /** Transmit descriptors (one per transmit buffer ID) */ + NETVSC_TX_BASE_XRID = 0, + /** Initialisation */ + NETVSC_INIT_XRID = ( NETVSC_TX_BASE_XRID + NETVSC_TX_NUM_DESC ), + /** NDIS version */ + NETVSC_NDIS_VERSION_XRID, + /** Establish receive buffer */ + NETVSC_RX_ESTABLISH_XRID, + /** Revoke receive buffer */ + NETVSC_RX_REVOKE_XRID, +}; + +/** NetVSC status codes */ +enum netvsc_status { + NETVSC_NONE = 0, + NETVSC_OK = 1, + NETVSC_FAIL = 2, + NETVSC_TOO_NEW = 3, + NETVSC_TOO_OLD = 4, + NETVSC_BAD_PACKET = 5, + NETVSC_BUSY = 6, + NETVSC_UNSUPPORTED = 7, +}; + +/** NetVSC message header */ +struct netvsc_header { + /** Type */ + uint32_t type; +} __attribute__ (( packed )); + +/** NetVSC initialisation message */ +#define NETVSC_INIT_MSG 1 + +/** NetVSC initialisation message */ +struct netvsc_init_message { + /** Message header */ + struct netvsc_header header; + /** Minimum supported protocol version */ + uint32_t min; + /** Maximum supported protocol version */ + uint32_t max; + /** Reserved */ + uint8_t reserved[20]; +} __attribute__ (( packed )); + +/** Oldest known NetVSC protocol version */ +#define NETVSC_VERSION_1 2 /* sic */ + +/** NetVSC initialisation completion */ +#define NETVSC_INIT_CMPLT 2 + +/** NetVSC initialisation completion */ +struct netvsc_init_completion { + /** Message header */ + struct netvsc_header header; + /** Protocol version */ + uint32_t version; + /** Maximum memory descriptor list length */ + uint32_t max_mdl_len; + /** Status */ + uint32_t status; + /** Reserved */ + uint8_t reserved[16]; +} __attribute__ (( packed )); + +/** NetVSC NDIS version message */ +#define NETVSC_NDIS_VERSION_MSG 100 + +/** NetVSC NDIS version message */ +struct netvsc_ndis_version_message { + /** Message header */ + struct netvsc_header header; + /** Major version */ + uint32_t major; + /** Minor version */ + uint32_t minor; + /** Reserved */ + 
uint8_t reserved[20]; +} __attribute__ (( packed )); + +/** NetVSC NDIS major version */ +#define NETVSC_NDIS_MAJOR 6 + +/** NetVSC NDIS minor version */ +#define NETVSC_NDIS_MINOR 1 + +/** NetVSC establish receive data buffer message */ +#define NETVSC_RX_ESTABLISH_MSG 101 + +/** NetVSC establish receive data buffer completion */ +#define NETVSC_RX_ESTABLISH_CMPLT 102 + +/** NetVSC revoke receive data buffer message */ +#define NETVSC_RX_REVOKE_MSG 103 + +/** NetVSC establish transmit data buffer message */ +#define NETVSC_TX_ESTABLISH_MSG 104 + +/** NetVSC establish transmit data buffer completion */ +#define NETVSC_TX_ESTABLISH_CMPLT 105 + +/** NetVSC revoke transmit data buffer message */ +#define NETVSC_TX_REVOKE_MSG 106 + +/** NetVSC establish data buffer message */ +struct netvsc_establish_buffer_message { + /** Message header */ + struct netvsc_header header; + /** GPADL ID */ + uint32_t gpadl; + /** Page set ID */ + uint16_t pageset; + /** Reserved */ + uint8_t reserved[22]; +} __attribute__ (( packed )); + +/** NetVSC receive data buffer section */ +struct netvsc_rx_buffer_section { + /** Starting offset */ + uint32_t start; + /** Subsection length */ + uint32_t len; + /** Number of subsections */ + uint32_t count; + /** Ending offset */ + uint32_t end; +} __attribute__ (( packed )); + +/** NetVSC establish receive data buffer completion */ +struct netvsc_rx_establish_buffer_completion { + /** Message header */ + struct netvsc_header header; + /** Status */ + uint32_t status; + /** Number of sections (must be 1) */ + uint32_t count; + /** Section descriptors */ + struct netvsc_rx_buffer_section section[1]; +} __attribute__ (( packed )); + +/** NetVSC establish transmit data buffer completion */ +struct netvsc_tx_establish_buffer_completion { + /** Message header */ + struct netvsc_header header; + /** Status */ + uint32_t status; + /** Section length */ + uint32_t len; +} __attribute__ (( packed )); + +/** NetVSC revoke data buffer message */ +struct 
netvsc_revoke_buffer_message { + /** Message header */ + struct netvsc_header header; + /** Page set ID */ + uint16_t pageset; + /** Reserved */ + uint8_t reserved[26]; +} __attribute__ (( packed )); + +/** NetVSC RNDIS message */ +#define NETVSC_RNDIS_MSG 107 + +/** NetVSC RNDIS message */ +struct netvsc_rndis_message { + /** Message header */ + struct netvsc_header header; + /** RNDIS channel */ + uint32_t channel; + /** Buffer index (or NETVSC_RNDIS_NO_BUFFER) */ + uint32_t buffer; + /** Buffer length */ + uint32_t len; + /** Reserved */ + uint8_t reserved[16]; +} __attribute__ (( packed )); + +/** RNDIS data channel (for RNDIS_PACKET_MSG only) */ +#define NETVSC_RNDIS_DATA 0 + +/** RNDIS control channel (for all other RNDIS messages) */ +#define NETVSC_RNDIS_CONTROL 1 + +/** "No buffer used" index */ +#define NETVSC_RNDIS_NO_BUFFER 0xffffffffUL + +/** A NetVSC descriptor ring */ +struct netvsc_ring { + /** Number of descriptors */ + unsigned int count; + /** I/O buffers, indexed by buffer ID */ + struct io_buffer **iobufs; + /** Buffer ID ring */ + uint8_t *ids; + /** Buffer ID producer counter */ + unsigned int id_prod; + /** Buffer ID consumer counter */ + unsigned int id_cons; +}; + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v count Maximum number of used descriptors + * @v iobufs I/O buffers + * @v ids Buffer IDs + */ +static inline __attribute__ (( always_inline )) void +netvsc_init_ring ( struct netvsc_ring *ring, unsigned int count, + struct io_buffer **iobufs, uint8_t *ids ) { + + ring->count = count; + ring->iobufs = iobufs; + ring->ids = ids; +} + +/** + * Check whether or not descriptor ring is full + * + * @v ring Descriptor ring + * @v is_full Ring is full + */ +static inline __attribute__ (( always_inline )) int +netvsc_ring_is_full ( struct netvsc_ring *ring ) { + unsigned int fill_level; + + fill_level = ( ring->id_prod - ring->id_cons ); + assert ( fill_level <= ring->count ); + return ( fill_level >= ring->count 
); +} + +/** + * Check whether or not descriptor ring is empty + * + * @v ring Descriptor ring + * @v is_empty Ring is empty + */ +static inline __attribute__ (( always_inline )) int +netvsc_ring_is_empty ( struct netvsc_ring *ring ) { + + return ( ring->id_prod == ring->id_cons ); +} + +/** A NetVSC data buffer */ +struct netvsc_buffer { + /** Transfer page set */ + struct vmbus_xfer_pages pages; + /** Establish data buffer message type */ + uint8_t establish_type; + /** Establish data buffer relative transaction ID */ + uint8_t establish_xrid; + /** Revoke data buffer message type */ + uint8_t revoke_type; + /** Revoke data buffer relative transaction ID */ + uint8_t revoke_xrid; + /** Buffer length */ + size_t len; + /** Buffer */ + userptr_t data; + /** GPADL ID */ + unsigned int gpadl; +}; + +/** + * Initialise data buffer + * + * @v buffer Data buffer + * @v pageset Page set ID + * @v op Page set operations + * @v establish_type Establish data buffer message type + * @v establish_xrid Establish data buffer relative transaction ID + * @v revoke_type Revoke data buffer message type + * @v revoke_type Revoke data buffer relative transaction ID + * @v len Required length + */ +static inline __attribute__ (( always_inline )) void +netvsc_init_buffer ( struct netvsc_buffer *buffer, uint16_t pageset, + struct vmbus_xfer_pages_operations *op, + uint8_t establish_type, uint8_t establish_xrid, + uint8_t revoke_type, uint8_t revoke_xrid, size_t len ) { + + buffer->pages.pageset = cpu_to_le16 ( pageset ); + buffer->pages.op = op; + buffer->establish_type = establish_type; + buffer->establish_xrid = establish_xrid; + buffer->revoke_type = revoke_type; + buffer->revoke_xrid = revoke_xrid; + buffer->len = len; +} + +/** A NetVSC device */ +struct netvsc_device { + /** VMBus device */ + struct vmbus_device *vmdev; + /** RNDIS device */ + struct rndis_device *rndis; + /** Name */ + const char *name; + + /** Transmit ring */ + struct netvsc_ring tx; + /** Transmit buffer IDs 
*/ + uint8_t tx_ids[NETVSC_TX_NUM_DESC]; + /** Transmit I/O buffers */ + struct io_buffer *tx_iobufs[NETVSC_TX_NUM_DESC]; + + /** Receive buffer */ + struct netvsc_buffer rx; + + /** Relative transaction ID for current blocking transaction */ + unsigned int wait_xrid; + /** Return status code for current blocking transaction */ + int wait_rc; +}; + +/** + * Check if NetVSC device is obsolete + * + * @v netvsc NetVSC device + * @v is_obsolete NetVSC device is obsolete + * + * Check if NetVSC device is obsolete (i.e. was opened before the most + * recent Hyper-V reset). + */ +static inline __attribute__ (( always_inline )) int +netvsc_is_obsolete ( struct netvsc_device *netvsc ) { + + return vmbus_gpadl_is_obsolete ( netvsc->rx.gpadl ); +} + +#endif /* _NETVSC_H */ diff --git a/src/drivers/net/sfc/ef10_regs.h b/src/drivers/net/sfc/ef10_regs.h new file mode 100644 index 00000000..0510e8ff --- /dev/null +++ b/src/drivers/net/sfc/ef10_regs.h @@ -0,0 +1,364 @@ +/**************************************************************************** + * + * Driver for Solarflare network controllers and boards + * Copyright 2012-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +#ifndef EFX_EF10_REGS_H +#define EFX_EF10_REGS_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** \file ef10_regs.h + * EF10 hardware architecture definitions + * + * EF10 hardware architecture definitions have a name prefix following + * the format: + * + * E__ + * + * The following strings are used: + * + * MMIO register Host memory structure + * Address R + * Bitfield RF SF + * Enumerator FE SE + * + * is the first revision to which the definition applies: + * + * D: Huntington A0 + * + * If the definition has been changed or removed in later revisions + * then is the last revision to which the definition applies; + * otherwise it is "Z". + */ + +/************************************************************************** + * + * EF10 registers and descriptors + * + ************************************************************************** + */ + +/* BIU_HW_REV_ID_REG: */ +#define ER_DZ_BIU_HW_REV_ID 0x00000000 +#define ERF_DZ_HW_REV_ID_LBN 0 +#define ERF_DZ_HW_REV_ID_WIDTH 32 + +/* BIU_MC_SFT_STATUS_REG: */ +#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010 +#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4 +#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8 +#define ERF_DZ_MC_SFT_STATUS_LBN 0 +#define ERF_DZ_MC_SFT_STATUS_WIDTH 32 + +/* BIU_INT_ISR_REG: */ +#define ER_DZ_BIU_INT_ISR 0x00000090 +#define ERF_DZ_ISR_REG_LBN 0 +#define ERF_DZ_ISR_REG_WIDTH 32 + +/* MC_DB_LWRD_REG: */ +#define ER_DZ_MC_DB_LWRD 0x00000200 +#define ERF_DZ_MC_DOORBELL_L_LBN 0 +#define ERF_DZ_MC_DOORBELL_L_WIDTH 32 + +/* MC_DB_HWRD_REG: */ +#define ER_DZ_MC_DB_HWRD 0x00000204 +#define ERF_DZ_MC_DOORBELL_H_LBN 0 +#define ERF_DZ_MC_DOORBELL_H_WIDTH 32 + +/* EVQ_RPTR_REG: */ +#define ER_DZ_EVQ_RPTR 0x00000400 +#define ER_DZ_EVQ_RPTR_STEP 8192 +#define ER_DZ_EVQ_RPTR_ROWS 2048 +#define ERF_DZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_DZ_EVQ_RPTR_LBN 0 +#define ERF_DZ_EVQ_RPTR_WIDTH 15 + +/* EVQ_TMR_REG: */ +#define ER_DZ_EVQ_TMR 0x00000420 +#define ER_DZ_EVQ_TMR_STEP 8192 +#define 
ER_DZ_EVQ_TMR_ROWS 2048 +#define ERF_DZ_TC_TIMER_MODE_LBN 14 +#define ERF_DZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_DZ_TC_TIMER_VAL_LBN 0 +#define ERF_DZ_TC_TIMER_VAL_WIDTH 14 + +/* RX_DESC_UPD_REG: */ +#define ER_DZ_RX_DESC_UPD 0x00000830 +#define ER_DZ_RX_DESC_UPD_STEP 8192 +#define ER_DZ_RX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RX_DESC_WPTR_LBN 0 +#define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + +/* TX_DESC_UPD_REG: */ +#define ER_DZ_TX_DESC_UPD 0x00000a10 +#define ER_DZ_TX_DESC_UPD_STEP 8192 +#define ER_DZ_TX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RSVD_LBN 76 +#define ERF_DZ_RSVD_WIDTH 20 +#define ERF_DZ_TX_DESC_WPTR_LBN 64 +#define ERF_DZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_DZ_TX_DESC_HWORD_LBN 32 +#define ERF_DZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_DZ_TX_DESC_LWORD_LBN 0 +#define ERF_DZ_TX_DESC_LWORD_WIDTH 32 + +/* DRIVER_EV */ +#define ESF_DZ_DRV_CODE_LBN 60 +#define ESF_DZ_DRV_CODE_WIDTH 4 +#define ESF_DZ_DRV_SUB_CODE_LBN 56 +#define ESF_DZ_DRV_SUB_CODE_WIDTH 4 +#define ESE_DZ_DRV_TIMER_EV 3 +#define ESE_DZ_DRV_START_UP_EV 2 +#define ESE_DZ_DRV_WAKE_UP_EV 1 +#define ESF_DZ_DRV_SUB_DATA_LBN 0 +#define ESF_DZ_DRV_SUB_DATA_WIDTH 56 +#define ESF_DZ_DRV_EVQ_ID_LBN 0 +#define ESF_DZ_DRV_EVQ_ID_WIDTH 14 +#define ESF_DZ_DRV_TMR_ID_LBN 0 +#define ESF_DZ_DRV_TMR_ID_WIDTH 14 + +/* EVENT_ENTRY */ +#define ESF_DZ_EV_CODE_LBN 60 +#define ESF_DZ_EV_CODE_WIDTH 4 +#define ESE_DZ_EV_CODE_MCDI_EV 12 +#define ESE_DZ_EV_CODE_DRIVER_EV 5 +#define ESE_DZ_EV_CODE_TX_EV 2 +#define ESE_DZ_EV_CODE_RX_EV 0 +#define ESE_DZ_OTHER other +#define ESF_DZ_EV_DATA_LBN 0 +#define ESF_DZ_EV_DATA_WIDTH 60 + +/* MC_EVENT */ +#define ESF_DZ_MC_CODE_LBN 60 +#define ESF_DZ_MC_CODE_WIDTH 4 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_MC_DROP_EVENT_LBN 58 +#define ESF_DZ_MC_DROP_EVENT_WIDTH 1 +#define ESF_DZ_MC_SOFT_LBN 0 +#define ESF_DZ_MC_SOFT_WIDTH 58 + +/* RX_EVENT */ +#define ESF_DZ_RX_CODE_LBN 60 +#define ESF_DZ_RX_CODE_WIDTH 4 +#define 
ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_RX_DROP_EVENT_LBN 58 +#define ESF_DZ_RX_DROP_EVENT_WIDTH 1 +#define ESF_DZ_RX_EV_RSVD2_LBN 54 +#define ESF_DZ_RX_EV_RSVD2_WIDTH 4 +#define ESF_DZ_RX_EV_SOFT2_LBN 52 +#define ESF_DZ_RX_EV_SOFT2_WIDTH 2 +#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 +#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 +#define ESF_DZ_RX_L4_CLASS_LBN 45 +#define ESF_DZ_RX_L4_CLASS_WIDTH 3 +#define ESE_DZ_L4_CLASS_RSVD7 7 +#define ESE_DZ_L4_CLASS_RSVD6 6 +#define ESE_DZ_L4_CLASS_RSVD5 5 +#define ESE_DZ_L4_CLASS_RSVD4 4 +#define ESE_DZ_L4_CLASS_RSVD3 3 +#define ESE_DZ_L4_CLASS_UDP 2 +#define ESE_DZ_L4_CLASS_TCP 1 +#define ESE_DZ_L4_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_L3_CLASS_LBN 42 +#define ESF_DZ_RX_L3_CLASS_WIDTH 3 +#define ESE_DZ_L3_CLASS_RSVD7 7 +#define ESE_DZ_L3_CLASS_IP6_FRAG 6 +#define ESE_DZ_L3_CLASS_ARP 5 +#define ESE_DZ_L3_CLASS_IP4_FRAG 4 +#define ESE_DZ_L3_CLASS_FCOE 3 +#define ESE_DZ_L3_CLASS_IP6 2 +#define ESE_DZ_L3_CLASS_IP4 1 +#define ESE_DZ_L3_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39 +#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3 +#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7 +#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6 +#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5 +#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4 +#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3 +#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2 +#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1 +#define ESE_DZ_ETH_TAG_CLASS_NONE 0 +#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36 +#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3 +#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2 +#define ESE_DZ_ETH_BASE_CLASS_LLC 1 +#define ESE_DZ_ETH_BASE_CLASS_ETH2 0 +#define ESF_DZ_RX_MAC_CLASS_LBN 35 +#define ESF_DZ_RX_MAC_CLASS_WIDTH 1 +#define ESE_DZ_MAC_CLASS_MCAST 1 +#define ESE_DZ_MAC_CLASS_UCAST 0 +#define ESF_DZ_RX_EV_SOFT1_LBN 32 +#define ESF_DZ_RX_EV_SOFT1_WIDTH 3 +#define ESF_DZ_RX_EV_RSVD1_LBN 31 +#define ESF_DZ_RX_EV_RSVD1_WIDTH 1 +#define ESF_DZ_RX_ABORT_LBN 30 +#define ESF_DZ_RX_ABORT_WIDTH 1 +#define 
ESF_DZ_RX_ECC_ERR_LBN 29 +#define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC1_ERR_LBN 28 +#define ESF_DZ_RX_CRC1_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC0_ERR_LBN 27 +#define ESF_DZ_RX_CRC0_ERR_WIDTH 1 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25 +#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_ECRC_ERR_LBN 24 +#define ESF_DZ_RX_ECRC_ERR_WIDTH 1 +#define ESF_DZ_RX_QLABEL_LBN 16 +#define ESF_DZ_RX_QLABEL_WIDTH 5 +#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15 +#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1 +#define ESF_DZ_RX_CONT_LBN 14 +#define ESF_DZ_RX_CONT_WIDTH 1 +#define ESF_DZ_RX_BYTES_LBN 0 +#define ESF_DZ_RX_BYTES_WIDTH 14 + +/* RX_KER_DESC */ +#define ESF_DZ_RX_KER_RESERVED_LBN 62 +#define ESF_DZ_RX_KER_RESERVED_WIDTH 2 +#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 + +/* TX_CSUM_TSTAMP_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TIMESTAMP_LBN 5 +#define ESF_DZ_TX_TIMESTAMP_WIDTH 1 +#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2 +#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2 +#define ESE_DZ_TX_OPTION_CRC_FCOE 1 +#define ESE_DZ_TX_OPTION_CRC_OFF 0 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1 +#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0 +#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1 + +/* TX_EVENT */ +#define ESF_DZ_TX_CODE_LBN 60 +#define ESF_DZ_TX_CODE_WIDTH 4 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59 +#define 
ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_TX_DROP_EVENT_LBN 58 +#define ESF_DZ_TX_DROP_EVENT_WIDTH 1 +#define ESF_DZ_TX_EV_RSVD_LBN 48 +#define ESF_DZ_TX_EV_RSVD_WIDTH 10 +#define ESF_DZ_TX_SOFT2_LBN 32 +#define ESF_DZ_TX_SOFT2_WIDTH 16 +#define ESF_DZ_TX_CAN_MERGE_LBN 31 +#define ESF_DZ_TX_CAN_MERGE_WIDTH 1 +#define ESF_DZ_TX_SOFT1_LBN 24 +#define ESF_DZ_TX_SOFT1_WIDTH 7 +#define ESF_DZ_TX_QLABEL_LBN 16 +#define ESF_DZ_TX_QLABEL_WIDTH 5 +#define ESF_DZ_TX_DESCR_INDX_LBN 0 +#define ESF_DZ_TX_DESCR_INDX_WIDTH 16 + +/* TX_KER_DESC */ +#define ESF_DZ_TX_KER_TYPE_LBN 63 +#define ESF_DZ_TX_KER_TYPE_WIDTH 1 +#define ESF_DZ_TX_KER_CONT_LBN 62 +#define ESF_DZ_TX_KER_CONT_WIDTH 1 +#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48 + +/* TX_PIO_DESC */ +#define ESF_DZ_TX_PIO_TYPE_LBN 63 +#define ESF_DZ_TX_PIO_TYPE_WIDTH 1 +#define ESF_DZ_TX_PIO_OPT_LBN 60 +#define ESF_DZ_TX_PIO_OPT_WIDTH 3 +#define ESF_DZ_TX_PIO_CONT_LBN 59 +#define ESF_DZ_TX_PIO_CONT_WIDTH 1 +#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 +#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12 +#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12 + +/* TX_TSO_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 +#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + +/*************************************************************************/ + +/* TX_DESC_UPD_REG: Transmit descriptor update register. + * We may write just one dword of these registers. 
+ */ +#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4) +#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32) +#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH + +/* The workaround for bug 35388 requires multiplexing writes through + * the TX_DESC_UPD_DWORD address. + * TX_DESC_UPD: 0ppppppppppp (bit 11 lost) + * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits) + * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost) + */ +#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD +#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8 +#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9 +#define ERF_DD_EVQ_IND_RPTR_LBN 0 +#define ERF_DD_EVQ_IND_RPTR_WIDTH 8 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2 +#define EFE_DD_EVQ_IND_TIMER_FLAGS 3 +#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8 +#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2 +#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0 +#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8 + +/* TX_PIOBUF + * PIO buffer aperture (paged) + */ +#define ER_DZ_TX_PIOBUF 4096 +#define ER_DZ_TX_PIOBUF_SIZE 2048 + +/* RX packet prefix */ +#define ES_DZ_RX_PREFIX_HASH_OFST 0 +#define ES_DZ_RX_PREFIX_VLAN1_OFST 4 +#define ES_DZ_RX_PREFIX_VLAN2_OFST 6 +#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8 +#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10 +#define ES_DZ_RX_PREFIX_SIZE 14 + +#endif /* EFX_EF10_REGS_H */ diff --git a/src/drivers/net/sfc/efx_bitfield.h b/src/drivers/net/sfc/efx_bitfield.h new file mode 100644 index 00000000..f1e9b932 --- /dev/null +++ b/src/drivers/net/sfc/efx_bitfield.h @@ -0,0 +1,555 @@ +/**************************************************************************** + * + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +#ifndef EFX_BITFIELD_H +#define EFX_BITFIELD_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** \file efx_bitfield.h + * Efx bitfield access + * + * Efx NICs make extensive use of bitfields up to 128 bits + * wide. Since there is no native 128-bit datatype on most systems, + * and since 64-bit datatypes are inefficient on 32-bit systems and + * vice versa, we wrap accesses in a way that uses the most efficient + * datatype. + * + * The NICs are PCI devices and therefore little-endian. Since most + * of the quantities that we deal with are DMAed to/from host memory, + * we define our datatypes (efx_oword_t, efx_qword_t and + * efx_dword_t) to be little-endian. + */ + +/* Lowest bit numbers and widths */ +#define EFX_DUMMY_FIELD_LBN 0 +#define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 +#define EFX_DWORD_0_LBN 0 +#define EFX_DWORD_0_WIDTH 32 +#define EFX_DWORD_1_LBN 32 +#define EFX_DWORD_1_WIDTH 32 +#define EFX_DWORD_2_LBN 64 +#define EFX_DWORD_2_WIDTH 32 +#define EFX_DWORD_3_LBN 96 +#define EFX_DWORD_3_WIDTH 32 +#define EFX_QWORD_0_LBN 0 +#define EFX_QWORD_0_WIDTH 64 + +/* Specified attribute (e.g. 
LBN) of the specified field */ +#define EFX_VAL(field, attribute) field ## _ ## attribute +/* Low bit number of the specified field */ +#define EFX_LOW_BIT(field) EFX_VAL(field, LBN) +/* Bit width of the specified field */ +#define EFX_WIDTH(field) EFX_VAL(field, WIDTH) +/* High bit number of the specified field */ +#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 64 bits. + */ +#define EFX_MASK64(width) \ + ((width) == 64 ? ~((u64) 0) : \ + (((((u64) 1) << (width))) - 1)) + +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 32 bits. Use + * EFX_MASK64 for higher width fields. + */ +#define EFX_MASK32(width) \ + ((width) == 32 ? ~((u32) 0) : \ + (((((u32) 1) << (width))) - 1)) + +/** A doubleword (4 byte) datatype - little-endian in HW */ +typedef union efx_dword { + __le32 u32[1]; +} efx_dword_t; + +/** A quadword (8 byte) datatype - little-endian in HW */ +typedef union efx_qword { + __le64 u64[1]; + __le32 u32[2]; + efx_dword_t dword[2]; +} efx_qword_t; + +/** An octword (eight-word, so 16 byte) datatype - little-endian in HW */ +typedef union efx_oword { + __le64 u64[2]; + efx_qword_t qword[2]; + __le32 u32[4]; + efx_dword_t dword[4]; +} efx_oword_t; + +/* Format string and value expanders for printk */ +#define EFX_DWORD_FMT "%08x" +#define EFX_QWORD_FMT "%08x:%08x" +#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" +#define EFX_DWORD_VAL(dword) \ + ((unsigned int) le32_to_cpu((dword).u32[0])) +#define EFX_QWORD_VAL(qword) \ + ((unsigned int) le32_to_cpu((qword).u32[1])), \ + ((unsigned int) le32_to_cpu((qword).u32[0])) +#define EFX_OWORD_VAL(oword) \ + ((unsigned int) le32_to_cpu((oword).u32[3])), \ + ((unsigned int) le32_to_cpu((oword).u32[2])), \ + 
((unsigned int) le32_to_cpu((oword).u32[1])), \ + ((unsigned int) le32_to_cpu((oword).u32[0])) + +/* + * Extract bit field portion [low,high) from the native-endian element + * which contains bits [min,max). + * + * For example, suppose "element" represents the high 32 bits of a + * 64-bit value, and we wish to extract the bits belonging to the bit + * field occupying bits 28-45 of this 64-bit value. + * + * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give + * + * ( element ) << 4 + * + * The result will contain the relevant bits filled in in the range + * [0,high-low), with garbage in bits [high-low+1,...). + */ +#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ + ((low) > (max) || (high) < (min) ? 0 : \ + (low) > (min) ? \ + (native_element) >> ((low) - (min)) : \ + (native_element) << ((min) - (low))) + +/* + * Extract bit field portion [low,high) from the 64-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT64(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) + +/* + * Extract bit field portion [low,high) from the 32-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT32(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) + +#define EFX_EXTRACT_OWORD64(oword, low, high) \ + ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ + EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD64(qword, low, high) \ + (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_OWORD32(oword, low, high) \ + ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ + EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ + EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define 
EFX_EXTRACT_QWORD32(qword, low, high) \ + ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_DWORD(dword, low, high) \ + (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_OWORD_FIELD64(oword, field) \ + EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD64(qword, field) \ + EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_FIELD32(oword, field) \ + EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD32(qword, field) \ + EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_DWORD_FIELD(dword, field) \ + EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_IS_ZERO64(oword) \ + (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) + +#define EFX_QWORD_IS_ZERO64(qword) \ + (((qword).u64[0]) == (__force __le64) 0) + +#define EFX_OWORD_IS_ZERO32(oword) \ + (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ + == (__force __le32) 0) + +#define EFX_QWORD_IS_ZERO32(qword) \ + (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) + +#define EFX_DWORD_IS_ZERO(dword) \ + (((dword).u32[0]) == (__force __le32) 0) + +#define EFX_OWORD_IS_ALL_ONES64(oword) \ + (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) + +#define EFX_QWORD_IS_ALL_ONES64(qword) \ + ((qword).u64[0] == ~((__force __le64) 0)) + +#define EFX_OWORD_IS_ALL_ONES32(oword) \ + (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ + == ~((__force __le32) 0)) + +#define EFX_QWORD_IS_ALL_ONES32(qword) \ + (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) + +#define EFX_DWORD_IS_ALL_ONES(dword) \ + ((dword).u32[0] == ~((__force __le32) 0)) + +#if BITS_PER_LONG == 64 +#define 
EFX_OWORD_FIELD EFX_OWORD_FIELD64 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD64 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 +#else +#define EFX_OWORD_FIELD EFX_OWORD_FIELD32 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD32 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 +#endif + +/* + * Construct bit field portion + * + * Creates the portion of the bit field [low,high) that lies within + * the range [min,max). + */ +#define EFX_INSERT_NATIVE64(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u64) (value)) << (low - min)) : \ + (((u64) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE32(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u32) (value)) << (low - min)) : \ + (((u32) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE(min, max, low, high, value) \ + ((((max - min) >= 32) || ((high - low) >= 32)) ? \ + EFX_INSERT_NATIVE64(min, max, low, high, value) : \ + EFX_INSERT_NATIVE32(min, max, low, high, value)) + +/* + * Construct bit field portion + * + * Creates the portion of the named bit field that lies within the + * range [min,max). + */ +#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ + EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +/* + * Construct bit field + * + * Creates the portion of the named bit fields that lie within the + * range [min,max). 
+ */ +#define EFX_INSERT_FIELDS_NATIVE(min, max, \ + field1, value1, \ + field2, value2, \ + field3, value3, \ + field4, value4, \ + field5, value5, \ + field6, value6, \ + field7, value7, \ + field8, value8, \ + field9, value9, \ + field10, value10) \ + (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10))) + +#define EFX_INSERT_FIELDS64(...) \ + cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_INSERT_FIELDS32(...) \ + cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_POPULATE_OWORD64(oword, ...) do { \ + (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD64(qword, ...) do { \ + (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_OWORD32(oword, ...) do { \ + (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ + (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD32(qword, ...) do { \ + (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_DWORD(dword, ...) 
do { \ + (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + } while (0) + +#if BITS_PER_LONG == 64 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 +#else +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 +#endif + +/* Populate an octword field with various numbers of arguments */ +#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_9(oword, ...) \ + EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_8(oword, ...) \ + EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_7(oword, ...) \ + EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_6(oword, ...) \ + EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_5(oword, ...) \ + EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_4(oword, ...) \ + EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_3(oword, ...) \ + EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_2(oword, ...) \ + EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_1(oword, ...) \ + EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_OWORD(oword) \ + EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_OWORD(oword) \ + EFX_POPULATE_OWORD_4(oword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff, \ + EFX_DWORD_2, 0xffffffff, \ + EFX_DWORD_3, 0xffffffff) + +/* Populate a quadword field with various numbers of arguments */ +#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_9(qword, ...) \ + EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_8(qword, ...) 
\ + EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_7(qword, ...) \ + EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_6(qword, ...) \ + EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_5(qword, ...) \ + EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_4(qword, ...) \ + EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_3(qword, ...) \ + EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_2(qword, ...) \ + EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_1(qword, ...) \ + EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_QWORD(qword) \ + EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_QWORD(qword) \ + EFX_POPULATE_QWORD_2(qword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff) + +/* Populate a dword field with various numbers of arguments */ +#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_9(dword, ...) \ + EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_8(dword, ...) \ + EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_7(dword, ...) \ + EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_6(dword, ...) \ + EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_5(dword, ...) \ + EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_4(dword, ...) \ + EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_3(dword, ...) \ + EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_2(dword, ...) 
\ + EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_1(dword, ...) \ + EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) + +/* + * Modify a named field within an already-populated structure. Used + * for read-modify-write operations. + * + */ +#define EFX_INVERT_OWORD(oword) do { \ + (oword).u64[0] = ~((oword).u64[0]); \ + (oword).u64[1] = ~((oword).u64[1]); \ + } while (0) + +#define EFX_AND_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ + } while (0) + +#define EFX_AND_QWORD(qword, from, mask) \ + (qword).u64[0] = (from).u64[0] & (mask).u64[0] + +#define EFX_OR_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ + } while (0) + +#define EFX_INSERT64(min, max, low, high, value) \ + cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INSERT32(min, max, low, high, value) \ + cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INPLACE_MASK64(min, max, low, high) \ + EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low))) + +#define EFX_INPLACE_MASK32(min, max, low, high) \ + EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low))) + +#define EFX_SET_OWORD64(oword, low, high, value) do { \ + (oword).u64[0] = (((oword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + (oword).u64[1] = (((oword).u64[1] \ + & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ + | EFX_INSERT64(64, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD64(qword, low, high, value) do { \ + (qword).u64[0] = (((qword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | 
EFX_INSERT64(0, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD32(oword, low, high, value) do { \ + (oword).u32[0] = (((oword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (oword).u32[1] = (((oword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + (oword).u32[2] = (((oword).u32[2] \ + & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ + | EFX_INSERT32(64, 95, low, high, value)); \ + (oword).u32[3] = (((oword).u32[3] \ + & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ + | EFX_INSERT32(96, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD32(qword, low, high, value) do { \ + (qword).u32[0] = (((qword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (qword).u32[1] = (((qword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_DWORD32(dword, low, high, value) do { \ + (dword).u32[0] = (((dword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD_FIELD64(oword, field, value) \ + EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD64(qword, field, value) \ + EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_OWORD_FIELD32(oword, field, value) \ + EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD32(qword, field, value) \ + EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_DWORD_FIELD(dword, field, value) \ + EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + + + +#if BITS_PER_LONG == 64 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 +#else +#define 
EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 +#endif + +/* Used to avoid compiler warnings about shift range exceeding width + * of the data types when dma_addr_t is only 32 bits wide. + */ +#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) +#define EFX_DMA_TYPE_WIDTH(width) \ + (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) + + +/* Static initialiser */ +#define EFX_OWORD32(a, b, c, d) \ + { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ + cpu_to_le32(c), cpu_to_le32(d) } } + +#endif /* EFX_BITFIELD_H */ diff --git a/src/drivers/net/sfc/efx_common.c b/src/drivers/net/sfc/efx_common.c new file mode 100644 index 00000000..fd465612 --- /dev/null +++ b/src/drivers/net/sfc/efx_common.c @@ -0,0 +1,103 @@ +/************************************************************************** + * + * Driver datapath common code for Solarflare network cards + * + * Written by Shradha Shah + * + * Copyright Fen Systems Ltd. 2005 + * Copyright Level 5 Networks Inc. 2005 + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ * + ***************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "efx_common.h" +#include "efx_bitfield.h" +#include "mc_driver_pcol.h" + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/******************************************************************************* + * + * + * Low-level hardware access + * + * + ******************************************************************************/ + +void +efx_writel(struct efx_nic *efx, efx_dword_t *value, unsigned int reg) +{ + DBGCIO(efx, "Writing partial register %x with " EFX_DWORD_FMT "\n", + reg, EFX_DWORD_VAL(*value)); + _efx_writel(efx, value->u32[0], reg); +} + +void +efx_readl(struct efx_nic *efx, efx_dword_t *value, unsigned int reg) +{ + value->u32[0] = _efx_readl(efx, reg); + DBGCIO(efx, "Read from register %x, got " EFX_DWORD_FMT "\n", + reg, EFX_DWORD_VAL(*value)); +} + +/******************************************************************************* + * + * + * Inititialization and Close + * + * + ******************************************************************************/ +void efx_probe(struct net_device *netdev, enum efx_revision revision) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct pci_device *pci = container_of(netdev->dev, + struct pci_device, dev); + unsigned int reg = PCI_BASE_ADDRESS_0; + uint32_t bar_low; + + efx->netdev = netdev; + efx->revision = revision; + + /* Find the memory bar to use */ + pci_read_config_dword(pci, reg, &bar_low); + if ((bar_low & PCI_BASE_ADDRESS_IO_MASK) == PCI_BASE_ADDRESS_SPACE_IO) + reg = PCI_BASE_ADDRESS_2; + + efx->mmio_start = pci_bar_start(pci, reg); + efx->mmio_len = pci_bar_size(pci, reg); + efx->membase = pci_ioremap(pci, efx->mmio_start, efx->mmio_len); + + DBGCP(efx, "BAR of %lx bytes at phys %lx mapped at %p\n", + efx->mmio_len, efx->mmio_start, efx->membase); + + /* Enable PCI access */ + 
adjust_pci_device(pci); +} + +void efx_remove(struct net_device *netdev) +{ + struct efx_nic *efx = netdev_priv(netdev); + + iounmap(efx->membase); + efx->membase = NULL; +} diff --git a/src/drivers/net/sfc/efx_common.h b/src/drivers/net/sfc/efx_common.h new file mode 100644 index 00000000..3487966c --- /dev/null +++ b/src/drivers/net/sfc/efx_common.h @@ -0,0 +1,232 @@ +/************************************************************************** + * + * GPL common net driver for Solarflare network cards + * + * Written by Michael Brown + * + * Copyright Fen Systems Ltd. 2005 + * Copyright Level 5 Networks Inc. 2005 + * Copyright Solarflare Communications Inc. 2013-2017 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ * + ***************************************************************************/ +#ifndef EFX_COMMON_H +#define EFX_COMMON_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#define __packed __attribute__((__packed__)) +#define __force /*nothing*/ + +typedef uint16_t __le16; +typedef uint32_t __le32; +typedef uint64_t __le64; + +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct{int: -!!(e); })) +#define BUILD_BUG_ON(e) ((void)BUILD_BUG_ON_ZERO(e)) + +#include +#include +#include +#include "efx_bitfield.h" +#include "mcdi.h" + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +/************************************************************************** + * + * Hardware data structures and sizing + * + ***************************************************************************/ +typedef efx_qword_t efx_rx_desc_t; +typedef efx_qword_t efx_tx_desc_t; +typedef efx_qword_t efx_event_t; + +#define EFX_BUF_ALIGN 4096 +#define EFX_RXD_SIZE 512 +#define EFX_RXD_MASK (EFX_RXD_SIZE - 1) +#define EFX_TXD_SIZE 512 +#define EFX_TXD_MASK (EFX_TXD_SIZE - 1) +#define EFX_EVQ_SIZE 512 +#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1) + +/* There is space for 512 rx descriptors available. This number can be + * anything between 1 and 512 in powers of 2. This value will affect the + * network performance. During a test we were able to push 239 descriptors + * before we ran out of space. + */ +#define EFX_NUM_RX_DESC 64 +#define EFX_NUM_RX_DESC_MASK (EFX_NUM_RX_DESC - 1) + +/* The packet size is usually 1500 bytes hence we choose 1600 as the buf size, + * which is (1500+metadata) + */ +#define EFX_RX_BUF_SIZE 1600 + +/* Settings for the state field in efx_nic. 
+ */ +#define EFX_STATE_POLLING 1 + +typedef unsigned long long dma_addr_t; + +/** A buffer table allocation backing a tx dma, rx dma or eventq */ +struct efx_special_buffer { + dma_addr_t dma_addr; + int id; +}; + +/** A transmit queue */ +struct efx_tx_queue { + /* The hardware ring */ + efx_tx_desc_t *ring; + + /* The software ring storing io_buffers. */ + struct io_buffer *buf[EFX_TXD_SIZE]; + + /* The buffer table reservation pushed to hardware */ + struct efx_special_buffer entry; + + /* Software descriptor write ptr */ + unsigned int write_ptr; + + /* Hardware descriptor read ptr */ + unsigned int read_ptr; +}; + +/** A receive queue */ +struct efx_rx_queue { + /* The hardware ring */ + efx_rx_desc_t *ring; + + /* The software ring storing io_buffers */ + struct io_buffer *buf[EFX_NUM_RX_DESC]; + + /* The buffer table reservation pushed to hardware */ + struct efx_special_buffer entry; + + /* Descriptor write ptr, into both the hardware and software rings */ + unsigned int write_ptr; + + /* Hardware completion ptr */ + unsigned int read_ptr; + + /* The value of RX_CONT in the previous RX event */ + unsigned int rx_cont_prev; +}; + +/** An event queue */ +struct efx_ev_queue { + /* The hardware ring to push to hardware. + * Must be the first entry in the structure. 
+ */ + efx_event_t *ring; + + /* The buffer table reservation pushed to hardware */ + struct efx_special_buffer entry; + + /* Pointers into the ring */ + unsigned int read_ptr; +}; + +/* Hardware revisions */ +enum efx_revision { + EFX_HUNTINGTON, +}; + +/** Hardware access */ +struct efx_nic { + struct net_device *netdev; + enum efx_revision revision; + const struct efx_nic_type *type; + + int port; + u32 state; + + /** Memory and IO base */ + void *membase; + unsigned long mmio_start; + unsigned long mmio_len; + + /* Buffer table allocation head */ + int buffer_head; + + /* Queues */ + struct efx_rx_queue rxq; + struct efx_tx_queue txq; + struct efx_ev_queue evq; + + unsigned int rx_prefix_size; + + /** INT_REG_KER */ + int int_en; + efx_oword_t int_ker __aligned; + + /* Set to true if firmware supports the workaround for bug35388 */ + bool workaround_35388; + +}; + + +/** Efx device type definition */ +struct efx_nic_type { + int (*mcdi_rpc)(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet); +}; + +extern const struct efx_nic_type hunt_nic_type; + +#define EFX_MAC_FRAME_LEN(_mtu) \ + (((_mtu) \ + + /* EtherII already included */ \ + + 4 /* FCS */ \ + /* No VLAN supported */ \ + + 16 /* bug16772 */ \ + + 7) & ~7) + +/******************************************************************************* + * + * + * Hardware API + * + * + ******************************************************************************/ +static inline void _efx_writel(struct efx_nic *efx, uint32_t value, + unsigned int reg) +{ + writel((value), (efx)->membase + (reg)); +} + +static inline uint32_t _efx_readl(struct efx_nic *efx, unsigned int reg) +{ + return readl((efx)->membase + (reg)); +} + +#define efx_writel_table(efx, value, index, reg) \ + efx_writel(efx, value, (reg) + ((index) * reg##_STEP)) + +#define efx_writel_page(efx, value, index, reg) \ + efx_writel(efx, value, (reg) + 
((index) * 0x2000)) + +/* Hardware access */ +extern void efx_writel(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg); +extern void efx_readl(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg); + +/* Initialisation */ +extern void efx_probe(struct net_device *netdev, enum efx_revision rev); +extern void efx_remove(struct net_device *netdev); + +#endif /* EFX_COMMON_H */ diff --git a/src/drivers/net/sfc/efx_hunt.c b/src/drivers/net/sfc/efx_hunt.c new file mode 100644 index 00000000..07dd7dfe --- /dev/null +++ b/src/drivers/net/sfc/efx_hunt.c @@ -0,0 +1,510 @@ +/************************************************************************** + * + * Driver datapath for Solarflare network cards + * + * Written by Shradha Shah + * + * Copyright 2012-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + ***************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "efx_hunt.h" +#include "efx_bitfield.h" +#include "ef10_regs.h" + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +void efx_hunt_free_special_buffer(void *buf, int bytes) +{ + free_dma(buf, bytes); +} + +static void *efx_hunt_alloc_special_buffer(int bytes, + struct efx_special_buffer *entry) +{ + void *buffer; + dma_addr_t dma_addr; + + /* Allocate the buffer, aligned on a buffer address boundary. This + * buffer will be passed into an MC_CMD_INIT_*Q command to setup the + * appropriate type of queue via MCDI. 
+ */ + buffer = malloc_dma(bytes, EFX_BUF_ALIGN); + if (!buffer) + return NULL; + + entry->dma_addr = dma_addr = virt_to_bus(buffer); + assert((dma_addr & (EFX_BUF_ALIGN - 1)) == 0); + + /* Buffer table entries aren't allocated, so set id to zero */ + entry->id = 0; + DBGP("Allocated 0x%x bytes at %p\n", bytes, buffer); + + return buffer; +} + +/******************************************************************************* + * + * + * TX + * + * + ******************************************************************************/ +static void +efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob) +{ + dma_addr_t dma_addr; + + dma_addr = virt_to_bus(iob->data); + + EFX_POPULATE_QWORD_4(*txd, + ESF_DZ_TX_KER_TYPE, 0, + ESF_DZ_TX_KER_CONT, 0, + ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob), + ESF_DZ_TX_KER_BUF_ADDR, dma_addr); +} + +static void +efx_hunt_notify_tx_desc(struct efx_nic *efx) +{ + struct efx_tx_queue *txq = &efx->txq; + int ptr = txq->write_ptr & EFX_TXD_MASK; + efx_dword_t reg; + + EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr); + efx_writel_page(efx, ®, 0, ER_DZ_TX_DESC_UPD_DWORD); +} + +int +efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct efx_tx_queue *txq = &efx->txq; + int fill_level, space; + efx_tx_desc_t *txd; + int buf_id; + + fill_level = txq->write_ptr - txq->read_ptr; + space = EFX_TXD_SIZE - fill_level - 1; + if (space < 1) + return -ENOBUFS; + + /* Save the iobuffer for later completion */ + buf_id = txq->write_ptr & EFX_TXD_MASK; + assert(txq->buf[buf_id] == NULL); + txq->buf[buf_id] = iob; + + DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n", + buf_id, iob, iob->data, iob_len(iob)); + + /* Form the descriptor, and push it to hardware */ + txd = txq->ring + buf_id; + efx_hunt_build_tx_desc(txd, iob); + ++txq->write_ptr; + efx_hunt_notify_tx_desc(efx); + + return 0; +} + +static void +efx_hunt_transmit_done(struct efx_nic *efx, int id) +{ + struct 
efx_tx_queue *txq = &efx->txq; + unsigned int read_ptr, stop; + + /* Complete all buffers from read_ptr up to and including id */ + read_ptr = txq->read_ptr & EFX_TXD_MASK; + stop = (id + 1) & EFX_TXD_MASK; + + while (read_ptr != stop) { + struct io_buffer *iob = txq->buf[read_ptr]; + + assert(iob); + /* Complete the tx buffer */ + if (iob) + netdev_tx_complete(efx->netdev, iob); + DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob); + txq->buf[read_ptr] = NULL; + + ++txq->read_ptr; + read_ptr = txq->read_ptr & EFX_TXD_MASK; + } +} + +int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct efx_tx_queue *txq = &efx->txq; + size_t bytes; + + /* Allocate hardware transmit queue */ + bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE; + txq->ring = efx_hunt_alloc_special_buffer(bytes, &txq->entry); + if (!txq->ring) + return -ENOMEM; + + txq->read_ptr = txq->write_ptr = 0; + *dma_addr = txq->entry.dma_addr; + return 0; +} + +/******************************************************************************* + * + * + * RX + * + * + ******************************************************************************/ +static void +efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob) +{ + dma_addr_t dma_addr = virt_to_bus(iob->data); + + EFX_POPULATE_QWORD_2(*rxd, + ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE, + ESF_DZ_RX_KER_BUF_ADDR, dma_addr); +} + +static void +efx_hunt_notify_rx_desc(struct efx_nic *efx) +{ + struct efx_rx_queue *rxq = &efx->rxq; + int ptr = rxq->write_ptr & EFX_RXD_MASK; + efx_dword_t reg; + + EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr); + efx_writel_page(efx, ®, 0, ER_DZ_RX_DESC_UPD); +} + +static void +efx_hunt_rxq_fill(struct efx_nic *efx) +{ + struct efx_rx_queue *rxq = &efx->rxq; + int fill_level = rxq->write_ptr - rxq->read_ptr; + int space = EFX_NUM_RX_DESC - fill_level - 1; + int pushed = 0; + + while (space) { + int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 
1); + int desc_id = rxq->write_ptr & EFX_RXD_MASK; + struct io_buffer *iob; + efx_rx_desc_t *rxd; + + assert(rxq->buf[buf_id] == NULL); + iob = alloc_iob(EFX_RX_BUF_SIZE); + if (!iob) + break; + + DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n", + buf_id, iob, iob->data); + + rxq->buf[buf_id] = iob; + rxd = rxq->ring + desc_id; + efx_hunt_build_rx_desc(rxd, iob); + ++rxq->write_ptr; + ++pushed; + --space; + } + + /* Push the ptr to hardware */ + if (pushed > 0) { + efx_hunt_notify_rx_desc(efx); + + DBGCP(efx, "pushed %d rx buffers to fill level %d\n", + pushed, rxq->write_ptr - rxq->read_ptr); + } +} + +static void +efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop) +{ + struct efx_rx_queue *rxq = &efx->rxq; + unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK; + unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK; + struct io_buffer *iob; + + /* id is the lower 4 bits of the desc index + 1 in huntington*/ + /* hence anding with 15 */ + assert((id & 15) == ((read_ptr + (len != 0)) & 15)); + + /* Pop this rx buffer out of the software ring */ + iob = rxq->buf[buf_ptr]; + rxq->buf[buf_ptr] = NULL; + + DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n", + read_ptr, iob, iob->data, len, drop ? 
"bad" : "ok", drop); + + /* Pass the packet up if required */ + if (drop) + netdev_rx_err(efx->netdev, iob, EBADMSG); + else { + iob_put(iob, len); + iob_pull(iob, efx->rx_prefix_size); + netdev_rx(efx->netdev, iob); + } + + ++rxq->read_ptr; +} + +int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct efx_rx_queue *rxq = &efx->rxq; + size_t bytes; + + /* Allocate hardware receive queue */ + bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE; + rxq->ring = efx_hunt_alloc_special_buffer(bytes, &rxq->entry); + if (rxq->ring == NULL) + return -ENOMEM; + + rxq->read_ptr = rxq->write_ptr = 0; + *dma_addr = rxq->entry.dma_addr; + return 0; +} + +/******************************************************************************* + * + * + * Event queues and interrupts + * + * + ******************************************************************************/ +int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct efx_ev_queue *evq = &efx->evq; + size_t bytes; + + /* Allocate the hardware event queue */ + bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE; + evq->ring = efx_hunt_alloc_special_buffer(bytes, &evq->entry); + if (evq->ring == NULL) + return -ENOMEM; + + memset(evq->ring, 0xff, bytes); + evq->read_ptr = 0; + *dma_addr = evq->entry.dma_addr; + return 0; +} + +static void +efx_hunt_clear_interrupts(struct efx_nic *efx) +{ + efx_dword_t reg; + /* read the ISR */ + efx_readl(efx, ®, ER_DZ_BIU_INT_ISR); +} + +/** + * See if an event is present + * + * @v event EFX event structure + * @ret True An event is pending + * @ret False No event is pending + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. 
+ * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int +efx_hunt_event_present(efx_event_t *event) +{ + return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1]))); +} + +static void +efx_hunt_evq_read_ack(struct efx_nic *efx) +{ + struct efx_ev_queue *evq = &efx->evq; + efx_dword_t reg; + + if (efx->workaround_35388) { + EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, + ERF_DD_EVQ_IND_RPTR, + evq->read_ptr >> ERF_DD_EVQ_IND_RPTR_WIDTH); + efx_writel_page(efx, ®, 0, ER_DD_EVQ_INDIRECT); + EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, + ERF_DD_EVQ_IND_RPTR, evq->read_ptr & + ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); + efx_writel_page(efx, ®, 0, ER_DD_EVQ_INDIRECT); + } else { + EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr); + efx_writel_table(efx, ®, 0, ER_DZ_EVQ_RPTR); + } +} + +static unsigned int +efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt) +{ + struct efx_rx_queue *rxq = &efx->rxq; + int ev_code, desc_ptr, len; + int next_ptr_lbits, packet_drop; + int rx_cont; + + /* Decode event */ + ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE); + + switch (ev_code) { + case ESE_DZ_EV_CODE_TX_EV: + desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX); + efx_hunt_transmit_done(efx, desc_ptr); + break; + + case ESE_DZ_EV_CODE_RX_EV: + len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES); + next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS); + rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT); + + /* We don't expect to receive scattered packets, so drop the + * packet if RX_CONT is set on the current or previous event, or + * if len is zero. 
+ */ + packet_drop = (len == 0) | (rx_cont << 1) | + (rxq->rx_cont_prev << 2); + efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop); + rxq->rx_cont_prev = rx_cont; + return 1; + + default: + DBGCP(efx, "Unknown event type %d\n", ev_code); + break; + } + return 0; +} + +void efx_hunt_poll(struct net_device *netdev) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct efx_ev_queue *evq = &efx->evq; + efx_event_t *evt; + int budget = 10; + + /* Read the event queue by directly looking for events + * (we don't even bother to read the eventq write ptr) + */ + evt = evq->ring + evq->read_ptr; + while (efx_hunt_event_present(evt) && (budget > 0)) { + DBGCP(efx, "Event at index 0x%x address %p is " + EFX_QWORD_FMT "\n", evq->read_ptr, + evt, EFX_QWORD_VAL(*evt)); + + budget -= efx_hunt_handle_event(efx, evt); + + /* Clear the event */ + EFX_SET_QWORD(*evt); + + /* Move to the next event. We don't ack the event + * queue until the end + */ + evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK); + evt = evq->ring + evq->read_ptr; + } + + /* Push more rx buffers if needed */ + efx_hunt_rxq_fill(efx); + + /* Clear any pending interrupts */ + efx_hunt_clear_interrupts(efx); + + /* Ack the event queue if interrupts are enabled */ + if (efx->int_en) + efx_hunt_evq_read_ack(efx); +} + +void efx_hunt_irq(struct net_device *netdev, int enable) +{ + struct efx_nic *efx = netdev_priv(netdev); + + efx->int_en = enable; + + /* If interrupts are enabled, prime the event queue. 
Otherwise ack any + * pending interrupts + */ + if (enable) + efx_hunt_evq_read_ack(efx); + else if (efx->netdev->state & NETDEV_OPEN) + efx_hunt_clear_interrupts(efx); +} + +/******************************************************************************* + * + * + * Initialization and Close + * + * + ******************************************************************************/ +int efx_hunt_open(struct net_device *netdev) +{ + struct efx_nic *efx = netdev_priv(netdev); + efx_dword_t cmd; + + /* Set interrupt moderation to 0*/ + EFX_POPULATE_DWORD_2(cmd, + ERF_DZ_TC_TIMER_MODE, 0, + ERF_DZ_TC_TIMER_VAL, 0); + efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR); + + /* Ack the eventq */ + if (efx->int_en) + efx_hunt_evq_read_ack(efx); + + /* Push receive buffers */ + efx_hunt_rxq_fill(efx); + + return 0; +} + +void efx_hunt_close(struct net_device *netdev) +{ + struct efx_nic *efx = netdev_priv(netdev); + struct efx_rx_queue *rxq = &efx->rxq; + struct efx_tx_queue *txq = &efx->txq; + int i; + + /* Complete outstanding descriptors */ + for (i = 0; i < EFX_NUM_RX_DESC; i++) { + if (rxq->buf[i]) { + free_iob(rxq->buf[i]); + rxq->buf[i] = NULL; + } + } + + for (i = 0; i < EFX_TXD_SIZE; i++) { + if (txq->buf[i]) { + netdev_tx_complete(efx->netdev, txq->buf[i]); + txq->buf[i] = NULL; + } + } + + /* Clear interrupts */ + efx_hunt_clear_interrupts(efx); +} diff --git a/src/drivers/net/sfc/efx_hunt.h b/src/drivers/net/sfc/efx_hunt.h new file mode 100644 index 00000000..b8377bf2 --- /dev/null +++ b/src/drivers/net/sfc/efx_hunt.h @@ -0,0 +1,75 @@ +/************************************************************************** + * + * GPL net driver for Solarflare network cards + * + * Written by Shradha Shah + * + * Copyright 2012-2017 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + ***************************************************************************/ + +#ifndef EFX_HUNT_H +#define EFX_HUNT_H + +#include "efx_common.h" + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/************************************************************************** + * + * Hardware data structures and sizing + * + ***************************************************************************/ + +#define EFX_EV_SIZE(_nevs) ((_nevs) * sizeof(efx_qword_t)) +#define EFX_EVQ_NBUFS(_nevs) (EFX_EV_SIZE(_nevs) / EFX_BUF_ALIGN) + +#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof(efx_qword_t)) +#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_ALIGN) + +#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof(efx_qword_t)) +#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_ALIGN) + +/** MCDI request structure */ +struct efx_mcdi_req_s { + unsigned int emr_cmd; + efx_dword_t *emr_in_buf; + size_t emr_in_length; + int emr_rc; + efx_dword_t *emr_out_buf; + size_t emr_out_length; + size_t emr_out_length_used; +}; + +/******************************************************************************* + * + * + * Hardware API + * + * + ******************************************************************************/ + +extern void efx_hunt_free_special_buffer(void *buf, int bytes); + +/* Data path entry points */ +extern int efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob); +extern void efx_hunt_poll(struct net_device *netdev); +extern void efx_hunt_irq(struct net_device *netdev, int enable); + 
+/* Initialisation */ +extern int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr); +extern int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr); +extern int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr); +extern int efx_hunt_open(struct net_device *netdev); +extern void efx_hunt_close(struct net_device *netdev); + +#endif /* EFX_HUNT_H */ diff --git a/src/drivers/net/sfc/mc_driver_pcol.h b/src/drivers/net/sfc/mc_driver_pcol.h new file mode 100644 index 00000000..e1174bd7 --- /dev/null +++ b/src/drivers/net/sfc/mc_driver_pcol.h @@ -0,0 +1,2281 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ +#ifndef SFC_MCDI_PCOL_H +#define SFC_MCDI_PCOL_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** \file mc_driver_pcol.h + * This file is a subset of the MCDI headers generated from the yml files. + */ + +/* The current version of the MCDI protocol. + * + * Note that the ROM burnt into the card only talks V0, so at the very + * least every driver must support version 0 and MCDI_PCOL_VERSION + */ +#ifdef WITH_MCDI_V2 +#define MCDI_PCOL_VERSION 2 +#else +#define MCDI_PCOL_VERSION 1 +#endif + +/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ + +/* MCDI version 1 + * + * Each MCDI request starts with an MCDI_HEADER, which is a 32bit + * structure, filled in by the client. 
+ * + * 0 7 8 16 20 22 23 24 31 + * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | + * | | | + * | | \--- Response + * | \------- Error + * \------------------------------ Resync (always set) + * + * The client writes its request into MC shared memory, and rings the + * doorbell. Each request is completed either by the MC writing + * back into shared memory, or by writing out an event. + * + * All MCDI commands support completion by shared memory response. Each + * request may also contain additional data (accounted for by HEADER.LEN), + * and some responses may also contain additional data (again, accounted + * for by HEADER.LEN). + * + * Some MCDI commands support completion by event, in which any associated + * response data is included in the event. + * + * The protocol requires one response to be delivered for every request, a + * request should not be sent unless the response for the previous request + * has been received (either by polling shared memory, or by receiving + * an event). 
+ */ + +/** Request/Response structure */ +#define MCDI_HEADER_OFST 0 +#define MCDI_HEADER_CODE_LBN 0 +#define MCDI_HEADER_CODE_WIDTH 7 +#define MCDI_HEADER_RESYNC_LBN 7 +#define MCDI_HEADER_RESYNC_WIDTH 1 +#define MCDI_HEADER_DATALEN_LBN 8 +#define MCDI_HEADER_DATALEN_WIDTH 8 +#define MCDI_HEADER_SEQ_LBN 16 +#define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 +#define MCDI_HEADER_ERROR_LBN 22 +#define MCDI_HEADER_ERROR_WIDTH 1 +#define MCDI_HEADER_RESPONSE_LBN 23 +#define MCDI_HEADER_RESPONSE_WIDTH 1 +#define MCDI_HEADER_XFLAGS_LBN 24 +#define MCDI_HEADER_XFLAGS_WIDTH 8 +/* Request response using event */ +#define MCDI_HEADER_XFLAGS_EVREQ 0x01 +/* Request (and signal) early doorbell return */ +#define MCDI_HEADER_XFLAGS_DBRET 0x02 + +/* Maximum number of payload bytes */ +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#ifdef WITH_MCDI_V2 +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 +#else +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1 +#endif + + +/* The MC can generate events for two reasons: + * - To advance a shared memory request if XFLAGS_EVREQ was set + * - As a notification (link state, i2c event), controlled + * via MC_CMD_LOG_CTRL + * + * Both events share a common structure: + * + * 0 32 33 36 44 52 60 + * | Data | Cont | Level | Src | Code | Rsvd | + * | + * \ There is another event pending in this notification + * + * If Code==CMDDONE, then the fields are further interpreted as: + * + * - LEVEL==INFO Command succeeded + * - LEVEL==ERR Command failed + * + * 0 8 16 24 32 + * | Seq | Datalen | Errno | Rsvd | + * + * These fields are taken directly out of the standard MCDI header, i.e., + * LEVEL==ERR, Datalen == 0 => Reboot + * + * Events can be squirted out of the UART (using LOG_CTRL) without a + * MCDI header. 
An event can be distinguished from a MCDI response by + * examining the first byte which is 0xc0. This corresponds to the + * non-existent MCDI command MC_CMD_DEBUG_LOG. + * + * 0 7 8 + * | command | Resync | = 0xc0 + * + * Since the event is written in big-endian byte order, this works + * providing bits 56-63 of the event are 0xc0. + * + * 56 60 63 + * | Rsvd | Code | = 0xc0 + * + * Which means for convenience the event code is 0xc for all MC + * generated events. + */ +#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + + +/* Operation not permitted. */ +#define MC_CMD_ERR_EPERM 1 +/* Non-existent command target */ +#define MC_CMD_ERR_ENOENT 2 +/* assert() has killed the MC */ +#define MC_CMD_ERR_EINTR 4 +/* I/O failure */ +#define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 +/* Try again */ +#define MC_CMD_ERR_EAGAIN 11 +/* Out of memory */ +#define MC_CMD_ERR_ENOMEM 12 +/* Caller does not hold required locks */ +#define MC_CMD_ERR_EACCES 13 +/* Resource is currently unavailable (e.g. lock contention) */ +#define MC_CMD_ERR_EBUSY 16 +/* No such device */ +#define MC_CMD_ERR_ENODEV 19 +/* Invalid argument to target */ +#define MC_CMD_ERR_EINVAL 22 +/* Broken pipe */ +#define MC_CMD_ERR_EPIPE 32 +/* Read-only */ +#define MC_CMD_ERR_EROFS 30 +/* Out of range */ +#define MC_CMD_ERR_ERANGE 34 +/* Non-recursive resource is already acquired */ +#define MC_CMD_ERR_EDEADLK 35 +/* Operation not implemented */ +#define MC_CMD_ERR_ENOSYS 38 +/* Operation timed out */ +#define MC_CMD_ERR_ETIME 62 +/* Link has been severed */ +#define MC_CMD_ERR_ENOLINK 67 +/* Protocol error */ +#define MC_CMD_ERR_EPROTO 71 +/* Operation not supported */ +#define MC_CMD_ERR_ENOTSUP 95 +/* Address not available */ +#define MC_CMD_ERR_EADDRNOTAVAIL 99 +/* Not connected */ +#define MC_CMD_ERR_ENOTCONN 107 +/* Operation already in progress */ +#define MC_CMD_ERR_EALREADY 114 + +/* Resource allocation failed. */ +#define MC_CMD_ERR_ALLOC_FAIL 0x1000 +/* V-adaptor not found. 
*/ +#define MC_CMD_ERR_NO_VADAPTOR 0x1001 +/* EVB port not found. */ +#define MC_CMD_ERR_NO_EVB_PORT 0x1002 +/* V-switch not found. */ +#define MC_CMD_ERR_NO_VSWITCH 0x1003 +/* Too many VLAN tags. */ +#define MC_CMD_ERR_VLAN_LIMIT 0x1004 +/* Bad PCI function number. */ +#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005 +/* Invalid VLAN mode. */ +#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006 +/* Invalid v-switch type. */ +#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007 +/* Invalid v-port type. */ +#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008 +/* MAC address exists. */ +#define MC_CMD_ERR_MAC_EXIST 0x1009 +/* Slave core not present */ +#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a +/* The datapath is disabled. */ +#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* The requested operation might require the + * command to be passed between MCs, and the + * transport doesn't support that. Should + * only ever be seen over the UART. + */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* Notifies the driver that the request has been relayed + * to an admin function for authorization. The driver should + * wait for a PROXY_RESPONSE event and then resend its request. + * This error code is followed by a 32-bit handle that + * helps matching it with the respective PROXY_RESPONSE event. + */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 +/* The request cannot be passed for authorization because + * another request from the same function is currently being + * authorized. The driver should try again later. + */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that + * doesn't await an authorization. 
+ */ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege. + * Normally it is translated to EPERM by send_cmd_err(), + * but it may also be used to trigger some special mechanism + * for handling such case, e.g. to relay the failed request + * to a designated admin function for authorization. + */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* Workaround 26807 could not be turned on/off because some functions + * have already installed filters. See the comment at + * MC_CMD_WORKAROUND_BUG26807. + */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 +/* The clock whose frequency you've attempted to set + * doesn't exist on this NIC + */ +#define MC_CMD_ERR_NO_CLOCK 0x1015 +/* Returned by MC_CMD_TESTASSERT if the action that should + * have caused an assertion failed to do so. + */ +#define MC_CMD_ERR_UNREACHABLE 0x1016 +/* This command needs to be processed in the background but there were no + * resources to do so. Send it again after a command has completed. + */ +#define MC_CMD_ERR_QUEUE_FULL 0x1017 + +#define MC_CMD_ERR_CODE_OFST 0 + + +#ifdef WITH_MCDI_V2 + +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +/* No space */ +#define MC_CMD_ERR_ENOSPC 28 + +#endif + +/* MCDI_EVENT structuredef */ +#define MCDI_EVENT_LEN 8 +#define MCDI_EVENT_CONT_LBN 32 +#define MCDI_EVENT_CONT_WIDTH 1 +#define MCDI_EVENT_LEVEL_LBN 33 +#define MCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. 
*/ +#define MCDI_EVENT_LEVEL_FATAL 0x3 +#define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 +#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 +#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 +#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 +#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_LBN 8 +#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 +#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_LBN 8 +#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_LBN 0 +#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 +/* enum: SRAM Access. 
*/ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_LBN 0 +#define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 +#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 +/* enum: Descriptor loader reported failure */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_LBN 16 +#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 +#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? */ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. 
+ */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb +/* enum: FPGA header incorrect */ +#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc +/* enum: FPGA Powered Off due to error in powering up FPGA */ +#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd +/* enum: AOE FPGA load failed due to MC to MUM communication failure */ +#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe +/* enum: Notify that invalid flash type detected */ +#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf +/* enum: Notify that the attempt to run FPGA Controller firmware timedout */ +#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define MCDI_EVENT_AOE_FC_RUNNING 0x14 +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for reading by 
driver + */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 +/* enum: Reading from NV failed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0 +/* enum: Invalid Magic Number if FPGA header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1 +/* enum: Invalid Silicon type detected in header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2 +/* enum: Unsupported VRatio */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3 +/* enum: Unsupported DDR Type */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4 +/* enum: DDR Voltage out of supported range */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5 +/* enum: Unsupported DDR speed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6 +/* enum: Unsupported DDR size */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 +/* enum: Unsupported DDR rank */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 +/* enum: Primary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 +/* enum: Secondary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define 
MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DATA_LBN 0 +#define MCDI_EVENT_DATA_WIDTH 32 +#define MCDI_EVENT_SRC_LBN 36 +#define MCDI_EVENT_SRC_WIDTH 8 +#define MCDI_EVENT_EV_CODE_LBN 60 +#define MCDI_EVENT_EV_CODE_WIDTH 4 +#define MCDI_EVENT_CODE_LBN 44 +#define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. */ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. */ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. 
*/ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). */ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: Artificial event generated by host and posted via MC for test + * purposes. 
+ */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LBN 0 +#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LBN 0 +#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 +#define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LBN 0 +#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ +#define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LBN 0 +#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ +#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 +#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ +#define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LBN 0 +#define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define 
MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. 
+ */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 + +/* EVB_PORT_ID structuredef */ +#define EVB_PORT_ID_LEN 4 +#define EVB_PORT_ID_PORT_ID_OFST 0 +/* enum: An invalid port handle. */ +#define EVB_PORT_ID_NULL 0x0 +/* enum: The port assigned to this function.. */ +#define EVB_PORT_ID_ASSIGNED 0x1000000 +/* enum: External network port 0 */ +#define EVB_PORT_ID_MAC0 0x2000000 +/* enum: External network port 1 */ +#define EVB_PORT_ID_MAC1 0x2000001 +/* enum: External network port 2 */ +#define EVB_PORT_ID_MAC2 0x2000002 +/* enum: External network port 3 */ +#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_PORT_ID_LBN 0 +#define EVB_PORT_ID_PORT_ID_WIDTH 32 + + +/***********************************/ +/* MC_CMD_DRV_ATTACH + * Inform MCPU that this port is managed on the host (i.e. driver active). For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. 
+ */ +#define MC_CMD_DRV_ATTACH 0x1c +#undef MC_CMD_0x1c_PRIVILEGE_CTG + +#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRV_ATTACH_IN msgrequest */ +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +#define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Only this option is allowed for non-admin functions */ +#define MC_CMD_FW_DONT_CARE 0xffffffff + +/* MC_CMD_DRV_ATTACH_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 + +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define 
MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 + + +/***********************************/ +/* MC_CMD_ENTITY_RESET + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. + */ +#define MC_CMD_ENTITY_RESET 0x20 +/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */ + +/* MC_CMD_ENTITY_RESET_IN msgrequest */ +#define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). + */ +#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 + +/* MC_CMD_ENTITY_RESET_OUT msgresponse */ +#define MC_CMD_ENTITY_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PHY_CFG + * Report PHY configuration. This guarantees to succeed even if the PHY is in a + * 'zombie' state. 
Locks required: None + */ +#define MC_CMD_GET_PHY_CFG 0x24 +#undef MC_CMD_0x24_PRIVILEGE_CTG + +#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_CFG_IN msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_LEN 0 + +/* MC_CMD_GET_PHY_CFG_OUT msgresponse */ +#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 +/* flags */ +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 +/* ?? 
*/ +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +/* Bitmask of supported capabilities */ +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_PHY_CAP_10HDX_LBN 1 +#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10FDX_LBN 2 +#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100HDX_LBN 3 +#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100FDX_LBN 4 +#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000HDX_LBN 5 +#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000FDX_LBN 6 +#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10000FDX_LBN 7 +#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_PAUSE_LBN 8 +#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 +#define MC_CMD_PHY_CAP_ASYM_LBN 9 +#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 +#define MC_CMD_PHY_CAP_AN_LBN 10 +#define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_LBN 11 +#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_LBN 12 +#define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQ_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQ_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 17 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQ_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_REQ_WIDTH 1 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 +#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +/* enum: Xaui. 
*/ +#define MC_CMD_MEDIA_XAUI 0x1 +/* enum: CX4. */ +#define MC_CMD_MEDIA_CX4 0x2 +/* enum: KX4. */ +#define MC_CMD_MEDIA_KX4 0x3 +/* enum: XFP Far. */ +#define MC_CMD_MEDIA_XFP 0x4 +/* enum: SFP+. */ +#define MC_CMD_MEDIA_SFP_PLUS 0x5 +/* enum: 10GBaseT. */ +#define MC_CMD_MEDIA_BASE_T 0x6 +/* enum: QSFP+. */ +#define MC_CMD_MEDIA_QSFP_PLUS 0x7 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +/* enum: Native clause 22 */ +#define MC_CMD_MMD_CLAUSE22 0x0 +#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ +#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */ +#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */ +#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */ +#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */ +/* enum: Clause22 proxied over clause45 by PHY. */ +#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d +#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */ +#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */ +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 + + +/***********************************/ +/* MC_CMD_GET_LINK + * Read the unified MAC/PHY link state. Locks required: None Return code: 0, + * ETIME. + */ +#define MC_CMD_GET_LINK 0x29 +#undef MC_CMD_0x29_PRIVILEGE_CTG + +#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LINK_IN msgrequest */ +#define MC_CMD_GET_LINK_IN_LEN 0 + +/* MC_CMD_GET_LINK_OUT msgresponse */ +#define MC_CMD_GET_LINK_OUT_LEN 28 +/* near-side advertised capabilities */ +#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +/* link-partner advertised capabilities */ +#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +/* Current loopback setting. 
*/ +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 + + +/***********************************/ +/* MC_CMD_SET_MAC + * Set MAC configuration. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_MAC 0x2c +#undef MC_CMD_0x2c_PRIVILEGE_CTG + +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_MAC_IN msgrequest */ +#define MC_CMD_SET_MAC_IN_LEN 28 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). 
+ */ +#define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 +/* enum: Auto neg flow control. */ +#define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. */ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_EXT_IN msgrequest */ +#define MC_CMD_SET_MAC_EXT_IN_LEN 32 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. 
*/ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). + */ +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_OUT msgresponse */ +#define MC_CMD_SET_MAC_OUT_LEN 0 + +/* MC_CMD_SET_MAC_V2_OUT msgresponse */ +#define MC_CMD_SET_MAC_V2_OUT_LEN 4 +/* MTU as configured after processing the request. See comment at + * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL + * to 0. + */ +#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 + + +/***********************************/ +/* MC_CMD_REBOOT + * Reboot the MC. + * + * The AFTER_ASSERTION flag is intended to be used when the driver notices an + * assertion failure (at which point it is expected to perform a complete tear + * down and reinitialise), to allow both ports to reset the MC once in an + * atomic fashion. 
+ * + * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1, + * which means that they will automatically reboot out of the assertion + * handler, so this is in practise an optional operation. It is still + * recommended that drivers execute this to support custom firmwares with + * REBOOT_ON_ASSERT=0. + * + * Locks required: NONE Returns: Nothing. You get back a response with ERR=1, + * DATALEN=0 + */ +#define MC_CMD_REBOOT 0x3d +#undef MC_CMD_0x3d_PRIVILEGE_CTG + +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_REBOOT_IN msgrequest */ +#define MC_CMD_REBOOT_IN_LEN 4 +#define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ + +/* MC_CMD_REBOOT_OUT msgresponse */ +#define MC_CMD_REBOOT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_REBOOT_MODE + * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot + * mode to the specified value. Returns the old mode. + */ +#define MC_CMD_REBOOT_MODE 0x3f +#undef MC_CMD_0x3f_PRIVILEGE_CTG + +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_REBOOT_MODE_IN msgrequest */ +#define MC_CMD_REBOOT_MODE_IN_LEN 4 +#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +/* enum: Normal. */ +#define MC_CMD_REBOOT_MODE_NORMAL 0x0 +/* enum: Power-on Reset. */ +#define MC_CMD_REBOOT_MODE_POR 0x2 +/* enum: Snapper. */ +#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 +/* enum: snapper fake POR */ +#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 +#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 +#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 + +/* MC_CMD_REBOOT_MODE_OUT msgresponse */ +#define MC_CMD_REBOOT_MODE_OUT_LEN 4 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 + + +/***********************************/ +/* MC_CMD_WORKAROUND + * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't + * understand the given workaround number - which should not be treated as a + * hard error by client code. 
This op does not imply any semantics about each + * workaround, that's between the driver and the mcfw on a per-workaround + * basis. Locks required: None. Returns: 0, EINVAL . + */ +#define MC_CMD_WORKAROUND 0x4a +#undef MC_CMD_0x4a_PRIVILEGE_CTG + +#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_WORKAROUND_IN msgrequest */ +#define MC_CMD_WORKAROUND_IN_LEN 8 +/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ +#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +/* enum: Bug 17230 work around. */ +#define MC_CMD_WORKAROUND_BUG17230 0x1 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_WORKAROUND_BUG35388 0x2 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_WORKAROUND_BUG35017 0x3 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_WORKAROUND_BUG41750 0x4 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_WORKAROUND_BUG42008 0x5 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) + * This feature cannot be turned on/off while there are any filters already + * present. The behaviour in such case depends on the acting client's privilege + * level. If the client has the admin privilege, then all functions that have + * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. + */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* enum: Bug 61265 work around (broken EVQ TMR writes). 
*/ +#define MC_CMD_WORKAROUND_BUG61265 0x7 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ +#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 + +/* MC_CMD_WORKAROUND_OUT msgresponse */ +#define MC_CMD_WORKAROUND_OUT_LEN 0 + +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_MAC_ADDRESSES + * Returns the base MAC, count and stride for the requesting function + */ +#define MC_CMD_GET_MAC_ADDRESSES 0x55 +#undef MC_CMD_0x55_PRIVILEGE_CTG + +#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */ +#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0 + +/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16 +/* Base MAC address */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6 +/* Padding */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 +/* Number of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +/* Spacing of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. 
+ */ +#define MC_CMD_GET_WORKAROUNDS 0x59 +#undef MC_CMD_0x59_PRIVILEGE_CTG + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 +/* enum: Bug 61265 work around (broken EVQ TMR writes). 
*/ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. + */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 +#undef MC_CMD_0x80_PRIVILEGE_CTG + +#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */ +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +/* tbd */ +#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. 
*/ +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 + +/* MC_CMD_INIT_EVQ_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_OUT_LEN 4 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 + +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +/* tbd */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 +/* enum: All initialisation flags specified by host. */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the lowest latency achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the best throughput achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2 +/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by + * firmware based on licenses and firmware variant. 
See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. 
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + +/* QUEUE_CRC_MODE structuredef */ +#define QUEUE_CRC_MODE_LEN 1 +#define QUEUE_CRC_MODE_MODE_LBN 0 +#define QUEUE_CRC_MODE_MODE_WIDTH 4 +/* enum: No CRC. */ +#define QUEUE_CRC_MODE_NONE 0x0 +/* enum: CRC Fiber channel over ethernet. */ +#define QUEUE_CRC_MODE_FCOE 0x1 +/* enum: CRC (digest) iSCSI header only. */ +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +/* enum: CRC (digest) iSCSI header and payload. */ +#define QUEUE_CRC_MODE_ISCSI 0x3 +/* enum: CRC Fiber channel over IP over ethernet. */ +#define QUEUE_CRC_MODE_FCOIPOE 0x4 +/* enum: CRC MPA. */ +#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_SPARE_LBN 4 +#define QUEUE_CRC_MODE_SPARE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_INIT_RXQ + * set up a receive queue according to the supplied parameters. 
The IN + * arguments end with an address for each 4k of host memory required to back + * the RXQ. + */ +#define MC_CMD_INIT_RXQ 0x81 +#undef MC_CMD_0x81_PRIVILEGE_CTG + +#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_RXQ_IN_LENMIN 36 +#define MC_CMD_INIT_RXQ_IN_LENMAX 252 +#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 +#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 + +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define 
MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 + +/* MC_CMD_INIT_RXQ_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_INIT_TXQ + */ +#define MC_CMD_INIT_TXQ 0x82 +#undef MC_CMD_0x82_PRIVILEGE_CTG + +#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_TXQ_IN_LENMIN 36 +#define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 + +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. 
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +/* Flags related to Qbb flow control mode. */ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + +/* MC_CMD_INIT_TXQ_OUT msgresponse */ +#define MC_CMD_INIT_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_EVQ + * Teardown an EVQ. + * + * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first + * or the operation will fail with EBUSY + */ +#define MC_CMD_FINI_EVQ 0x83 +#undef MC_CMD_0x83_PRIVILEGE_CTG + +#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_EVQ_IN msgrequest */ +#define MC_CMD_FINI_EVQ_IN_LEN 4 +/* Instance of EVQ to destroy. Should be the same instance as that previously + * passed to INIT_EVQ + */ +#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_EVQ_OUT msgresponse */ +#define MC_CMD_FINI_EVQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_RXQ + * Teardown a RXQ. + */ +#define MC_CMD_FINI_RXQ 0x84 +#undef MC_CMD_0x84_PRIVILEGE_CTG + +#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_RXQ_IN msgrequest */ +#define MC_CMD_FINI_RXQ_IN_LEN 4 +/* Instance of RXQ to destroy */ +#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_RXQ_OUT msgresponse */ +#define MC_CMD_FINI_RXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_TXQ + * Teardown a TXQ. 
+ */ +#define MC_CMD_FINI_TXQ 0x85 +#undef MC_CMD_0x85_PRIVILEGE_CTG + +#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_TXQ_IN msgrequest */ +#define MC_CMD_FINI_TXQ_IN_LEN 4 +/* Instance of TXQ to destroy */ +#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_TXQ_OUT msgresponse */ +#define MC_CMD_FINI_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FILTER_OP + * Multiplexed MCDI call for filter operations + */ +#define MC_CMD_FILTER_OP 0x8a +#undef MC_CMD_0x8a_PRIVILEGE_CTG + +#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FILTER_OP_IN msgrequest */ +#define MC_CMD_FILTER_OP_IN_LEN 108 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_IN_OP_OFST 0 +/* enum: single-recipient filter insert */ +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +/* enum: single-recipient filter remove */ +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +/* enum: multi-recipient filter subscribe */ +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +/* enum: multi-recipient filter unsubscribe */ +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +/* enum: replace one recipient with another (warning - the filter handle may + * change) + */ +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
+ */ +#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 
1 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6 +/* destination 
port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +/* Firmware defined register 1 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined 
register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame 
destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ 
+#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ + + +/***********************************/ +/* MC_CMD_ALLOC_VIS + * Allocate VIs for current PCI function. 
+ */ +#define MC_CMD_ALLOC_VIS 0x8b +#undef MC_CMD_0x8b_PRIVILEGE_CTG + +#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOC_VIS_IN msgrequest */ +#define MC_CMD_ALLOC_VIS_IN_LEN 8 +/* The minimum number of VIs that is acceptable */ +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +/* The maximum number of VIs that would be useful */ +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 + +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ +#define MC_CMD_ALLOC_VIS_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 + +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 + + +/***********************************/ +/* MC_CMD_FREE_VIS + * Free VIs for current PCI function. Any linked PIO buffers will be unlinked, + * but not freed. + */ +#define MC_CMD_FREE_VIS 0x8c +#undef MC_CMD_0x8c_PRIVILEGE_CTG + +#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FREE_VIS_IN msgrequest */ +#define MC_CMD_FREE_VIS_IN_LEN 0 + +/* MC_CMD_FREE_VIS_OUT msgresponse */ +#define MC_CMD_FREE_VIS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_ASSIGNMENT + * Get port assignment for current PCI function. 
+ */ +#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 +#undef MC_CMD_0xb8_PRIVILEGE_CTG + +#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 + +/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 + + +/***********************************/ +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS + * Configure UDP ports for tunnel encapsulation hardware acceleration. The + * parser-dispatcher will attempt to parse traffic on these ports as tunnel + * encapsulation PDUs and filter them using the tunnel encapsulation filter + * chain rather than the standard filter chain. Note that this command can + * cause all functions to see a reset. (Available on Medford only.) + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117 +#undef MC_CMD_0x117_PRIVILEGE_CTG + +#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num)) +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1 +/* The number of entries in the ENTRIES array */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2 +/* Entries defining the UDP port to protocol mapping, each laid out as a + * TUNNEL_ENCAP_UDP_PORT_ENTRY + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 
0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16 + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2 +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 + + +#endif /* SFC_MCDI_PCOL_H */ diff --git a/src/drivers/net/sfc/mcdi.h b/src/drivers/net/sfc/mcdi.h new file mode 100644 index 00000000..19c62021 --- /dev/null +++ b/src/drivers/net/sfc/mcdi.h @@ -0,0 +1,164 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * + * Written by Martin Habets + * + * Copyright 2012-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ +#ifndef SFC_MCDI_H +#define SFC_MCDI_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#endif + +#define MCDI_SEQ_MASK 0xf + +/* We expect that 16- and 32-bit fields in MCDI requests and responses + * are appropriately aligned, but 64-bit fields are only + * 32-bit-aligned. Also, on Siena we must copy to the MC shared + * memory strictly 32 bits at a time, so add any necessary padding. 
+ */ +#define MCDI_DECLARE_BUF(_name, _len) \ + efx_dword_t _name[DIV_ROUND_UP(_len, 4)] +#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \ + MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8)) +#define _MCDI_PTR(_buf, _offset) \ + ((u8 *)(_buf) + (_offset)) +#define MCDI_PTR(_buf, _field) \ + _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) +#define _MCDI_CHECK_ALIGN(_ofst, _align) \ + ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1))) +#define _MCDI_DWORD(_buf, _field) \ + ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) + +#define MCDI_WORD(_buf, _field) \ + ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) +#define MCDI_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1) +#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \ + _name2, _value2) \ + EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2) +#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3) \ + EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3) +#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4) \ + EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4) +#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5) \ + EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, 
_field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5) +#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6) \ + EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6) +#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7) \ + EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7) +#define MCDI_SET_QWORD(_buf, _field, _value) \ + do { \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ + EFX_DWORD_0, (u32)(_value)); \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_QWORD(_buf, _field) \ + (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ + (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) +#define MCDI_FIELD(_ptr, _type, _field) \ + EFX_EXTRACT_DWORD( \ + *(efx_dword_t *) \ + _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\ + MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \ + (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ + MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) + +#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \ + (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\ + + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align)) +#define 
MCDI_DECLARE_STRUCT_PTR(_name) \ + efx_dword_t *_name +#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \ + ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_VAR_ARRAY_LEN(_len, _field) \ + min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ + ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) +#define MCDI_ARRAY_WORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *) \ + _MCDI_ARRAY_PTR(_buf, _field, _index, 2))) +#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ + EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ + EFX_DWORD_0, _value) +#define MCDI_ARRAY_DWORD(_buf, _field, _index) \ + EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) +#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \ + do { \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\ + EFX_DWORD_0, (u32)(_value)); \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ + MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ + _type ## _TYPEDEF, _field2) + +#define MCDI_EVENT_FIELD(_ev, _field) \ + EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + +#endif diff --git a/src/drivers/net/sfc/sfc_hunt.c b/src/drivers/net/sfc/sfc_hunt.c new file mode 100644 index 00000000..3c9a5fe3 --- /dev/null +++ b/src/drivers/net/sfc/sfc_hunt.c @@ -0,0 +1,1328 @@ +/************************************************************************** + * + * Device driver for Solarflare 
Communications EF10 devices + * + * Written by Shradha Shah + * + * Copyright 2012-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + * + ***************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "efx_hunt.h" +#include "efx_bitfield.h" +#include "ef10_regs.h" +#include "mc_driver_pcol.h" +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#define HUNTINGTON_NVRAM_CHUNK 0x80 +#define HUNTINGTON_NVS_MAX_LENGTH 0x1000 + +#define EMCDI_IO(code) EUNIQ(EINFO_EIO, (code)) + +#ifndef MIN +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(_a, _b) ((_a) > (_b) ? 
(_a) : (_b)) +#endif + +struct hunt_nic *primary_nics = NULL; + +struct hunt_nic { + struct efx_nic efx; + + /* PHY information */ + unsigned int phy_cap_mask; + unsigned int phy_cap; + unsigned long link_poll_timer; + + /* resource housekeeping */ + uint64_t uc_filter_id; + uint64_t mc_filter_id; + u8 mac[ETH_ALEN]; + + struct { + /* Common payload for all MCDI requests */ + unsigned int seqno; + + size_t resp_hdr_len; + size_t resp_data_len; + + struct io_buffer *iob; + uint64_t dma_addr; + } mcdi; + + struct hunt_nic *primary; + struct hunt_nic *next_primary; + u32 flags; +}; + +static int hunt_nic_is_primary(struct hunt_nic *hunt) +{ + return (hunt->flags & (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)); +} + +/******************************************************************************* + * + * + * MCDI transport + * + * This has been based on the implementation of MCDI in the common code driver. + * + * + ******************************************************************************/ + +static int hunt_mcdi_init(struct hunt_nic *hunt) +{ + size_t max_msg_size; + int rc; + + /* The MCDI message has two 32-bit headers (the MCDI header and the + * MCDI v2 extended command) and then up to MCDI_CTL_SDU_LEN_MAX_V2 + * bytes of payload + */ + max_msg_size = 2 * sizeof(efx_dword_t) + MCDI_CTL_SDU_LEN_MAX_V2; + + hunt->mcdi.iob = alloc_iob(max_msg_size); + if (!hunt->mcdi.iob) { + rc = -ENOMEM; + return rc; + } + return 0; +} + +static void hunt_mcdi_copyin(struct hunt_nic *hunt, + unsigned int cmd, + uint8_t *inbuf, + size_t inlen) +{ + efx_dword_t hdr[2]; + uint32_t seqno; + unsigned int xflags; + size_t hdr_len; + u8 *pdu = hunt->mcdi.iob->data; + + seqno = hunt->mcdi.seqno & MCDI_SEQ_MASK; + + xflags = 0; + + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_CODE, MC_CMD_V2_EXTN, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_DATALEN, 0, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_ERROR, 0, + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_XFLAGS, xflags); + EFX_POPULATE_DWORD_2(hdr[1], + 
MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, + MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); + + hdr_len = sizeof(hdr); + + memcpy(pdu, &hdr, hdr_len); + assert(inlen <= MCDI_CTL_SDU_LEN_MAX_V2); + memcpy(pdu + hdr_len, inbuf, inlen); + + wmb(); /* Sync the data before ringing the doorbell */ + + /* Ring the doorbell to post the command DMA address to the MC */ + hunt->mcdi.dma_addr = virt_to_bus(hunt->mcdi.iob->data); + + assert((hunt->mcdi.dma_addr & 0xFF) == 0); + + _efx_writel(&hunt->efx, + cpu_to_le32((u64)hunt->mcdi.dma_addr >> 32), + ER_DZ_MC_DB_LWRD); + + _efx_writel(&hunt->efx, + cpu_to_le32((u32)hunt->mcdi.dma_addr), + ER_DZ_MC_DB_HWRD); +} + +static void hunt_mcdi_copyout(struct hunt_nic *hunt, + uint8_t *outbuf, size_t outlen) +{ + size_t offset; + const u8 *pdu = hunt->mcdi.iob->data; + + offset = hunt->mcdi.resp_hdr_len; + + if (outlen > 0) + memcpy(outbuf, pdu+offset, outlen); +} + +static int hunt_mcdi_request_poll(struct hunt_nic *hunt, bool quiet) +{ + unsigned int resplen, respseq, error; + unsigned long finish; + efx_dword_t errdword; + efx_qword_t qword; + const efx_dword_t *pdu = hunt->mcdi.iob->data; + const u8 *pdu1 = hunt->mcdi.iob->data; + int delay, rc; + + /* Spin for up to 5s, polling at intervals of 10us, 20us, ... 
~100ms */ + finish = currticks() + (5 * TICKS_PER_SEC); + delay = 10; + while (1) { + udelay(delay); + + /* Check for an MCDI response */ + if (EFX_DWORD_FIELD(*pdu, MCDI_HEADER_RESPONSE)) + break; + + if (currticks() >= finish) + return -ETIMEDOUT; + + if (delay < 100000) + delay *= 2; + } + + memcpy(&qword, pdu1, 8); + + /* qword.dword[0] is the MCDI header; qword.dword[1] is the MCDI v2 + * extended command + */ + respseq = EFX_DWORD_FIELD(qword.dword[0], MCDI_HEADER_SEQ); + error = EFX_DWORD_FIELD(qword.dword[0], MCDI_HEADER_ERROR); + resplen = EFX_DWORD_FIELD(qword.dword[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + + if (error && resplen == 0) { + if (!quiet) + DBGC(hunt, "MC rebooted\n"); + return -EIO; + } else if ((respseq ^ hunt->mcdi.seqno) & MCDI_SEQ_MASK) { + if (!quiet) + DBGC(hunt, "MC response mismatch rxseq 0x%x txseq " + "0x%x\n", respseq, hunt->mcdi.seqno); + return -EIO; + } else if (error) { + memcpy(&errdword, pdu1 + 8, 4); + rc = EFX_DWORD_FIELD(errdword, EFX_DWORD_0); + switch (rc) { + case MC_CMD_ERR_ENOENT: + return -ENOENT; + case MC_CMD_ERR_EINTR: + return -EINTR; + case MC_CMD_ERR_EACCES: + return -EACCES; + case MC_CMD_ERR_EBUSY: + return -EBUSY; + case MC_CMD_ERR_EINVAL: + return -EINVAL; + case MC_CMD_ERR_EDEADLK: + return -EDEADLK; + case MC_CMD_ERR_ENOSYS: + return -ENOSYS; + case MC_CMD_ERR_ETIME: + return -ETIME; + case MC_CMD_ERR_EPERM: + return -EPERM; + default: + /* Return the MC error in an I/O error. 
*/ + return EMCDI_IO(rc & 0xff); + } + } + hunt->mcdi.resp_hdr_len = 8; + hunt->mcdi.resp_data_len = resplen; + + return 0; +} + +static void hunt_mcdi_fini(struct hunt_nic *hunt) +{ + free_iob(hunt->mcdi.iob); +} + +int _hunt_mcdi(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int rc; + struct hunt_nic *hunt = (struct hunt_nic *) efx; + size_t local_outlen_actual; + + if (outlen_actual == NULL) + outlen_actual = &local_outlen_actual; + + ++hunt->mcdi.seqno; + hunt_mcdi_copyin(hunt, cmd, (uint8_t *) inbuf, inlen); + + rc = hunt_mcdi_request_poll(hunt, quiet); + if (rc != 0) { + if (!quiet) + DBGC(hunt, "MC response to cmd 0x%x: %s\n", + cmd, strerror(rc)); + return rc; + } + + *outlen_actual = hunt->mcdi.resp_data_len; + + hunt_mcdi_copyout(hunt, (uint8_t *) outbuf, outlen); + + return 0; +} + +static int hunt_mcdi(struct hunt_nic *hunt, struct efx_mcdi_req_s *req) +{ + return _hunt_mcdi(&hunt->efx, req->emr_cmd, + (const efx_dword_t *) req->emr_in_buf, + req->emr_in_length, + (efx_dword_t *) req->emr_out_buf, req->emr_out_length, + &req->emr_out_length_used, false); +} + +static int hunt_mcdi_quiet(struct hunt_nic *hunt, struct efx_mcdi_req_s *req) +{ + return _hunt_mcdi(&hunt->efx, req->emr_cmd, + (const efx_dword_t *) req->emr_in_buf, + req->emr_in_length, + (efx_dword_t *) req->emr_out_buf, req->emr_out_length, + &req->emr_out_length_used, true); +} + +/******************************************************************************* + * + * + * Hardware initialization + * + * + ******************************************************************************/ +static int hunt_get_workarounds(struct hunt_nic *hunt, uint32_t *implemented, + uint32_t *enabled) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN); + int rc; + + *implemented = *enabled = 0; + + req.emr_cmd = MC_CMD_GET_WORKAROUNDS; + req.emr_in_buf = 
NULL; + req.emr_in_length = 0; + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + rc = hunt_mcdi(hunt, &req); + + if (rc) + return rc; + + if (req.emr_out_length_used < MC_CMD_GET_WORKAROUNDS_OUT_LEN) + return -EMSGSIZE; + + *implemented = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED); + *enabled = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED); + return 0; +} + +static int hunt_enable_workaround_35388(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN); + + req.emr_cmd = MC_CMD_WORKAROUND; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, WORKAROUND_IN_TYPE, + MC_CMD_WORKAROUND_BUG35388); + MCDI_SET_DWORD(req.emr_in_buf, WORKAROUND_IN_ENABLED, 1); + + /* If the firmware doesn't support this workaround, hunt_mcdi() will + * return -EINVAL from hunt_mcdi_request_poll(). + */ + return hunt_mcdi(hunt, &req); +} + +static int hunt_workaround_35388(struct hunt_nic *hunt) +{ + uint32_t implemented, enabled; + int rc = hunt_get_workarounds(hunt, &implemented, &enabled); + + if (rc < 0) + return 0; + if (!(implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388)) + return 0; + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) + return 1; + + rc = hunt_enable_workaround_35388(hunt); + if (rc == 0) + return 1; /* Workaround is enabled */ + else + return 0; +} + +static int hunt_get_port_assignment(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); + int rc; + + req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + hunt->efx.port = MCDI_DWORD(req.emr_out_buf, + GET_PORT_ASSIGNMENT_OUT_PORT); + return 0; +} + +static int hunt_mac_addr(struct hunt_nic 
*hunt, uint8_t *ll_addr) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + int rc; + + req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = outbuf; + req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN; + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) + return -EMSGSIZE; + + memcpy(ll_addr, + MCDI_PTR(req.emr_out_buf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), + ETH_ALEN); + + return 0; +} + +static int hunt_get_phy_cfg(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); + int rc; + + req.emr_cmd = MC_CMD_GET_PHY_CFG; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) + return -EMSGSIZE; + + hunt->phy_cap_mask = hunt->phy_cap = + MCDI_DWORD(req.emr_out_buf, GET_PHY_CFG_OUT_SUPPORTED_CAP); + DBGC2(hunt, "GET_PHY_CFG: flags=%x, caps=%x\n", rc, hunt->phy_cap); + return 0; +} + +static int hunt_driver_attach(struct hunt_nic *hunt, int attach) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); + int rc; + + req.emr_cmd = MC_CMD_DRV_ATTACH; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + /* Set the PREBOOT flag to indicate later instances of attach should + * force an ENTITY RESET + */ + if (attach) + attach |= 1 << MC_CMD_DRV_PREBOOT_LBN; + + MCDI_SET_DWORD(req.emr_in_buf, DRV_ATTACH_IN_NEW_STATE, attach); + MCDI_SET_DWORD(req.emr_in_buf, DRV_ATTACH_IN_UPDATE, 1); + MCDI_SET_DWORD(req.emr_in_buf, DRV_ATTACH_IN_FIRMWARE_ID, + MC_CMD_FW_DONT_CARE); + + rc = hunt_mcdi(hunt, 
&req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) + return -EMSGSIZE; + + hunt->flags = MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS); + + return 0; +} + +static int hunt_reset(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); + + req.emr_cmd = MC_CMD_ENTITY_RESET; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_POPULATE_DWORD_1(req.emr_in_buf, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + return hunt_mcdi(hunt, &req); +} + +static void hunt_clear_udp_tunnel_ports(struct hunt_nic *hunt) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); + struct efx_mcdi_req_s req; + int rc; + + memset(inbuf, 0, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); + MCDI_SET_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS, + (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN)); + + req.emr_cmd = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + rc = hunt_mcdi_quiet(hunt, &req); + if (rc) + return; + + if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & + (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { + DBGC(hunt, + "Rebooting MC due to clearing UDP tunnel port list\n"); + /* Delay for the MC reboot to complete. 
*/ + mdelay(100); + } +} + +static int hunt_set_mac(struct hunt_nic *hunt) +{ + struct net_device *netdev = hunt->efx.netdev; + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_IN_LEN); + unsigned int fcntl; + int rc; + + req.emr_cmd = MC_CMD_SET_MAC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_MAC_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, SET_MAC_IN_MTU, + EFX_MAC_FRAME_LEN(ETH_FRAME_LEN)); + MCDI_SET_DWORD(req.emr_in_buf, SET_MAC_IN_DRAIN, 0); + memcpy(MCDI_PTR(req.emr_in_buf, SET_MAC_IN_ADDR), + netdev->ll_addr, ETH_ALEN); + MCDI_SET_DWORD(req.emr_in_buf, SET_MAC_IN_REJECT, 0); + + /* If the PHY supports autnegotiation, then configure the MAC to match + * the negotiated settings. Otherwise force the MAC to TX and RX flow + * control. + */ + if (hunt->phy_cap_mask & (1 << MC_CMD_PHY_CAP_AN_LBN)) + fcntl = MC_CMD_FCNTL_AUTO; + else + fcntl = MC_CMD_FCNTL_BIDIR; + MCDI_SET_DWORD(req.emr_in_buf, SET_MAC_IN_FCNTL, fcntl); + + rc = hunt_mcdi(hunt, &req); + /* Ignore failure for permissions reasons */ + if (rc == -EPERM) + rc = 0; + return rc; +} + +static int hunt_alloc_vis(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); + + req.emr_cmd = MC_CMD_ALLOC_VIS; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, ALLOC_VIS_IN_MIN_VI_COUNT, 1); + MCDI_SET_DWORD(req.emr_in_buf, ALLOC_VIS_IN_MAX_VI_COUNT, 1); + + return hunt_mcdi(hunt, &req); +} + +static void hunt_free_vis(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + int rc; + + req.emr_cmd = MC_CMD_FREE_VIS; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + DBGC(hunt, "MC_CMD_FREE_VIS Failed\n"); +} + 
+/******************************************************************************* + * + * + * Link state handling + * + * + ******************************************************************************/ +static int hunt_check_link(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + unsigned int flags, speed; + bool up; + int rc; + static bool link_state = false; + + req.emr_cmd = MC_CMD_GET_LINK; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) + return -EMSGSIZE; + + flags = MCDI_DWORD(req.emr_out_buf, GET_LINK_OUT_FLAGS); + up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + speed = MCDI_DWORD(req.emr_out_buf, GET_LINK_OUT_LINK_SPEED); + + /* Set netdev_link_*() based on the link status from the MC */ + if (up && speed) + netdev_link_up(hunt->efx.netdev); + else + netdev_link_down(hunt->efx.netdev); + + if (up != link_state) { + DBGC(hunt, "Link %s, flags=%x, our caps=%x, lpa=%x, speed=%d, fcntl=%x, mac_fault=%x\n", + (up? 
"up": "down"), flags, + MCDI_DWORD(req.emr_out_buf, GET_LINK_OUT_CAP), + MCDI_DWORD(req.emr_out_buf, GET_LINK_OUT_LP_CAP), + speed, + MCDI_DWORD(req.emr_out_buf, GET_LINK_OUT_FCNTL), + MCDI_DWORD(req.emr_out_buf, GET_LINK_OUT_MAC_FAULT)); + link_state = up; + } + + return 0; +} + +#define MCDI_PORT_SPEED_CAPS ((1 << MC_CMD_PHY_CAP_10HDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_10FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_100HDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_100FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_1000HDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_1000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_10000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + +/******************************************************************************* + * + * + * TX + * + * + ******************************************************************************/ +static int +hunt_tx_init(struct net_device *netdev, struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + dma_addr_t dma_addr; + efx_qword_t *addr; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_NBUFS(EFX_TXD_SIZE))); + int rc, npages; + + rc = efx_hunt_tx_init(netdev, &dma_addr); + if (rc != 0) + return rc; + + npages = EFX_TXQ_NBUFS(EFX_TXD_SIZE); + + req.emr_cmd = MC_CMD_INIT_TXQ; + req.emr_in_buf = inbuf; + req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, INIT_TXQ_IN_SIZE, EFX_TXD_SIZE); + MCDI_SET_DWORD(req.emr_in_buf, INIT_TXQ_IN_TARGET_EVQ, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_TXQ_IN_LABEL, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_TXQ_IN_INSTANCE, 0); + + MCDI_POPULATE_DWORD_6(req.emr_in_buf, INIT_TXQ_IN_FLAGS, + INIT_TXQ_IN_FLAG_BUFF_MODE, 0, + INIT_TXQ_IN_FLAG_IP_CSUM_DIS, 1, + INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, 1, + INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, + INIT_TXQ_IN_CRC_MODE, 0, + INIT_TXQ_IN_FLAG_TIMESTAMP, 0); + + MCDI_SET_DWORD(req.emr_in_buf, INIT_TXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_TXQ_IN_PORT_ID, + EVB_PORT_ID_ASSIGNED); + + 
addr = (efx_qword_t *) MCDI_PTR(req.emr_in_buf, INIT_TXQ_IN_DMA_ADDR); + + EFX_POPULATE_QWORD_2(*addr, + EFX_DWORD_1, (uint32_t)(dma_addr >> 32), + EFX_DWORD_0, (uint32_t)(dma_addr & 0xffffffff)); + + return hunt_mcdi(hunt, &req); +} + +static void hunt_tx_fini(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); + struct efx_nic *efx = &hunt->efx; + struct efx_tx_queue *txq = &efx->txq; + int rc; + + req.emr_cmd = MC_CMD_FINI_TXQ; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, FINI_TXQ_IN_INSTANCE, 0); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + DBGC(hunt, "MC_CMD_FINI_TXQ Failed\n"); + + efx_hunt_free_special_buffer(txq->ring, + sizeof(efx_tx_desc_t) * EFX_TXD_SIZE); + txq->ring = NULL; +} + +/******************************************************************************* + * + * + * RX + * + * + ******************************************************************************/ +static int hunt_rx_filter_insert(struct net_device *netdev, + struct hunt_nic *hunt, + int multicast) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + int rc; + uint64_t filter_id; + (void) netdev; + + req.emr_cmd = MC_CMD_FILTER_OP; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_OP, + multicast ? 
MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE + : MC_CMD_FILTER_OP_IN_OP_INSERT); + MCDI_POPULATE_DWORD_1(req.emr_in_buf, FILTER_OP_IN_MATCH_FIELDS, + FILTER_OP_IN_MATCH_DST_MAC, 1); + if (multicast) + memset(MCDI_PTR(req.emr_in_buf, FILTER_OP_IN_DST_MAC), + 0xff, ETH_ALEN); + else + memcpy(MCDI_PTR(req.emr_in_buf, FILTER_OP_IN_DST_MAC), + hunt->mac, ETH_ALEN); + + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_PORT_ID, + EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_RX_DEST, + MC_CMD_FILTER_OP_IN_RX_DEST_HOST); + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_RX_QUEUE, 0); + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_RX_MODE, 0); + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_TX_DEST, + MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) + return -EIO; + + filter_id = MCDI_QWORD(req.emr_out_buf, FILTER_OP_OUT_HANDLE); + if (multicast) + hunt->mc_filter_id = filter_id; + else + hunt->uc_filter_id = filter_id; + + return 0; +} + +static int hunt_rx_filter_remove(struct hunt_nic *hunt, + int multicast) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + + req.emr_cmd = MC_CMD_FILTER_OP; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, FILTER_OP_IN_OP, + multicast ? MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE : + MC_CMD_FILTER_OP_IN_OP_REMOVE); + MCDI_SET_QWORD(req.emr_in_buf, FILTER_OP_IN_HANDLE, + multicast ? 
hunt->mc_filter_id : + hunt->uc_filter_id); + return hunt_mcdi(hunt, &req); +} + +static int hunt_get_mac(struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + int rc; + + req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) + return -EMSGSIZE; + + memcpy(hunt->mac, MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), + ETH_ALEN); + return 0; +} + +static int hunt_rx_filter_init(struct net_device *netdev, + struct hunt_nic *hunt) +{ + int rc = hunt_get_mac(hunt); + + if (rc != 0) + return rc; + + rc = hunt_rx_filter_insert(netdev, hunt, 0); + if (rc != 0) + return rc; + + rc = hunt_rx_filter_insert(netdev, hunt, 1); + if (rc != 0) + hunt_rx_filter_remove(hunt, 0); + + return rc; +} + +static int +hunt_rx_init(struct net_device *netdev, + struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + dma_addr_t dma_addr; + efx_qword_t *addr; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXD_SIZE))); + int rc, npages; + + rc = efx_hunt_rx_init(netdev, &dma_addr); + if (rc != 0) + return rc; + + npages = EFX_RXQ_NBUFS(EFX_RXD_SIZE); + + req.emr_cmd = MC_CMD_INIT_RXQ; + req.emr_in_buf = inbuf; + req.emr_in_length = MC_CMD_INIT_RXQ_IN_LEN(npages); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, INIT_RXQ_IN_SIZE, EFX_RXD_SIZE); + MCDI_SET_DWORD(req.emr_in_buf, INIT_RXQ_IN_TARGET_EVQ, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_RXQ_IN_LABEL, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_RXQ_IN_INSTANCE, 0); + MCDI_POPULATE_DWORD_5(req.emr_in_buf, INIT_RXQ_IN_FLAGS, + INIT_RXQ_IN_FLAG_BUFF_MODE, 0, + INIT_RXQ_IN_FLAG_HDR_SPLIT, 0, + INIT_RXQ_IN_FLAG_TIMESTAMP, 0, + INIT_RXQ_IN_CRC_MODE, 0, + INIT_RXQ_IN_FLAG_PREFIX, 1); + 
MCDI_SET_DWORD(req.emr_in_buf, INIT_RXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_RXQ_IN_PORT_ID, + EVB_PORT_ID_ASSIGNED); + + addr = (efx_qword_t *) MCDI_PTR(req.emr_in_buf, INIT_RXQ_IN_DMA_ADDR); + + EFX_POPULATE_QWORD_2(*addr, + EFX_DWORD_1, (uint32_t)(dma_addr >> 32), + EFX_DWORD_0, (uint32_t)(dma_addr & 0xffffffff)); + return hunt_mcdi(hunt, &req); +} + +static void hunt_rx_filter_fini(struct hunt_nic *hunt) +{ + hunt_rx_filter_remove(hunt, 0); + hunt_rx_filter_remove(hunt, 1); +} + +static void hunt_rx_fini(struct hunt_nic *hunt) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); + struct efx_mcdi_req_s req; + struct efx_nic *efx = &hunt->efx; + struct efx_rx_queue *rxq = &efx->rxq; + int rc; + + req.emr_cmd = MC_CMD_FINI_RXQ; + req.emr_in_buf = inbuf; + req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, FINI_RXQ_IN_INSTANCE, 0); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + DBGC(hunt, "MC_CMD_FINI_RXQ Failed\n"); + + efx_hunt_free_special_buffer(rxq->ring, + sizeof(efx_rx_desc_t) * EFX_RXD_SIZE); + rxq->ring = NULL; +} + +/******************************************************************************* + * + * + * Event queues and interrupts + * + * + ******************************************************************************/ +static int +hunt_ev_init(struct net_device *netdev, + struct hunt_nic *hunt) +{ + struct efx_mcdi_req_s req; + dma_addr_t dma_addr; + efx_qword_t *addr; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_SIZE))); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); + int rc, npages; + + rc = efx_hunt_ev_init(netdev, &dma_addr); + if (rc != 0) + return rc; + + npages = EFX_EVQ_NBUFS(EFX_EVQ_SIZE); + + req.emr_cmd = MC_CMD_INIT_EVQ; + req.emr_in_buf = inbuf; + req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); + req.emr_out_buf = outbuf; + req.emr_out_length = sizeof(outbuf); + + MCDI_SET_DWORD(req.emr_in_buf, 
INIT_EVQ_IN_SIZE, EFX_EVQ_SIZE); + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_INSTANCE, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_IRQ_NUM, 0); + + MCDI_POPULATE_DWORD_6(req.emr_in_buf, INIT_EVQ_IN_FLAGS, + INIT_EVQ_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_IN_FLAG_RPTR_DOS, 0, + INIT_EVQ_IN_FLAG_INT_ARMD, 0, + INIT_EVQ_IN_FLAG_CUT_THRU, 0, + INIT_EVQ_IN_FLAG_RX_MERGE, 0, + INIT_EVQ_IN_FLAG_TX_MERGE, 0); + + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_TMR_MODE, + MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_TMR_LOAD, 0); + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_TMR_RELOAD, 0); + + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_COUNT_MODE, + MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); + MCDI_SET_DWORD(req.emr_in_buf, INIT_EVQ_IN_COUNT_THRSHLD, 0); + + addr = (efx_qword_t *) MCDI_PTR(req.emr_in_buf, INIT_EVQ_IN_DMA_ADDR); + + EFX_POPULATE_QWORD_2(*addr, + EFX_DWORD_1, (uint32_t)(dma_addr >> 32), + EFX_DWORD_0, (uint32_t)(dma_addr & 0xffffffff)); + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + return rc; + + if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) + return -EMSGSIZE; + + return 0; +} + +static void hunt_ev_fini(struct hunt_nic *hunt) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + struct efx_mcdi_req_s req; + struct efx_nic *efx = &hunt->efx; + struct efx_ev_queue *evq = &efx->evq; + int rc; + + req.emr_cmd = MC_CMD_FINI_EVQ; + req.emr_in_buf = inbuf; + req.emr_in_length = sizeof(inbuf); + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_SET_DWORD(req.emr_in_buf, FINI_EVQ_IN_INSTANCE, 0); + + rc = hunt_mcdi(hunt, &req); + if (rc != 0) + DBGC(hunt, "MC_CMD_FINI_EVQ Failed\n"); + + efx_hunt_free_special_buffer(evq->ring, + sizeof(efx_event_t) * EFX_EVQ_SIZE); + evq->ring = NULL; +} + +static void +hunt_poll(struct net_device *netdev) +{ + struct hunt_nic *hunt = netdev_priv(netdev); + + /* If called while already polling, return immediately */ + if (hunt->efx.state & EFX_STATE_POLLING) + return; + hunt->efx.state 
|= EFX_STATE_POLLING; + + /* Poll link state */ + if (hunt->link_poll_timer + TICKS_PER_SEC < currticks()) { + hunt->link_poll_timer = currticks(); + hunt_check_link(hunt); + } + + /* Poll data path */ + efx_hunt_poll(netdev); + + hunt->efx.state &= ~EFX_STATE_POLLING; +} + +/******************************************************************************* + * + * + * Netdevice operations + * + * + ******************************************************************************/ +static int hunt_open(struct net_device *netdev) +{ + struct hunt_nic *hunt = netdev_priv(netdev); + int rc; + + /* Allocate VIs */ + rc = hunt_alloc_vis(hunt); + if (rc != 0) + goto fail2; + + /* Initialize data path */ + rc = hunt_ev_init(netdev, hunt); + if (rc != 0) + goto fail3; + + rc = hunt_rx_init(netdev, hunt); + if (rc != 0) + goto fail4; + + rc = hunt_rx_filter_init(netdev, hunt); + if (rc != 0) + goto fail5; + + rc = hunt_tx_init(netdev, hunt); + if (rc != 0) + goto fail6; + + rc = efx_hunt_open(netdev); + if (rc) + goto fail7; + + rc = hunt_set_mac(hunt); + if (rc) + goto fail8; + + /* Mark the link as down before checking the link state because the + * latter might fail. 
+ */ + netdev_link_down(netdev); + hunt_check_link(hunt); + + DBGC2(hunt, "%s: open ok\n", netdev->name); + return 0; + +fail8: + efx_hunt_close(netdev); +fail7: + hunt_tx_fini(hunt); +fail6: + hunt_rx_filter_fini(hunt); +fail5: + hunt_rx_fini(hunt); +fail4: + hunt_ev_fini(hunt); +fail3: + hunt_free_vis(hunt); +fail2: + DBGC2(hunt, "%s: %s\n", netdev->name, strerror(rc)); + return rc; +} + + +static void hunt_close(struct net_device *netdev) +{ + struct hunt_nic *hunt = netdev_priv(netdev); + + /* Stop datapath */ + efx_hunt_close(netdev); + + hunt_tx_fini(hunt); + hunt_rx_fini(hunt); + hunt_rx_filter_fini(hunt); + hunt_ev_fini(hunt); + + hunt_free_vis(hunt); + + /* Reset hardware and detach */ + hunt_reset(hunt); +} + + +/******************************************************************************* + * + * + * Public operations + * + * + ******************************************************************************/ + +static struct net_device_operations hunt_operations = { + .open = hunt_open, + .close = hunt_close, + .transmit = efx_hunt_transmit, + .poll = hunt_poll, + .irq = efx_hunt_irq, +}; + +static int +hunt_probe(struct pci_device *pci) +{ + struct net_device *netdev; + struct hunt_nic *hunt; + struct efx_nic *efx; + int rc = 0; + + /* Create the network adapter */ + netdev = alloc_etherdev(sizeof(struct hunt_nic)); + if (!netdev) { + rc = -ENOMEM; + goto fail1; + } + + /* Initialise the network adapter, and initialise private storage */ + netdev_init(netdev, &hunt_operations); + pci_set_drvdata(pci, netdev); + netdev->dev = &pci->dev; + netdev->state |= NETDEV_IRQ_UNSUPPORTED; + + hunt = netdev_priv(netdev); + memset(hunt, 0, sizeof(*hunt)); + efx = &hunt->efx; + + efx->type = &hunt_nic_type; + + /* Initialise efx datapath */ + efx_probe(netdev, EFX_HUNTINGTON); + + /* Initialise MCDI. 
In case we are recovering from a crash, first + * cancel any outstanding request by sending a special message using the + * least significant bits of the 'high' (doorbell) register. + */ + _efx_writel(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); + rc = hunt_mcdi_init(hunt); + if (rc != 0) + goto fail2; + + /* Reset (most) configuration for this function */ + rc = hunt_reset(hunt); + if (rc != 0) + goto fail3; + + /* Medford has a list of UDP tunnel ports that is populated by the + * driver. Avoid dropping any unencapsulated packets. This may cause + * an MC reboot. + */ + hunt_clear_udp_tunnel_ports(hunt); + + /* Enable the workaround for bug35388, if supported */ + efx->workaround_35388 = hunt_workaround_35388(hunt); + + /* Set the RX packet prefix size */ + efx->rx_prefix_size = ES_DZ_RX_PREFIX_SIZE; + + rc = hunt_get_port_assignment(hunt); + if (rc != 0) + goto fail3; + + rc = hunt_mac_addr(hunt, netdev->ll_addr); + if (rc != 0) + goto fail4; + + rc = hunt_get_phy_cfg(hunt); + if (rc != 0) + goto fail5; + + rc = hunt_driver_attach(hunt, 1); + if (rc != 0) + goto fail5; + + /* If not exposing this network device, return successfully here */ + if (hunt->flags & (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) + return 0; + + if (hunt_nic_is_primary(hunt)) { + hunt->next_primary = primary_nics; + primary_nics = hunt; + hunt->primary = hunt; + } else { + struct hunt_nic *other_hunt = primary_nics; + + while (other_hunt && !hunt->primary) { + struct pci_device *other_pci = (struct pci_device *) + other_hunt->efx.netdev->dev; + /* Check if the seg:bus:dev parts match. 
*/ + if (PCI_FIRST_FUNC(other_pci->busdevfn) == + PCI_FIRST_FUNC(pci->busdevfn)) + hunt->primary = other_hunt; + + other_hunt = other_hunt->next_primary; + } + if (!hunt->primary) { + rc = -EIO; + goto fail6; + } + } + + rc = register_netdev(netdev); + if (rc != 0) + goto fail8; + + DBG2("%s " PCI_FMT " ok\n", __func__, PCI_ARGS(pci)); + return 0; + +fail8: +fail6: + (void) hunt_driver_attach(hunt, 0); +fail5: +fail4: +fail3: + hunt_mcdi_fini(hunt); +fail2: + efx_remove(netdev); + netdev_put(netdev); +fail1: + DBG2("%s " PCI_FMT " rc=%d\n", __func__, PCI_ARGS(pci), rc); + return rc; +} + +static void hunt_remove(struct pci_device *pci) +{ + struct net_device *netdev = pci_get_drvdata(pci); + struct hunt_nic *hunt = netdev_priv(netdev); + + if (!(hunt->flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT))) { + /* The netdevice might still be open, so unregister it now + * before ripping stuff out from underneath. + */ + unregister_netdev(netdev); + } + + (void)hunt_driver_attach(hunt, 0); + hunt_mcdi_fini(hunt); + + /* Destroy data path */ + efx_remove(netdev); + + netdev_nullify(netdev); + netdev_put(netdev); +} + +const struct efx_nic_type hunt_nic_type = { + .mcdi_rpc = _hunt_mcdi, +}; + +static struct pci_device_id hunt_nics[] = { + PCI_ROM(0x1924, 0x0903, "SFC9120", "Solarflare SFC9120 Adapter", 0), + PCI_ROM(0x1924, 0x0923, "SFC9140", "Solarflare SFC9140 Adapter", 0), + PCI_ROM(0x1924, 0x0a03, "SFC9220", "Solarflare SFN8xxx Adapter", 0), + PCI_ROM(0x1924, 0x0b03, "SFC9250", "Solarflare X25xx Adapter", 0), +}; + +struct pci_driver hunt_driver __pci_driver = { + .ids = hunt_nics, + .id_count = ARRAY_SIZE(hunt_nics), + .probe = hunt_probe, + .remove = hunt_remove, +}; diff --git a/src/drivers/net/smsc75xx.c b/src/drivers/net/smsc75xx.c new file mode 100644 index 00000000..861669ed --- /dev/null +++ b/src/drivers/net/smsc75xx.c @@ -0,0 +1,577 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include "smsc75xx.h" + +/** @file + * + * SMSC LAN75xx USB Ethernet driver + * + */ + +/** Bulk IN completion profiler */ +static struct profiler smsc75xx_in_profiler __profiler = + { .name = "smsc75xx.in" }; + +/** Bulk OUT profiler */ +static struct profiler smsc75xx_out_profiler __profiler = + { .name = "smsc75xx.out" }; + +/****************************************************************************** + * + * Statistics (for debugging) + * + ****************************************************************************** + */ + +/** + * Dump statistics (for debugging) + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +int smsc75xx_dump_statistics ( struct smscusb_device *smscusb ) { + struct smsc75xx_statistics stats; + int rc; + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return 0; + + /* Get statistics */ + if ( ( rc = smscusb_get_statistics ( smscusb, 0, &stats, + sizeof ( stats ) ) ) != 0 ) { + DBGC ( smscusb, "SMSC75XX %p could not get statistics: " + "%s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* Dump statistics */ + DBGC ( smscusb, "SMSC75XX %p RXE fcs %d aln %d frg %d jab %d und %d " + "ovr %d drp %d\n", smscusb, le32_to_cpu ( stats.rx.err.fcs ), + le32_to_cpu ( stats.rx.err.alignment ), + le32_to_cpu ( stats.rx.err.fragment ), + le32_to_cpu ( stats.rx.err.jabber ), + le32_to_cpu ( stats.rx.err.undersize ), + le32_to_cpu ( stats.rx.err.oversize ), + le32_to_cpu ( stats.rx.err.dropped ) ); + DBGC ( smscusb, "SMSC75XX %p RXB ucast %d bcast %d mcast %d\n", + smscusb, le32_to_cpu ( stats.rx.byte.unicast ), + le32_to_cpu ( stats.rx.byte.broadcast ), + le32_to_cpu ( stats.rx.byte.multicast ) ); + DBGC ( smscusb, "SMSC75XX %p RXF ucast %d bcast %d mcast %d pause " + "%d\n", smscusb, le32_to_cpu ( stats.rx.frame.unicast ), + le32_to_cpu ( stats.rx.frame.broadcast ), + le32_to_cpu ( stats.rx.frame.multicast ), + le32_to_cpu ( stats.rx.frame.pause ) ); + DBGC ( smscusb, "SMSC75XX %p TXE fcs %d def %d car %d cnt %d sgl %d " + "mul %d exc %d lat %d\n", smscusb, + le32_to_cpu ( stats.tx.err.fcs ), + le32_to_cpu ( stats.tx.err.deferral ), + le32_to_cpu ( stats.tx.err.carrier ), + le32_to_cpu ( stats.tx.err.count ), + le32_to_cpu ( stats.tx.err.single ), + le32_to_cpu ( stats.tx.err.multiple ), + le32_to_cpu ( stats.tx.err.excessive ), + le32_to_cpu ( stats.tx.err.late ) ); + DBGC ( smscusb, "SMSC75XX %p TXB ucast %d bcast %d mcast %d\n", + smscusb, le32_to_cpu ( stats.tx.byte.unicast ), + le32_to_cpu ( stats.tx.byte.broadcast ), + le32_to_cpu ( stats.tx.byte.multicast ) ); + DBGC ( smscusb, "SMSC75XX %p TXF ucast %d bcast %d mcast %d pause " + "%d\n", smscusb, le32_to_cpu ( stats.tx.frame.unicast ), + le32_to_cpu ( stats.tx.frame.broadcast ), + le32_to_cpu ( stats.tx.frame.multicast ), + le32_to_cpu ( 
stats.tx.frame.pause ) ); + + return 0; +} + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset device + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int smsc75xx_reset ( struct smscusb_device *smscusb ) { + uint32_t hw_cfg; + unsigned int i; + int rc; + + /* Reset device */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_HW_CFG, + SMSC75XX_HW_CFG_LRST ) ) != 0 ) + return rc; + + /* Wait for reset to complete */ + for ( i = 0 ; i < SMSC75XX_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if reset has completed */ + if ( ( rc = smscusb_readl ( smscusb, SMSC75XX_HW_CFG, + &hw_cfg ) ) != 0 ) + return rc; + if ( ! ( hw_cfg & SMSC75XX_HW_CFG_LRST ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( smscusb, "SMSC75XX %p timed out waiting for reset\n", + smscusb ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void smsc75xx_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct smscusb_device *smscusb = + container_of ( ep, struct smscusb_device, usbnet.in ); + struct net_device *netdev = smscusb->netdev; + struct smsc75xx_rx_header *header; + + /* Profile completions */ + profile_start ( &smsc75xx_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) { + free_iob ( iobuf ); + return; + } + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( smscusb, "SMSC75XX %p bulk IN failed: %s\n", + smscusb, strerror ( rc ) ); + goto err; + } + + /* Sanity check */ + if ( iob_len ( iobuf ) < ( sizeof ( *header ) ) ) { + DBGC ( smscusb, "SMSC75XX %p underlength bulk IN\n", + smscusb ); + DBGC_HDA ( smscusb, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto err; + } + + /* Strip header */ + header = iobuf->data; + iob_pull ( iobuf, sizeof ( *header ) ); + + /* Check for errors */ + if ( header->command & cpu_to_le32 ( SMSC75XX_RX_RED ) ) { + DBGC ( smscusb, "SMSC75XX %p receive error (%08x):\n", + smscusb, le32_to_cpu ( header->command ) ); + DBGC_HDA ( smscusb, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EIO; + goto err; + } + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + + profile_stop ( &smsc75xx_in_profiler ); + return; + + err: + /* Hand off to network stack */ + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); +} + +/** Bulk IN endpoint operations */ +struct usb_endpoint_driver_operations smsc75xx_in_operations = { + .complete = smsc75xx_in_complete, +}; + +/** + * Transmit packet + * + * @v smscusb SMSC USB device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int smsc75xx_out_transmit ( struct smscusb_device *smscusb, + struct io_buffer *iobuf ) { + struct smsc75xx_tx_header *header; + size_t len = iob_len ( iobuf ); + int rc; + + /* Profile transmissions */ + profile_start ( &smsc75xx_out_profiler ); + + /* Prepend header */ + if ( ( rc = iob_ensure_headroom ( iobuf, sizeof ( *header ) ) ) != 0 ) + return rc; + header = iob_push ( iobuf, sizeof ( *header ) ); + header->command = cpu_to_le32 ( SMSC75XX_TX_FCS | len ); + header->tag = 0; + header->mss = 0; + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &smscusb->usbnet.out, iobuf, 0 ) ) != 0 ) + return rc; + + profile_stop ( &smsc75xx_out_profiler ); 
+ return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int smsc75xx_open ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + int rc; + + /* Clear stored interrupt status */ + smscusb->int_sts = 0; + + /* Configure bulk IN empty response */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_HW_CFG, + SMSC75XX_HW_CFG_BIR ) ) != 0 ) + goto err_hw_cfg; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &smscusb->usbnet ) ) != 0 ) { + DBGC ( smscusb, "SMSC75XX %p could not open: %s\n", + smscusb, strerror ( rc ) ); + goto err_open; + } + + /* Configure interrupt endpoint */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_INT_EP_CTL, + ( SMSC75XX_INT_EP_CTL_RDFO_EN | + SMSC75XX_INT_EP_CTL_PHY_EN ) ) ) != 0 ) + goto err_int_ep_ctl; + + /* Configure bulk IN delay */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_BULK_IN_DLY, + SMSC75XX_BULK_IN_DLY_SET ( 0 ) ) ) != 0 ) + goto err_bulk_in_dly; + + /* Configure receive filters */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_RFE_CTL, + ( SMSC75XX_RFE_CTL_AB | + SMSC75XX_RFE_CTL_AM | + SMSC75XX_RFE_CTL_AU ) ) ) != 0 ) + goto err_rfe_ctl; + + /* Configure receive FIFO */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_FCT_RX_CTL, + ( SMSC75XX_FCT_RX_CTL_EN | + SMSC75XX_FCT_RX_CTL_BAD ) ) ) != 0 ) + goto err_fct_rx_ctl; + + /* Configure transmit FIFO */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_FCT_TX_CTL, + SMSC75XX_FCT_TX_CTL_EN ) ) != 0 ) + goto err_fct_tx_ctl; + + /* Configure receive datapath */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_MAC_RX, + ( SMSC75XX_MAC_RX_MAX_SIZE_DEFAULT | + SMSC75XX_MAC_RX_FCS | + SMSC75XX_MAC_RX_EN ) ) ) != 0 ) + goto err_mac_rx; + + /* Configure transmit datapath */ 
+ if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_MAC_TX, + SMSC75XX_MAC_TX_EN ) ) != 0 ) + goto err_mac_tx; + + /* Set MAC address */ + if ( ( rc = smscusb_set_address ( smscusb, + SMSC75XX_RX_ADDR_BASE ) ) != 0 ) + goto err_set_address; + + /* Set MAC address perfect filter */ + if ( ( rc = smscusb_set_filter ( smscusb, + SMSC75XX_ADDR_FILT_BASE ) ) != 0 ) + goto err_set_filter; + + /* Enable PHY interrupts and update link status */ + if ( ( rc = smscusb_mii_open ( smscusb, SMSC75XX_MII_PHY_INTR_MASK, + ( SMSC75XX_PHY_INTR_ANEG_DONE | + SMSC75XX_PHY_INTR_LINK_DOWN ) ) ) != 0) + goto err_mii_open; + + return 0; + + err_mii_open: + err_set_filter: + err_set_address: + err_mac_tx: + err_mac_rx: + err_fct_tx_ctl: + err_fct_rx_ctl: + err_rfe_ctl: + err_bulk_in_dly: + err_int_ep_ctl: + usbnet_close ( &smscusb->usbnet ); + err_open: + err_hw_cfg: + smsc75xx_reset ( smscusb ); + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void smsc75xx_close ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + + /* Close USB network device */ + usbnet_close ( &smscusb->usbnet ); + + /* Dump statistics (for debugging) */ + if ( DBG_LOG ) + smsc75xx_dump_statistics ( smscusb ); + + /* Reset device */ + smsc75xx_reset ( smscusb ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +int smsc75xx_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { + struct smscusb_device *smscusb = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = smsc75xx_out_transmit ( smscusb, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +void smsc75xx_poll ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + uint32_t int_sts; + int rc; + + /* Poll USB bus */ + usb_poll ( smscusb->bus ); + + /* Refill endpoints */ + if ( ( rc = 
usbnet_refill ( &smscusb->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); + + /* Do nothing more unless there are interrupts to handle */ + int_sts = smscusb->int_sts; + if ( ! int_sts ) + return; + + /* Check link status if applicable */ + if ( int_sts & SMSC75XX_INT_STS_PHY_INT ) { + smscusb_mii_check_link ( smscusb ); + int_sts &= ~SMSC75XX_INT_STS_PHY_INT; + } + + /* Record RX FIFO overflow if applicable */ + if ( int_sts & SMSC75XX_INT_STS_RDFO_INT ) { + DBGC2 ( smscusb, "SMSC75XX %p RX FIFO overflowed\n", smscusb ); + netdev_rx_err ( netdev, NULL, -ENOBUFS ); + int_sts &= ~SMSC75XX_INT_STS_RDFO_INT; + } + + /* Check for unexpected interrupts */ + if ( int_sts ) { + DBGC ( smscusb, "SMSC75XX %p unexpected interrupt %#08x\n", + smscusb, int_sts ); + netdev_rx_err ( netdev, NULL, -ENOTTY ); + } + + /* Clear interrupts */ + if ( ( rc = smscusb_writel ( smscusb, SMSC75XX_INT_STS, + smscusb->int_sts ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); + smscusb->int_sts = 0; +} + +/** SMSC75xx network device operations */ +static struct net_device_operations smsc75xx_operations = { + .open = smsc75xx_open, + .close = smsc75xx_close, + .transmit = smsc75xx_transmit, + .poll = smsc75xx_poll, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int smsc75xx_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct net_device *netdev; + struct smscusb_device *smscusb; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *smscusb ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &smsc75xx_operations ); + netdev->dev = &func->dev; + smscusb = netdev->priv; + memset ( smscusb, 0, sizeof ( *smscusb ) ); + smscusb_init ( smscusb, netdev, func, &smsc75xx_in_operations ); + smscusb_mii_init ( smscusb, SMSC75XX_MII_BASE, + SMSC75XX_MII_PHY_INTR_SOURCE ); + usb_refill_init ( &smscusb->usbnet.in, 0, SMSC75XX_IN_MTU, + SMSC75XX_IN_MAX_FILL ); + DBGC ( smscusb, "SMSC75XX %p on %s\n", smscusb, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &smscusb->usbnet, config ) ) != 0 ) { + DBGC ( smscusb, "SMSC75XX %p could not describe: %s\n", + smscusb, strerror ( rc ) ); + goto err_describe; + } + + /* Reset device */ + if ( ( rc = smsc75xx_reset ( smscusb ) ) != 0 ) + goto err_reset; + + /* Read MAC address */ + if ( ( rc = smscusb_eeprom_fetch_mac ( smscusb, + SMSC75XX_E2P_BASE ) ) != 0 ) + goto err_fetch_mac; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, netdev ); + return 0; + + unregister_netdev ( netdev ); + err_register: + err_fetch_mac: + err_reset: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void smsc75xx_remove ( struct usb_function *func ) { + struct net_device *netdev = usb_func_get_drvdata ( func ); + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** SMSC75xx device IDs */ +static struct usb_device_id smsc75xx_ids[] = { + { + .name = "smsc7500", + .vendor = 0x0424, + .product = 0x7500, + }, + { + .name = "smsc7505", + .vendor = 0x0424, + .product = 0x7505, + }, +}; + +/** SMSC LAN75xx driver */ +struct usb_driver smsc75xx_driver __usb_driver = { + .ids = smsc75xx_ids, + .id_count = ( sizeof ( smsc75xx_ids ) / sizeof ( smsc75xx_ids[0] ) ), + .class = USB_CLASS_ID ( 0xff, 0x00, 0xff ), 
+ .score = USB_SCORE_NORMAL, + .probe = smsc75xx_probe, + .remove = smsc75xx_remove, +}; diff --git a/src/drivers/net/smsc75xx.h b/src/drivers/net/smsc75xx.h new file mode 100644 index 00000000..72339df0 --- /dev/null +++ b/src/drivers/net/smsc75xx.h @@ -0,0 +1,223 @@ +#ifndef _SMSC75XX_H +#define _SMSC75XX_H + +/** @file + * + * SMSC LAN75xx USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "smscusb.h" + +/** Interrupt status register */ +#define SMSC75XX_INT_STS 0x00c +#define SMSC75XX_INT_STS_RDFO_INT 0x00400000UL /**< RX FIFO overflow */ +#define SMSC75XX_INT_STS_PHY_INT 0x00020000UL /**< PHY interrupt */ + +/** Hardware configuration register */ +#define SMSC75XX_HW_CFG 0x010 +#define SMSC75XX_HW_CFG_BIR 0x00000080UL /**< Bulk IN use NAK */ +#define SMSC75XX_HW_CFG_LRST 0x00000002UL /**< Soft lite reset */ + +/** Interrupt endpoint control register */ +#define SMSC75XX_INT_EP_CTL 0x038 +#define SMSC75XX_INT_EP_CTL_RDFO_EN 0x00400000UL /**< RX FIFO overflow */ +#define SMSC75XX_INT_EP_CTL_PHY_EN 0x00020000UL /**< PHY interrupt */ + +/** Bulk IN delay register */ +#define SMSC75XX_BULK_IN_DLY 0x03c +#define SMSC75XX_BULK_IN_DLY_SET(ticks) ( (ticks) << 0 ) /**< Delay / 16.7ns */ + +/** EEPROM register base */ +#define SMSC75XX_E2P_BASE 0x040 + +/** Receive filtering engine control register */ +#define SMSC75XX_RFE_CTL 0x060 +#define SMSC75XX_RFE_CTL_AB 0x00000400UL /**< Accept broadcast */ +#define SMSC75XX_RFE_CTL_AM 0x00000200UL /**< Accept multicast */ +#define SMSC75XX_RFE_CTL_AU 0x00000100UL /**< Accept unicast */ + +/** FIFO controller RX FIFO control register */ +#define SMSC75XX_FCT_RX_CTL 0x090 +#define SMSC75XX_FCT_RX_CTL_EN 0x80000000UL /**< FCT RX enable */ +#define SMSC75XX_FCT_RX_CTL_BAD 0x02000000UL /**< Store bad frames */ + +/** FIFO controller TX FIFO control register */ +#define SMSC75XX_FCT_TX_CTL 0x094 +#define SMSC75XX_FCT_TX_CTL_EN 0x80000000UL /**< FCT TX enable */ + +/** MAC receive register */ +#define 
SMSC75XX_MAC_RX 0x104 +#define SMSC75XX_MAC_RX_MAX_SIZE(mtu) ( (mtu) << 16 ) /**< Max frame size */ +#define SMSC75XX_MAC_RX_MAX_SIZE_DEFAULT \ + SMSC75XX_MAC_RX_MAX_SIZE ( ETH_FRAME_LEN + 4 /* VLAN */ + 4 /* CRC */ ) +#define SMSC75XX_MAC_RX_FCS 0x00000010UL /**< FCS stripping */ +#define SMSC75XX_MAC_RX_EN 0x00000001UL /**< RX enable */ + +/** MAC transmit register */ +#define SMSC75XX_MAC_TX 0x108 +#define SMSC75XX_MAC_TX_EN 0x00000001UL /**< TX enable */ + +/** MAC receive address register base */ +#define SMSC75XX_RX_ADDR_BASE 0x118 + +/** MII register base */ +#define SMSC75XX_MII_BASE 0x120 + +/** PHY interrupt source MII register */ +#define SMSC75XX_MII_PHY_INTR_SOURCE 29 + +/** PHY interrupt mask MII register */ +#define SMSC75XX_MII_PHY_INTR_MASK 30 + +/** PHY interrupt: auto-negotiation complete */ +#define SMSC75XX_PHY_INTR_ANEG_DONE 0x0040 + +/** PHY interrupt: link down */ +#define SMSC75XX_PHY_INTR_LINK_DOWN 0x0010 + +/** MAC address perfect filter register base */ +#define SMSC75XX_ADDR_FILT_BASE 0x300 + +/** Receive packet header */ +struct smsc75xx_rx_header { + /** RX command word */ + uint32_t command; + /** VLAN tag */ + uint16_t vtag; + /** Checksum */ + uint16_t csum; + /** Two-byte padding used to align Ethernet payload */ + uint16_t pad; +} __attribute__ (( packed )); + +/** Receive error detected */ +#define SMSC75XX_RX_RED 0x00400000UL + +/** Transmit packet header */ +struct smsc75xx_tx_header { + /** TX command word */ + uint32_t command; + /** VLAN tag */ + uint16_t tag; + /** Maximum segment size */ + uint16_t mss; +} __attribute__ (( packed )); + +/** Insert frame checksum and pad */ +#define SMSC75XX_TX_FCS 0x00400000UL + +/** Byte count statistics */ +struct smsc75xx_byte_statistics { + /** Unicast byte count */ + uint32_t unicast; + /** Broadcast byte count */ + uint32_t broadcast; + /** Multicast byte count */ + uint32_t multicast; +} __attribute__ (( packed )); + +/** Frame count statistics */ +struct smsc75xx_frame_statistics 
{ + /** Unicast frames */ + uint32_t unicast; + /** Broadcast frames */ + uint32_t broadcast; + /** Multicast frames */ + uint32_t multicast; + /** Pause frames */ + uint32_t pause; + /** Frames by length category */ + uint32_t len[7]; +} __attribute__ (( packed )); + +/** Receive error statistics */ +struct smsc75xx_rx_error_statistics { + /** FCS errors */ + uint32_t fcs; + /** Alignment errors */ + uint32_t alignment; + /** Fragment errors */ + uint32_t fragment; + /** Jabber errors */ + uint32_t jabber; + /** Undersize frame errors */ + uint32_t undersize; + /** Oversize frame errors */ + uint32_t oversize; + /** Dropped frame errors */ + uint32_t dropped; +} __attribute__ (( packed )); + +/** Receive statistics */ +struct smsc75xx_rx_statistics { + /** Error statistics */ + struct smsc75xx_rx_error_statistics err; + /** Byte count statistics */ + struct smsc75xx_byte_statistics byte; + /** Frame count statistics */ + struct smsc75xx_frame_statistics frame; +} __attribute__ (( packed )); + +/** Transmit error statistics */ +struct smsc75xx_tx_error_statistics { + /** FCS errors */ + uint32_t fcs; + /** Excess deferral errors */ + uint32_t deferral; + /** Carrier errors */ + uint32_t carrier; + /** Bad byte count */ + uint32_t count; + /** Single collisions */ + uint32_t single; + /** Multiple collisions */ + uint32_t multiple; + /** Excession collisions */ + uint32_t excessive; + /** Late collisions */ + uint32_t late; +} __attribute__ (( packed )); + +/** Transmit statistics */ +struct smsc75xx_tx_statistics { + /** Error statistics */ + struct smsc75xx_tx_error_statistics err; + /** Byte count statistics */ + struct smsc75xx_byte_statistics byte; + /** Frame count statistics */ + struct smsc75xx_frame_statistics frame; +} __attribute__ (( packed )); + +/** Statistics */ +struct smsc75xx_statistics { + /** Receive statistics */ + struct smsc75xx_rx_statistics rx; + /** Transmit statistics */ + struct smsc75xx_tx_statistics tx; +} __attribute__ (( packed )); + 
+/** Maximum time to wait for reset (in milliseconds) */ +#define SMSC75XX_RESET_MAX_WAIT_MS 100 + +/** Bulk IN maximum fill level + * + * This is a policy decision. + */ +#define SMSC75XX_IN_MAX_FILL 8 + +/** Bulk IN buffer size */ +#define SMSC75XX_IN_MTU \ + ( sizeof ( struct smsc75xx_rx_header ) + \ + ETH_FRAME_LEN + 4 /* possible VLAN header */ ) + +extern struct usb_endpoint_driver_operations smsc75xx_in_operations; + +extern int smsc75xx_dump_statistics ( struct smscusb_device *smscusb ); +extern int smsc75xx_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ); +extern void smsc75xx_poll ( struct net_device *netdev ); + +#endif /* _SMSC75XX_H */ diff --git a/src/drivers/net/smsc95xx.c b/src/drivers/net/smsc95xx.c new file mode 100644 index 00000000..3ec49584 --- /dev/null +++ b/src/drivers/net/smsc95xx.c @@ -0,0 +1,771 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smsc95xx.h" + +/** @file + * + * SMSC LAN95xx USB Ethernet driver + * + */ + +/** Bulk IN completion profiler */ +static struct profiler smsc95xx_in_profiler __profiler = + { .name = "smsc95xx.in" }; + +/** Bulk OUT profiler */ +static struct profiler smsc95xx_out_profiler __profiler = + { .name = "smsc95xx.out" }; + +/****************************************************************************** + * + * MAC address + * + ****************************************************************************** + */ + +/** + * Construct MAC address for Honeywell VM3 + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int smsc95xx_vm3_fetch_mac ( struct smscusb_device *smscusb ) { + struct net_device *netdev = smscusb->netdev; + struct smbios_structure structure; + struct smbios_system_information system; + struct { + char manufacturer[ 10 /* "Honeywell" + NUL */ ]; + char product[ 4 /* "VM3" + NUL */ ]; + char mac[ base16_encoded_len ( ETH_ALEN ) + 1 /* NUL */ ]; + } strings; + int len; + int rc; + + /* Find system information */ + if ( ( rc = find_smbios_structure ( SMBIOS_TYPE_SYSTEM_INFORMATION, 0, + &structure ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not find system " + "information: %s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* Read system information */ + if ( ( rc = read_smbios_structure ( &structure, &system, + sizeof ( system ) ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not read system " + "information: %s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* NUL-terminate all strings to be fetched */ + memset ( &strings, 0, sizeof ( strings ) ); + + /* Fetch system manufacturer name */ + len = read_smbios_string ( &structure, system.manufacturer, + strings.manufacturer, + ( sizeof ( strings.manufacturer ) - 1 ) ); + if ( len < 0 ) { + rc = len; + DBGC 
( smscusb, "SMSC95XX %p could not read manufacturer " + "name: %s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* Fetch system product name */ + len = read_smbios_string ( &structure, system.product, strings.product, + ( sizeof ( strings.product ) - 1 ) ); + if ( len < 0 ) { + rc = len; + DBGC ( smscusb, "SMSC95XX %p could not read product name: " + "%s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* Ignore non-VM3 devices */ + if ( ( strcmp ( strings.manufacturer, "Honeywell" ) != 0 ) || + ( strcmp ( strings.product, "VM3" ) != 0 ) ) + return -ENOTTY; + + /* Find OEM strings */ + if ( ( rc = find_smbios_structure ( SMBIOS_TYPE_OEM_STRINGS, 0, + &structure ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not find OEM strings: %s\n", + smscusb, strerror ( rc ) ); + return rc; + } + + /* Fetch MAC address */ + len = read_smbios_string ( &structure, SMSC95XX_VM3_OEM_STRING_MAC, + strings.mac, ( sizeof ( strings.mac ) - 1 )); + if ( len < 0 ) { + rc = len; + DBGC ( smscusb, "SMSC95XX %p could not read OEM string: %s\n", + smscusb, strerror ( rc ) ); + return rc; + } + + /* Sanity check */ + if ( len != ( ( int ) ( sizeof ( strings.mac ) - 1 ) ) ) { + DBGC ( smscusb, "SMSC95XX %p invalid MAC address \"%s\"\n", + smscusb, strings.mac ); + return -EINVAL; + } + + /* Decode MAC address */ + len = base16_decode ( strings.mac, netdev->hw_addr, ETH_ALEN ); + if ( len < 0 ) { + rc = len; + DBGC ( smscusb, "SMSC95XX %p invalid MAC address \"%s\"\n", + smscusb, strings.mac ); + return rc; + } + + DBGC ( smscusb, "SMSC95XX %p using VM3 MAC %s\n", + smscusb, eth_ntoa ( netdev->hw_addr ) ); + return 0; +} + +/** + * Fetch MAC address + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int smsc95xx_fetch_mac ( struct smscusb_device *smscusb ) { + struct net_device *netdev = smscusb->netdev; + int rc; + + /* Read MAC address from EEPROM, if present */ + if ( ( rc = smscusb_eeprom_fetch_mac ( smscusb, + SMSC95XX_E2P_BASE ) ) == 0 ) + return 
0; + + /* Read MAC address from device tree, if present */ + if ( ( rc = smscusb_fdt_fetch_mac ( smscusb ) ) == 0 ) + return 0; + + /* Construct MAC address for Honeywell VM3, if applicable */ + if ( ( rc = smsc95xx_vm3_fetch_mac ( smscusb ) ) == 0 ) + return 0; + + /* Otherwise, generate a random MAC address */ + eth_random_addr ( netdev->hw_addr ); + DBGC ( smscusb, "SMSC95XX %p using random MAC %s\n", + smscusb, eth_ntoa ( netdev->hw_addr ) ); + return 0; +} + +/****************************************************************************** + * + * Statistics (for debugging) + * + ****************************************************************************** + */ + +/** + * Dump statistics (for debugging) + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int smsc95xx_dump_statistics ( struct smscusb_device *smscusb ) { + struct smsc95xx_rx_statistics rx; + struct smsc95xx_tx_statistics tx; + int rc; + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return 0; + + /* Get RX statistics */ + if ( ( rc = smscusb_get_statistics ( smscusb, SMSC95XX_RX_STATISTICS, + &rx, sizeof ( rx ) ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not get RX statistics: " + "%s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* Get TX statistics */ + if ( ( rc = smscusb_get_statistics ( smscusb, SMSC95XX_TX_STATISTICS, + &tx, sizeof ( tx ) ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not get TX statistics: " + "%s\n", smscusb, strerror ( rc ) ); + return rc; + } + + /* Dump statistics */ + DBGC ( smscusb, "SMSC95XX %p RX good %d bad %d crc %d und %d aln %d " + "ovr %d lat %d drp %d\n", smscusb, le32_to_cpu ( rx.good ), + le32_to_cpu ( rx.bad ), le32_to_cpu ( rx.crc ), + le32_to_cpu ( rx.undersize ), le32_to_cpu ( rx.alignment ), + le32_to_cpu ( rx.oversize ), le32_to_cpu ( rx.late ), + le32_to_cpu ( rx.dropped ) ); + DBGC ( smscusb, "SMSC95XX %p TX good %d bad %d pau %d sgl %d mul %d " + "exc %d lat %d und %d def %d car %d\n", smscusb, + le32_to_cpu ( tx.good ), le32_to_cpu ( tx.bad ), + le32_to_cpu ( tx.pause ), le32_to_cpu ( tx.single ), + le32_to_cpu ( tx.multiple ), le32_to_cpu ( tx.excessive ), + le32_to_cpu ( tx.late ), le32_to_cpu ( tx.underrun ), + le32_to_cpu ( tx.deferred ), le32_to_cpu ( tx.carrier ) ); + + return 0; +} + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset device + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int smsc95xx_reset ( struct smscusb_device *smscusb ) { + uint32_t hw_cfg; + uint32_t led_gpio_cfg; + int rc; + + /* Reset device */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_HW_CFG, + SMSC95XX_HW_CFG_LRST ) ) != 0 ) + return rc; + + /* Wait for reset to complete */ + udelay ( SMSC95XX_RESET_DELAY_US ); + + /* Check that reset has completed */ + if ( ( rc = smscusb_readl ( smscusb, 
SMSC95XX_HW_CFG, &hw_cfg ) ) != 0 ) + return rc; + if ( hw_cfg & SMSC95XX_HW_CFG_LRST ) { + DBGC ( smscusb, "SMSC95XX %p failed to reset\n", smscusb ); + return -ETIMEDOUT; + } + + /* Configure LEDs */ + led_gpio_cfg = ( SMSC95XX_LED_GPIO_CFG_GPCTL2_NSPD_LED | + SMSC95XX_LED_GPIO_CFG_GPCTL1_NLNKA_LED | + SMSC95XX_LED_GPIO_CFG_GPCTL0_NFDX_LED ); + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_LED_GPIO_CFG, + led_gpio_cfg ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not configure LEDs: %s\n", + smscusb, strerror ( rc ) ); + /* Ignore error and continue */ + } + + return 0; +} + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void smsc95xx_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct smscusb_device *smscusb = + container_of ( ep, struct smscusb_device, usbnet.in ); + struct net_device *netdev = smscusb->netdev; + struct smsc95xx_rx_header *header; + + /* Profile completions */ + profile_start ( &smsc95xx_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) { + free_iob ( iobuf ); + return; + } + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( smscusb, "SMSC95XX %p bulk IN failed: %s\n", + smscusb, strerror ( rc ) ); + goto err; + } + + /* Sanity check */ + if ( iob_len ( iobuf ) < ( sizeof ( *header ) + 4 /* CRC */ ) ) { + DBGC ( smscusb, "SMSC95XX %p underlength bulk IN\n", + smscusb ); + DBGC_HDA ( smscusb, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto err; + } + + /* Strip header and CRC */ + header = iobuf->data; + iob_pull ( iobuf, sizeof ( *header ) ); + iob_unput ( iobuf, 4 /* CRC */ ); + + /* Check for errors */ + if ( header->command & cpu_to_le32 ( SMSC95XX_RX_RUNT | + SMSC95XX_RX_LATE | + SMSC95XX_RX_CRC ) ) { + DBGC ( smscusb, "SMSC95XX %p receive error (%08x):\n", + smscusb, le32_to_cpu ( header->command ) ); + DBGC_HDA ( smscusb, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EIO; + goto err; + } + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + + profile_stop ( &smsc95xx_in_profiler ); + return; + + err: + /* Hand off to network stack */ + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations smsc95xx_in_operations = { + .complete = smsc95xx_in_complete, +}; + +/** + * Transmit packet + * + * @v smscusb SMSC USB device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int smsc95xx_out_transmit ( struct smscusb_device *smscusb, + struct io_buffer *iobuf ) { + struct smsc95xx_tx_header *header; + size_t len = iob_len ( iobuf ); + int rc; + + /* Profile transmissions */ + profile_start ( &smsc95xx_out_profiler ); + + /* Prepend header */ + if ( ( rc = iob_ensure_headroom ( iobuf, sizeof ( *header ) ) ) != 0 ) + return rc; + header = iob_push ( iobuf, sizeof ( *header ) ); + header->command = cpu_to_le32 ( SMSC95XX_TX_FIRST | SMSC95XX_TX_LAST | + SMSC95XX_TX_LEN ( len ) ); + header->len = cpu_to_le32 ( len ); + + /* 
Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &smscusb->usbnet.out, iobuf, 0 ) ) != 0 ) + return rc; + + profile_stop ( &smsc95xx_out_profiler ); + return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int smsc95xx_open ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + int rc; + + /* Clear stored interrupt status */ + smscusb->int_sts = 0; + + /* Configure bulk IN empty response */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_HW_CFG, + SMSC95XX_HW_CFG_BIR ) ) != 0 ) + goto err_hw_cfg; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &smscusb->usbnet ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not open: %s\n", + smscusb, strerror ( rc ) ); + goto err_open; + } + + /* Configure interrupt endpoint */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_INT_EP_CTL, + ( SMSC95XX_INT_EP_CTL_RXDF_EN | + SMSC95XX_INT_EP_CTL_PHY_EN ) ) ) != 0 ) + goto err_int_ep_ctl; + + /* Configure bulk IN delay */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_BULK_IN_DLY, + SMSC95XX_BULK_IN_DLY_SET ( 0 ) ) ) != 0 ) + goto err_bulk_in_dly; + + /* Configure MAC */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_MAC_CR, + ( SMSC95XX_MAC_CR_RXALL | + SMSC95XX_MAC_CR_FDPX | + SMSC95XX_MAC_CR_MCPAS | + SMSC95XX_MAC_CR_PRMS | + SMSC95XX_MAC_CR_PASSBAD | + SMSC95XX_MAC_CR_TXEN | + SMSC95XX_MAC_CR_RXEN ) ) ) != 0 ) + goto err_mac_cr; + + /* Configure transmit datapath */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_TX_CFG, + SMSC95XX_TX_CFG_ON ) ) != 0 ) + goto err_tx_cfg; + + /* Set MAC address */ + if ( ( rc = smscusb_set_address ( smscusb, SMSC95XX_ADDR_BASE ) ) != 0 ) + goto err_set_address; + + /* Enable PHY interrupts and update link status */ + if ( ( rc = 
smscusb_mii_open ( smscusb, SMSC95XX_MII_PHY_INTR_MASK, + ( SMSC95XX_PHY_INTR_ANEG_DONE | + SMSC95XX_PHY_INTR_LINK_DOWN ) ) ) != 0) + goto err_mii_open; + + return 0; + + err_mii_open: + err_set_address: + err_tx_cfg: + err_mac_cr: + err_bulk_in_dly: + err_int_ep_ctl: + usbnet_close ( &smscusb->usbnet ); + err_open: + err_hw_cfg: + smsc95xx_reset ( smscusb ); + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void smsc95xx_close ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + + /* Close USB network device */ + usbnet_close ( &smscusb->usbnet ); + + /* Dump statistics (for debugging) */ + smsc95xx_dump_statistics ( smscusb ); + + /* Reset device */ + smsc95xx_reset ( smscusb ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int smsc95xx_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct smscusb_device *smscusb = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = smsc95xx_out_transmit ( smscusb, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void smsc95xx_poll ( struct net_device *netdev ) { + struct smscusb_device *smscusb = netdev->priv; + uint32_t int_sts; + int rc; + + /* Poll USB bus */ + usb_poll ( smscusb->bus ); + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &smscusb->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); + + /* Do nothing more unless there are interrupts to handle */ + int_sts = smscusb->int_sts; + if ( ! 
int_sts ) + return; + + /* Check link status if applicable */ + if ( int_sts & SMSC95XX_INT_STS_PHY_INT ) { + smscusb_mii_check_link ( smscusb ); + int_sts &= ~SMSC95XX_INT_STS_PHY_INT; + } + + /* Record RX FIFO overflow if applicable */ + if ( int_sts & SMSC95XX_INT_STS_RXDF_INT ) { + DBGC2 ( smscusb, "SMSC95XX %p RX FIFO overflowed\n", + smscusb ); + netdev_rx_err ( netdev, NULL, -ENOBUFS ); + int_sts &= ~SMSC95XX_INT_STS_RXDF_INT; + } + + /* Check for unexpected interrupts */ + if ( int_sts ) { + DBGC ( smscusb, "SMSC95XX %p unexpected interrupt %#08x\n", + smscusb, int_sts ); + netdev_rx_err ( netdev, NULL, -ENOTTY ); + } + + /* Clear interrupts */ + if ( ( rc = smscusb_writel ( smscusb, SMSC95XX_INT_STS, + smscusb->int_sts ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); + smscusb->int_sts = 0; +} + +/** SMSC95xx network device operations */ +static struct net_device_operations smsc95xx_operations = { + .open = smsc95xx_open, + .close = smsc95xx_close, + .transmit = smsc95xx_transmit, + .poll = smsc95xx_poll, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int smsc95xx_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct net_device *netdev; + struct smscusb_device *smscusb; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *smscusb ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &smsc95xx_operations ); + netdev->dev = &func->dev; + smscusb = netdev->priv; + memset ( smscusb, 0, sizeof ( *smscusb ) ); + smscusb_init ( smscusb, netdev, func, &smsc95xx_in_operations ); + smscusb_mii_init ( smscusb, SMSC95XX_MII_BASE, + SMSC95XX_MII_PHY_INTR_SOURCE ); + usb_refill_init ( &smscusb->usbnet.in, + ( sizeof ( struct smsc95xx_tx_header ) - + sizeof ( struct smsc95xx_rx_header ) ), + SMSC95XX_IN_MTU, SMSC95XX_IN_MAX_FILL ); + DBGC ( smscusb, "SMSC95XX %p on %s\n", smscusb, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &smscusb->usbnet, config ) ) != 0 ) { + DBGC ( smscusb, "SMSC95XX %p could not describe: %s\n", + smscusb, strerror ( rc ) ); + goto err_describe; + } + + /* Reset device */ + if ( ( rc = smsc95xx_reset ( smscusb ) ) != 0 ) + goto err_reset; + + /* Read MAC address */ + if ( ( rc = smsc95xx_fetch_mac ( smscusb ) ) != 0 ) + goto err_fetch_mac; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + usb_func_set_drvdata ( func, netdev ); + return 0; + + unregister_netdev ( netdev ); + err_register: + err_fetch_mac: + err_reset: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void smsc95xx_remove ( struct usb_function *func ) { + struct net_device *netdev = usb_func_get_drvdata ( func ); + + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** SMSC95xx device IDs */ +static struct usb_device_id smsc95xx_ids[] = { + { + .name = "smsc9500", + .vendor = 0x0424, + .product = 0x9500, + }, + { + .name = "smsc9505", + .vendor = 0x0424, + .product = 0x9505, + }, + { + .name = "smsc9500a", + .vendor = 0x0424, + .product = 0x9e00, + }, + { + .name = "smsc9505a", + .vendor = 0x0424, + .product = 0x9e01, + }, + { + .name = "smsc9514", 
+ .vendor = 0x0424, + .product = 0xec00, + }, + { + .name = "smsc9500-s", + .vendor = 0x0424, + .product = 0x9900, + }, + { + .name = "smsc9505-s", + .vendor = 0x0424, + .product = 0x9901, + }, + { + .name = "smsc9500a-s", + .vendor = 0x0424, + .product = 0x9902, + }, + { + .name = "smsc9505a-s", + .vendor = 0x0424, + .product = 0x9903, + }, + { + .name = "smsc9514-s", + .vendor = 0x0424, + .product = 0x9904, + }, + { + .name = "smsc9500a-h", + .vendor = 0x0424, + .product = 0x9905, + }, + { + .name = "smsc9505a-h", + .vendor = 0x0424, + .product = 0x9906, + }, + { + .name = "smsc9500-2", + .vendor = 0x0424, + .product = 0x9907, + }, + { + .name = "smsc9500a-2", + .vendor = 0x0424, + .product = 0x9908, + }, + { + .name = "smsc9514-2", + .vendor = 0x0424, + .product = 0x9909, + }, + { + .name = "smsc9530", + .vendor = 0x0424, + .product = 0x9530, + }, + { + .name = "smsc9730", + .vendor = 0x0424, + .product = 0x9730, + }, + { + .name = "smsc89530", + .vendor = 0x0424, + .product = 0x9e08, + }, +}; + +/** SMSC LAN95xx driver */ +struct usb_driver smsc95xx_driver __usb_driver = { + .ids = smsc95xx_ids, + .id_count = ( sizeof ( smsc95xx_ids ) / sizeof ( smsc95xx_ids[0] ) ), + .class = USB_CLASS_ID ( 0xff, 0x00, 0xff ), + .score = USB_SCORE_NORMAL, + .probe = smsc95xx_probe, + .remove = smsc95xx_remove, +}; diff --git a/src/drivers/net/smsc95xx.h b/src/drivers/net/smsc95xx.h new file mode 100644 index 00000000..0cdf3824 --- /dev/null +++ b/src/drivers/net/smsc95xx.h @@ -0,0 +1,180 @@ +#ifndef _SMSC95XX_H +#define _SMSC95XX_H + +/** @file + * + * SMSC LAN95xx USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include "smscusb.h" + +/** Interrupt status register */ +#define SMSC95XX_INT_STS 0x008 +#define SMSC95XX_INT_STS_RXDF_INT 0x00000800UL /**< RX FIFO overflow */ +#define SMSC95XX_INT_STS_PHY_INT 0x00008000UL /**< PHY interrupt */ + +/** Transmit configuration register */ +#define SMSC95XX_TX_CFG 0x010 +#define SMSC95XX_TX_CFG_ON 0x00000004UL 
/**< TX enable */ + +/** Hardware configuration register */ +#define SMSC95XX_HW_CFG 0x014 +#define SMSC95XX_HW_CFG_BIR 0x00001000UL /**< Bulk IN use NAK */ +#define SMSC95XX_HW_CFG_LRST 0x00000008UL /**< Soft lite reset */ + +/** LED GPIO configuration register */ +#define SMSC95XX_LED_GPIO_CFG 0x024 +#define SMSC95XX_LED_GPIO_CFG_GPCTL2(x) ( (x) << 24 ) /**< GPIO 2 control */ +#define SMSC95XX_LED_GPIO_CFG_GPCTL2_NSPD_LED \ + SMSC95XX_LED_GPIO_CFG_GPCTL2 ( 1 ) /**< Link speed LED */ +#define SMSC95XX_LED_GPIO_CFG_GPCTL1(x) ( (x) << 20 ) /**< GPIO 1 control */ +#define SMSC95XX_LED_GPIO_CFG_GPCTL1_NLNKA_LED \ + SMSC95XX_LED_GPIO_CFG_GPCTL1 ( 1 ) /**< Activity LED */ +#define SMSC95XX_LED_GPIO_CFG_GPCTL0(x) ( (x) << 16 ) /**< GPIO 0 control */ +#define SMSC95XX_LED_GPIO_CFG_GPCTL0_NFDX_LED \ + SMSC95XX_LED_GPIO_CFG_GPCTL0 ( 1 ) /**< Full-duplex LED */ + +/** EEPROM register base */ +#define SMSC95XX_E2P_BASE 0x030 + +/** Interrupt endpoint control register */ +#define SMSC95XX_INT_EP_CTL 0x068 +#define SMSC95XX_INT_EP_CTL_RXDF_EN 0x00000800UL /**< RX FIFO overflow */ +#define SMSC95XX_INT_EP_CTL_PHY_EN 0x00008000UL /**< PHY interrupt */ + +/** Bulk IN delay register */ +#define SMSC95XX_BULK_IN_DLY 0x06c +#define SMSC95XX_BULK_IN_DLY_SET(ticks) ( (ticks) << 0 ) /**< Delay / 16.7ns */ + +/** MAC control register */ +#define SMSC95XX_MAC_CR 0x100 +#define SMSC95XX_MAC_CR_RXALL 0x80000000UL /**< Receive all */ +#define SMSC95XX_MAC_CR_FDPX 0x00100000UL /**< Full duplex */ +#define SMSC95XX_MAC_CR_MCPAS 0x00080000UL /**< All multicast */ +#define SMSC95XX_MAC_CR_PRMS 0x00040000UL /**< Promiscuous */ +#define SMSC95XX_MAC_CR_PASSBAD 0x00010000UL /**< Pass bad frames */ +#define SMSC95XX_MAC_CR_TXEN 0x00000008UL /**< TX enabled */ +#define SMSC95XX_MAC_CR_RXEN 0x00000004UL /**< RX enabled */ + +/** MAC address register base */ +#define SMSC95XX_ADDR_BASE 0x104 + +/** MII register base */ +#define SMSC95XX_MII_BASE 0x0114 + +/** PHY interrupt source MII register */ 
+#define SMSC95XX_MII_PHY_INTR_SOURCE 29 + +/** PHY interrupt mask MII register */ +#define SMSC95XX_MII_PHY_INTR_MASK 30 + +/** PHY interrupt: auto-negotiation complete */ +#define SMSC95XX_PHY_INTR_ANEG_DONE 0x0040 + +/** PHY interrupt: link down */ +#define SMSC95XX_PHY_INTR_LINK_DOWN 0x0010 + +/** Receive packet header */ +struct smsc95xx_rx_header { + /** Command word */ + uint32_t command; +} __attribute__ (( packed )); + +/** Runt frame */ +#define SMSC95XX_RX_RUNT 0x00004000UL + +/** Late collision */ +#define SMSC95XX_RX_LATE 0x00000040UL + +/** CRC error */ +#define SMSC95XX_RX_CRC 0x00000002UL + +/** Transmit packet header */ +struct smsc95xx_tx_header { + /** Command word */ + uint32_t command; + /** Frame length */ + uint32_t len; +} __attribute__ (( packed )); + +/** First segment */ +#define SMSC95XX_TX_FIRST 0x00002000UL + +/** Last segment */ +#define SMSC95XX_TX_LAST 0x00001000UL + +/** Buffer size */ +#define SMSC95XX_TX_LEN(len) ( (len) << 0 ) + +/** Receive statistics */ +struct smsc95xx_rx_statistics { + /** Good frames */ + uint32_t good; + /** CRC errors */ + uint32_t crc; + /** Runt frame errors */ + uint32_t undersize; + /** Alignment errors */ + uint32_t alignment; + /** Frame too long errors */ + uint32_t oversize; + /** Late collision errors */ + uint32_t late; + /** Bad frames */ + uint32_t bad; + /** Dropped frames */ + uint32_t dropped; +} __attribute__ (( packed )); + +/** Receive statistics */ +#define SMSC95XX_RX_STATISTICS 0 + +/** Transmit statistics */ +struct smsc95xx_tx_statistics { + /** Good frames */ + uint32_t good; + /** Pause frames */ + uint32_t pause; + /** Single collisions */ + uint32_t single; + /** Multiple collisions */ + uint32_t multiple; + /** Excessive collisions */ + uint32_t excessive; + /** Late collisions */ + uint32_t late; + /** Buffer underruns */ + uint32_t underrun; + /** Excessive deferrals */ + uint32_t deferred; + /** Carrier errors */ + uint32_t carrier; + /** Bad frames */ + uint32_t bad; +} 
__attribute__ (( packed )); + +/** Transmit statistics */ +#define SMSC95XX_TX_STATISTICS 1 + +/** Reset delay (in microseconds) */ +#define SMSC95XX_RESET_DELAY_US 2 + +/** Bulk IN maximum fill level + * + * This is a policy decision. + */ +#define SMSC95XX_IN_MAX_FILL 8 + +/** Bulk IN buffer size */ +#define SMSC95XX_IN_MTU \ + ( sizeof ( struct smsc95xx_rx_header ) + \ + ETH_FRAME_LEN + 4 /* possible VLAN header */ \ + + 4 /* CRC */ ) + +/** Honeywell VM3 MAC address OEM string index */ +#define SMSC95XX_VM3_OEM_STRING_MAC 2 + +#endif /* _SMSC95XX_H */ diff --git a/src/drivers/net/smscusb.c b/src/drivers/net/smscusb.c new file mode 100644 index 00000000..c639c58c --- /dev/null +++ b/src/drivers/net/smscusb.c @@ -0,0 +1,825 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include "smscusb.h" + +/** @file + * + * SMSC USB Ethernet drivers + * + */ + +/** Interrupt completion profiler */ +static struct profiler smscusb_intr_profiler __profiler = + { .name = "smscusb.intr" }; + +/****************************************************************************** + * + * Register access + * + ****************************************************************************** + */ + +/** + * Write register (without byte-swapping) + * + * @v smscusb Smscusb device + * @v address Register address + * @v value Register value + * @ret rc Return status code + */ +int smscusb_raw_writel ( struct smscusb_device *smscusb, unsigned int address, + uint32_t value ) { + int rc; + + /* Write register */ + DBGCIO ( smscusb, "SMSCUSB %p [%03x] <= %08x\n", + smscusb, address, le32_to_cpu ( value ) ); + if ( ( rc = usb_control ( smscusb->usb, SMSCUSB_REGISTER_WRITE, 0, + address, &value, sizeof ( value ) ) ) != 0 ) { + DBGC ( smscusb, "SMSCUSB %p could not write %03x: %s\n", + smscusb, address, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Read register (without byte-swapping) + * + * @v smscusb SMSC USB device + * @v address Register address + * @ret value Register value + * @ret rc Return status code + */ +int smscusb_raw_readl ( struct smscusb_device *smscusb, unsigned int address, + uint32_t *value ) { + int rc; + + /* Read register */ + if ( ( rc = usb_control ( smscusb->usb, SMSCUSB_REGISTER_READ, 0, + address, value, sizeof ( *value ) ) ) != 0 ) { + DBGC ( smscusb, "SMSCUSB %p could not read %03x: %s\n", + smscusb, address, strerror ( rc ) ); + return rc; + } + DBGCIO ( smscusb, "SMSCUSB %p [%03x] => %08x\n", + smscusb, address, le32_to_cpu ( *value ) ); + + return 0; +} + +/****************************************************************************** + * + * EEPROM access + * + 
****************************************************************************** + */ + +/** + * Wait for EEPROM to become idle + * + * @v smscusb SMSC USB device + * @v e2p_base E2P register base + * @ret rc Return status code + */ +static int smscusb_eeprom_wait ( struct smscusb_device *smscusb, + unsigned int e2p_base ) { + uint32_t e2p_cmd; + unsigned int i; + int rc; + + /* Wait for EPC_BSY to become clear */ + for ( i = 0 ; i < SMSCUSB_EEPROM_MAX_WAIT_MS ; i++ ) { + + /* Read E2P_CMD and check EPC_BSY */ + if ( ( rc = smscusb_readl ( smscusb, + ( e2p_base + SMSCUSB_E2P_CMD ), + &e2p_cmd ) ) != 0 ) + return rc; + if ( ! ( e2p_cmd & SMSCUSB_E2P_CMD_EPC_BSY ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( smscusb, "SMSCUSB %p timed out waiting for EEPROM\n", + smscusb ); + return -ETIMEDOUT; +} + +/** + * Read byte from EEPROM + * + * @v smscusb SMSC USB device + * @v e2p_base E2P register base + * @v address EEPROM address + * @ret byte Byte read, or negative error + */ +static int smscusb_eeprom_read_byte ( struct smscusb_device *smscusb, + unsigned int e2p_base, + unsigned int address ) { + uint32_t e2p_cmd; + uint32_t e2p_data; + int rc; + + /* Wait for EEPROM to become idle */ + if ( ( rc = smscusb_eeprom_wait ( smscusb, e2p_base ) ) != 0 ) + return rc; + + /* Initiate read command */ + e2p_cmd = ( SMSCUSB_E2P_CMD_EPC_BSY | SMSCUSB_E2P_CMD_EPC_CMD_READ | + SMSCUSB_E2P_CMD_EPC_ADDR ( address ) ); + if ( ( rc = smscusb_writel ( smscusb, ( e2p_base + SMSCUSB_E2P_CMD ), + e2p_cmd ) ) != 0 ) + return rc; + + /* Wait for command to complete */ + if ( ( rc = smscusb_eeprom_wait ( smscusb, e2p_base ) ) != 0 ) + return rc; + + /* Read EEPROM data */ + if ( ( rc = smscusb_readl ( smscusb, ( e2p_base + SMSCUSB_E2P_DATA ), + &e2p_data ) ) != 0 ) + return rc; + + return SMSCUSB_E2P_DATA_GET ( e2p_data ); +} + +/** + * Read data from EEPROM + * + * @v smscusb SMSC USB device + * @v e2p_base E2P register base + * @v address EEPROM address + * @v data Data 
buffer
 * @v len		Length of data
 * @ret rc		Return status code
 */
static int smscusb_eeprom_read ( struct smscusb_device *smscusb,
				 unsigned int e2p_base, unsigned int address,
				 void *data, size_t len ) {
	uint8_t *bytes;
	int byte;

	/* Read bytes one at a time; each read is a separate USB
	 * control transaction via smscusb_eeprom_read_byte.
	 */
	for ( bytes = data ; len-- ; address++, bytes++ ) {
		byte = smscusb_eeprom_read_byte ( smscusb, e2p_base, address );
		if ( byte < 0 )
			return byte;
		*bytes = byte;
	}

	return 0;
}

/**
 * Fetch MAC address from EEPROM
 *
 * @v smscusb		SMSC USB device
 * @v e2p_base		E2P register base
 * @ret rc		Return status code
 */
int smscusb_eeprom_fetch_mac ( struct smscusb_device *smscusb,
			       unsigned int e2p_base ) {
	struct net_device *netdev = smscusb->netdev;
	int rc;

	/* Read MAC address from EEPROM */
	if ( ( rc = smscusb_eeprom_read ( smscusb, e2p_base, SMSCUSB_EEPROM_MAC,
					  netdev->hw_addr, ETH_ALEN ) ) != 0 )
		return rc;

	/* Check that EEPROM is physically present: an absent EEPROM
	 * reads back an invalid (e.g. all-ones or all-zeros) address.
	 */
	if ( ! is_valid_ether_addr ( netdev->hw_addr ) ) {
		DBGC ( smscusb, "SMSCUSB %p has no EEPROM MAC (%s)\n",
		       smscusb, eth_ntoa ( netdev->hw_addr ) );
		return -ENODEV;
	}

	DBGC ( smscusb, "SMSCUSB %p using EEPROM MAC %s\n",
	       smscusb, eth_ntoa ( netdev->hw_addr ) );
	return 0;
}

/******************************************************************************
 *
 * OTP access
 *
 ******************************************************************************
 */

/**
 * Power up OTP
 *
 * @v smscusb		SMSC USB device
 * @v otp_base		OTP register base
 * @ret rc		Return status code
 */
static int smscusb_otp_power_up ( struct smscusb_device *smscusb,
				  unsigned int otp_base ) {
	uint32_t otp_power;
	unsigned int i;
	int rc;

	/* Power up OTP (writing zero clears OTP_POWER_DOWN) */
	if ( ( rc = smscusb_writel ( smscusb, ( otp_base + SMSCUSB_OTP_POWER ),
				     0 ) ) != 0 )
		return rc;

	/* Wait for OTP_POWER_DOWN to become clear; each iteration
	 * delays 1ms, giving a maximum wait of
	 * SMSCUSB_OTP_MAX_WAIT_MS milliseconds.
	 */
	for ( i = 0 ; i < SMSCUSB_OTP_MAX_WAIT_MS ; i++ ) {

		/* Read OTP_POWER and check OTP_POWER_DOWN */
		if ( ( rc = smscusb_readl ( smscusb,
					    ( otp_base + SMSCUSB_OTP_POWER ),
					    &otp_power ) ) != 0 )
			return rc;
		if ( ! ( otp_power & SMSCUSB_OTP_POWER_DOWN ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( smscusb, "SMSCUSB %p timed out waiting for OTP power up\n",
	       smscusb );
	return -ETIMEDOUT;
}

/**
 * Wait for OTP to become idle
 *
 * @v smscusb		SMSC USB device
 * @v otp_base		OTP register base
 * @ret rc		Return status code
 */
static int smscusb_otp_wait ( struct smscusb_device *smscusb,
			      unsigned int otp_base ) {
	uint32_t otp_status;
	unsigned int i;
	int rc;

	/* Wait for OTP_STATUS_BUSY to become clear */
	for ( i = 0 ; i < SMSCUSB_OTP_MAX_WAIT_MS ; i++ ) {

		/* Read OTP_STATUS and check OTP_STATUS_BUSY */
		if ( ( rc = smscusb_readl ( smscusb,
					    ( otp_base + SMSCUSB_OTP_STATUS ),
					    &otp_status ) ) != 0 )
			return rc;
		if ( ! ( otp_status & SMSCUSB_OTP_STATUS_BUSY ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( smscusb, "SMSCUSB %p timed out waiting for OTP\n",
	       smscusb );
	return -ETIMEDOUT;
}

/**
 * Read byte from OTP
 *
 * @v smscusb		SMSC USB device
 * @v otp_base		OTP register base
 * @v address		OTP address
 * @ret byte		Byte read, or negative error
 */
static int smscusb_otp_read_byte ( struct smscusb_device *smscusb,
				   unsigned int otp_base,
				   unsigned int address ) {
	uint8_t addrh = ( address >> 8 );
	uint8_t addrl = ( address >> 0 );
	uint32_t otp_data;
	int rc;

	/* Wait for OTP to become idle */
	if ( ( rc = smscusb_otp_wait ( smscusb, otp_base ) ) != 0 )
		return rc;

	/* Initiate read command: set address (split across two
	 * registers), select READ, then pulse GO.
	 */
	if ( ( rc = smscusb_writel ( smscusb, ( otp_base + SMSCUSB_OTP_ADDRH ),
				     addrh ) ) != 0 )
		return rc;
	if ( ( rc = smscusb_writel ( smscusb, ( otp_base + SMSCUSB_OTP_ADDRL ),
				     addrl ) ) != 0 )
		return rc;
	if ( ( rc = smscusb_writel ( smscusb, ( otp_base + SMSCUSB_OTP_CMD ),
				     SMSCUSB_OTP_CMD_READ ) ) != 0 )
		return rc;
	if ( ( rc = smscusb_writel ( smscusb, ( otp_base + SMSCUSB_OTP_GO ),
				     SMSCUSB_OTP_GO_GO ) ) != 0 )
		return rc;

	/* Wait for command to complete */
	if ( ( rc = smscusb_otp_wait ( smscusb, otp_base ) ) != 0 )
		return rc;

	/* Read OTP data */
	if ( ( rc = smscusb_readl ( smscusb, ( otp_base + SMSCUSB_OTP_DATA ),
				    &otp_data ) ) != 0 )
		return rc;

	return SMSCUSB_OTP_DATA_GET ( otp_data );
}

/**
 * Read data from OTP
 *
 * @v smscusb		SMSC USB device
 * @v otp_base		OTP register base
 * @v address		OTP address
 * @v data		Data buffer
 * @v len		Length of data
 * @ret rc		Return status code
 *
 * Note: the OTP block is left powered up after reading; no
 * corresponding power-down call is made here.
 */
static int smscusb_otp_read ( struct smscusb_device *smscusb,
			      unsigned int otp_base, unsigned int address,
			      void *data, size_t len ) {
	uint8_t *bytes;
	int byte;
	int rc;

	/* Power up OTP */
	if ( ( rc = smscusb_otp_power_up ( smscusb, otp_base ) ) != 0 )
		return rc;

	/* Read bytes */
	for ( bytes = data ; len-- ; address++, bytes++ ) {
		byte = smscusb_otp_read_byte ( smscusb, otp_base, address );
		if ( byte < 0 )
			return byte;
		*bytes = byte;
	}

	return 0;
}

/**
 * Fetch MAC address from OTP
 *
 * @v smscusb		SMSC USB device
 * @v otp_base		OTP register base
 * @ret rc		Return status code
 */
int smscusb_otp_fetch_mac ( struct smscusb_device *smscusb,
			    unsigned int otp_base ) {
	struct net_device *netdev = smscusb->netdev;
	uint8_t signature;
	unsigned int address;
	int rc;

	/* Read OTP signature byte */
	if ( ( rc = smscusb_otp_read ( smscusb, otp_base, 0, &signature,
				       sizeof ( signature ) ) ) != 0 )
		return rc;

	/* Determine location of MAC address: the signature byte
	 * selects between the two known OTP layouts.
	 */
	switch ( signature ) {
	case SMSCUSB_OTP_1_SIG:
		address = SMSCUSB_OTP_1_MAC;
		break;
	case SMSCUSB_OTP_2_SIG:
		address = SMSCUSB_OTP_2_MAC;
		break;
	default:
		DBGC ( smscusb, "SMSCUSB %p unknown OTP signature %#02x\n",
		       smscusb, signature );
		return -ENOTSUP;
	}

	/* Read MAC address from OTP */
	if ( ( rc = smscusb_otp_read ( smscusb, otp_base, address,
				       netdev->hw_addr, ETH_ALEN ) ) != 0 )
return rc; + + /* Check that OTP is valid */ + if ( ! is_valid_ether_addr ( netdev->hw_addr ) ) { + DBGC ( smscusb, "SMSCUSB %p has no layout %#02x OTP MAC (%s)\n", + smscusb, signature, eth_ntoa ( netdev->hw_addr ) ); + return -ENODEV; + } + + DBGC ( smscusb, "SMSCUSB %p using layout %#02x OTP MAC %s\n", + smscusb, signature, eth_ntoa ( netdev->hw_addr ) ); + return 0; +} + +/****************************************************************************** + * + * Device tree + * + ****************************************************************************** + */ + +/** + * Fetch MAC address from device tree + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +int smscusb_fdt_fetch_mac ( struct smscusb_device *smscusb ) { + struct net_device *netdev = smscusb->netdev; + unsigned int offset; + int rc; + + /* Look for "ethernet[0]" alias */ + if ( ( rc = fdt_alias ( "ethernet", &offset ) != 0 ) && + ( rc = fdt_alias ( "ethernet0", &offset ) != 0 ) ) { + return rc; + } + + /* Fetch MAC address */ + if ( ( rc = fdt_mac ( offset, netdev ) ) != 0 ) + return rc; + + DBGC ( smscusb, "SMSCUSB %p using FDT MAC %s\n", + smscusb, eth_ntoa ( netdev->hw_addr ) ); + return 0; +} + +/****************************************************************************** + * + * MII access + * + ****************************************************************************** + */ + +/** + * Wait for MII to become idle + * + * @v smscusb SMSC USB device + * @ret rc Return status code + */ +static int smscusb_mii_wait ( struct smscusb_device *smscusb ) { + unsigned int base = smscusb->mii_base; + uint32_t mii_access; + unsigned int i; + int rc; + + /* Wait for MIIBZY to become clear */ + for ( i = 0 ; i < SMSCUSB_MII_MAX_WAIT_MS ; i++ ) { + + /* Read MII_ACCESS and check MIIBZY */ + if ( ( rc = smscusb_readl ( smscusb, + ( base + SMSCUSB_MII_ACCESS ), + &mii_access ) ) != 0 ) + return rc; + if ( ! 
( mii_access & SMSCUSB_MII_ACCESS_MIIBZY ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( smscusb, "SMSCUSB %p timed out waiting for MII\n",
	       smscusb );
	return -ETIMEDOUT;
}

/**
 * Read from MII register
 *
 * @v mdio		MII interface
 * @v phy		PHY address (ignored; the PHY address is fixed
 *			by SMSCUSB_MII_ACCESS_PHY_ADDRESS)
 * @v reg		Register address
 * @ret value		Data read, or negative error
 */
static int smscusb_mii_read ( struct mii_interface *mdio,
			      unsigned int phy __unused, unsigned int reg ) {
	struct smscusb_device *smscusb =
		container_of ( mdio, struct smscusb_device, mdio );
	unsigned int base = smscusb->mii_base;
	uint32_t mii_access;
	uint32_t mii_data;
	int rc;

	/* Wait for MII to become idle */
	if ( ( rc = smscusb_mii_wait ( smscusb ) ) != 0 )
		return rc;

	/* Initiate read command (setting MIIBZY starts the access) */
	mii_access = ( SMSCUSB_MII_ACCESS_PHY_ADDRESS |
		       SMSCUSB_MII_ACCESS_MIIRINDA ( reg ) |
		       SMSCUSB_MII_ACCESS_MIIBZY );
	if ( ( rc = smscusb_writel ( smscusb, ( base + SMSCUSB_MII_ACCESS ),
				     mii_access ) ) != 0 )
		return rc;

	/* Wait for command to complete */
	if ( ( rc = smscusb_mii_wait ( smscusb ) ) != 0 )
		return rc;

	/* Read MII data */
	if ( ( rc = smscusb_readl ( smscusb, ( base + SMSCUSB_MII_DATA ),
				    &mii_data ) ) != 0 )
		return rc;

	return SMSCUSB_MII_DATA_GET ( mii_data );
}

/**
 * Write to MII register
 *
 * @v mdio		MII interface
 * @v phy		PHY address (ignored; the PHY address is fixed
 *			by SMSCUSB_MII_ACCESS_PHY_ADDRESS)
 * @v reg		Register address
 * @v data		Data to write
 * @ret rc		Return status code
 */
static int smscusb_mii_write ( struct mii_interface *mdio,
			       unsigned int phy __unused, unsigned int reg,
			       unsigned int data ) {
	struct smscusb_device *smscusb =
		container_of ( mdio, struct smscusb_device, mdio );
	unsigned int base = smscusb->mii_base;
	uint32_t mii_access;
	uint32_t mii_data;
	int rc;

	/* Wait for MII to become idle */
	if ( ( rc = smscusb_mii_wait ( smscusb ) ) != 0 )
		return rc;

	/* Write MII data (must be in place before the command is
	 * initiated)
	 */
	mii_data = SMSCUSB_MII_DATA_SET ( data );
	if ( ( rc = smscusb_writel ( smscusb, ( base + SMSCUSB_MII_DATA ),
				     mii_data ) ) != 0 )
		return rc;

	/* Initiate write command */
	mii_access = ( SMSCUSB_MII_ACCESS_PHY_ADDRESS |
		       SMSCUSB_MII_ACCESS_MIIRINDA ( reg ) |
		       SMSCUSB_MII_ACCESS_MIIWNR |
		       SMSCUSB_MII_ACCESS_MIIBZY );
	if ( ( rc = smscusb_writel ( smscusb, ( base + SMSCUSB_MII_ACCESS ),
				     mii_access ) ) != 0 )
		return rc;

	/* Wait for command to complete */
	if ( ( rc = smscusb_mii_wait ( smscusb ) ) != 0 )
		return rc;

	return 0;
}

/** MII operations */
struct mii_operations smscusb_mii_operations = {
	.read = smscusb_mii_read,
	.write = smscusb_mii_write,
};

/**
 * Check link status
 *
 * @v smscusb		SMSC USB device
 * @ret rc		Return status code
 */
int smscusb_mii_check_link ( struct smscusb_device *smscusb ) {
	struct net_device *netdev = smscusb->netdev;
	int intr;
	int rc;

	/* Read PHY interrupt source */
	intr = mii_read ( &smscusb->mii, smscusb->phy_source );
	if ( intr < 0 ) {
		rc = intr;
		DBGC ( smscusb, "SMSCUSB %p could not get PHY interrupt "
		       "source: %s\n", smscusb, strerror ( rc ) );
		return rc;
	}

	/* Acknowledge PHY interrupt by writing back the source bits */
	if ( ( rc = mii_write ( &smscusb->mii, smscusb->phy_source,
				intr ) ) != 0 ) {
		DBGC ( smscusb, "SMSCUSB %p could not acknowledge PHY "
		       "interrupt: %s\n", smscusb, strerror ( rc ) );
		return rc;
	}

	/* Check link status */
	if ( ( rc = mii_check_link ( &smscusb->mii, netdev ) ) != 0 ) {
		DBGC ( smscusb, "SMSCUSB %p could not check link: %s\n",
		       smscusb, strerror ( rc ) );
		return rc;
	}

	DBGC ( smscusb, "SMSCUSB %p link %s (intr %#04x)\n",
	       smscusb, ( netdev_link_ok ( netdev ) ? "up" : "down" ), intr );
	return 0;
}

/**
 * Enable PHY interrupts and update link status
 *
 * @v smscusb		SMSC USB device
 * @v phy_mask		PHY interrupt mask register
 * @v intrs		PHY interrupts to enable
 * @ret rc		Return status code
 */
int smscusb_mii_open ( struct smscusb_device *smscusb,
		       unsigned int phy_mask, unsigned int intrs ) {
	int rc;

	/* Enable PHY interrupts */
	if ( ( rc = mii_write ( &smscusb->mii, phy_mask, intrs ) ) != 0 ) {
		DBGC ( smscusb, "SMSCUSB %p could not set PHY interrupt "
		       "mask: %s\n", smscusb, strerror ( rc ) );
		return rc;
	}

	/* Update link status (best-effort: the result is deliberately
	 * ignored, since a link check failure does not prevent the
	 * device from being opened)
	 */
	smscusb_mii_check_link ( smscusb );

	return 0;
}

/******************************************************************************
 *
 * Receive filtering
 *
 ******************************************************************************
 */

/**
 * Set receive address
 *
 * @v smscusb		SMSC USB device
 * @v addr_base		Receive address register base
 * @ret rc		Return status code
 */
int smscusb_set_address ( struct smscusb_device *smscusb,
			  unsigned int addr_base ) {
	struct net_device *netdev = smscusb->netdev;
	union smscusb_mac mac;
	int rc;

	/* Copy MAC address (zero-padded to the full register pair) */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, ETH_ALEN );

	/* Write MAC address high register */
	if ( ( rc = smscusb_raw_writel ( smscusb,
					 ( addr_base + SMSCUSB_RX_ADDRH ),
					 mac.addr.h ) ) != 0 )
		return rc;

	/* Write MAC address low register */
	if ( ( rc = smscusb_raw_writel ( smscusb,
					 ( addr_base + SMSCUSB_RX_ADDRL ),
					 mac.addr.l ) ) != 0 )
		return rc;

	return 0;
}

/**
 * Set receive filter
 *
 * @v smscusb		SMSC USB device
 * @v filt_base		Receive filter register base
 * @ret rc		Return status code
 */
int smscusb_set_filter ( struct smscusb_device *smscusb,
			 unsigned int filt_base ) {
	struct net_device *netdev = smscusb->netdev;
	union smscusb_mac mac;
	int rc;

	/* Copy MAC address */
	memset ( &mac, 0,
sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, ETH_ALEN );
	/* Mark perfect filter entry 0 as valid */
	mac.addr.h |= cpu_to_le32 ( SMSCUSB_ADDR_FILTH_VALID );

	/* Write MAC address perfect filter high register */
	if ( ( rc = smscusb_raw_writel ( smscusb,
					 ( filt_base + SMSCUSB_ADDR_FILTH(0) ),
					 mac.addr.h ) ) != 0 )
		return rc;

	/* Write MAC address perfect filter low register */
	if ( ( rc = smscusb_raw_writel ( smscusb,
					 ( filt_base + SMSCUSB_ADDR_FILTL(0) ),
					 mac.addr.l ) ) != 0 )
		return rc;

	return 0;
}

/******************************************************************************
 *
 * Endpoint operations
 *
 ******************************************************************************
 */

/**
 * Complete interrupt transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void smscusb_intr_complete ( struct usb_endpoint *ep,
				    struct io_buffer *iobuf, int rc ) {
	struct smscusb_device *smscusb =
		container_of ( ep, struct smscusb_device, usbnet.intr );
	struct net_device *netdev = smscusb->netdev;
	struct smscusb_interrupt *intr;

	/* Profile completions */
	profile_start ( &smscusb_intr_profiler );

	/* Ignore packets cancelled when the endpoint closes */
	if ( !
ep->open ) + goto done; + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( smscusb, "SMSCUSB %p interrupt failed: %s\n", + smscusb, strerror ( rc ) ); + DBGC_HDA ( smscusb, 0, iobuf->data, iob_len ( iobuf ) ); + netdev_rx_err ( netdev, NULL, rc ); + goto done; + } + + /* Extract interrupt data */ + if ( iob_len ( iobuf ) != sizeof ( *intr ) ) { + DBGC ( smscusb, "SMSCUSB %p malformed interrupt\n", + smscusb ); + DBGC_HDA ( smscusb, 0, iobuf->data, iob_len ( iobuf ) ); + netdev_rx_err ( netdev, NULL, rc ); + goto done; + } + intr = iobuf->data; + + /* Record interrupt status */ + smscusb->int_sts = le32_to_cpu ( intr->int_sts ); + profile_stop ( &smscusb_intr_profiler ); + + done: + /* Free I/O buffer */ + free_iob ( iobuf ); +} + +/** Interrupt endpoint operations */ +struct usb_endpoint_driver_operations smscusb_intr_operations = { + .complete = smscusb_intr_complete, +}; + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void smscusb_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct smscusb_device *smscusb = + container_of ( ep, struct smscusb_device, usbnet.out ); + struct net_device *netdev = smscusb->netdev; + + /* Report TX completion */ + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +struct usb_endpoint_driver_operations smscusb_out_operations = { + .complete = smscusb_out_complete, +}; diff --git a/src/drivers/net/smscusb.h b/src/drivers/net/smscusb.h new file mode 100644 index 00000000..e866bb74 --- /dev/null +++ b/src/drivers/net/smscusb.h @@ -0,0 +1,299 @@ +#ifndef _SMSCUSB_H +#define _SMSCUSB_H + +/** @file + * + * SMSC USB Ethernet drivers + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** Register write command */ +#define SMSCUSB_REGISTER_WRITE \ + ( USB_DIR_OUT | 
USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0xa0 ) ) + +/** Register read command */ +#define SMSCUSB_REGISTER_READ \ + ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0xa1 ) ) + +/** Get statistics command */ +#define SMSCUSB_GET_STATISTICS \ + ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0xa2 ) ) + +/** EEPROM command register offset */ +#define SMSCUSB_E2P_CMD 0x000 +#define SMSCUSB_E2P_CMD_EPC_BSY 0x80000000UL /**< EPC busy */ +#define SMSCUSB_E2P_CMD_EPC_CMD_READ 0x00000000UL /**< READ command */ +#define SMSCUSB_E2P_CMD_EPC_ADDR(addr) ( (addr) << 0 ) /**< EPC address */ + +/** EEPROM data register offset */ +#define SMSCUSB_E2P_DATA 0x004 +#define SMSCUSB_E2P_DATA_GET(e2p_data) \ + ( ( (e2p_data) >> 0 ) & 0xff ) /**< EEPROM data */ + +/** MAC address EEPROM address */ +#define SMSCUSB_EEPROM_MAC 0x01 + +/** Maximum time to wait for EEPROM (in milliseconds) */ +#define SMSCUSB_EEPROM_MAX_WAIT_MS 100 + +/** OTP power register offset */ +#define SMSCUSB_OTP_POWER 0x000 +#define SMSCUSB_OTP_POWER_DOWN 0x00000001UL /**< OTP power down */ + +/** OTP address high byte register offset */ +#define SMSCUSB_OTP_ADDRH 0x004 + +/** OTP address low byte register offset */ +#define SMSCUSB_OTP_ADDRL 0x008 + +/** OTP data register offset */ +#define SMSCUSB_OTP_DATA 0x018 +#define SMSCUSB_OTP_DATA_GET(otp_data) \ + ( ( (otp_data) >> 0 ) & 0xff ) /**< OTP data */ + +/** OTP command selection register offset */ +#define SMSCUSB_OTP_CMD 0x020 +#define SMSCUSB_OTP_CMD_READ 0x00000001UL /**< Read command */ + +/** OTP command initiation register offset */ +#define SMSCUSB_OTP_GO 0x028 +#define SMSCUSB_OTP_GO_GO 0x00000001UL /**< Initiate command */ + +/** OTP status register offset */ +#define SMSCUSB_OTP_STATUS 0x030 +#define SMSCUSB_OTP_STATUS_BUSY 0x00000001UL /**< OTP busy */ + +/** Maximum time to wait for OTP (in milliseconds) */ +#define SMSCUSB_OTP_MAX_WAIT_MS 100 + +/** OTP layout 1 signature */ 
+#define SMSCUSB_OTP_1_SIG 0xf3 + +/** OTP layout 1 MAC address offset */ +#define SMSCUSB_OTP_1_MAC 0x001 + +/** OTP layout 2 signature */ +#define SMSCUSB_OTP_2_SIG 0xf7 + +/** OTP layout 2 MAC address offset */ +#define SMSCUSB_OTP_2_MAC 0x101 + +/** MII access register offset */ +#define SMSCUSB_MII_ACCESS 0x000 +#define SMSCUSB_MII_ACCESS_PHY_ADDRESS 0x00000800UL /**< PHY address */ +#define SMSCUSB_MII_ACCESS_MIIRINDA(addr) ( (addr) << 6 ) /**< MII register */ +#define SMSCUSB_MII_ACCESS_MIIWNR 0x00000002UL /**< MII write */ +#define SMSCUSB_MII_ACCESS_MIIBZY 0x00000001UL /**< MII busy */ + +/** MII data register offset */ +#define SMSCUSB_MII_DATA 0x004 +#define SMSCUSB_MII_DATA_SET(data) ( (data) << 0 ) /**< Set data */ +#define SMSCUSB_MII_DATA_GET(mii_data) \ + ( ( (mii_data) >> 0 ) & 0xffff ) /**< Get data */ + +/** Maximum time to wait for MII (in milliseconds) */ +#define SMSCUSB_MII_MAX_WAIT_MS 100 + +/** MAC address */ +union smscusb_mac { + /** MAC receive address registers */ + struct { + /** MAC receive address low register */ + uint32_t l; + /** MAC receive address high register */ + uint32_t h; + } __attribute__ (( packed )) addr; + /** Raw MAC address */ + uint8_t raw[ETH_ALEN]; +}; + +/** MAC receive address high register offset */ +#define SMSCUSB_RX_ADDRH 0x000 + +/** MAC receive address low register offset */ +#define SMSCUSB_RX_ADDRL 0x004 + +/** MAC address perfect filter N high register offset */ +#define SMSCUSB_ADDR_FILTH(n) ( 0x000 + ( 8 * (n) ) ) +#define SMSCUSB_ADDR_FILTH_VALID 0x80000000UL /**< Address valid */ + +/** MAC address perfect filter N low register offset */ +#define SMSCUSB_ADDR_FILTL(n) ( 0x004 + ( 8 * (n) ) ) + +/** Interrupt packet format */ +struct smscusb_interrupt { + /** Current value of INT_STS register */ + uint32_t int_sts; +} __attribute__ (( packed )); + +/** An SMSC USB device */ +struct smscusb_device { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** Network 
device */
	struct net_device *netdev;
	/** USB network device */
	struct usbnet_device usbnet;
	/** MII interface */
	struct mii_interface mdio;
	/** MII device */
	struct mii_device mii;
	/** MII register base */
	uint16_t mii_base;
	/** PHY interrupt source register */
	uint16_t phy_source;
	/** Interrupt status */
	uint32_t int_sts;
};

extern int smscusb_raw_writel ( struct smscusb_device *smscusb,
				unsigned int address, uint32_t value );
extern int smscusb_raw_readl ( struct smscusb_device *smscusb,
			       unsigned int address, uint32_t *value );

/**
 * Write register
 *
 * Thin wrapper around smscusb_raw_writel() that byte-swaps the value
 * to little-endian before writing.
 *
 * @v smscusb		SMSC USB device
 * @v address		Register address
 * @v value		Register value
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
smscusb_writel ( struct smscusb_device *smscusb, unsigned int address,
		 uint32_t value ) {
	int rc;

	/* Write register */
	if ( ( rc = smscusb_raw_writel ( smscusb, address,
					 cpu_to_le32 ( value ) ) ) != 0 )
		return rc;

	return 0;
}

/**
 * Read register
 *
 * Thin wrapper around smscusb_raw_readl() that byte-swaps the value
 * from little-endian after reading.
 *
 * @v smscusb		SMSC USB device
 * @v address		Register address
 * @ret value		Register value
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
smscusb_readl ( struct smscusb_device *smscusb, unsigned int address,
		uint32_t *value ) {
	int rc;

	/* Read register */
	if ( ( rc = smscusb_raw_readl ( smscusb, address, value ) ) != 0 )
		return rc;
	le32_to_cpus ( value );

	return 0;
}

/**
 * Get statistics
 *
 * @v smscusb		SMSC USB device
 * @v index		Statistics set index
 * @v data		Statistics data to fill in
 * @v len		Length of statistics data
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
smscusb_get_statistics ( struct smscusb_device *smscusb, unsigned int index,
			 void *data, size_t len ) {
	int rc;

	/* Read statistics */
	if ( ( rc = usb_control ( smscusb->usb, SMSCUSB_GET_STATISTICS, 0,
				  index, data, len ) ) != 0 ) {
		DBGC ( smscusb, "SMSCUSB %p could not get statistics set %d: "
		       "%s\n", smscusb, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/** Interrupt maximum fill level
 *
 * This is a policy decision.
 */
#define SMSCUSB_INTR_MAX_FILL 2

extern struct usb_endpoint_driver_operations smscusb_intr_operations;
extern struct usb_endpoint_driver_operations smscusb_out_operations;
extern struct mii_operations smscusb_mii_operations;

/**
 * Initialise SMSC USB device
 *
 * @v smscusb		SMSC USB device
 * @v netdev		Network device
 * @v func		USB function
 * @v in		Bulk IN endpoint operations
 */
static inline __attribute__ (( always_inline )) void
smscusb_init ( struct smscusb_device *smscusb, struct net_device *netdev,
	       struct usb_function *func,
	       struct usb_endpoint_driver_operations *in ) {
	struct usb_device *usb = func->usb;

	smscusb->usb = usb;
	smscusb->bus = usb->port->hub->bus;
	smscusb->netdev = netdev;
	usbnet_init ( &smscusb->usbnet, func, &smscusb_intr_operations, in,
		      &smscusb_out_operations );
	usb_refill_init ( &smscusb->usbnet.intr, 0, 0, SMSCUSB_INTR_MAX_FILL );
}

/**
 * Initialise SMSC USB device MII interface
 *
 * @v smscusb		SMSC USB device
 * @v mii_base		MII register base
 * @v phy_source	Interrupt source PHY register
 */
static inline __attribute__ (( always_inline )) void
smscusb_mii_init ( struct smscusb_device *smscusb, unsigned int mii_base,
		   unsigned int phy_source ) {

	mdio_init ( &smscusb->mdio, &smscusb_mii_operations );
	mii_init ( &smscusb->mii, &smscusb->mdio, 0 );
	smscusb->mii_base = mii_base;
	smscusb->phy_source = phy_source;
}

extern int smscusb_eeprom_fetch_mac ( struct smscusb_device *smscusb,
				      unsigned int e2p_base );
extern int smscusb_otp_fetch_mac ( struct smscusb_device *smscusb,
				   unsigned int otp_base );
extern int smscusb_fdt_fetch_mac ( struct smscusb_device *smscusb );
extern int smscusb_mii_check_link ( struct smscusb_device *smscusb );
extern int
smscusb_mii_open ( struct smscusb_device *smscusb, + unsigned int phy_mask, unsigned int intrs ); +extern int smscusb_set_address ( struct smscusb_device *smscusb, + unsigned int addr_base ); +extern int smscusb_set_filter ( struct smscusb_device *smscusb, + unsigned int filt_base ); + +#endif /* _SMSCUSB_H */ diff --git a/src/drivers/net/thunderx.c b/src/drivers/net/thunderx.c new file mode 100644 index 00000000..1865a9b9 --- /dev/null +++ b/src/drivers/net/thunderx.c @@ -0,0 +1,1716 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "thunderx.h" +#include "thunderxcfg.h" + +/** @file + * + * Cavium ThunderX Ethernet driver + * + */ + +/** List of BGX Ethernet interfaces */ +static LIST_HEAD ( txnic_bgxs ); + +/** List of physical functions */ +static LIST_HEAD ( txnic_pfs ); + +/** Debug colour for physical function and BGX messages */ +#define TXNICCOL(x) ( &txnic_pfs + (x)->node ) + +/** Board configuration protocol */ +static EFI_THUNDER_CONFIG_PROTOCOL *txcfg; +EFI_REQUEST_PROTOCOL ( EFI_THUNDER_CONFIG_PROTOCOL, &txcfg ); + +/****************************************************************************** + * + * Diagnostics + * + ****************************************************************************** + */ + +/** + * Show virtual NIC diagnostics (for debugging) + * + * @v vnic Virtual NIC + */ +static __attribute__ (( unused )) void txnic_diag ( struct txnic *vnic ) { + + DBGC ( vnic, "TXNIC %s SQ %05zx(%05llx)/%05zx(%05llx) %08llx\n", + vnic->name, + ( ( vnic->sq.prod % TXNIC_SQES ) * TXNIC_SQ_STRIDE ), + readq ( vnic->regs + TXNIC_QS_SQ_TAIL(0) ), + ( ( vnic->sq.cons % TXNIC_SQES ) * TXNIC_SQ_STRIDE ), + readq ( vnic->regs + TXNIC_QS_SQ_HEAD(0) ), + readq ( vnic->regs + TXNIC_QS_SQ_STATUS(0) ) ); + DBGC ( vnic, "TXNIC %s RQ %05zx(%05llx)/%05zx(%05llx) %016llx\n", + vnic->name, + ( ( vnic->rq.prod % TXNIC_RQES ) * TXNIC_RQ_STRIDE ), + readq ( vnic->regs + TXNIC_QS_RBDR_TAIL(0) ), + ( ( vnic->rq.cons % TXNIC_RQES ) * TXNIC_RQ_STRIDE ), + readq ( vnic->regs + TXNIC_QS_RBDR_HEAD(0) ), + readq ( vnic->regs + TXNIC_QS_RBDR_STATUS0(0) ) ); + DBGC ( vnic, "TXNIC %s CQ xxxxx(%05llx)/%05x(%05llx) %08llx:%08llx\n", + vnic->name, readq ( vnic->regs + TXNIC_QS_CQ_TAIL(0) ), + ( ( vnic->cq.cons % TXNIC_CQES ) * TXNIC_CQ_STRIDE ), + readq ( vnic->regs + TXNIC_QS_CQ_HEAD(0) ), + 
readq ( vnic->regs + TXNIC_QS_CQ_STATUS(0) ),
	       readq ( vnic->regs + TXNIC_QS_CQ_STATUS2(0) ) );
}

/******************************************************************************
 *
 * Send queue
 *
 ******************************************************************************
 */

/**
 * Create send queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_create_sq ( struct txnic *vnic ) {

	/* Reset send queue */
	vnic->sq.prod = 0;
	vnic->sq.cons = 0;
	writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	/* Configure and enable send queue */
	writeq ( user_to_phys ( vnic->sq.sqe, 0 ),
		 ( vnic->regs + TXNIC_QS_SQ_BASE(0) ) );
	writeq ( ( TXNIC_QS_SQ_CFG_ENA | TXNIC_QS_SQ_CFG_QSIZE_1K ),
		 ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s SQ at [%08lx,%08lx)\n",
	       vnic->name, user_to_phys ( vnic->sq.sqe, 0 ),
	       user_to_phys ( vnic->sq.sqe, TXNIC_SQ_SIZE ) );
	return 0;
}

/**
 * Disable send queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_disable_sq ( struct txnic *vnic ) {
	uint64_t status;
	unsigned int i;

	/* Disable send queue */
	writeq ( 0, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	/* Wait for send queue to be stopped (1ms per iteration, up to
	 * TXNIC_SQ_STOP_MAX_WAIT_MS milliseconds)
	 */
	for ( i = 0 ; i < TXNIC_SQ_STOP_MAX_WAIT_MS ; i++ ) {

		/* Check if send queue is stopped */
		status = readq ( vnic->regs + TXNIC_QS_SQ_STATUS(0) );
		if ( status & TXNIC_QS_SQ_STATUS_STOPPED )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( vnic, "TXNIC %s SQ disable timed out\n", vnic->name );
	return -ETIMEDOUT;
}

/**
 * Destroy send queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_destroy_sq ( struct txnic *vnic ) {
	int rc;

	/* Disable send queue; if the hardware fails to stop we must
	 * not reset it while potentially still in use.
	 */
	if ( ( rc = txnic_disable_sq ( vnic ) ) != 0 ) {
		/* Nothing else we can do */
		return;
	}

	/* Reset send queue */
	writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );
}

/**
 * Send packet
 *
 * @v vnic		Virtual NIC
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int txnic_send ( struct txnic *vnic, struct io_buffer *iobuf ) {
	struct txnic_sqe sqe;
	unsigned int sq_idx;
	size_t offset;
	size_t len;

	/* Get next send queue entry */
	if ( ( vnic->sq.prod - vnic->sq.cons ) >= TXNIC_SQ_FILL ) {
		DBGC ( vnic, "TXNIC %s out of send queue entries\n",
		       vnic->name );
		return -ENOBUFS;
	}
	sq_idx = ( vnic->sq.prod++ % TXNIC_SQES );
	offset = ( sq_idx * TXNIC_SQ_STRIDE );

	/* Populate send descriptor.  The total length is padded up to
	 * ETH_ZLEN (minimum Ethernet frame size) while the gather
	 * entry uses the real buffer length.
	 */
	len = iob_len ( iobuf );
	memset ( &sqe, 0, sizeof ( sqe ) );
	sqe.hdr.total = cpu_to_le32 ( ( len >= ETH_ZLEN ) ? len : ETH_ZLEN );
	sqe.hdr.subdcnt = ( TXNIC_SQE_SUBDESCS - 1 );
	sqe.hdr.flags = TXNIC_SEND_HDR_FLAGS;
	sqe.gather.size = cpu_to_le16 ( len );
	sqe.gather.flags = TXNIC_SEND_GATHER_FLAGS;
	sqe.gather.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
	DBGC2 ( vnic, "TXNIC %s SQE %#03x is [%08lx,%08lx)\n",
		vnic->name, sq_idx, virt_to_bus ( iobuf->data ),
		( virt_to_bus ( iobuf->data ) + len ) );

	/* Copy send descriptor to ring */
	copy_to_user ( vnic->sq.sqe, offset, &sqe, sizeof ( sqe ) );

	/* Ring doorbell; the write barrier ensures the descriptor is
	 * visible before the doorbell write.
	 */
	wmb();
	writeq ( TXNIC_SQE_SUBDESCS, ( vnic->regs + TXNIC_QS_SQ_DOOR(0) ) );

	return 0;
}

/**
 * Complete send queue entry
 *
 * @v vnic		Virtual NIC
 * @v cqe		Send completion queue entry
 */
static void txnic_complete_sqe ( struct txnic *vnic,
				 struct txnic_cqe_send *cqe ) {
	struct net_device *netdev = vnic->netdev;
	unsigned int sq_idx;
	unsigned int status;

	/* Parse completion */
	sq_idx = ( le16_to_cpu ( cqe->sqe_ptr ) / TXNIC_SQE_SUBDESCS );
	status = cqe->send_status;

	/* Sanity check: completions must arrive in order */
	assert ( sq_idx == ( vnic->sq.cons % TXNIC_SQES ) );

	/* Free send queue entry */
	vnic->sq.cons++;

	/* Complete transmission */
	if ( status ) {
		DBGC ( vnic, "TXNIC %s SQE %#03x complete (status %#02x)\n",
		       vnic->name, sq_idx, status );
		netdev_tx_complete_next_err ( netdev, -EIO );
	} else {
		DBGC2 ( vnic, "TXNIC %s SQE %#03x complete\n",
			vnic->name, sq_idx );
		netdev_tx_complete_next ( netdev );
	}
}

/******************************************************************************
 *
 * Receive queue
 *
 ******************************************************************************
 */

/**
 * Create receive queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_create_rq ( struct txnic *vnic ) {

	/* Reset receive buffer descriptor ring */
	vnic->rq.prod = 0;
	vnic->rq.cons = 0;
	writeq ( TXNIC_QS_RBDR_CFG_RESET,
		 ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Configure and enable receive buffer descriptor ring */
	writeq ( user_to_phys ( vnic->rq.rqe, 0 ),
		 ( vnic->regs + TXNIC_QS_RBDR_BASE(0) ) );
	writeq ( ( TXNIC_QS_RBDR_CFG_ENA | TXNIC_QS_RBDR_CFG_QSIZE_8K |
		   TXNIC_QS_RBDR_CFG_LINES ( TXNIC_RQE_SIZE /
					     TXNIC_LINE_SIZE ) ),
		 ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Enable receive queue */
	writeq ( TXNIC_QS_RQ_CFG_ENA, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s RQ at [%08lx,%08lx)\n",
	       vnic->name, user_to_phys ( vnic->rq.rqe, 0 ),
	       user_to_phys ( vnic->rq.rqe, TXNIC_RQ_SIZE ) );
	return 0;
}

/**
 * Disable receive queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_disable_rq ( struct txnic *vnic ) {
	uint64_t cfg;
	unsigned int i;

	/* Disable receive queue */
	writeq ( 0, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );

	/* Wait for receive queue to be disabled */
	for ( i = 0 ; i < TXNIC_RQ_DISABLE_MAX_WAIT_MS ; i++ ) {

		/* Check if receive queue is disabled */
		cfg = readq ( vnic->regs + TXNIC_QS_RQ_CFG(0) );
		if ( ! ( cfg & TXNIC_QS_RQ_CFG_ENA ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( vnic, "TXNIC %s RQ disable timed out\n", vnic->name );
	return -ETIMEDOUT;
}

/**
 * Destroy receive queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_destroy_rq ( struct txnic *vnic ) {
	unsigned int i;
	int rc;

	/* Disable receive queue; if it cannot be stopped the buffers
	 * may still be in use by hardware and must not be freed.
	 */
	if ( ( rc = txnic_disable_rq ( vnic ) ) != 0 ) {
		/* Leak memory; there's nothing else we can do */
		return;
	}

	/* Disable receive buffer descriptor ring */
	writeq ( 0, ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Reset receive buffer descriptor ring */
	writeq ( TXNIC_QS_RBDR_CFG_RESET,
		 ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Free any unused I/O buffers */
	for ( i = 0 ; i < TXNIC_RQ_FILL ; i++ ) {
		if ( vnic->rq.iobuf[i] )
			free_iob ( vnic->rq.iobuf[i] );
		vnic->rq.iobuf[i] = NULL;
	}
}

/**
 * Refill receive queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_refill_rq ( struct txnic *vnic ) {
	struct io_buffer *iobuf;
	struct txnic_rqe rqe;
	unsigned int rq_idx;
	unsigned int rq_iobuf_idx;
	unsigned int refilled = 0;
	size_t offset;

	/* Refill ring up to the target fill level */
	while ( ( vnic->rq.prod - vnic->rq.cons ) < TXNIC_RQ_FILL ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( TXNIC_RQE_SIZE );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Get next receive descriptor */
		rq_idx = ( vnic->rq.prod++ % TXNIC_RQES );
		offset = ( rq_idx * TXNIC_RQ_STRIDE );

		/* Populate receive descriptor */
		rqe.rbdre.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
		DBGC2 ( vnic, "TXNIC %s RQE %#03x is [%08lx,%08lx)\n",
			vnic->name, rq_idx, virt_to_bus ( iobuf->data ),
			( virt_to_bus ( iobuf->data ) + TXNIC_RQE_SIZE ) );

		/* Copy receive descriptor to ring */
		copy_to_user ( vnic->rq.rqe, offset, &rqe, sizeof ( rqe ) );
		refilled++;

		/* Record I/O buffer */
		rq_iobuf_idx = ( rq_idx % TXNIC_RQ_FILL );
		assert ( vnic->rq.iobuf[rq_iobuf_idx] == NULL );
		vnic->rq.iobuf[rq_iobuf_idx] = iobuf;
	}

	/* Ring doorbell with the number of newly added descriptors
	 * (written unconditionally; a zero write occurs when no
	 * buffers were added)
	 */
	wmb();
	writeq ( refilled, ( vnic->regs + TXNIC_QS_RBDR_DOOR(0) ) );
}

/**
 * Complete receive queue entry
 *
 * @v vnic		Virtual NIC
 * @v cqe		Receive completion queue entry
 */
static void txnic_complete_rqe ( struct txnic *vnic,
				 struct txnic_cqe_rx *cqe ) {
	struct net_device *netdev = vnic->netdev;
	struct io_buffer *iobuf;
	unsigned int errop;
	unsigned int rq_idx;
	unsigned int rq_iobuf_idx;
	size_t apad_len;
	size_t len;

	/* Parse completion */
	errop = cqe->errop;
	apad_len = TXNIC_CQE_RX_APAD_LEN ( cqe->apad );
	len = le16_to_cpu ( cqe->len );

	/* Get next receive I/O buffer */
	rq_idx = ( vnic->rq.cons++ % TXNIC_RQES );
	rq_iobuf_idx = ( rq_idx % TXNIC_RQ_FILL );
	iobuf = vnic->rq.iobuf[rq_iobuf_idx];
	vnic->rq.iobuf[rq_iobuf_idx] = NULL;

	/* Populate I/O buffer: skip the alignment pad inserted by the
	 * hardware, then mark the received length.
	 */
	iob_reserve ( iobuf, apad_len );
	iob_put ( iobuf, len );

	/* Hand off to network stack */
	if ( errop ) {
		DBGC ( vnic, "TXNIC %s RQE %#03x error (length %zd, errop "
		       "%#02x)\n", vnic->name, rq_idx, len, errop );
		netdev_rx_err ( netdev, iobuf, -EIO );
	} else {
		DBGC2 ( vnic, "TXNIC %s RQE %#03x complete (length %zd)\n",
			vnic->name, rq_idx, len );
		netdev_rx ( netdev, iobuf );
	}
}


/******************************************************************************
 *
 * Completion queue
 *
 ******************************************************************************
 */

/**
 * Create completion queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_create_cq ( struct txnic *vnic ) {

	/* Reset completion queue */
	vnic->cq.cons = 0;
	writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	/* Configure and enable completion queue */
	writeq ( user_to_phys ( vnic->cq.cqe, 0 ),
		 ( vnic->regs + TXNIC_QS_CQ_BASE(0) ) );
	writeq ( ( TXNIC_QS_CQ_CFG_ENA | TXNIC_QS_CQ_CFG_QSIZE_256 ),
		 ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s CQ at [%08lx,%08lx)\n",
	       vnic->name, user_to_phys ( vnic->cq.cqe, 0 ),
	       user_to_phys ( vnic->cq.cqe, TXNIC_CQ_SIZE ) );
	return 0;
}

/**
 * Disable completion queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_disable_cq ( struct txnic *vnic ) {
	uint64_t cfg;
	unsigned int i;

	/* Disable completion queue */
	writeq ( 0, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	/* Wait for completion queue to be disabled */
	for ( i = 0 ; i < TXNIC_CQ_DISABLE_MAX_WAIT_MS ; i++ ) {

		/* Check if completion queue is disabled */
		cfg = readq ( vnic->regs + TXNIC_QS_CQ_CFG(0) );
		if ( ! ( cfg & TXNIC_QS_CQ_CFG_ENA ) )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( vnic, "TXNIC %s CQ disable timed out\n", vnic->name );
	return -ETIMEDOUT;
}

/**
 * Destroy completion queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_destroy_cq ( struct txnic *vnic ) {
	int rc;

	/* Disable completion queue */
	if ( ( rc = txnic_disable_cq ( vnic ) ) != 0 ) {
		/* Leak memory; there's nothing else we can do */
		return;
	}

	/* Reset completion queue */
	writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );
}

/**
 * Poll completion queue
 *
 * @v vnic		Virtual NIC
 */
static void txnic_poll_cq ( struct txnic *vnic ) {
	union txnic_cqe cqe;
	uint64_t status;
	size_t offset;
	unsigned int qcount;
	unsigned int cq_idx;
	unsigned int i;

	/* Get number of completions */
	status = readq ( vnic->regs + TXNIC_QS_CQ_STATUS(0) );
	qcount = TXNIC_QS_CQ_STATUS_QCOUNT ( status );
	if ( ! qcount )
		return;

	/* Process completion queue entries */
	for ( i = 0 ; i < qcount ; i++ ) {

		/* Get completion queue entry */
		cq_idx = ( vnic->cq.cons++ % TXNIC_CQES );
		offset = ( cq_idx * TXNIC_CQ_STRIDE );
		copy_from_user ( &cqe, vnic->cq.cqe, offset, sizeof ( cqe ) );

		/* Process completion queue entry */
		switch ( cqe.common.cqe_type ) {
		case TXNIC_CQE_TYPE_SEND:
			txnic_complete_sqe ( vnic, &cqe.send );
			break;
		case TXNIC_CQE_TYPE_RX:
			txnic_complete_rqe ( vnic, &cqe.rx );
			break;
		default:
			DBGC ( vnic, "TXNIC %s unknown completion type %d\n",
			       vnic->name, cqe.common.cqe_type );
			DBGC_HDA ( vnic, user_to_phys ( vnic->cq.cqe, offset ),
				   &cqe, sizeof ( cqe ) );
			break;
		}
	}

	/* Ring doorbell */
	writeq ( qcount, ( vnic->regs + TXNIC_QS_CQ_DOOR(0) ) );
}

/******************************************************************************
 *
 * Virtual NIC
 *
 ******************************************************************************
 */

/**
 * Open virtual NIC
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 */
static int txnic_open ( struct txnic *vnic ) {
	int rc;

	/* Create completion queue */
	if ( ( rc = txnic_create_cq ( vnic ) ) != 0 )
		goto err_create_cq;

	/* Create send queue */
	if ( ( rc = txnic_create_sq ( vnic ) ) != 0 )
		goto err_create_sq;

	/* Create receive queue */
	if ( ( rc = txnic_create_rq ( vnic ) ) != 0 )
		goto err_create_rq;

	/* Refill receive queue */
	txnic_refill_rq ( vnic );

	return 0;

	/* Note: the statement below is unreachable on the success
	 * path; teardown is entered only via the labels below.
	 */
	txnic_destroy_rq ( vnic );
 err_create_rq:
	txnic_destroy_sq ( vnic );
 err_create_sq:
	txnic_destroy_cq ( vnic );
 err_create_cq:
	return rc;
}

/**
 * Close virtual NIC
 *
 * @v vnic		Virtual NIC
 */
static void txnic_close ( struct txnic *vnic ) {

	/* Destroy receive queue */
	txnic_destroy_rq ( vnic );

	/* Destroy send queue */
	txnic_destroy_sq ( vnic );

	/* Destroy completion queue */
	txnic_destroy_cq ( vnic );
}

/**
 * Poll virtual NIC
 *
 * @v vnic		Virtual NIC
 */
static void txnic_poll ( struct txnic *vnic ) {

	/* Poll completion queue */
	txnic_poll_cq ( vnic );

	/* Refill receive queue */
	txnic_refill_rq ( vnic );
}

/**
 * Allocate virtual NIC
 *
 * @v pci		Underlying PCI device
 * @v membase		Register base address
 * @ret vnic		Virtual NIC, or NULL on failure
 */
static struct txnic * txnic_alloc ( struct pci_device *pci,
				    unsigned long membase ) {
	struct net_device *netdev;
	struct txnic *vnic;

	/* Allocate network device */
	netdev = alloc_etherdev ( sizeof ( *vnic ) );
	if ( ! netdev )
		goto err_alloc_netdev;
	netdev->dev = &pci->dev;
	vnic = netdev->priv;
	vnic->netdev = netdev;
	vnic->name = pci->dev.name;

	/* Allow caller to reuse netdev->priv.  (The generic virtual
	 * NIC code never assumes that netdev->priv==vnic.)
	 */
	netdev->priv = NULL;

	/* Allocate completion queue */
	vnic->cq.cqe = umalloc ( TXNIC_CQ_SIZE );
	if ( ! vnic->cq.cqe )
		goto err_alloc_cq;

	/* Allocate send queue */
	vnic->sq.sqe = umalloc ( TXNIC_SQ_SIZE );
	if ( ! vnic->sq.sqe )
		goto err_alloc_sq;

	/* Allocate receive queue */
	vnic->rq.rqe = umalloc ( TXNIC_RQ_SIZE );
	if ( ! vnic->rq.rqe )
		goto err_alloc_rq;

	/* Map registers */
	vnic->regs = pci_ioremap ( pci, membase, TXNIC_VF_BAR_SIZE );
	if ( ! vnic->regs )
		goto err_ioremap;

	return vnic;

	/* Note: unreachable on success; teardown is entered only via
	 * the labels below.
	 */
	iounmap ( vnic->regs );
 err_ioremap:
	ufree ( vnic->rq.rqe );
 err_alloc_rq:
	ufree ( vnic->sq.sqe );
 err_alloc_sq:
	ufree ( vnic->cq.cqe );
 err_alloc_cq:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc_netdev:
	return NULL;
}

/**
 * Free virtual NIC
 *
 * @v vnic		Virtual NIC
 */
static void txnic_free ( struct txnic *vnic ) {
	struct net_device *netdev = vnic->netdev;

	/* Unmap registers */
	iounmap ( vnic->regs );

	/* Free receive queue */
	ufree ( vnic->rq.rqe );

	/* Free send queue */
	ufree ( vnic->sq.sqe );

	/* Free completion queue */
	ufree ( vnic->cq.cqe );

	/* Free network device */
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}

/******************************************************************************
 *
 * Logical MAC virtual NICs
 *
 ******************************************************************************
 */

/**
 * Show LMAC diagnostics (for debugging)
 *
 * @v lmac		Logical MAC
 */
static __attribute__ (( unused )) void
txnic_lmac_diag ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	uint64_t status1;
	uint64_t status2;
	uint64_t br_status1;
	uint64_t br_status2;
	uint64_t br_algn_status;
	uint64_t br_pmd_status;
	uint64_t an_status;

	/* Read status (clearing latching bits) */
	writeq ( BGX_SPU_STATUS1_RCV_LNK, ( lmac->regs + BGX_SPU_STATUS1 ) );
	writeq ( BGX_SPU_STATUS2_RCVFLT, ( lmac->regs + BGX_SPU_STATUS2 ) );
	status1 = readq ( lmac->regs + BGX_SPU_STATUS1 );
	status2 = readq ( lmac->regs +
BGX_SPU_STATUS2 ); + DBGC ( vnic, "TXNIC %s SPU %02llx:%04llx%s%s%s\n", + vnic->name, status1, status2, + ( ( status1 & BGX_SPU_STATUS1_FLT ) ? " FLT" : "" ), + ( ( status1 & BGX_SPU_STATUS1_RCV_LNK ) ? " RCV_LNK" : "" ), + ( ( status2 & BGX_SPU_STATUS2_RCVFLT ) ? " RCVFLT" : "" ) ); + + /* Read BASE-R status (clearing latching bits) */ + writeq ( ( BGX_SPU_BR_STATUS2_LATCHED_LOCK | + BGX_SPU_BR_STATUS2_LATCHED_BER ), + ( lmac->regs + BGX_SPU_BR_STATUS2 ) ); + br_status1 = readq ( lmac->regs + BGX_SPU_BR_STATUS1 ); + br_status2 = readq ( lmac->regs + BGX_SPU_BR_STATUS2 ); + DBGC ( vnic, "TXNIC %s BR %04llx:%04llx%s%s%s%s%s\n", + vnic->name, br_status2, br_status2, + ( ( br_status1 & BGX_SPU_BR_STATUS1_RCV_LNK ) ? " RCV_LNK" : ""), + ( ( br_status1 & BGX_SPU_BR_STATUS1_HI_BER ) ? " HI_BER" : "" ), + ( ( br_status1 & BGX_SPU_BR_STATUS1_BLK_LOCK ) ? + " BLK_LOCK" : "" ), + ( ( br_status2 & BGX_SPU_BR_STATUS2_LATCHED_LOCK ) ? + " LATCHED_LOCK" : "" ), + ( ( br_status2 & BGX_SPU_BR_STATUS2_LATCHED_BER ) ? + " LATCHED_BER" : "" ) ); + + /* Read BASE-R alignment status */ + br_algn_status = readq ( lmac->regs + BGX_SPU_BR_ALGN_STATUS ); + DBGC ( vnic, "TXNIC %s BR ALGN %016llx%s\n", vnic->name, br_algn_status, + ( ( br_algn_status & BGX_SPU_BR_ALGN_STATUS_ALIGND ) ? + " ALIGND" : "" ) ); + + /* Read BASE-R link training status */ + br_pmd_status = readq ( lmac->regs + BGX_SPU_BR_PMD_STATUS ); + DBGC ( vnic, "TXNIC %s BR PMD %04llx\n", vnic->name, br_pmd_status ); + + /* Read autonegotiation status (clearing latching bits) */ + writeq ( ( BGX_SPU_AN_STATUS_PAGE_RX | BGX_SPU_AN_STATUS_LINK_STATUS ), + ( lmac->regs + BGX_SPU_AN_STATUS ) ); + an_status = readq ( lmac->regs + BGX_SPU_AN_STATUS ); + DBGC ( vnic, "TXNIC %s BR AN %04llx%s%s%s%s%s\n", vnic->name, an_status, + ( ( an_status & BGX_SPU_AN_STATUS_XNP_STAT ) ? " XNP_STAT" : ""), + ( ( an_status & BGX_SPU_AN_STATUS_PAGE_RX ) ? " PAGE_RX" : "" ), + ( ( an_status & BGX_SPU_AN_STATUS_AN_COMPLETE ) ? 
	 " AN_COMPLETE" : "" ),
	       ( ( an_status & BGX_SPU_AN_STATUS_LINK_STATUS ) ?
		 " LINK_STATUS" : "" ),
	       ( ( an_status & BGX_SPU_AN_STATUS_LP_AN_ABLE ) ?
		 " LP_AN_ABLE" : "" ) );

	/* Read transmit statistics */
	DBGC ( vnic, "TXNIC %s TXF xc %#llx xd %#llx mc %#llx sc %#llx ok "
	       "%#llx bc %#llx mc %#llx un %#llx pa %#llx\n", vnic->name,
	       readq ( lmac->regs + BGX_CMR_TX_STAT0 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT1 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT2 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT3 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT5 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT14 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT15 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT16 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT17 ) );
	DBGC ( vnic, "TXNIC %s TXB ok %#llx hist %#llx:%#llx:%#llx:%#llx:"
	       "%#llx:%#llx:%#llx:%#llx\n", vnic->name,
	       readq ( lmac->regs + BGX_CMR_TX_STAT4 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT6 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT7 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT8 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT9 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT10 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT11 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT12 ),
	       readq ( lmac->regs + BGX_CMR_TX_STAT13 ) );

	/* Read receive statistics */
	DBGC ( vnic, "TXNIC %s RXF ok %#llx pa %#llx nm %#llx ov %#llx er "
	       "%#llx nc %#llx\n", vnic->name,
	       readq ( lmac->regs + BGX_CMR_RX_STAT0 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT2 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT4 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT6 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT8 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT9 ) );
	DBGC ( vnic, "TXNIC %s RXB ok %#llx pa %#llx nm %#llx ov %#llx nc "
	       "%#llx\n", vnic->name,
	       readq ( lmac->regs + BGX_CMR_RX_STAT1 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT3 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT5 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT7 ),
	       readq ( lmac->regs + BGX_CMR_RX_STAT10 ) );
}

/**
 * Update LMAC link state
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_update_link ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	struct net_device *netdev = vnic->netdev;
	uint64_t status1;

	/* Read status (clearing latching bits); the read after the
	 * write-to-clear yields the current (unlatched) link state.
	 */
	writeq ( BGX_SPU_STATUS1_RCV_LNK, ( lmac->regs + BGX_SPU_STATUS1 ) );
	status1 = readq ( lmac->regs + BGX_SPU_STATUS1 );

	/* Report link status */
	if ( status1 & BGX_SPU_STATUS1_RCV_LNK ) {
		netdev_link_up ( netdev );
	} else {
		netdev_link_down ( netdev );
	}
}

/**
 * Poll LMAC link state
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_poll_link ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	uint64_t intr;

	/* Get interrupt status */
	intr = readq ( lmac->regs + BGX_SPU_INT );
	if ( ! intr )
		return;
	DBGC ( vnic, "TXNIC %s INT %04llx%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
	       vnic->name, intr,
	       ( ( intr & BGX_SPU_INT_TRAINING_FAIL ) ? " TRAINING_FAIL" : "" ),
	       ( ( intr & BGX_SPU_INT_TRAINING_DONE ) ? " TRAINING_DONE" : "" ),
	       ( ( intr & BGX_SPU_INT_AN_COMPLETE ) ? " AN_COMPLETE" : "" ),
	       ( ( intr & BGX_SPU_INT_AN_LINK_GOOD ) ? " AN_LINK_GOOD" : "" ),
	       ( ( intr & BGX_SPU_INT_AN_PAGE_RX ) ? " AN_PAGE_RX" : "" ),
	       ( ( intr & BGX_SPU_INT_FEC_UNCORR ) ? " FEC_UNCORR" : "" ),
	       ( ( intr & BGX_SPU_INT_FEC_CORR ) ? " FEC_CORR" : "" ),
	       ( ( intr & BGX_SPU_INT_BIP_ERR ) ? " BIP_ERR" : "" ),
	       ( ( intr & BGX_SPU_INT_DBG_SYNC ) ? " DBG_SYNC" : "" ),
	       ( ( intr & BGX_SPU_INT_ALGNLOS ) ? " ALGNLOS" : "" ),
	       ( ( intr & BGX_SPU_INT_SYNLOS ) ? " SYNLOS" : "" ),
	       ( ( intr & BGX_SPU_INT_BITLCKLS ) ? " BITLCKLS" : "" ),
	       ( ( intr & BGX_SPU_INT_ERR_BLK ) ? " ERR_BLK" : "" ),
	       ( ( intr & BGX_SPU_INT_RX_LINK_DOWN ) ? " RX_LINK_DOWN" : "" ),
	       ( ( intr & BGX_SPU_INT_RX_LINK_UP ) ? " RX_LINK_UP" : "" ) );

	/* Clear interrupt status */
	writeq ( intr, ( lmac->regs + BGX_SPU_INT ) );

	/* Update link state */
	txnic_lmac_update_link ( lmac );
}

/**
 * Reset LMAC
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_reset ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( lmac->idx ) );

	/* There is no reset available for the physical function
	 * aspects of a virtual NIC; we have to explicitly reload a
	 * sensible set of default values.
	 */
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_DROP_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_BP_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_SQ_CFG(0) ) );
}

/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int txnic_lmac_open ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic = lmac->vnic;
	unsigned int vnic_idx = lmac->idx;
	unsigned int chan_idx = TXNIC_CHAN_IDX ( vnic_idx );
	unsigned int tl4_idx = TXNIC_TL4_IDX ( vnic_idx );
	unsigned int tl3_idx = TXNIC_TL3_IDX ( vnic_idx );
	unsigned int tl2_idx = TXNIC_TL2_IDX ( vnic_idx );
	void *lmregs = ( pf->regs + TXNIC_PF_LMAC ( vnic_idx ) );
	void *chregs = ( pf->regs + TXNIC_PF_CHAN ( chan_idx ) );
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( vnic_idx ) );
	size_t max_pkt_size;
	int rc;

	/* Configure channel/match parse indices */
	writeq ( ( TXNIC_PF_MPI_CFG_VNIC ( vnic_idx ) |
		   TXNIC_PF_MPI_CFG_RSSI_BASE ( vnic_idx ) ),
		 ( TXNIC_PF_MPI_CFG ( vnic_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_RSSI_RQ_RQ_QS ( vnic_idx ) ),
		 ( TXNIC_PF_RSSI_RQ ( vnic_idx ) + pf->regs ) );

	/* Configure LMAC */
	max_pkt_size = ( netdev->max_pkt_len + 4 /* possible VLAN */ );
	writeq ( ( TXNIC_PF_LMAC_CFG_ADJUST_DEFAULT |
		   TXNIC_PF_LMAC_CFG_MIN_PKT_SIZE ( ETH_ZLEN ) ),
		 ( TXNIC_PF_LMAC_CFG + lmregs ) );
	writeq ( ( TXNIC_PF_LMAC_CFG2_MAX_PKT_SIZE ( max_pkt_size ) ),
		 ( TXNIC_PF_LMAC_CFG2 + lmregs ) );
	writeq ( ( TXNIC_PF_LMAC_CREDIT_CC_UNIT_CNT_DEFAULT |
		   TXNIC_PF_LMAC_CREDIT_CC_PACKET_CNT_DEFAULT |
		   TXNIC_PF_LMAC_CREDIT_CC_ENABLE ),
		 ( TXNIC_PF_LMAC_CREDIT + lmregs ) );

	/* Configure channels */
	writeq ( ( TXNIC_PF_CHAN_TX_CFG_BP_ENA ),
		 ( TXNIC_PF_CHAN_TX_CFG + chregs ) );
	writeq ( ( TXNIC_PF_CHAN_RX_CFG_CPI_BASE ( vnic_idx ) ),
		 ( TXNIC_PF_CHAN_RX_CFG + chregs ) );
	writeq ( ( TXNIC_PF_CHAN_RX_BP_CFG_ENA |
		   TXNIC_PF_CHAN_RX_BP_CFG_BPID ( vnic_idx ) ),
		 ( TXNIC_PF_CHAN_RX_BP_CFG + chregs ) );

	/* Configure traffic limiters */
	writeq ( ( TXNIC_PF_TL2_CFG_RR_QUANTUM_DEFAULT ),
		 ( TXNIC_PF_TL2_CFG ( tl2_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL3_CFG_RR_QUANTUM_DEFAULT ),
		 ( TXNIC_PF_TL3_CFG ( tl3_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL3_CHAN_CHAN ( chan_idx ) ),
		 ( TXNIC_PF_TL3_CHAN ( tl3_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL4_CFG_SQ_QS ( vnic_idx ) |
		   TXNIC_PF_TL4_CFG_RR_QUANTUM_DEFAULT ),
		 ( TXNIC_PF_TL4_CFG ( tl4_idx ) + pf->regs ) );

	/* Configure send queue */
	writeq ( ( TXNIC_PF_QS_SQ_CFG_CQ_QS ( vnic_idx ) ),
		 ( TXNIC_PF_QS_SQ_CFG(0) + qsregs ) );
	writeq ( ( TXNIC_PF_QS_SQ_CFG2_TL4 ( tl4_idx ) ),
		 ( TXNIC_PF_QS_SQ_CFG2(0) + qsregs ) );

	/* Configure receive queue */
	writeq ( ( TXNIC_PF_QS_RQ_CFG_CACHING_ALL |
		   TXNIC_PF_QS_RQ_CFG_CQ_QS ( vnic_idx ) |
		   TXNIC_PF_QS_RQ_CFG_RBDR_CONT_QS ( vnic_idx ) |
		   TXNIC_PF_QS_RQ_CFG_RBDR_STRT_QS ( vnic_idx ) ),
		 ( TXNIC_PF_QS_RQ_CFG(0) + qsregs ) );
	writeq ( ( TXNIC_PF_QS_RQ_BP_CFG_RBDR_BP_ENA |
		   TXNIC_PF_QS_RQ_BP_CFG_CQ_BP_ENA |
		   TXNIC_PF_QS_RQ_BP_CFG_BPID ( vnic_idx ) ),
		 ( TXNIC_PF_QS_RQ_BP_CFG(0) + qsregs ) );

	/* Enable queue set */
	writeq ( ( TXNIC_PF_QS_CFG_ENA | TXNIC_PF_QS_CFG_VNIC ( vnic_idx ) ),
		 (
TXNIC_PF_QS_CFG + qsregs ) );

	/* Open virtual NIC */
	if ( ( rc = txnic_open ( vnic ) ) != 0 )
		goto err_open;

	/* Update link state */
	txnic_lmac_update_link ( lmac );

	return 0;

	/* Note: unreachable on success; error path enters at err_open */
	txnic_close ( vnic );
 err_open:
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
	return rc;
}

/**
 * Close network device
 *
 * @v netdev		Network device
 */
static void txnic_lmac_close ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic = lmac->vnic;
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( lmac->idx ) );

	/* Close virtual NIC */
	txnic_close ( vnic );

	/* Disable queue set */
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
}

/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int txnic_lmac_transmit ( struct net_device *netdev,
				 struct io_buffer *iobuf ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic *vnic = lmac->vnic;

	return txnic_send ( vnic, iobuf );
}

/**
 * Poll network device
 *
 * @v netdev		Network device
 */
static void txnic_lmac_poll ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic *vnic = lmac->vnic;

	/* Poll virtual NIC */
	txnic_poll ( vnic );

	/* Poll link state */
	txnic_lmac_poll_link ( lmac );
}

/** Network device operations */
static struct net_device_operations txnic_lmac_operations = {
	.open = txnic_lmac_open,
	.close = txnic_lmac_close,
	.transmit = txnic_lmac_transmit,
	.poll = txnic_lmac_poll,
};

/**
 * Probe logical MAC virtual NIC
 *
 * @v lmac		Logical MAC
 * @ret rc		Return status code
 */
static int txnic_lmac_probe ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic;
	struct net_device *netdev;
	unsigned long membase;
	int rc;

	/* Sanity check */
	assert ( lmac->vnic == NULL );

	/* Calculate register base address */
	membase = ( pf->vf_membase + ( lmac->idx * pf->vf_stride ) );

	/* Allocate and initialise network device */
	vnic = txnic_alloc ( bgx->pci, membase );
	if ( ! vnic ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev = vnic->netdev;
	netdev_init ( netdev, &txnic_lmac_operations );
	netdev->priv = lmac;
	lmac->vnic = vnic;

	/* Reset device */
	txnic_lmac_reset ( lmac );

	/* Set MAC address */
	memcpy ( netdev->hw_addr, lmac->mac.raw, ETH_ALEN );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register;
	vnic->name = netdev->name;
	DBGC ( TXNICCOL ( pf ), "TXNIC %d/%d/%d is %s (%s)\n", pf->node,
	       bgx->idx, lmac->idx, vnic->name, eth_ntoa ( lmac->mac.raw ) );

	/* Update link state */
	txnic_lmac_update_link ( lmac );

	return 0;

	/* Note: unreachable on success; error path enters at err_register */
	unregister_netdev ( netdev );
 err_register:
	txnic_lmac_reset ( lmac );
	txnic_free ( vnic );
	lmac->vnic = NULL;
 err_alloc:
	return rc;
}

/**
 * Remove logical MAC virtual NIC
 *
 * @v lmac		Logical MAC
 */
static void txnic_lmac_remove ( struct txnic_lmac *lmac ) {
	uint64_t config;

	/* Sanity check */
	assert ( lmac->vnic != NULL );

	/* Disable packet receive and transmit */
	config = readq ( lmac->regs + BGX_CMR_CONFIG );
	config &= ~( BGX_CMR_CONFIG_DATA_PKT_TX_EN |
		     BGX_CMR_CONFIG_DATA_PKT_RX_EN );
	writeq ( config, ( lmac->regs + BGX_CMR_CONFIG ) );

	/* Unregister network device */
	unregister_netdev ( lmac->vnic->netdev );

	/* Reset device */
	txnic_lmac_reset ( lmac );

	/* Free virtual NIC */
	txnic_free ( lmac->vnic );
	lmac->vnic = NULL;
}

/**
 * Probe all LMACs on a BGX Ethernet interface
 *
 * @v pf		Physical function
 * @v bgx		BGX Ethernet interface
 * @ret rc		Return status code
 */
static int txnic_lmac_probe_all ( struct txnic_pf *pf, struct txnic_bgx *bgx ) {
	unsigned int bgx_idx;
	int lmac_idx;
	int count;
	int rc;

	/* Sanity checks */
	bgx_idx = bgx->idx;
	assert ( pf->node == bgx->node );
	assert ( pf->bgx[bgx_idx] == NULL );
	assert ( bgx->pf == NULL );

	/* Associate BGX with physical function */
	pf->bgx[bgx_idx] = bgx;
	bgx->pf = pf;

	/* Probe all LMACs */
	count = bgx->count;
	for ( lmac_idx = 0 ; lmac_idx < count ; lmac_idx++ ) {
		if ( ( rc = txnic_lmac_probe ( &bgx->lmac[lmac_idx] ) ) != 0 )
			goto err_probe;
	}

	return 0;

	/* Note: unreachable on success; error path enters at err_probe
	 * with lmac_idx set to the index of the failed probe, so the
	 * loop below unwinds only the LMACs already probed.
	 */
	lmac_idx = count;
 err_probe:
	for ( lmac_idx-- ; lmac_idx >= 0 ; lmac_idx-- )
		txnic_lmac_remove ( &bgx->lmac[lmac_idx] );
	pf->bgx[bgx_idx] = NULL;
	bgx->pf = NULL;
	return rc;
}

/**
 * Remove all LMACs on a BGX Ethernet interface
 *
 * @v pf		Physical function
 * @v bgx		BGX Ethernet interface
 */
static void txnic_lmac_remove_all ( struct txnic_pf *pf,
				    struct txnic_bgx *bgx ) {
	unsigned int lmac_idx;

	/* Sanity checks */
	assert ( pf->bgx[bgx->idx] == bgx );
	assert ( bgx->pf == pf );

	/* Remove all LMACs */
	for ( lmac_idx = 0 ; lmac_idx < bgx->count ; lmac_idx++ )
		txnic_lmac_remove ( &bgx->lmac[lmac_idx] );

	/* Disassociate BGX from physical function */
	pf->bgx[bgx->idx] = NULL;
	bgx->pf = NULL;
}

/******************************************************************************
 *
 * NIC physical function interface
 *
 ******************************************************************************
 */

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int txnic_pf_probe ( struct pci_device *pci ) {
	struct txnic_pf *pf;
	struct txnic_bgx *bgx;
	unsigned long membase;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure */
	pf = zalloc ( sizeof ( *pf ) );
	if ( ! pf ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	pf->pci = pci;
	pci_set_drvdata ( pci, pf );

	/* Get base addresses */
	membase = pciea_bar_start ( pci, PCIEA_BEI_BAR_0 );
	pf->vf_membase = pciea_bar_start ( pci, PCIEA_BEI_VF_BAR_0 );
	pf->vf_stride = pciea_bar_size ( pci, PCIEA_BEI_VF_BAR_0 );

	/* Calculate node ID */
	pf->node = txnic_address_node ( membase );
	DBGC ( TXNICCOL ( pf ), "TXNIC %d/*/* PF %s at %#lx (VF %#lx+%#lx)\n",
	       pf->node, pci->dev.name, membase, pf->vf_membase, pf->vf_stride);

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	pf->regs = pci_ioremap ( pci, membase, TXNIC_PF_BAR_SIZE );
	if ( ! pf->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Configure physical function */
	writeq ( TXNIC_PF_CFG_ENA, ( pf->regs + TXNIC_PF_CFG ) );
	writeq ( ( TXNIC_PF_BP_CFG_BP_POLL_ENA |
		   TXNIC_PF_BP_CFG_BP_POLL_DLY_DEFAULT ),
		 ( pf->regs + TXNIC_PF_BP_CFG ) );
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		writeq ( ( TXNIC_PF_INTF_SEND_CFG_BLOCK_BGX |
			   TXNIC_PF_INTF_SEND_CFG_BLOCK ( i ) ),
			 ( pf->regs + TXNIC_PF_INTF_SEND_CFG ( i ) ) );
		writeq ( ( TXNIC_PF_INTF_BP_CFG_BP_ENA |
			   TXNIC_PF_INTF_BP_CFG_BP_ID_BGX |
			   TXNIC_PF_INTF_BP_CFG_BP_ID ( i ) ),
			 ( pf->regs + TXNIC_PF_INTF_BP_CFG ( i ) ) );
	}
	writeq ( ( TXNIC_PF_PKIND_CFG_LENERR_EN |
		   TXNIC_PF_PKIND_CFG_MAXLEN_DISABLE |
		   TXNIC_PF_PKIND_CFG_MINLEN_DISABLE ),
		 ( pf->regs + TXNIC_PF_PKIND_CFG(0) ) );

	/* Add to list of physical functions */
	list_add_tail ( &pf->list, &txnic_pfs );

	/* Probe all LMACs, if applicable */
	list_for_each_entry ( bgx, &txnic_bgxs, list ) {
		if ( bgx->node != pf->node )
			continue;
		if ( ( rc = txnic_lmac_probe_all ( pf, bgx ) ) != 0 )
			goto err_probe;
	}

	return 0;

 err_probe:
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		if ( pf->bgx[i] )
			txnic_lmac_remove_all ( pf, pf->bgx[i] );
	}
	list_del ( &pf->list );
	writeq ( 0, ( pf->regs + TXNIC_PF_CFG ) );
	iounmap ( pf->regs );
 err_ioremap:
	free ( pf );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void txnic_pf_remove ( struct pci_device *pci ) {
	struct txnic_pf *pf = pci_get_drvdata ( pci );
	unsigned int i;

	/* Remove all LMACs, if applicable */
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		if ( pf->bgx[i] )
			txnic_lmac_remove_all ( pf, pf->bgx[i] );
	}

	/* Remove from list of physical functions */
	list_del ( &pf->list );

	/* Unmap registers */
	iounmap ( pf->regs );

	/* Free physical function */
	free ( pf );
}

/** NIC physical function PCI device IDs */
static struct pci_device_id txnic_pf_ids[] = {
	PCI_ROM ( 0x177d, 0xa01e, "thunder-pf", "ThunderX NIC PF", 0 ),
};

/** NIC physical function PCI driver */
struct pci_driver txnic_pf_driver __pci_driver = {
	.ids = txnic_pf_ids,
	.id_count = ( sizeof ( txnic_pf_ids ) / sizeof ( txnic_pf_ids[0] ) ),
	.probe = txnic_pf_probe,
	.remove = txnic_pf_remove,
};

/******************************************************************************
 *
 * BGX interface
 *
 ******************************************************************************
 */

/** LMAC types
 *
 * Indexed by the BGX_CMR_CONFIG LMAC type value; entries with
 * count==0 are treated as unknown by txnic_bgx_detect().
 */
static struct txnic_lmac_type txnic_lmac_types[] = {
	[TXNIC_LMAC_XAUI] = {
		.name = "XAUI",
		.count = 1,
		.lane_to_sds = 0xe4,
	},
	[TXNIC_LMAC_RXAUI] = {
		.name = "RXAUI",
		.count = 2,
		.lane_to_sds = 0x0e04,
	},
	[TXNIC_LMAC_10G_R] = {
		.name = "10GBASE-R",
		.count = 4,
		.lane_to_sds = 0x00000000,
	},
	[TXNIC_LMAC_40G_R] = {
		.name = "40GBASE-R",
		.count = 1,
		.lane_to_sds = 0xe4,
	},
};

/**
 * Detect BGX Ethernet interface LMAC type
 *
 * @v bgx		BGX Ethernet interface
 * @ret type		LMAC type, or negative error
 */
static int txnic_bgx_detect ( struct txnic_bgx *bgx ) {
	uint64_t config;
	uint64_t br_pmd_control;
	uint64_t rx_lmacs;
	unsigned int type;

	/* We assume that the early (pre-UEFI) firmware will have
	 * configured at least the LMAC 0 type and use of link
	 * training, and may have overridden the number of LMACs.
	 */

	/* Determine type from LMAC 0 */
	config = readq ( bgx->regs + BGX_CMR_CONFIG );
	type = BGX_CMR_CONFIG_LMAC_TYPE_GET ( config );
	if ( ( type >= ( sizeof ( txnic_lmac_types ) /
			 sizeof ( txnic_lmac_types[0] ) ) ) ||
	     ( txnic_lmac_types[type].count == 0 ) ) {
		DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* BGX unknown type %d\n",
		       bgx->node, bgx->idx, type );
		return -ENOTTY;
	}
	bgx->type = &txnic_lmac_types[type];

	/* Check whether link training is required */
	br_pmd_control = readq ( bgx->regs + BGX_SPU_BR_PMD_CONTROL );
	bgx->training =
		( !! ( br_pmd_control & BGX_SPU_BR_PMD_CONTROL_TRAIN_EN ) );

	/* Determine number of LMACs */
	rx_lmacs = readq ( bgx->regs + BGX_CMR_RX_LMACS );
	bgx->count = BGX_CMR_RX_LMACS_LMACS_GET ( rx_lmacs );
	if ( ( bgx->count == TXNIC_NUM_LMAC ) &&
	     ( bgx->type->count != TXNIC_NUM_LMAC ) ) {
		DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* assuming %d LMACs\n",
		       bgx->node, bgx->idx, bgx->type->count );
		bgx->count = bgx->type->count;
	}

	return type;
}

/**
 * Initialise BGX Ethernet interface
 *
 * @v bgx		BGX Ethernet interface
 * @v type		LMAC type
 */
static void txnic_bgx_init ( struct txnic_bgx *bgx, unsigned int type ) {
	uint64_t global_config;
	uint32_t lane_to_sds;
	unsigned int i;

	/* Set number of LMACs */
	writeq ( BGX_CMR_RX_LMACS_LMACS_SET ( bgx->count ),
		 ( bgx->regs + BGX_CMR_RX_LMACS ) );
	writeq ( BGX_CMR_TX_LMACS_LMACS_SET ( bgx->count ),
		 ( bgx->regs + BGX_CMR_TX_LMACS ) );

	/* Set LMAC types and lane mappings, and disable all LMACs */
	lane_to_sds = bgx->type->lane_to_sds;
	for ( i = 0 ; i < bgx->count ; i++ ) {
		writeq ( ( BGX_CMR_CONFIG_LMAC_TYPE_SET ( type ) |
			   BGX_CMR_CONFIG_LANE_TO_SDS ( lane_to_sds ) ),
			 ( bgx->regs + BGX_LMAC ( i ) + BGX_CMR_CONFIG ) );
		lane_to_sds >>= 8;
	}

	/* Reset all MAC address filtering */
	for ( i = 0 ; i < TXNIC_NUM_DMAC ; i++ )
		writeq ( 0, (
bgx->regs + BGX_CMR_RX_DMAC_CAM ( i ) ) ); + + /* Reset NCSI steering */ + for ( i = 0 ; i < TXNIC_NUM_STEERING ; i++ ) + writeq ( 0, ( bgx->regs + BGX_CMR_RX_STEERING ( i ) ) ); + + /* Enable backpressure to all channels */ + writeq ( BGX_CMR_CHAN_MSK_AND_ALL ( bgx->count ), + ( bgx->regs + BGX_CMR_CHAN_MSK_AND ) ); + + /* Strip FCS */ + global_config = readq ( bgx->regs + BGX_CMR_GLOBAL_CONFIG ); + global_config |= BGX_CMR_GLOBAL_CONFIG_FCS_STRIP; + writeq ( global_config, ( bgx->regs + BGX_CMR_GLOBAL_CONFIG ) ); +} + +/** + * Get MAC address + * + * @v lmac Logical MAC + */ +static void txnic_bgx_mac ( struct txnic_lmac *lmac ) { + struct txnic_bgx *bgx = lmac->bgx; + unsigned int lmac_idx = TXNIC_LMAC_IDX ( lmac->idx ); + uint64_t mac; + EFI_STATUS efirc; + int rc; + + /* Extract MAC from Board Configuration protocol, if available */ + if ( txcfg ) { + if ( ( efirc = txcfg->GetLmacProp ( txcfg, bgx->node, bgx->idx, + lmac_idx, MAC_ADDRESS, + sizeof ( mac ), + &mac ) ) == 0 ) { + lmac->mac.be64 = cpu_to_be64 ( mac ); + } else { + rc = -EEFI ( efirc ); + DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d could not get " + "MAC address: %s\n", bgx->node, bgx->idx, + lmac->idx, strerror ( rc ) ); + } + } else { + DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d has no board " + "configuration protocol\n", bgx->node, bgx->idx, + lmac->idx ); + } + + /* Use random MAC address if none available */ + if ( ! 
lmac->mac.be64 ) { + DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d has no MAC address\n", + bgx->node, bgx->idx, lmac->idx ); + eth_random_addr ( lmac->mac.raw ); + } +} + +/** + * Initialise Super PHY Unit (SPU) + * + * @v lmac Logical MAC + */ +static void txnic_bgx_spu_init ( struct txnic_lmac *lmac ) { + struct txnic_bgx *bgx = lmac->bgx; + + /* Reset PHY */ + writeq ( BGX_SPU_CONTROL1_RESET, ( lmac->regs + BGX_SPU_CONTROL1 ) ); + mdelay ( BGX_SPU_RESET_DELAY_MS ); + + /* Power down PHY */ + writeq ( BGX_SPU_CONTROL1_LO_PWR, ( lmac->regs + BGX_SPU_CONTROL1 ) ); + + /* Configure training, if applicable */ + if ( bgx->training ) { + writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LP_CUP ) ); + writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LD_CUP ) ); + writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LD_REP ) ); + writeq ( BGX_SPU_BR_PMD_CONTROL_TRAIN_EN, + ( lmac->regs + BGX_SPU_BR_PMD_CONTROL ) ); + } + + /* Disable forward error correction */ + writeq ( 0, ( lmac->regs + BGX_SPU_FEC_CONTROL ) ); + + /* Disable autonegotiation */ + writeq ( 0, ( lmac->regs + BGX_SPU_AN_CONTROL ) ); + + /* Power up PHY */ + writeq ( 0, ( lmac->regs + BGX_SPU_CONTROL1 ) ); +} + +/** + * Initialise LMAC + * + * @v bgx BGX Ethernet interface + * @v lmac_idx LMAC index + */ +static void txnic_bgx_lmac_init ( struct txnic_bgx *bgx, + unsigned int lmac_idx ) { + struct txnic_lmac *lmac = &bgx->lmac[lmac_idx]; + uint64_t config; + + /* Record associated BGX */ + lmac->bgx = bgx; + + /* Set register base address (already mapped) */ + lmac->regs = ( bgx->regs + BGX_LMAC ( lmac_idx ) ); + + /* Calculate virtual NIC index */ + lmac->idx = TXNIC_VNIC_IDX ( bgx->idx, lmac_idx ); + + /* Set MAC address */ + txnic_bgx_mac ( lmac ); + + /* Initialise PHY */ + txnic_bgx_spu_init ( lmac ); + + /* Accept all multicasts and broadcasts */ + writeq ( ( BGX_CMR_RX_DMAC_CTL_MCST_MODE_ACCEPT | + BGX_CMR_RX_DMAC_CTL_BCST_ACCEPT ), + ( lmac->regs + BGX_CMR_RX_DMAC_CTL ) ); + + /* Enable LMAC */ + config = readq ( lmac->regs + 
BGX_CMR_CONFIG ); + config |= ( BGX_CMR_CONFIG_ENABLE | + BGX_CMR_CONFIG_DATA_PKT_RX_EN | + BGX_CMR_CONFIG_DATA_PKT_TX_EN ); + writeq ( config, ( lmac->regs + BGX_CMR_CONFIG ) ); +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int txnic_bgx_probe ( struct pci_device *pci ) { + struct txnic_bgx *bgx; + struct txnic_pf *pf; + unsigned long membase; + unsigned int i; + int type; + int rc; + + /* Allocate and initialise structure */ + bgx = zalloc ( sizeof ( *bgx ) ); + if ( ! bgx ) { + rc = -ENOMEM; + goto err_alloc; + } + bgx->pci = pci; + pci_set_drvdata ( pci, bgx ); + + /* Get base address */ + membase = pciea_bar_start ( pci, PCIEA_BEI_BAR_0 ); + + /* Calculate node ID and index */ + bgx->node = txnic_address_node ( membase ); + bgx->idx = txnic_address_bgx ( membase ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + bgx->regs = pci_ioremap ( pci, membase, TXNIC_BGX_BAR_SIZE ); + if ( ! bgx->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Detect LMAC type */ + if ( ( type = txnic_bgx_detect ( bgx ) ) < 0 ) { + rc = type; + goto err_detect; + } + DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* BGX %s at %#lx %dx %s%s\n", + bgx->node, bgx->idx, pci->dev.name, membase, bgx->count, + bgx->type->name, ( bgx->training ? 
"(training)" : "" ) ); + + /* Initialise interface */ + txnic_bgx_init ( bgx, type ); + + /* Initialise all LMACs */ + for ( i = 0 ; i < bgx->count ; i++ ) + txnic_bgx_lmac_init ( bgx, i ); + + /* Add to list of BGX devices */ + list_add_tail ( &bgx->list, &txnic_bgxs ); + + /* Probe all LMACs, if applicable */ + list_for_each_entry ( pf, &txnic_pfs, list ) { + if ( pf->node != bgx->node ) + continue; + if ( ( rc = txnic_lmac_probe_all ( pf, bgx ) ) != 0 ) + goto err_probe; + } + + return 0; + + err_probe: + if ( bgx->pf ) + txnic_lmac_remove_all ( bgx->pf, bgx ); + list_del ( &bgx->list ); + err_detect: + iounmap ( bgx->regs ); + err_ioremap: + free ( bgx ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void txnic_bgx_remove ( struct pci_device *pci ) { + struct txnic_bgx *bgx = pci_get_drvdata ( pci ); + + /* Remove all LMACs, if applicable */ + if ( bgx->pf ) + txnic_lmac_remove_all ( bgx->pf, bgx ); + + /* Remove from list of BGX devices */ + list_del ( &bgx->list ); + + /* Unmap registers */ + iounmap ( bgx->regs ); + + /* Free BGX device */ + free ( bgx ); +} + +/** BGX PCI device IDs */ +static struct pci_device_id txnic_bgx_ids[] = { + PCI_ROM ( 0x177d, 0xa026, "thunder-bgx", "ThunderX BGX", 0 ), +}; + +/** BGX PCI driver */ +struct pci_driver txnic_bgx_driver __pci_driver = { + .ids = txnic_bgx_ids, + .id_count = ( sizeof ( txnic_bgx_ids ) / sizeof ( txnic_bgx_ids[0] ) ), + .probe = txnic_bgx_probe, + .remove = txnic_bgx_remove, +}; diff --git a/src/drivers/net/thunderx.h b/src/drivers/net/thunderx.h new file mode 100644 index 00000000..410daf6e --- /dev/null +++ b/src/drivers/net/thunderx.h @@ -0,0 +1,949 @@ +#ifndef _THUNDERX_H +#define _THUNDERX_H + +/** @file + * + * Cavium ThunderX Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/****************************************************************************** + * + * Address space + * + 
****************************************************************************** + */ + +/** Size of a cache line */ +#define TXNIC_LINE_SIZE 128 + +/** Virtual function BAR size */ +#define TXNIC_VF_BAR_SIZE 0x200000UL + +/** Physical function BAR size */ +#define TXNIC_PF_BAR_SIZE 0x40000000UL + +/** BGX BAR size */ +#define TXNIC_BGX_BAR_SIZE 0x400000UL + +/** Maximum number of BGX Ethernet interfaces (per node) */ +#define TXNIC_NUM_BGX 2 + +/** Maximum number of Logical MACs (per BGX) */ +#define TXNIC_NUM_LMAC 4 + +/** Maximum number of destination MAC addresses (per BGX) */ +#define TXNIC_NUM_DMAC 32 + +/** Maximum number of steering rules (per BGX) */ +#define TXNIC_NUM_STEERING 8 + +/** + * Calculate node ID + * + * @v addr PCI BAR base address + * @ret node Node ID + */ +static inline unsigned int txnic_address_node ( uint64_t addr ) { + + /* Node ID is in bits [45:44] of the hardcoded BAR address */ + return ( ( addr >> 44 ) & 0x3 ); +} + +/** + * Calculate BGX Ethernet interface index + * + * @v addr PCI BAR base address + * @ret index Index + */ +static inline unsigned int txnic_address_bgx ( uint64_t addr ) { + + /* Index is in bit 24 of the hardcoded BAR address */ + return ( ( addr >> 24 ) & 0x1 ); +} + +/****************************************************************************** + * + * Send queue + * + ****************************************************************************** + */ + +/** Send queue configuration */ +#define TXNIC_QS_SQ_CFG(q) ( ( (q) << 18 ) | 0x010800 ) +#define TXNIC_QS_SQ_CFG_ENA ( 1ULL << 19 ) +#define TXNIC_QS_SQ_CFG_RESET ( 1ULL << 17 ) +#define TXNIC_QS_SQ_CFG_QSIZE(sz) ( ( ( uint64_t ) (sz) ) << 8 ) +#define TXNIC_QS_SQ_CFG_QSIZE_1K \ + TXNIC_QS_SQ_CFG_QSIZE ( 0 ) + +/** Send queue base address */ +#define TXNIC_QS_SQ_BASE(q) ( ( (q) << 18 ) | 0x010820 ) + +/** Send queue head pointer */ +#define TXNIC_QS_SQ_HEAD(q) ( ( (q) << 18 ) | 0x010828 ) + +/** Send queue tail pointer */ +#define TXNIC_QS_SQ_TAIL(q) ( ( (q) << 
18 ) | 0x010830 ) + +/** Send queue doorbell */ +#define TXNIC_QS_SQ_DOOR(q) ( ( (q) << 18 ) | 0x010838 ) + +/** Send queue status */ +#define TXNIC_QS_SQ_STATUS(q) ( ( (q) << 18 ) | 0x010840 ) +#define TXNIC_QS_SQ_STATUS_STOPPED ( 1ULL << 21 ) + +/** Maximum time to wait for a send queue to stop + * + * This is a policy decision. + */ +#define TXNIC_SQ_STOP_MAX_WAIT_MS 100 + +/** A send header subdescriptor */ +struct txnic_send_header { + /** Total length */ + uint32_t total; + /** Unused */ + uint8_t unused_a[2]; + /** Subdescriptor count */ + uint8_t subdcnt; + /** Flags */ + uint8_t flags; + /** Unused */ + uint8_t unused_b[8]; +} __attribute__ (( packed )); + +/** Flags for send header subdescriptor + * + * These comprise SUBDC=0x1 and PNC=0x1. + */ +#define TXNIC_SEND_HDR_FLAGS 0x14 + +/** A send gather subdescriptor */ +struct txnic_send_gather { + /** Size */ + uint16_t size; + /** Unused */ + uint8_t unused[5]; + /** Flags */ + uint8_t flags; + /** Address */ + uint64_t addr; +} __attribute__ (( packed )); + +/** Flags for send gather subdescriptor + * + * These comprise SUBDC=0x4 and LD_TYPE=0x0. + */ +#define TXNIC_SEND_GATHER_FLAGS 0x40 + +/** A send queue entry + * + * Each send queue entry comprises a single send header subdescriptor + * and a single send gather subdescriptor. + */ +struct txnic_sqe { + /** Send header descriptor */ + struct txnic_send_header hdr; + /** Send gather descriptor */ + struct txnic_send_gather gather; +} __attribute__ (( packed )); + +/** Number of subdescriptors per send queue entry */ +#define TXNIC_SQE_SUBDESCS ( sizeof ( struct txnic_sqe ) / \ + sizeof ( struct txnic_send_header ) ) + +/** Number of send queue entries + * + * The minimum send queue size is 1024 entries. + */ +#define TXNIC_SQES ( 1024 / TXNIC_SQE_SUBDESCS ) + +/** Send queue maximum fill level + * + * This is a policy decision. 
+ */ +#define TXNIC_SQ_FILL 32 + +/** Send queue alignment */ +#define TXNIC_SQ_ALIGN TXNIC_LINE_SIZE + +/** Send queue stride */ +#define TXNIC_SQ_STRIDE sizeof ( struct txnic_sqe ) + +/** Send queue size */ +#define TXNIC_SQ_SIZE ( TXNIC_SQES * TXNIC_SQ_STRIDE ) + +/** A send queue */ +struct txnic_sq { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** Send queue entries */ + userptr_t sqe; +}; + +/****************************************************************************** + * + * Receive queue + * + ****************************************************************************** + */ + +/** Receive queue configuration */ +#define TXNIC_QS_RQ_CFG(q) ( ( (q) << 18 ) | 0x010600 ) +#define TXNIC_QS_RQ_CFG_ENA ( 1ULL << 1 ) + +/** Maximum time to wait for a receive queue to disable + * + * This is a policy decision. + */ +#define TXNIC_RQ_DISABLE_MAX_WAIT_MS 100 + +/** Receive buffer descriptor ring configuration */ +#define TXNIC_QS_RBDR_CFG(q) ( ( (q) << 18 ) | 0x010c00 ) +#define TXNIC_QS_RBDR_CFG_ENA ( 1ULL << 44 ) +#define TXNIC_QS_RBDR_CFG_RESET ( 1ULL << 43 ) +#define TXNIC_QS_RBDR_CFG_QSIZE(sz) ( ( ( uint64_t ) (sz) ) << 32 ) +#define TXNIC_QS_RBDR_CFG_QSIZE_8K \ + TXNIC_QS_RBDR_CFG_QSIZE ( 0 ) +#define TXNIC_QS_RBDR_CFG_LINES(sz) ( ( ( uint64_t ) (sz) ) << 0 ) + +/** Receive buffer descriptor ring base address */ +#define TXNIC_QS_RBDR_BASE(q) ( ( (q) << 18 ) | 0x010c20 ) + +/** Receive buffer descriptor ring head pointer */ +#define TXNIC_QS_RBDR_HEAD(q) ( ( (q) << 18 ) | 0x010c28 ) + +/** Receive buffer descriptor ring tail pointer */ +#define TXNIC_QS_RBDR_TAIL(q) ( ( (q) << 18 ) | 0x010c30 ) + +/** Receive buffer descriptor ring doorbell */ +#define TXNIC_QS_RBDR_DOOR(q) ( ( (q) << 18 ) | 0x010c38 ) + +/** Receive buffer descriptor ring status 0 */ +#define TXNIC_QS_RBDR_STATUS0(q) ( ( (q) << 18 ) | 0x010c40 ) + +/** A receive buffer descriptor ring entry */ +struct txnic_rbdr_entry { + /** Address */ + 
uint64_t addr; +} __attribute__ (( packed )); + +/** A receive queue entry */ +struct txnic_rqe { + /** Receive buffer descriptor ring entry */ + struct txnic_rbdr_entry rbdre; +} __attribute__ (( packed )); + +/** Number of receive queue entries + * + * The minimum receive queue size is 8192 entries. + */ +#define TXNIC_RQES 8192 + +/** Receive queue maximum fill level + * + * This is a policy decision. Must not exceed TXNIC_RQES. + */ +#define TXNIC_RQ_FILL 32 + +/** Receive queue entry size + * + * This is a policy decision. + */ +#define TXNIC_RQE_SIZE ( ( ETH_DATA_ALIGN + ETH_FRAME_LEN + \ + 4 /* VLAN */ + TXNIC_LINE_SIZE - 1 ) \ + & ~( TXNIC_LINE_SIZE - 1 ) ) + +/** Receive queue alignment */ +#define TXNIC_RQ_ALIGN TXNIC_LINE_SIZE + +/** Receive queue stride */ +#define TXNIC_RQ_STRIDE sizeof ( struct txnic_rqe ) + +/** Receive queue size */ +#define TXNIC_RQ_SIZE ( TXNIC_RQES * TXNIC_RQ_STRIDE ) + +/** A receive queue */ +struct txnic_rq { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** Receive queue entries */ + userptr_t rqe; + /** I/O buffers */ + struct io_buffer *iobuf[TXNIC_RQ_FILL]; +}; + +/****************************************************************************** + * + * Completion queue + * + ****************************************************************************** + */ + +/** Completion queue configuration */ +#define TXNIC_QS_CQ_CFG(q) ( ( (q) << 18 ) | 0x010400 ) +#define TXNIC_QS_CQ_CFG_ENA ( 1ULL << 42 ) +#define TXNIC_QS_CQ_CFG_RESET ( 1ULL << 41 ) +#define TXNIC_QS_CQ_CFG_QSIZE(sz) ( ( ( uint64_t ) (sz) ) << 32 ) +#define TXNIC_QS_CQ_CFG_QSIZE_256 \ + TXNIC_QS_CQ_CFG_QSIZE ( 7 ) + +/** Maximum time to wait for a completion queue to disable + * + * This is a policy decision. 
+ */ +#define TXNIC_CQ_DISABLE_MAX_WAIT_MS 100 + +/** Completion queue base address */ +#define TXNIC_QS_CQ_BASE(q) ( ( (q) << 18 ) | 0x010420 ) + +/** Completion queue head pointer */ +#define TXNIC_QS_CQ_HEAD(q) ( ( (q) << 18 ) | 0x010428 ) + +/** Completion queue tail pointer */ +#define TXNIC_QS_CQ_TAIL(q) ( ( (q) << 18 ) | 0x010430 ) + +/** Completion queue doorbell */ +#define TXNIC_QS_CQ_DOOR(q) ( ( (q) << 18 ) | 0x010438 ) + +/** Completion queue status */ +#define TXNIC_QS_CQ_STATUS(q) ( ( (q) << 18 ) | 0x010440 ) +#define TXNIC_QS_CQ_STATUS_QCOUNT(status) \ + ( ( (status) >> 0 ) & 0xffff ) + +/** Completion queue status 2 */ +#define TXNIC_QS_CQ_STATUS2(q) ( ( (q) << 18 ) | 0x010448 ) + +/** A send completion queue entry */ +struct txnic_cqe_send { + /** Status */ + uint8_t send_status; + /** Unused */ + uint8_t unused[4]; + /** Send queue entry pointer */ + uint16_t sqe_ptr; + /** Type */ + uint8_t cqe_type; +} __attribute__ (( packed )); + +/** Send completion queue entry type */ +#define TXNIC_CQE_TYPE_SEND 0x80 + +/** A receive completion queue entry */ +struct txnic_cqe_rx { + /** Error opcode */ + uint8_t errop; + /** Unused */ + uint8_t unused_a[6]; + /** Type */ + uint8_t cqe_type; + /** Unused */ + uint8_t unused_b[1]; + /** Padding */ + uint8_t apad; + /** Unused */ + uint8_t unused_c[4]; + /** Length */ + uint16_t len; +} __attribute__ (( packed )); + +/** Receive completion queue entry type */ +#define TXNIC_CQE_TYPE_RX 0x20 + +/** Applied padding */ +#define TXNIC_CQE_RX_APAD_LEN( apad ) ( (apad) >> 5 ) + +/** Completion queue entry common fields */ +struct txnic_cqe_common { + /** Unused */ + uint8_t unused_a[7]; + /** Type */ + uint8_t cqe_type; +} __attribute__ (( packed )); + +/** A completion queue entry */ +union txnic_cqe { + /** Common fields */ + struct txnic_cqe_common common; + /** Send completion */ + struct txnic_cqe_send send; + /** Receive completion */ + struct txnic_cqe_rx rx; +}; + +/** Number of completion queue entries + * 
+ * The minimum completion queue size is 256 entries. + */ +#define TXNIC_CQES 256 + +/** Completion queue alignment */ +#define TXNIC_CQ_ALIGN 512 + +/** Completion queue stride */ +#define TXNIC_CQ_STRIDE 512 + +/** Completion queue size */ +#define TXNIC_CQ_SIZE ( TXNIC_CQES * TXNIC_CQ_STRIDE ) + +/** A completion queue */ +struct txnic_cq { + /** Consumer counter */ + unsigned int cons; + /** Completion queue entries */ + userptr_t cqe; +}; + +/****************************************************************************** + * + * Virtual NIC + * + ****************************************************************************** + */ + +/** A virtual NIC */ +struct txnic { + /** Registers */ + void *regs; + /** Device name (for debugging) */ + const char *name; + /** Network device */ + struct net_device *netdev; + + /** Send queue */ + struct txnic_sq sq; + /** Receive queue */ + struct txnic_rq rq; + /** Completion queue */ + struct txnic_cq cq; +}; + +/****************************************************************************** + * + * Physical function + * + ****************************************************************************** + */ + +/** Physical function configuration */ +#define TXNIC_PF_CFG 0x000000 +#define TXNIC_PF_CFG_ENA ( 1ULL << 0 ) + +/** Backpressure configuration */ +#define TXNIC_PF_BP_CFG 0x000080 +#define TXNIC_PF_BP_CFG_BP_POLL_ENA ( 1ULL << 6 ) +#define TXNIC_PF_BP_CFG_BP_POLL_DLY(dl) ( ( ( uint64_t ) (dl) ) << 0 ) +#define TXNIC_PF_BP_CFG_BP_POLL_DLY_DEFAULT \ + TXNIC_PF_BP_CFG_BP_POLL_DLY ( 3 ) + +/** Interface send configuration */ +#define TXNIC_PF_INTF_SEND_CFG(in) ( ( (in) << 8 ) | 0x000200 ) +#define TXNIC_PF_INTF_SEND_CFG_BLOCK_BGX ( 1ULL << 3 ) +#define TXNIC_PF_INTF_SEND_CFG_BLOCK(bl) ( ( ( uint64_t ) (bl) ) << 0 ) + +/** Interface backpressure configuration */ +#define TXNIC_PF_INTF_BP_CFG(in) ( ( (in) << 8 ) | 0x000208 ) +#define TXNIC_PF_INTF_BP_CFG_BP_ENA ( 1ULL << 63 ) +#define TXNIC_PF_INTF_BP_CFG_BP_ID_BGX ( 1ULL << 
3 ) +#define TXNIC_PF_INTF_BP_CFG_BP_ID(bp) ( ( ( uint64_t ) (bp) ) << 0 ) + +/** Port kind configuration */ +#define TXNIC_PF_PKIND_CFG(pk) ( ( (pk) << 3 ) | 0x000600 ) +#define TXNIC_PF_PKIND_CFG_LENERR_EN ( 1ULL << 33 ) +#define TXNIC_PF_PKIND_CFG_MAXLEN(ct) ( ( ( uint64_t ) (ct) ) << 16 ) +#define TXNIC_PF_PKIND_CFG_MAXLEN_DISABLE \ + TXNIC_PF_PKIND_CFG_MAXLEN ( 0xffff ) +#define TXNIC_PF_PKIND_CFG_MINLEN(ct) ( ( ( uint64_t ) (ct) ) << 0 ) +#define TXNIC_PF_PKIND_CFG_MINLEN_DISABLE \ + TXNIC_PF_PKIND_CFG_MINLEN ( 0x0000 ) + +/** Match parse index configuration */ +#define TXNIC_PF_MPI_CFG(ix) ( ( (ix) << 3 ) | 0x210000 ) +#define TXNIC_PF_MPI_CFG_VNIC(vn) ( ( ( uint64_t ) (vn) ) << 24 ) +#define TXNIC_PF_MPI_CFG_RSSI_BASE(ix) ( ( ( uint64_t ) (ix) ) << 0 ) + +/** RSS indirection receive queue */ +#define TXNIC_PF_RSSI_RQ(ix) ( ( (ix) << 3 ) | 0x220000 ) +#define TXNIC_PF_RSSI_RQ_RQ_QS(qs) ( ( ( uint64_t ) (qs) ) << 3 ) + +/** LMAC registers */ +#define TXNIC_PF_LMAC(lm) ( ( (lm) << 3 ) | 0x240000 ) + +/** LMAC configuration */ +#define TXNIC_PF_LMAC_CFG 0x000000 +#define TXNIC_PF_LMAC_CFG_ADJUST(ad) ( ( ( uint64_t ) (ad) ) << 8 ) +#define TXNIC_PF_LMAC_CFG_ADJUST_DEFAULT \ + TXNIC_PF_LMAC_CFG_ADJUST ( 6 ) +#define TXNIC_PF_LMAC_CFG_MIN_PKT_SIZE(sz) ( ( ( uint64_t ) (sz) ) << 0 ) + +/** LMAC configuration 2 */ +#define TXNIC_PF_LMAC_CFG2 0x000100 +#define TXNIC_PF_LMAC_CFG2_MAX_PKT_SIZE(sz) ( ( ( uint64_t ) (sz) ) << 0 ) + +/** LMAC credit */ +#define TXNIC_PF_LMAC_CREDIT 0x004000 +#define TXNIC_PF_LMAC_CREDIT_CC_UNIT_CNT(ct) ( ( ( uint64_t ) (ct) ) << 12 ) +#define TXNIC_PF_LMAC_CREDIT_CC_UNIT_CNT_DEFAULT \ + TXNIC_PF_LMAC_CREDIT_CC_UNIT_CNT ( 192 ) +#define TXNIC_PF_LMAC_CREDIT_CC_PACKET_CNT(ct) ( ( ( uint64_t ) (ct) ) << 2 ) +#define TXNIC_PF_LMAC_CREDIT_CC_PACKET_CNT_DEFAULT \ + TXNIC_PF_LMAC_CREDIT_CC_PACKET_CNT ( 511 ) +#define TXNIC_PF_LMAC_CREDIT_CC_ENABLE ( 1ULL << 1 ) + +/** Channel registers */ +#define TXNIC_PF_CHAN(ch) ( ( (ch) << 3 ) | 0x400000 ) + 
+/** Channel transmit configuration */ +#define TXNIC_PF_CHAN_TX_CFG 0x000000 +#define TXNIC_PF_CHAN_TX_CFG_BP_ENA ( 1ULL << 0 ) + +/** Channel receive configuration */ +#define TXNIC_PF_CHAN_RX_CFG 0x020000 +#define TXNIC_PF_CHAN_RX_CFG_CPI_BASE(ix) ( ( ( uint64_t ) (ix) ) << 48 ) + +/** Channel receive backpressure configuration */ +#define TXNIC_PF_CHAN_RX_BP_CFG 0x080000 +#define TXNIC_PF_CHAN_RX_BP_CFG_ENA ( 1ULL << 63 ) +#define TXNIC_PF_CHAN_RX_BP_CFG_BPID(bp) ( ( ( uint64_t ) (bp) ) << 0 ) + +/** Traffic limiter 2 configuration */ +#define TXNIC_PF_TL2_CFG(tl) ( ( (tl) << 3 ) | 0x500000 ) +#define TXNIC_PF_TL2_CFG_RR_QUANTUM(rr) ( ( ( uint64_t ) (rr) ) << 0 ) +#define TXNIC_PF_TL2_CFG_RR_QUANTUM_DEFAULT \ + TXNIC_PF_TL2_CFG_RR_QUANTUM ( 0x905 ) + +/** Traffic limiter 3 configuration */ +#define TXNIC_PF_TL3_CFG(tl) ( ( (tl) << 3 ) | 0x600000 ) +#define TXNIC_PF_TL3_CFG_RR_QUANTUM(rr) ( ( ( uint64_t ) (rr) ) << 0 ) +#define TXNIC_PF_TL3_CFG_RR_QUANTUM_DEFAULT \ + TXNIC_PF_TL3_CFG_RR_QUANTUM ( 0x905 ) + +/** Traffic limiter 3 channel mapping */ +#define TXNIC_PF_TL3_CHAN(tl) ( ( (tl) << 3 ) | 0x620000 ) +#define TXNIC_PF_TL3_CHAN_CHAN(ch) ( ( (ch) & 0x7f ) << 0 ) + +/** Traffic limiter 4 configuration */ +#define TXNIC_PF_TL4_CFG(tl) ( ( (tl) << 3 ) | 0x800000 ) +#define TXNIC_PF_TL4_CFG_SQ_QS(qs) ( ( ( uint64_t ) (qs) ) << 27 ) +#define TXNIC_PF_TL4_CFG_RR_QUANTUM(rr) ( ( ( uint64_t ) (rr) ) << 0 ) +#define TXNIC_PF_TL4_CFG_RR_QUANTUM_DEFAULT \ + TXNIC_PF_TL4_CFG_RR_QUANTUM ( 0x905 ) + +/** Queue set registers */ +#define TXNIC_PF_QS(qs) ( ( (qs) << 21 ) | 0x20000000UL ) + +/** Queue set configuration */ +#define TXNIC_PF_QS_CFG 0x010000 +#define TXNIC_PF_QS_CFG_ENA ( 1ULL << 31 ) +#define TXNIC_PF_QS_CFG_VNIC(vn) ( ( ( uint64_t ) (vn) ) << 0 ) + +/** Receive queue configuration */ +#define TXNIC_PF_QS_RQ_CFG(q) ( ( (q) << 18 ) | 0x010400 ) +#define TXNIC_PF_QS_RQ_CFG_CACHING(cx) ( ( ( uint64_t ) (cx) ) << 26 ) +#define TXNIC_PF_QS_RQ_CFG_CACHING_ALL \ + 
TXNIC_PF_QS_RQ_CFG_CACHING ( 1 ) +#define TXNIC_PF_QS_RQ_CFG_CQ_QS(qs) ( ( ( uint64_t ) (qs) ) << 19 ) +#define TXNIC_PF_QS_RQ_CFG_RBDR_CONT_QS(qs) ( ( ( uint64_t ) (qs) ) << 9 ) +#define TXNIC_PF_QS_RQ_CFG_RBDR_STRT_QS(qs) ( ( ( uint64_t ) (qs) ) << 1 ) + +/** Receive queue drop configuration */ +#define TXNIC_PF_QS_RQ_DROP_CFG(q) ( ( (q) << 18 ) | 0x010420 ) + +/** Receive queue backpressure configuration */ +#define TXNIC_PF_QS_RQ_BP_CFG(q) ( ( (q) << 18 ) | 0x010500 ) +#define TXNIC_PF_QS_RQ_BP_CFG_RBDR_BP_ENA ( 1ULL << 63 ) +#define TXNIC_PF_QS_RQ_BP_CFG_CQ_BP_ENA ( 1ULL << 62 ) +#define TXNIC_PF_QS_RQ_BP_CFG_BPID(bp) ( ( ( uint64_t ) (bp) ) << 0 ) + +/** Send queue configuration */ +#define TXNIC_PF_QS_SQ_CFG(q) ( ( (q) << 18 ) | 0x010c00 ) +#define TXNIC_PF_QS_SQ_CFG_CQ_QS(qs) ( ( ( uint64_t ) (qs) ) << 3 ) + +/** Send queue configuration 2 */ +#define TXNIC_PF_QS_SQ_CFG2(q) ( ( (q) << 18 ) | 0x010c08 ) +#define TXNIC_PF_QS_SQ_CFG2_TL4(tl) ( ( ( uint64_t ) (tl) ) << 0 ) + +/** A physical function */ +struct txnic_pf { + /** Registers */ + void *regs; + /** PCI device */ + struct pci_device *pci; + /** Node ID */ + unsigned int node; + + /** Virtual function BAR base */ + unsigned long vf_membase; + /** Virtual function BAR stride */ + unsigned long vf_stride; + + /** List of physical functions */ + struct list_head list; + /** BGX Ethernet interfaces (if known) */ + struct txnic_bgx *bgx[TXNIC_NUM_BGX]; +}; + +/** + * Calculate virtual NIC index + * + * @v bgx_idx BGX Ethernet interface index + * @v lmac_idx Logical MAC index + * @ret vnic_idx Virtual NIC index + */ +#define TXNIC_VNIC_IDX( bgx_idx, lmac_idx ) \ + ( ( (bgx_idx) * TXNIC_NUM_LMAC ) + (lmac_idx) ) + +/** + * Calculate BGX Ethernet interface index + * + * @v vnic_idx Virtual NIC index + * @ret bgx_idx BGX Ethernet interface index + */ +#define TXNIC_BGX_IDX( vnic_idx ) ( (vnic_idx) / TXNIC_NUM_LMAC ) + +/** + * Calculate logical MAC index + * + * @v vnic_idx Virtual NIC index + * @ret lmac_idx 
Logical MAC index + */ +#define TXNIC_LMAC_IDX( vnic_idx ) ( (vnic_idx) % TXNIC_NUM_LMAC ) + +/** + * Calculate traffic limiter 2 index + * + * @v vnic_idx Virtual NIC index + * @v tl2_idx Traffic limiter 2 index + */ +#define TXNIC_TL2_IDX( vnic_idx ) ( (vnic_idx) << 3 ) + +/** + * Calculate traffic limiter 3 index + * + * @v vnic_idx Virtual NIC index + * @v tl3_idx Traffic limiter 3 index + */ +#define TXNIC_TL3_IDX( vnic_idx ) ( (vnic_idx) << 5 ) + +/** + * Calculate traffic limiter 4 index + * + * @v vnic_idx Virtual NIC index + * @v tl4_idx Traffic limiter 4 index + */ +#define TXNIC_TL4_IDX( vnic_idx ) ( (vnic_idx) << 7 ) + +/** + * Calculate channel index + * + * @v vnic_idx Virtual NIC index + * @v chan_idx Channel index + */ +#define TXNIC_CHAN_IDX( vnic_idx ) ( ( TXNIC_BGX_IDX (vnic_idx) << 7 ) | \ + ( TXNIC_LMAC_IDX (vnic_idx) << 4 ) ) + +/****************************************************************************** + * + * BGX Ethernet interface + * + ****************************************************************************** + */ + +/** Per-LMAC registers */ +#define BGX_LMAC(lm) ( ( (lm) << 20 ) | 0x00000000UL ) + +/** CMR configuration */ +#define BGX_CMR_CONFIG 0x000000 +#define BGX_CMR_CONFIG_ENABLE ( 1ULL << 15 ) +#define BGX_CMR_CONFIG_DATA_PKT_RX_EN ( 1ULL << 14 ) +#define BGX_CMR_CONFIG_DATA_PKT_TX_EN ( 1ULL << 13 ) +#define BGX_CMR_CONFIG_LMAC_TYPE_GET(config) \ + ( ( (config) >> 8 ) & 0x7 ) +#define BGX_CMR_CONFIG_LMAC_TYPE_SET(ty) ( ( ( uint64_t ) (ty) ) << 8 ) +#define BGX_CMR_CONFIG_LANE_TO_SDS(ls) ( ( ( uint64_t ) (ls) ) << 0 ) + +/** CMR global configuration */ +#define BGX_CMR_GLOBAL_CONFIG 0x000008 +#define BGX_CMR_GLOBAL_CONFIG_FCS_STRIP ( 1ULL << 6 ) + +/** CMR receive statistics 0 */ +#define BGX_CMR_RX_STAT0 0x000070 + +/** CMR receive statistics 1 */ +#define BGX_CMR_RX_STAT1 0x000078 + +/** CMR receive statistics 2 */ +#define BGX_CMR_RX_STAT2 0x000080 + +/** CMR receive statistics 3 */ +#define BGX_CMR_RX_STAT3 0x000088 + 
+/** CMR receive statistics 4 */ +#define BGX_CMR_RX_STAT4 0x000090 + +/** CMR receive statistics 5 */ +#define BGX_CMR_RX_STAT5 0x000098 + +/** CMR receive statistics 6 */ +#define BGX_CMR_RX_STAT6 0x0000a0 + +/** CMR receive statistics 7 */ +#define BGX_CMR_RX_STAT7 0x0000a8 + +/** CMR receive statistics 8 */ +#define BGX_CMR_RX_STAT8 0x0000b0 + +/** CMR receive statistics 9 */ +#define BGX_CMR_RX_STAT9 0x0000b8 + +/** CMR receive statistics 10 */ +#define BGX_CMR_RX_STAT10 0x0000c0 + +/** CMR destination MAC control */ +#define BGX_CMR_RX_DMAC_CTL 0x0000e8 +#define BGX_CMR_RX_DMAC_CTL_MCST_MODE(md) ( ( ( uint64_t ) (md) ) << 1 ) +#define BGX_CMR_RX_DMAC_CTL_MCST_MODE_ACCEPT \ + BGX_CMR_RX_DMAC_CTL_MCST_MODE ( 1 ) +#define BGX_CMR_RX_DMAC_CTL_BCST_ACCEPT ( 1ULL << 0 ) + +/** CMR destination MAC CAM */ +#define BGX_CMR_RX_DMAC_CAM(i) ( ( (i) << 3 ) | 0x000200 ) + +/** CMR receive steering */ +#define BGX_CMR_RX_STEERING(i) ( ( (i) << 3 ) | 0x000300 ) + +/** CMR backpressure channel mask AND */ +#define BGX_CMR_CHAN_MSK_AND 0x000450 +#define BGX_CMR_CHAN_MSK_AND_ALL(count) \ + ( 0xffffffffffffffffULL >> ( 16 * ( 4 - (count) ) ) ) + +/** CMR transmit statistics 0 */ +#define BGX_CMR_TX_STAT0 0x000600 + +/** CMR transmit statistics 1 */ +#define BGX_CMR_TX_STAT1 0x000608 + +/** CMR transmit statistics 2 */ +#define BGX_CMR_TX_STAT2 0x000610 + +/** CMR transmit statistics 3 */ +#define BGX_CMR_TX_STAT3 0x000618 + +/** CMR transmit statistics 4 */ +#define BGX_CMR_TX_STAT4 0x000620 + +/** CMR transmit statistics 5 */ +#define BGX_CMR_TX_STAT5 0x000628 + +/** CMR transmit statistics 6 */ +#define BGX_CMR_TX_STAT6 0x000630 + +/** CMR transmit statistics 7 */ +#define BGX_CMR_TX_STAT7 0x000638 + +/** CMR transmit statistics 8 */ +#define BGX_CMR_TX_STAT8 0x000640 + +/** CMR transmit statistics 9 */ +#define BGX_CMR_TX_STAT9 0x000648 + +/** CMR transmit statistics 10 */ +#define BGX_CMR_TX_STAT10 0x000650 + +/** CMR transmit statistics 11 */ +#define BGX_CMR_TX_STAT11 
0x000658 + +/** CMR transmit statistics 12 */ +#define BGX_CMR_TX_STAT12 0x000660 + +/** CMR transmit statistics 13 */ +#define BGX_CMR_TX_STAT13 0x000668 + +/** CMR transmit statistics 14 */ +#define BGX_CMR_TX_STAT14 0x000670 + +/** CMR transmit statistics 15 */ +#define BGX_CMR_TX_STAT15 0x000678 + +/** CMR transmit statistics 16 */ +#define BGX_CMR_TX_STAT16 0x000680 + +/** CMR transmit statistics 17 */ +#define BGX_CMR_TX_STAT17 0x000688 + +/** CMR receive logical MACs */ +#define BGX_CMR_RX_LMACS 0x000468 +#define BGX_CMR_RX_LMACS_LMACS_GET(lmacs) \ + ( ( (lmacs) >> 0 ) & 0x7 ) +#define BGX_CMR_RX_LMACS_LMACS_SET(ct) ( ( ( uint64_t ) (ct) ) << 0 ) + +/** CMR transmit logical MACs */ +#define BGX_CMR_TX_LMACS 0x001000 +#define BGX_CMR_TX_LMACS_LMACS_GET(lmacs) \ + ( ( (lmacs) >> 0 ) & 0x7 ) +#define BGX_CMR_TX_LMACS_LMACS_SET(ct) ( ( ( uint64_t ) (ct) ) << 0 ) + +/** SPU control 1 */ +#define BGX_SPU_CONTROL1 0x010000 +#define BGX_SPU_CONTROL1_RESET ( 1ULL << 15 ) +#define BGX_SPU_CONTROL1_LO_PWR ( 1ULL << 11 ) + +/** SPU reset delay */ +#define BGX_SPU_RESET_DELAY_MS 10 + +/** SPU status 1 */ +#define BGX_SPU_STATUS1 0x010008 +#define BGX_SPU_STATUS1_FLT ( 1ULL << 7 ) +#define BGX_SPU_STATUS1_RCV_LNK ( 1ULL << 2 ) + +/** SPU status 2 */ +#define BGX_SPU_STATUS2 0x010020 +#define BGX_SPU_STATUS2_RCVFLT ( 1ULL << 10 ) + +/** SPU BASE-R status 1 */ +#define BGX_SPU_BR_STATUS1 0x010030 +#define BGX_SPU_BR_STATUS1_RCV_LNK ( 1ULL << 12 ) +#define BGX_SPU_BR_STATUS1_HI_BER ( 1ULL << 1 ) +#define BGX_SPU_BR_STATUS1_BLK_LOCK ( 1ULL << 0 ) + +/** SPU BASE-R status 2 */ +#define BGX_SPU_BR_STATUS2 0x010038 +#define BGX_SPU_BR_STATUS2_LATCHED_LOCK ( 1ULL << 15 ) +#define BGX_SPU_BR_STATUS2_LATCHED_BER ( 1ULL << 14 ) + +/** SPU BASE-R alignment status */ +#define BGX_SPU_BR_ALGN_STATUS 0x010050 +#define BGX_SPU_BR_ALGN_STATUS_ALIGND ( 1ULL << 12 ) + +/** SPU BASE-R link training control */ +#define BGX_SPU_BR_PMD_CONTROL 0x010068 +#define BGX_SPU_BR_PMD_CONTROL_TRAIN_EN ( 
1ULL << 1 ) + +/** SPU BASE-R link training status */ +#define BGX_SPU_BR_PMD_STATUS 0x010070 + +/** SPU link partner coefficient update */ +#define BGX_SPU_BR_PMD_LP_CUP 0x010078 + +/** SPU local device coefficient update */ +#define BGX_SPU_BR_PMD_LD_CUP 0x010088 + +/** SPU local device status report */ +#define BGX_SPU_BR_PMD_LD_REP 0x010090 + +/** SPU forward error correction control */ +#define BGX_SPU_FEC_CONTROL 0x0100a0 + +/** SPU autonegotiation control */ +#define BGX_SPU_AN_CONTROL 0x0100c8 + +/** SPU autonegotiation status */ +#define BGX_SPU_AN_STATUS 0x0100d0 +#define BGX_SPU_AN_STATUS_XNP_STAT ( 1ULL << 7 ) +#define BGX_SPU_AN_STATUS_PAGE_RX ( 1ULL << 6 ) +#define BGX_SPU_AN_STATUS_AN_COMPLETE ( 1ULL << 5 ) +#define BGX_SPU_AN_STATUS_LINK_STATUS ( 1ULL << 2 ) +#define BGX_SPU_AN_STATUS_LP_AN_ABLE ( 1ULL << 0 ) + +/** SPU interrupt */ +#define BGX_SPU_INT 0x010220 +#define BGX_SPU_INT_TRAINING_FAIL ( 1ULL << 14 ) +#define BGX_SPU_INT_TRAINING_DONE ( 1ULL << 13 ) +#define BGX_SPU_INT_AN_COMPLETE ( 1ULL << 12 ) +#define BGX_SPU_INT_AN_LINK_GOOD ( 1ULL << 11 ) +#define BGX_SPU_INT_AN_PAGE_RX ( 1ULL << 10 ) +#define BGX_SPU_INT_FEC_UNCORR ( 1ULL << 9 ) +#define BGX_SPU_INT_FEC_CORR ( 1ULL << 8 ) +#define BGX_SPU_INT_BIP_ERR ( 1ULL << 7 ) +#define BGX_SPU_INT_DBG_SYNC ( 1ULL << 6 ) +#define BGX_SPU_INT_ALGNLOS ( 1ULL << 5 ) +#define BGX_SPU_INT_SYNLOS ( 1ULL << 4 ) +#define BGX_SPU_INT_BITLCKLS ( 1ULL << 3 ) +#define BGX_SPU_INT_ERR_BLK ( 1ULL << 2 ) +#define BGX_SPU_INT_RX_LINK_DOWN ( 1ULL << 1 ) +#define BGX_SPU_INT_RX_LINK_UP ( 1ULL << 0 ) + +/** LMAC types */ +enum txnic_lmac_types { + TXNIC_LMAC_SGMII = 0x0, /**< SGMII/1000BASE-X */ + TXNIC_LMAC_XAUI = 0x1, /**< 10GBASE-X/XAUI or DXAUI */ + TXNIC_LMAC_RXAUI = 0x2, /**< Reduced XAUI */ + TXNIC_LMAC_10G_R = 0x3, /**< 10GBASE-R */ + TXNIC_LMAC_40G_R = 0x4, /**< 40GBASE-R */ +}; + +/** An LMAC type */ +struct txnic_lmac_type { + /** Name */ + const char *name; + /** Number of LMACs */ + uint8_t count; + 
/** Lane-to-SDS mapping */ + uint32_t lane_to_sds; +}; + +/** An LMAC address */ +union txnic_lmac_address { + struct { + uint8_t pad[2]; + uint8_t raw[ETH_ALEN]; + } __attribute__ (( packed )); + uint64_t be64; +}; + +/** A Logical MAC (LMAC) */ +struct txnic_lmac { + /** Registers */ + void *regs; + /** Containing BGX Ethernet interface */ + struct txnic_bgx *bgx; + /** Virtual NIC index */ + unsigned int idx; + + /** MAC address */ + union txnic_lmac_address mac; + + /** Virtual NIC (if applicable) */ + struct txnic *vnic; +}; + +/** A BGX Ethernet interface */ +struct txnic_bgx { + /** Registers */ + void *regs; + /** PCI device */ + struct pci_device *pci; + /** Node ID */ + unsigned int node; + /** BGX index */ + unsigned int idx; + + /** LMAC type */ + struct txnic_lmac_type *type; + /** Number of LMACs */ + unsigned int count; + /** Link training is in use */ + int training; + + /** List of BGX Ethernet interfaces */ + struct list_head list; + /** Physical function (if known) */ + struct txnic_pf *pf; + + /** Logical MACs */ + struct txnic_lmac lmac[TXNIC_NUM_LMAC]; +}; + +#endif /* _THUNDERX_H */ diff --git a/src/drivers/net/thunderxcfg.h b/src/drivers/net/thunderxcfg.h new file mode 100644 index 00000000..ffb34d36 --- /dev/null +++ b/src/drivers/net/thunderxcfg.h @@ -0,0 +1,155 @@ +#ifndef _THUNDERXCFG_H +#define _THUNDERXCFG_H + +/** @file + * + * Cavium ThunderX Board Configuration + * + * The definitions in this section are extracted from BSD-licensed + * (but non-public) portions of ThunderPkg. + * + */ + +FILE_LICENCE ( BSD2 ); + +#include + +/****************************************************************************** + * + * From ThunderxBoardConfig.h + * + ****************************************************************************** + * + * Header file for Cavium ThunderX Board Configurations + * Copyright (c) 2015, Cavium Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF + * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + +#define MAX_NODES 2 +#define CLUSTER_COUNT 3 +#define CORE_PER_CLUSTER_COUNT 16 +#define CORE_COUNT (CLUSTER_COUNT*CORE_PER_CLUSTER_COUNT) +#define BGX_PER_NODE_COUNT 2 +#define LMAC_PER_BGX_COUNT 4 +#define PEM_PER_NODE_COUNT 6 +#define LMC_PER_NODE_COUNT 4 +#define DIMM_PER_LMC_COUNT 2 + +#define THUNDERX_CPU_ID(node, cluster, core) (((node) << 16) | ((cluster) << 8) | (core)) + +/****************************************************************************** + * + * From ThunderConfigProtocol.h + * + ****************************************************************************** + * + * Thunder board Configuration Protocol + * + * Copyright (c) 2015, Cavium Inc. All rights reserved.
+ * + * This program and the accompanying materials are licensed and made + * available under the terms and conditions of the BSD License which + * accompanies this distribution. The full text of the license may + * be found at http://opensource.org/licenses/bsd-license.php + * + * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" + * BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER + * EXPRESS OR IMPLIED. + * + */ + +#define EFI_THUNDER_CONFIG_PROTOCOL_GUID \ + {0xc12b1873, 0xac17, 0x4176, {0xac, 0x77, 0x7e, 0xcb, 0x4d, 0xef, 0xff, 0xec}} + +/// +/// Forward declaration +/// +typedef struct _EFI_THUNDER_CONFIG_PROTOCOL EFI_THUNDER_CONFIG_PROTOCOL; + +typedef enum { + BGX_ENABLED, + BGX_MODE, + LMAC_COUNT, + BASE_ADDRESS, + LMAC_TYPE_BGX, + QLM_MASK, + QLM_FREQ, + USE_TRAINING +} BGX_PROPERTY; + +typedef enum { + ENABLED, + LANE_TO_SDS, + MAC_ADDRESS +} LMAC_PROPERTY; + +/// +/// Function prototypes +/// +typedef +EFI_STATUS +(EFIAPI *EFI_THUNDER_CONFIG_PROTOCOL_GET_CONFIG)( + IN EFI_THUNDER_CONFIG_PROTOCOL *This, + OUT VOID** cfg + ); + +typedef +EFI_STATUS +(EFIAPI *EFI_THUNDER_CONFIG_PROTOCOL_GET_BGX_PROP)( + IN EFI_THUNDER_CONFIG_PROTOCOL *This, + IN UINTN NodeId, + IN UINTN BgxId, + IN BGX_PROPERTY BgxProp, + IN UINT64 ValueSize, + OUT UINT64 *Value + ); + +typedef +EFI_STATUS +(EFIAPI *EFI_THUNDER_CONFIG_PROTOCOL_GET_LMAC_PROP)( + IN EFI_THUNDER_CONFIG_PROTOCOL *This, + IN UINTN NodeId, + IN UINTN BgxId, + IN UINTN LmacId, + IN LMAC_PROPERTY LmacProp, + IN UINT64 ValueSize, + OUT UINT64 *Value + ); + +/// +/// Protocol structure +/// +struct _EFI_THUNDER_CONFIG_PROTOCOL { + EFI_THUNDER_CONFIG_PROTOCOL_GET_CONFIG GetConfig; + EFI_THUNDER_CONFIG_PROTOCOL_GET_BGX_PROP GetBgxProp; + EFI_THUNDER_CONFIG_PROTOCOL_GET_LMAC_PROP GetLmacProp; + VOID* BoardConfig; +}; + +#endif /* _THUNDERXCFG_H */ diff --git a/src/drivers/usb/ehci.c b/src/drivers/usb/ehci.c new file mode 100644 index 00000000..15193efe --- /dev/null +++ 
b/src/drivers/usb/ehci.c @@ -0,0 +1,2096 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ehci.h" + +/** @file + * + * USB Enhanced Host Controller Interface (EHCI) driver + * + */ + +/** + * Construct error code from transfer descriptor status + * + * @v status Transfer descriptor status + * @ret rc Error code + * + * Bits 2-5 of the status code provide some indication as to the root + * cause of the error. We incorporate these into the error code as + * reported to usb_complete_err(). 
+ */ +#define EIO_STATUS( status ) EUNIQ ( EINFO_EIO, ( ( (status) >> 2 ) & 0xf ) ) + +/****************************************************************************** + * + * Register access + * + ****************************************************************************** + */ + +/** + * Initialise device + * + * @v ehci EHCI device + * @v regs MMIO registers + */ +static void ehci_init ( struct ehci_device *ehci, void *regs ) { + uint32_t hcsparams; + uint32_t hccparams; + size_t caplength; + + /* Locate capability and operational registers */ + ehci->cap = regs; + caplength = readb ( ehci->cap + EHCI_CAP_CAPLENGTH ); + ehci->op = ( ehci->cap + caplength ); + DBGC2 ( ehci, "EHCI %s cap %08lx op %08lx\n", ehci->name, + virt_to_phys ( ehci->cap ), virt_to_phys ( ehci->op ) ); + + /* Read structural parameters */ + hcsparams = readl ( ehci->cap + EHCI_CAP_HCSPARAMS ); + ehci->ports = EHCI_HCSPARAMS_PORTS ( hcsparams ); + DBGC ( ehci, "EHCI %s has %d ports\n", ehci->name, ehci->ports ); + + /* Read capability parameters 1 */ + hccparams = readl ( ehci->cap + EHCI_CAP_HCCPARAMS ); + ehci->addr64 = EHCI_HCCPARAMS_ADDR64 ( hccparams ); + ehci->flsize = ( EHCI_HCCPARAMS_FLSIZE ( hccparams ) ? + EHCI_FLSIZE_SMALL : EHCI_FLSIZE_DEFAULT ); + ehci->eecp = EHCI_HCCPARAMS_EECP ( hccparams ); + DBGC2 ( ehci, "EHCI %s %d-bit flsize %d\n", ehci->name, + ( ehci->addr64 ? 
64 : 32 ), ehci->flsize ); +} + +/** + * Find extended capability + * + * @v ehci EHCI device + * @v pci PCI device + * @v id Capability ID + * @v offset Offset to previous extended capability instance, or zero + * @ret offset Offset to extended capability, or zero if not found + */ +static unsigned int ehci_extended_capability ( struct ehci_device *ehci, + struct pci_device *pci, + unsigned int id, + unsigned int offset ) { + uint32_t eecp; + + /* Locate the extended capability */ + while ( 1 ) { + + /* Locate first or next capability as applicable */ + if ( offset ) { + pci_read_config_dword ( pci, offset, &eecp ); + offset = EHCI_EECP_NEXT ( eecp ); + } else { + offset = ehci->eecp; + } + if ( ! offset ) + return 0; + + /* Check if this is the requested capability */ + pci_read_config_dword ( pci, offset, &eecp ); + if ( EHCI_EECP_ID ( eecp ) == id ) + return offset; + } +} + +/** + * Calculate buffer alignment + * + * @v len Length + * @ret align Buffer alignment + * + * Determine alignment required for a buffer which must be aligned to + * at least EHCI_MIN_ALIGN and which must not cross a page boundary. 
+ */ +static inline size_t ehci_align ( size_t len ) { + size_t align; + + /* Align to own length (rounded up to a power of two) */ + align = ( 1 << fls ( len - 1 ) ); + + /* Round up to EHCI_MIN_ALIGN if needed */ + if ( align < EHCI_MIN_ALIGN ) + align = EHCI_MIN_ALIGN; + + return align; +} + +/** + * Check control data structure reachability + * + * @v ehci EHCI device + * @v ptr Data structure pointer + * @ret rc Return status code + */ +static int ehci_ctrl_reachable ( struct ehci_device *ehci, void *ptr ) { + physaddr_t phys = virt_to_phys ( ptr ); + uint32_t segment; + + /* Always reachable in a 32-bit build */ + if ( sizeof ( physaddr_t ) <= sizeof ( uint32_t ) ) + return 0; + + /* Reachable only if control segment matches in a 64-bit build */ + segment = ( ( ( uint64_t ) phys ) >> 32 ); + if ( segment == ehci->ctrldssegment ) + return 0; + + return -ENOTSUP; +} + +/****************************************************************************** + * + * Diagnostics + * + ****************************************************************************** + */ + +/** + * Dump host controller registers + * + * @v ehci EHCI device + */ +static __unused void ehci_dump ( struct ehci_device *ehci ) { + uint8_t caplength; + uint16_t hciversion; + uint32_t hcsparams; + uint32_t hccparams; + uint32_t usbcmd; + uint32_t usbsts; + uint32_t usbintr; + uint32_t frindex; + uint32_t ctrldssegment; + uint32_t periodiclistbase; + uint32_t asynclistaddr; + uint32_t configflag; + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return; + + /* Dump capability registers */ + caplength = readb ( ehci->cap + EHCI_CAP_CAPLENGTH ); + hciversion = readw ( ehci->cap + EHCI_CAP_HCIVERSION ); + hcsparams = readl ( ehci->cap + EHCI_CAP_HCSPARAMS ); + hccparams = readl ( ehci->cap + EHCI_CAP_HCCPARAMS ); + DBGC ( ehci, "EHCI %s caplen %02x hciversion %04x hcsparams %08x " + "hccparams %08x\n", ehci->name, caplength, hciversion, + hcsparams, hccparams ); + + /* Dump operational registers */ + usbcmd = readl ( ehci->op + EHCI_OP_USBCMD ); + usbsts = readl ( ehci->op + EHCI_OP_USBSTS ); + usbintr = readl ( ehci->op + EHCI_OP_USBINTR ); + frindex = readl ( ehci->op + EHCI_OP_FRINDEX ); + ctrldssegment = readl ( ehci->op + EHCI_OP_CTRLDSSEGMENT ); + periodiclistbase = readl ( ehci->op + EHCI_OP_PERIODICLISTBASE ); + asynclistaddr = readl ( ehci->op + EHCI_OP_ASYNCLISTADDR ); + configflag = readl ( ehci->op + EHCI_OP_CONFIGFLAG ); + DBGC ( ehci, "EHCI %s usbcmd %08x usbsts %08x usbint %08x frindx " + "%08x\n", ehci->name, usbcmd, usbsts, usbintr, frindex ); + DBGC ( ehci, "EHCI %s ctrlds %08x period %08x asyncl %08x cfgflg " + "%08x\n", ehci->name, ctrldssegment, periodiclistbase, + asynclistaddr, configflag ); +} + +/****************************************************************************** + * + * USB legacy support + * + ****************************************************************************** + */ + +/** Prevent the release of ownership back to BIOS */ +static int ehci_legacy_prevent_release; + +/** + * Initialise USB legacy support + * + * @v ehci EHCI device + * @v pci PCI device + */ +static void ehci_legacy_init ( struct ehci_device *ehci, + struct pci_device *pci ) { + unsigned int legacy; + uint8_t bios; + + /* Locate USB legacy support capability (if present) */ + legacy = ehci_extended_capability ( ehci, pci, EHCI_EECP_ID_LEGACY, 0 ); + if ( ! 
legacy ) { + /* Not an error; capability may not be present */ + DBGC ( ehci, "EHCI %s has no USB legacy support capability\n", + ehci->name ); + return; + } + + /* Check if legacy USB support is enabled */ + pci_read_config_byte ( pci, ( legacy + EHCI_USBLEGSUP_BIOS ), &bios ); + if ( ! ( bios & EHCI_USBLEGSUP_BIOS_OWNED ) ) { + /* Not an error; already owned by OS */ + DBGC ( ehci, "EHCI %s USB legacy support already disabled\n", + ehci->name ); + return; + } + + /* Record presence of USB legacy support capability */ + ehci->legacy = legacy; +} + +/** + * Claim ownership from BIOS + * + * @v ehci EHCI device + * @v pci PCI device + */ +static void ehci_legacy_claim ( struct ehci_device *ehci, + struct pci_device *pci ) { + unsigned int legacy = ehci->legacy; + uint32_t ctlsts; + uint8_t bios; + unsigned int i; + + /* Do nothing unless legacy support capability is present */ + if ( ! legacy ) + return; + + /* Dump original SMI usage */ + pci_read_config_dword ( pci, ( legacy + EHCI_USBLEGSUP_CTLSTS ), + &ctlsts ); + if ( ctlsts ) { + DBGC ( ehci, "EHCI %s BIOS using SMIs: %08x\n", + ehci->name, ctlsts ); + } + + /* Claim ownership */ + pci_write_config_byte ( pci, ( legacy + EHCI_USBLEGSUP_OS ), + EHCI_USBLEGSUP_OS_OWNED ); + + /* Wait for BIOS to release ownership */ + for ( i = 0 ; i < EHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) { + + /* Check if BIOS has released ownership */ + pci_read_config_byte ( pci, ( legacy + EHCI_USBLEGSUP_BIOS ), + &bios ); + if ( ! ( bios & EHCI_USBLEGSUP_BIOS_OWNED ) ) { + DBGC ( ehci, "EHCI %s claimed ownership from BIOS\n", + ehci->name ); + pci_read_config_dword ( pci, ( legacy + + EHCI_USBLEGSUP_CTLSTS ), + &ctlsts ); + if ( ctlsts ) { + DBGC ( ehci, "EHCI %s warning: BIOS retained " + "SMIs: %08x\n", ehci->name, ctlsts ); + } + return; + } + + /* Delay */ + mdelay ( 1 ); + } + + /* BIOS did not release ownership. Claim it forcibly by + * disabling all SMIs. 
+ */ + DBGC ( ehci, "EHCI %s could not claim ownership from BIOS: forcibly " + "disabling SMIs\n", ehci->name ); + pci_write_config_dword ( pci, ( legacy + EHCI_USBLEGSUP_CTLSTS ), 0 ); +} + +/** + * Release ownership back to BIOS + * + * @v ehci EHCI device + * @v pci PCI device + */ +static void ehci_legacy_release ( struct ehci_device *ehci, + struct pci_device *pci ) { + unsigned int legacy = ehci->legacy; + uint32_t ctlsts; + + /* Do nothing unless legacy support capability is present */ + if ( ! legacy ) + return; + + /* Do nothing if releasing ownership is prevented */ + if ( ehci_legacy_prevent_release ) { + DBGC ( ehci, "EHCI %s not releasing ownership to BIOS\n", + ehci->name ); + return; + } + + /* Release ownership */ + pci_write_config_byte ( pci, ( legacy + EHCI_USBLEGSUP_OS ), 0 ); + DBGC ( ehci, "EHCI %s released ownership to BIOS\n", ehci->name ); + + /* Dump restored SMI usage */ + pci_read_config_dword ( pci, ( legacy + EHCI_USBLEGSUP_CTLSTS ), + &ctlsts ); + DBGC ( ehci, "EHCI %s BIOS reclaimed SMIs: %08x\n", + ehci->name, ctlsts ); +} + +/****************************************************************************** + * + * Companion controllers + * + ****************************************************************************** + */ + +/** + * Poll child companion controllers + * + * @v ehci EHCI device + */ +static void ehci_poll_companions ( struct ehci_device *ehci ) { + struct usb_bus *bus; + struct device_description *desc; + + /* Poll any USB buses belonging to child companion controllers */ + for_each_usb_bus ( bus ) { + + /* Get underlying devices description */ + desc = &bus->dev->desc; + + /* Skip buses that are not PCI devices */ + if ( desc->bus_type != BUS_TYPE_PCI ) + continue; + + /* Skip buses that are not part of the same PCI device */ + if ( PCI_FIRST_FUNC ( desc->location ) != + PCI_FIRST_FUNC ( ehci->bus->dev->desc.location ) ) + continue; + + /* Skip buses that are not UHCI or OHCI PCI devices */ + if ( ( desc->class != 
PCI_CLASS ( PCI_CLASS_SERIAL, + PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_UHCI ))&& + ( desc->class != PCI_CLASS ( PCI_CLASS_SERIAL, + PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_OHCI ) )) + continue; + + /* Poll child companion controller bus */ + DBGC2 ( ehci, "EHCI %s polling companion %s\n", + ehci->name, bus->name ); + usb_poll ( bus ); + } +} + +/** + * Locate EHCI companion controller + * + * @v pci PCI device + * @ret busdevfn EHCI companion controller bus:dev.fn (if any) + */ +unsigned int ehci_companion ( struct pci_device *pci ) { + struct pci_device tmp; + unsigned int busdevfn; + int rc; + + /* Look for an EHCI function on the same PCI device */ + busdevfn = pci->busdevfn; + while ( ++busdevfn <= PCI_LAST_FUNC ( pci->busdevfn ) ) { + pci_init ( &tmp, busdevfn ); + if ( ( rc = pci_read_config ( &tmp ) ) != 0 ) + continue; + if ( tmp.class == PCI_CLASS ( PCI_CLASS_SERIAL, + PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_EHCI ) ) + return busdevfn; + } + + return 0; +} + +/****************************************************************************** + * + * Run / stop / reset + * + ****************************************************************************** + */ + +/** + * Start EHCI device + * + * @v ehci EHCI device + */ +static void ehci_run ( struct ehci_device *ehci ) { + uint32_t usbcmd; + + /* Set run/stop bit */ + usbcmd = readl ( ehci->op + EHCI_OP_USBCMD ); + usbcmd &= ~EHCI_USBCMD_FLSIZE_MASK; + usbcmd |= ( EHCI_USBCMD_RUN | EHCI_USBCMD_FLSIZE ( ehci->flsize ) | + EHCI_USBCMD_PERIODIC | EHCI_USBCMD_ASYNC ); + writel ( usbcmd, ehci->op + EHCI_OP_USBCMD ); +} + +/** + * Stop EHCI device + * + * @v ehci EHCI device + * @ret rc Return status code + */ +static int ehci_stop ( struct ehci_device *ehci ) { + uint32_t usbcmd; + uint32_t usbsts; + unsigned int i; + + /* Clear run/stop bit */ + usbcmd = readl ( ehci->op + EHCI_OP_USBCMD ); + usbcmd &= ~( EHCI_USBCMD_RUN | EHCI_USBCMD_PERIODIC | + EHCI_USBCMD_ASYNC ); + writel ( usbcmd, ehci->op + 
EHCI_OP_USBCMD ); + + /* Wait for device to stop */ + for ( i = 0 ; i < EHCI_STOP_MAX_WAIT_MS ; i++ ) { + + /* Check if device is stopped */ + usbsts = readl ( ehci->op + EHCI_OP_USBSTS ); + if ( usbsts & EHCI_USBSTS_HCH ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( ehci, "EHCI %s timed out waiting for stop\n", ehci->name ); + return -ETIMEDOUT; +} + +/** + * Reset EHCI device + * + * @v ehci EHCI device + * @ret rc Return status code + */ +static int ehci_reset ( struct ehci_device *ehci ) { + uint32_t usbcmd; + unsigned int i; + int rc; + + /* The EHCI specification states that resetting a running + * device may result in undefined behaviour, so try stopping + * it first. + */ + if ( ( rc = ehci_stop ( ehci ) ) != 0 ) { + /* Ignore errors and attempt to reset the device anyway */ + } + + /* Reset device */ + writel ( EHCI_USBCMD_HCRST, ehci->op + EHCI_OP_USBCMD ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < EHCI_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if reset is complete */ + usbcmd = readl ( ehci->op + EHCI_OP_USBCMD ); + if ( ! 
( usbcmd & EHCI_USBCMD_HCRST ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( ehci, "EHCI %s timed out waiting for reset\n", ehci->name ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Transfer descriptor rings + * + ****************************************************************************** + */ + +/** + * Allocate transfer descriptor ring + * + * @v ehci EHCI device + * @v ring Transfer descriptor ring + * @ret rc Return status code + */ +static int ehci_ring_alloc ( struct ehci_device *ehci, + struct ehci_ring *ring ) { + struct ehci_transfer_descriptor *desc; + struct ehci_transfer_descriptor *next; + unsigned int i; + size_t len; + uint32_t link; + int rc; + + /* Initialise structure */ + memset ( ring, 0, sizeof ( *ring ) ); + + /* Allocate I/O buffers */ + ring->iobuf = zalloc ( EHCI_RING_COUNT * sizeof ( ring->iobuf[0] ) ); + if ( ! ring->iobuf ) { + rc = -ENOMEM; + goto err_alloc_iobuf; + } + + /* Allocate queue head */ + ring->head = malloc_dma ( sizeof ( *ring->head ), + ehci_align ( sizeof ( *ring->head ) ) ); + if ( ! ring->head ) { + rc = -ENOMEM; + goto err_alloc_queue; + } + if ( ( rc = ehci_ctrl_reachable ( ehci, ring->head ) ) != 0 ) { + DBGC ( ehci, "EHCI %s queue head unreachable\n", ehci->name ); + goto err_unreachable_queue; + } + memset ( ring->head, 0, sizeof ( *ring->head ) ); + + /* Allocate transfer descriptors */ + len = ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) ); + ring->desc = malloc_dma ( len, sizeof ( ring->desc[0] ) ); + if ( ! 
ring->desc ) { + rc = -ENOMEM; + goto err_alloc_desc; + } + memset ( ring->desc, 0, len ); + + /* Initialise transfer descriptors */ + for ( i = 0 ; i < EHCI_RING_COUNT ; i++ ) { + desc = &ring->desc[i]; + if ( ( rc = ehci_ctrl_reachable ( ehci, desc ) ) != 0 ) { + DBGC ( ehci, "EHCI %s descriptor unreachable\n", + ehci->name ); + goto err_unreachable_desc; + } + next = &ring->desc[ ( i + 1 ) % EHCI_RING_COUNT ]; + link = virt_to_phys ( next ); + desc->next = cpu_to_le32 ( link ); + desc->alt = cpu_to_le32 ( link ); + } + + /* Initialise queue head */ + link = virt_to_phys ( &ring->desc[0] ); + ring->head->cache.next = cpu_to_le32 ( link ); + + return 0; + + err_unreachable_desc: + free_dma ( ring->desc, len ); + err_alloc_desc: + err_unreachable_queue: + free_dma ( ring->head, sizeof ( *ring->head ) ); + err_alloc_queue: + free ( ring->iobuf ); + err_alloc_iobuf: + return rc; +} + +/** + * Free transfer descriptor ring + * + * @v ring Transfer descriptor ring + */ +static void ehci_ring_free ( struct ehci_ring *ring ) { + unsigned int i; + + /* Sanity checks */ + assert ( ehci_ring_fill ( ring ) == 0 ); + for ( i = 0 ; i < EHCI_RING_COUNT ; i++ ) + assert ( ring->iobuf[i] == NULL ); + + /* Free transfer descriptors */ + free_dma ( ring->desc, ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) ) ); + + /* Free queue head */ + free_dma ( ring->head, sizeof ( *ring->head ) ); + + /* Free I/O buffers */ + free ( ring->iobuf ); +} + +/** + * Enqueue transfer descriptors + * + * @v ehci EHCI device + * @v ring Transfer descriptor ring + * @v iobuf I/O buffer + * @v xfers Transfers + * @v count Number of transfers + * @ret rc Return status code + */ +static int ehci_enqueue ( struct ehci_device *ehci, struct ehci_ring *ring, + struct io_buffer *iobuf, + const struct ehci_transfer *xfer, + unsigned int count ) { + struct ehci_transfer_descriptor *desc; + physaddr_t phys; + void *data; + size_t len; + size_t offset; + size_t frag_len; + unsigned int toggle; + unsigned int index; 
+ unsigned int i; + + /* Sanity check */ + assert ( iobuf != NULL ); + assert ( count > 0 ); + + /* Fail if ring does not have sufficient space */ + if ( ehci_ring_remaining ( ring ) < count ) + return -ENOBUFS; + + /* Fail if any portion is unreachable */ + for ( i = 0 ; i < count ; i++ ) { + if ( ! xfer[i].len ) + continue; + phys = ( virt_to_phys ( xfer[i].data ) + xfer[i].len - 1 ); + if ( ( phys > 0xffffffffUL ) && ( ! ehci->addr64 ) ) + return -ENOTSUP; + } + + /* Enqueue each transfer, recording the I/O buffer with the last */ + for ( ; count ; ring->prod++, xfer++ ) { + + /* Populate descriptor header */ + index = ( ring->prod % EHCI_RING_COUNT ); + desc = &ring->desc[index]; + toggle = ( xfer->flags & EHCI_FL_TOGGLE ); + assert ( xfer->len <= EHCI_LEN_MASK ); + assert ( EHCI_FL_TOGGLE == EHCI_LEN_TOGGLE ); + desc->len = cpu_to_le16 ( xfer->len | toggle ); + desc->flags = ( xfer->flags | EHCI_FL_CERR_MAX ); + + /* Populate buffer pointers */ + data = xfer->data; + len = xfer->len; + for ( i = 0 ; len ; i++ ) { + + /* Calculate length of this fragment */ + phys = virt_to_phys ( data ); + offset = ( phys & ( EHCI_PAGE_ALIGN - 1 ) ); + frag_len = ( EHCI_PAGE_ALIGN - offset ); + if ( frag_len > len ) + frag_len = len; + + /* Sanity checks */ + assert ( ( i == 0 ) || ( offset == 0 ) ); + assert ( i < ( sizeof ( desc->low ) / + sizeof ( desc->low[0] ) ) ); + + /* Populate buffer pointer */ + desc->low[i] = cpu_to_le32 ( phys ); + if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) { + desc->high[i] = + cpu_to_le32 ( ((uint64_t) phys) >> 32 ); + } + + /* Move to next fragment */ + data += frag_len; + len -= frag_len; + } + + /* Ensure everything is valid before activating descriptor */ + wmb(); + desc->status = EHCI_STATUS_ACTIVE; + + /* Record I/O buffer against last ring index */ + if ( --count == 0 ) + ring->iobuf[index] = iobuf; + } + + return 0; +} + +/** + * Dequeue a transfer descriptor + * + * @v ring Transfer descriptor ring + * @ret iobuf I/O buffer (or 
NULL) + */ +static struct io_buffer * ehci_dequeue ( struct ehci_ring *ring ) { + struct ehci_transfer_descriptor *desc; + struct io_buffer *iobuf; + unsigned int index = ( ring->cons % EHCI_RING_COUNT ); + + /* Sanity check */ + assert ( ehci_ring_fill ( ring ) > 0 ); + + /* Mark descriptor as inactive (and not halted) */ + desc = &ring->desc[index]; + desc->status = 0; + + /* Retrieve I/O buffer */ + iobuf = ring->iobuf[index]; + ring->iobuf[index] = NULL; + + /* Update consumer counter */ + ring->cons++; + + return iobuf; +} + +/****************************************************************************** + * + * Schedule management + * + ****************************************************************************** + */ + +/** + * Get link value for a queue head + * + * @v queue Queue head + * @ret link Link value + */ +static inline uint32_t ehci_link_qh ( struct ehci_queue_head *queue ) { + + return ( virt_to_phys ( queue ) | EHCI_LINK_TYPE_QH ); +} + +/** + * (Re)build asynchronous schedule + * + * @v ehci EHCI device + */ +static void ehci_async_schedule ( struct ehci_device *ehci ) { + struct ehci_endpoint *endpoint; + struct ehci_queue_head *queue; + uint32_t link; + + /* Build schedule in reverse order of execution. Provided + * that we only ever add or remove single endpoints, this can + * safely run concurrently with hardware execution of the + * schedule. 
+ */ + link = ehci_link_qh ( ehci->head ); + list_for_each_entry_reverse ( endpoint, &ehci->async, schedule ) { + queue = endpoint->ring.head; + queue->link = cpu_to_le32 ( link ); + wmb(); + link = ehci_link_qh ( queue ); + } + ehci->head->link = cpu_to_le32 ( link ); + wmb(); +} + +/** + * Add endpoint to asynchronous schedule + * + * @v endpoint Endpoint + */ +static void ehci_async_add ( struct ehci_endpoint *endpoint ) { + struct ehci_device *ehci = endpoint->ehci; + + /* Add to end of schedule */ + list_add_tail ( &endpoint->schedule, &ehci->async ); + + /* Rebuild schedule */ + ehci_async_schedule ( ehci ); +} + +/** + * Remove endpoint from asynchronous schedule + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int ehci_async_del ( struct ehci_endpoint *endpoint ) { + struct ehci_device *ehci = endpoint->ehci; + uint32_t usbcmd; + uint32_t usbsts; + unsigned int i; + + /* Remove from schedule */ + list_check_contains_entry ( endpoint, &ehci->async, schedule ); + list_del ( &endpoint->schedule ); + + /* Rebuild schedule */ + ehci_async_schedule ( ehci ); + + /* Request notification when asynchronous schedule advances */ + usbcmd = readl ( ehci->op + EHCI_OP_USBCMD ); + usbcmd |= EHCI_USBCMD_ASYNC_ADVANCE; + writel ( usbcmd, ehci->op + EHCI_OP_USBCMD ); + + /* Wait for asynchronous schedule to advance */ + for ( i = 0 ; i < EHCI_ASYNC_ADVANCE_MAX_WAIT_MS ; i++ ) { + + /* Check for asynchronous schedule advancing */ + usbsts = readl ( ehci->op + EHCI_OP_USBSTS ); + if ( usbsts & EHCI_USBSTS_ASYNC_ADVANCE ) { + usbsts &= ~EHCI_USBSTS_CHANGE; + usbsts |= EHCI_USBSTS_ASYNC_ADVANCE; + writel ( usbsts, ehci->op + EHCI_OP_USBSTS ); + return 0; + } + + /* Delay */ + mdelay ( 1 ); + } + + /* Bad things will probably happen now */ + DBGC ( ehci, "EHCI %s timed out waiting for asynchronous schedule " + "to advance\n", ehci->name ); + return -ETIMEDOUT; +} + +/** + * (Re)build periodic schedule + * + * @v ehci EHCI device + */ +static void 
ehci_periodic_schedule ( struct ehci_device *ehci ) { + struct ehci_endpoint *endpoint; + struct ehci_queue_head *queue; + uint32_t link; + unsigned int frames; + unsigned int max_interval; + unsigned int i; + + /* Build schedule in reverse order of execution. Provided + * that we only ever add or remove single endpoints, this can + * safely run concurrently with hardware execution of the + * schedule. + */ + DBGCP ( ehci, "EHCI %s periodic schedule: ", ehci->name ); + link = EHCI_LINK_TERMINATE; + list_for_each_entry_reverse ( endpoint, &ehci->periodic, schedule ) { + queue = endpoint->ring.head; + queue->link = cpu_to_le32 ( link ); + wmb(); + DBGCP ( ehci, "%s%d", + ( ( link == EHCI_LINK_TERMINATE ) ? "" : "<-" ), + endpoint->ep->interval ); + link = ehci_link_qh ( queue ); + } + DBGCP ( ehci, "\n" ); + + /* Populate periodic frame list */ + DBGCP ( ehci, "EHCI %s periodic frame list:", ehci->name ); + frames = EHCI_PERIODIC_FRAMES ( ehci->flsize ); + for ( i = 0 ; i < frames ; i++ ) { + + /* Calculate maximum interval (in microframes) which + * may appear as part of this frame list. + */ + if ( i == 0 ) { + /* Start of list: include all endpoints */ + max_interval = -1U; + } else { + /* Calculate highest power-of-two frame interval */ + max_interval = ( 1 << ( ffs ( i ) - 1 ) ); + /* Convert to microframes */ + max_interval <<= 3; + /* Round up to nearest 2^n-1 */ + max_interval = ( ( max_interval << 1 ) - 1 ); + } + + /* Find first endpoint in schedule satisfying this + * maximum interval constraint. 
+ */ + link = EHCI_LINK_TERMINATE; + list_for_each_entry ( endpoint, &ehci->periodic, schedule ) { + if ( endpoint->ep->interval <= max_interval ) { + queue = endpoint->ring.head; + link = ehci_link_qh ( queue ); + DBGCP ( ehci, " %d:%d", + i, endpoint->ep->interval ); + break; + } + } + ehci->frame[i].link = cpu_to_le32 ( link ); + } + wmb(); + DBGCP ( ehci, "\n" ); +} + +/** + * Add endpoint to periodic schedule + * + * @v endpoint Endpoint + */ +static void ehci_periodic_add ( struct ehci_endpoint *endpoint ) { + struct ehci_device *ehci = endpoint->ehci; + struct ehci_endpoint *before; + unsigned int interval = endpoint->ep->interval; + + /* Find first endpoint with a smaller interval */ + list_for_each_entry ( before, &ehci->periodic, schedule ) { + if ( before->ep->interval < interval ) + break; + } + list_add_tail ( &endpoint->schedule, &before->schedule ); + + /* Rebuild schedule */ + ehci_periodic_schedule ( ehci ); +} + +/** + * Remove endpoint from periodic schedule + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int ehci_periodic_del ( struct ehci_endpoint *endpoint ) { + struct ehci_device *ehci = endpoint->ehci; + + /* Remove from schedule */ + list_check_contains_entry ( endpoint, &ehci->periodic, schedule ); + list_del ( &endpoint->schedule ); + + /* Rebuild schedule */ + ehci_periodic_schedule ( ehci ); + + /* Delay for a whole USB frame (with a 100% safety margin) */ + mdelay ( 2 ); + + return 0; +} + +/** + * Add endpoint to appropriate schedule + * + * @v endpoint Endpoint + */ +static void ehci_schedule_add ( struct ehci_endpoint *endpoint ) { + struct usb_endpoint *ep = endpoint->ep; + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + + if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) { + ehci_periodic_add ( endpoint ); + } else { + ehci_async_add ( endpoint ); + } +} + +/** + * Remove endpoint from appropriate schedule + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int 
ehci_schedule_del ( struct ehci_endpoint *endpoint ) { + struct usb_endpoint *ep = endpoint->ep; + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + + if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) { + return ehci_periodic_del ( endpoint ); + } else { + return ehci_async_del ( endpoint ); + } +} + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Determine endpoint characteristics + * + * @v ep USB endpoint + * @ret chr Endpoint characteristics + */ +static uint32_t ehci_endpoint_characteristics ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + uint32_t chr; + + /* Determine basic characteristics */ + chr = ( EHCI_CHR_ADDRESS ( usb->address ) | + EHCI_CHR_ENDPOINT ( ep->address ) | + EHCI_CHR_MAX_LEN ( ep->mtu ) ); + + /* Control endpoints require manual control of the data toggle */ + if ( attr == USB_ENDPOINT_ATTR_CONTROL ) + chr |= EHCI_CHR_TOGGLE; + + /* Determine endpoint speed */ + if ( usb->speed == USB_SPEED_HIGH ) { + chr |= EHCI_CHR_EPS_HIGH; + } else { + if ( usb->speed == USB_SPEED_FULL ) { + chr |= EHCI_CHR_EPS_FULL; + } else { + chr |= EHCI_CHR_EPS_LOW; + } + if ( attr == USB_ENDPOINT_ATTR_CONTROL ) + chr |= EHCI_CHR_CONTROL; + } + + return chr; +} + +/** + * Determine endpoint capabilities + * + * @v ep USB endpoint + * @ret cap Endpoint capabilities + */ +static uint32_t ehci_endpoint_capabilities ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + struct usb_port *tt = usb_transaction_translator ( usb ); + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + uint32_t cap; + unsigned int i; + + /* Determine basic capabilities */ + cap = EHCI_CAP_MULT ( ep->burst + 1 ); + + /* Determine interrupt schedule mask, if applicable */ + if ( ( attr == 
USB_ENDPOINT_ATTR_INTERRUPT ) && + ( ( ep->interval != 0 ) /* avoid infinite loop */ ) ) { + for ( i = 0 ; i < 8 /* microframes per frame */ ; + i += ep->interval ) { + cap |= EHCI_CAP_INTR_SCHED ( i ); + } + } + + /* Set transaction translator hub address and port, if applicable */ + if ( tt ) { + assert ( tt->hub->usb ); + cap |= ( EHCI_CAP_TT_HUB ( tt->hub->usb->address ) | + EHCI_CAP_TT_PORT ( tt->address ) ); + if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) + cap |= EHCI_CAP_SPLIT_SCHED_DEFAULT; + } + + return cap; +} + +/** + * Update endpoint characteristics and capabilities + * + * @v ep USB endpoint + */ +static void ehci_endpoint_update ( struct usb_endpoint *ep ) { + struct ehci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct ehci_queue_head *head; + + /* Update queue characteristics and capabilities */ + head = endpoint->ring.head; + head->chr = cpu_to_le32 ( ehci_endpoint_characteristics ( ep ) ); + head->cap = cpu_to_le32 ( ehci_endpoint_capabilities ( ep ) ); +} + +/** + * Open endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int ehci_endpoint_open ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + struct ehci_device *ehci = usb_get_hostdata ( usb ); + struct ehci_endpoint *endpoint; + int rc; + + /* Allocate and initialise structure */ + endpoint = zalloc ( sizeof ( *endpoint ) ); + if ( ! 
endpoint ) { + rc = -ENOMEM; + goto err_alloc; + } + endpoint->ehci = ehci; + endpoint->ep = ep; + usb_endpoint_set_hostdata ( ep, endpoint ); + + /* Initialise descriptor ring */ + if ( ( rc = ehci_ring_alloc ( ehci, &endpoint->ring ) ) != 0 ) + goto err_ring_alloc; + + /* Update queue characteristics and capabilities */ + ehci_endpoint_update ( ep ); + + /* Add to list of endpoints */ + list_add_tail ( &endpoint->list, &ehci->endpoints ); + + /* Add to schedule */ + ehci_schedule_add ( endpoint ); + + return 0; + + ehci_ring_free ( &endpoint->ring ); + err_ring_alloc: + free ( endpoint ); + err_alloc: + return rc; +} + +/** + * Close endpoint + * + * @v ep USB endpoint + */ +static void ehci_endpoint_close ( struct usb_endpoint *ep ) { + struct ehci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct ehci_device *ehci = endpoint->ehci; + struct usb_device *usb = ep->usb; + struct io_buffer *iobuf; + int rc; + + /* Remove from schedule */ + if ( ( rc = ehci_schedule_del ( endpoint ) ) != 0 ) { + /* No way to prevent hardware from continuing to + * access the memory, so leak it. 
+ */ + DBGC ( ehci, "EHCI %s %s could not unschedule: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + return; + } + + /* Cancel any incomplete transfers */ + while ( ehci_ring_fill ( &endpoint->ring ) ) { + iobuf = ehci_dequeue ( &endpoint->ring ); + if ( iobuf ) + usb_complete_err ( ep, iobuf, -ECANCELED ); + } + + /* Remove from list of endpoints */ + list_del ( &endpoint->list ); + + /* Free descriptor ring */ + ehci_ring_free ( &endpoint->ring ); + + /* Free endpoint */ + free ( endpoint ); +} + +/** + * Reset endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int ehci_endpoint_reset ( struct usb_endpoint *ep ) { + struct ehci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct ehci_ring *ring = &endpoint->ring; + struct ehci_transfer_descriptor *cache = &ring->head->cache; + uint32_t link; + + /* Sanity checks */ + assert ( ! ( cache->status & EHCI_STATUS_ACTIVE ) ); + assert ( cache->status & EHCI_STATUS_HALTED ); + + /* Reset residual count */ + ring->residual = 0; + + /* Reset data toggle */ + cache->len = 0; + + /* Prepare to restart at next unconsumed descriptor */ + link = virt_to_phys ( &ring->desc[ ring->cons % EHCI_RING_COUNT ] ); + cache->next = cpu_to_le32 ( link ); + + /* Restart ring */ + wmb(); + cache->status = 0; + + return 0; +} + +/** + * Update MTU + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int ehci_endpoint_mtu ( struct usb_endpoint *ep ) { + + /* Update endpoint characteristics and capabilities */ + ehci_endpoint_update ( ep ); + + return 0; +} + +/** + * Enqueue message transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int ehci_endpoint_message ( struct usb_endpoint *ep, + struct io_buffer *iobuf ) { + struct ehci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct ehci_device *ehci = endpoint->ehci; + struct usb_setup_packet *packet; + unsigned int input; + struct ehci_transfer 
xfers[3]; + struct ehci_transfer *xfer = xfers; + size_t len; + int rc; + + /* Construct setup stage */ + assert ( iob_len ( iobuf ) >= sizeof ( *packet ) ); + packet = iobuf->data; + iob_pull ( iobuf, sizeof ( *packet ) ); + xfer->data = packet; + xfer->len = sizeof ( *packet ); + xfer->flags = EHCI_FL_PID_SETUP; + xfer++; + + /* Construct data stage, if applicable */ + len = iob_len ( iobuf ); + input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) ); + if ( len ) { + xfer->data = iobuf->data; + xfer->len = len; + xfer->flags = ( EHCI_FL_TOGGLE | + ( input ? EHCI_FL_PID_IN : EHCI_FL_PID_OUT ) ); + xfer++; + } + + /* Construct status stage */ + xfer->data = NULL; + xfer->len = 0; + xfer->flags = ( EHCI_FL_TOGGLE | EHCI_FL_IOC | + ( ( len && input ) ? EHCI_FL_PID_OUT : EHCI_FL_PID_IN)); + xfer++; + + /* Enqueue transfer */ + if ( ( rc = ehci_enqueue ( ehci, &endpoint->ring, iobuf, xfers, + ( xfer - xfers ) ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Calculate number of transfer descriptors + * + * @v len Length of data + * @v zlp Append a zero-length packet + * @ret count Number of transfer descriptors + */ +static unsigned int ehci_endpoint_count ( size_t len, int zlp ) { + unsigned int count; + + /* Split into 16kB transfers. A single transfer can handle up + * to 20kB if it happens to be page-aligned, or up to 16kB + * with arbitrary alignment. We simplify the code by assuming + * that we can fit only 16kB into each transfer. 
+ */ + count = ( ( len + EHCI_MTU - 1 ) / EHCI_MTU ); + + /* Append a zero-length transfer if applicable */ + if ( zlp || ( count == 0 ) ) + count++; + + return count; +} + +/** + * Enqueue stream transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v zlp Append a zero-length packet + * @ret rc Return status code + */ +static int ehci_endpoint_stream ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int zlp ) { + struct ehci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct ehci_device *ehci = endpoint->ehci; + void *data = iobuf->data; + size_t len = iob_len ( iobuf ); + unsigned int count = ehci_endpoint_count ( len, zlp ); + unsigned int input = ( ep->address & USB_DIR_IN ); + unsigned int flags = ( input ? EHCI_FL_PID_IN : EHCI_FL_PID_OUT ); + struct ehci_transfer xfers[count]; + struct ehci_transfer *xfer = xfers; + size_t xfer_len; + unsigned int i; + int rc; + + /* Create transfers */ + for ( i = 0 ; i < count ; i++ ) { + + /* Calculate transfer length */ + xfer_len = EHCI_MTU; + if ( xfer_len > len ) + xfer_len = len; + + /* Create transfer */ + xfer->data = data; + xfer->len = xfer_len; + xfer->flags = flags; + + /* Move to next transfer */ + data += xfer_len; + len -= xfer_len; + xfer++; + } + xfer[-1].flags |= EHCI_FL_IOC; + + /* Enqueue transfer */ + if ( ( rc = ehci_enqueue ( ehci, &endpoint->ring, iobuf, xfers, + count ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completions + * + * @v endpoint Endpoint + */ +static void ehci_endpoint_poll ( struct ehci_endpoint *endpoint ) { + struct ehci_device *ehci = endpoint->ehci; + struct ehci_ring *ring = &endpoint->ring; + struct ehci_transfer_descriptor *desc; + struct usb_endpoint *ep = endpoint->ep; + struct usb_device *usb = ep->usb; + struct io_buffer *iobuf; + unsigned int index; + unsigned int status; + int rc; + + /* Consume all completed descriptors */ + while ( ehci_ring_fill ( &endpoint->ring ) ) { + + /* Stop if we reach an uncompleted descriptor */ + 
rmb(); + index = ( ring->cons % EHCI_RING_COUNT ); + desc = &ring->desc[index]; + status = desc->status; + if ( status & EHCI_STATUS_ACTIVE ) + break; + + /* Consume this descriptor */ + iobuf = ehci_dequeue ( ring ); + + /* If we have encountered an error, then consume all + * remaining descriptors in this transaction, report + * the error to the USB core, and stop further + * processing. + */ + if ( status & EHCI_STATUS_HALTED ) { + rc = -EIO_STATUS ( status ); + DBGC ( ehci, "EHCI %s %s completion %d failed (status " + "%02x): %s\n", usb->name, + usb_endpoint_name ( ep ), index, status, + strerror ( rc ) ); + while ( ! iobuf ) + iobuf = ehci_dequeue ( ring ); + usb_complete_err ( endpoint->ep, iobuf, rc ); + return; + } + + /* Accumulate residual data count */ + ring->residual += ( le16_to_cpu ( desc->len ) & EHCI_LEN_MASK ); + + /* If this is not the end of a transaction (i.e. has + * no I/O buffer), then continue to next descriptor. + */ + if ( ! iobuf ) + continue; + + /* Update I/O buffer length */ + iob_unput ( iobuf, ring->residual ); + ring->residual = 0; + + /* Report completion to USB core */ + usb_complete ( endpoint->ep, iobuf ); + } +} + +/****************************************************************************** + * + * Device operations + * + ****************************************************************************** + */ + +/** + * Open device + * + * @v usb USB device + * @ret rc Return status code + */ +static int ehci_device_open ( struct usb_device *usb ) { + struct ehci_device *ehci = usb_bus_get_hostdata ( usb->port->hub->bus ); + + usb_set_hostdata ( usb, ehci ); + return 0; +} + +/** + * Close device + * + * @v usb USB device + */ +static void ehci_device_close ( struct usb_device *usb ) { + struct ehci_device *ehci = usb_get_hostdata ( usb ); + struct usb_bus *bus = ehci->bus; + + /* Free device address, if assigned */ + if ( usb->address ) + usb_free_address ( bus, usb->address ); +} + +/** + * Assign device address + * + * @v usb 
USB device + * @ret rc Return status code + */ +static int ehci_device_address ( struct usb_device *usb ) { + struct ehci_device *ehci = usb_get_hostdata ( usb ); + struct usb_bus *bus = ehci->bus; + struct usb_endpoint *ep0 = usb_endpoint ( usb, USB_EP0_ADDRESS ); + int address; + int rc; + + /* Sanity checks */ + assert ( usb->address == 0 ); + assert ( ep0 != NULL ); + + /* Allocate device address */ + address = usb_alloc_address ( bus ); + if ( address < 0 ) { + rc = address; + DBGC ( ehci, "EHCI %s could not allocate address: %s\n", + usb->name, strerror ( rc ) ); + goto err_alloc_address; + } + + /* Set address */ + if ( ( rc = usb_set_address ( usb, address ) ) != 0 ) + goto err_set_address; + + /* Update device address */ + usb->address = address; + + /* Update control endpoint characteristics and capabilities */ + ehci_endpoint_update ( ep0 ); + + return 0; + + err_set_address: + usb_free_address ( bus, address ); + err_alloc_address: + return rc; +} + +/****************************************************************************** + * + * Hub operations + * + ****************************************************************************** + */ + +/** + * Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int ehci_hub_open ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close hub + * + * @v hub USB hub + */ +static void ehci_hub_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/****************************************************************************** + * + * Root hub operations + * + ****************************************************************************** + */ + +/** + * Open root hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int ehci_root_open ( struct usb_hub *hub ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + unsigned int i; + + /* Route all ports to EHCI controller */ + writel ( EHCI_CONFIGFLAG_CF, 
ehci->op + EHCI_OP_CONFIGFLAG ); + + /* Enable power to all ports */ + for ( i = 1 ; i <= ehci->ports ; i++ ) { + portsc = readl ( ehci->op + EHCI_OP_PORTSC ( i ) ); + portsc &= ~EHCI_PORTSC_CHANGE; + portsc |= EHCI_PORTSC_PP; + writel ( portsc, ehci->op + EHCI_OP_PORTSC ( i ) ); + } + + /* Wait 20ms after potentially enabling power to a port */ + mdelay ( EHCI_PORT_POWER_DELAY_MS ); + + return 0; +} + +/** + * Close root hub + * + * @v hub USB hub + */ +static void ehci_root_close ( struct usb_hub *hub ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + + /* Route all ports back to companion controllers */ + writel ( 0, ehci->op + EHCI_OP_CONFIGFLAG ); +} + +/** + * Enable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int ehci_root_enable ( struct usb_hub *hub, struct usb_port *port ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + unsigned int line; + unsigned int i; + + /* Check for a low-speed device */ + portsc = readl ( ehci->op + EHCI_OP_PORTSC ( port->address ) ); + line = EHCI_PORTSC_LINE_STATUS ( portsc ); + if ( line == EHCI_PORTSC_LINE_STATUS_LOW ) { + DBGC ( ehci, "EHCI %s-%d detected low-speed device: " + "disowning\n", ehci->name, port->address ); + goto disown; + } + + /* Reset port */ + portsc &= ~( EHCI_PORTSC_PED | EHCI_PORTSC_CHANGE ); + portsc |= EHCI_PORTSC_PR; + writel ( portsc, ehci->op + EHCI_OP_PORTSC ( port->address ) ); + mdelay ( USB_RESET_DELAY_MS ); + portsc &= ~EHCI_PORTSC_PR; + writel ( portsc, ehci->op + EHCI_OP_PORTSC ( port->address ) ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < EHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check port status */ + portsc = readl ( ehci->op + EHCI_OP_PORTSC ( port->address ) ); + if ( ! 
( portsc & EHCI_PORTSC_PR ) ) { + if ( portsc & EHCI_PORTSC_PED ) + return 0; + DBGC ( ehci, "EHCI %s-%d not enabled after reset: " + "disowning\n", ehci->name, port->address ); + goto disown; + } + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( ehci, "EHCI %s-%d timed out waiting for port to reset\n", + ehci->name, port->address ); + return -ETIMEDOUT; + + disown: + /* Disown port */ + portsc &= ~EHCI_PORTSC_CHANGE; + portsc |= EHCI_PORTSC_OWNER; + writel ( portsc, ehci->op + EHCI_OP_PORTSC ( port->address ) ); + + /* Delay to allow child companion controllers to settle */ + mdelay ( EHCI_DISOWN_DELAY_MS ); + + /* Poll child companion controllers */ + ehci_poll_companions ( ehci ); + + return -ENODEV; +} + +/** + * Disable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int ehci_root_disable ( struct usb_hub *hub, struct usb_port *port ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + + /* Disable port */ + portsc = readl ( ehci->op + EHCI_OP_PORTSC ( port->address ) ); + portsc &= ~( EHCI_PORTSC_PED | EHCI_PORTSC_CHANGE ); + writel ( portsc, ehci->op + EHCI_OP_PORTSC ( port->address ) ); + + return 0; +} + +/** + * Update root hub port speed + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int ehci_root_speed ( struct usb_hub *hub, struct usb_port *port ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + unsigned int speed; + unsigned int line; + int ccs; + int csc; + int ped; + + /* Read port status */ + portsc = readl ( ehci->op + EHCI_OP_PORTSC ( port->address ) ); + DBGC2 ( ehci, "EHCI %s-%d status is %08x\n", + ehci->name, port->address, portsc ); + ccs = ( portsc & EHCI_PORTSC_CCS ); + csc = ( portsc & EHCI_PORTSC_CSC ); + ped = ( portsc & EHCI_PORTSC_PED ); + line = EHCI_PORTSC_LINE_STATUS ( portsc ); + + /* Record disconnections and clear changes */ + port->disconnected |= csc; + writel ( portsc, ehci->op + 
EHCI_OP_PORTSC ( port->address ) ); + + /* Determine port speed */ + if ( ! ccs ) { + /* Port not connected */ + speed = USB_SPEED_NONE; + } else if ( line == EHCI_PORTSC_LINE_STATUS_LOW ) { + /* Detected as low-speed */ + speed = USB_SPEED_LOW; + } else if ( ped ) { + /* Port already enabled: must be high-speed */ + speed = USB_SPEED_HIGH; + } else { + /* Not low-speed and not yet enabled. Could be either + * full-speed or high-speed; we can't yet tell. + */ + speed = USB_SPEED_FULL; + } + port->speed = speed; + return 0; +} + +/** + * Clear transaction translator buffer + * + * @v hub USB hub + * @v port USB port + * @v ep USB endpoint + * @ret rc Return status code + */ +static int ehci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port, + struct usb_endpoint *ep ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + + /* Should never be called; this is a root hub */ + DBGC ( ehci, "EHCI %s-%d nonsensical CLEAR_TT for %s %s\n", ehci->name, + port->address, ep->usb->name, usb_endpoint_name ( ep ) ); + + return -ENOTSUP; +} + +/** + * Poll for port status changes + * + * @v hub USB hub + * @v port USB port + */ +static void ehci_root_poll ( struct usb_hub *hub, struct usb_port *port ) { + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + uint32_t change; + + /* Do nothing unless something has changed */ + portsc = readl ( ehci->op + EHCI_OP_PORTSC ( port->address ) ); + change = ( portsc & EHCI_PORTSC_CHANGE ); + if ( ! 
change ) + return; + + /* Record disconnections and clear changes */ + port->disconnected |= ( portsc & EHCI_PORTSC_CSC ); + writel ( portsc, ehci->op + EHCI_OP_PORTSC ( port->address ) ); + + /* Report port status change */ + usb_port_changed ( port ); +} + +/****************************************************************************** + * + * Bus operations + * + ****************************************************************************** + */ + +/** + * Open USB bus + * + * @v bus USB bus + * @ret rc Return status code + */ +static int ehci_bus_open ( struct usb_bus *bus ) { + struct ehci_device *ehci = usb_bus_get_hostdata ( bus ); + unsigned int frames; + size_t len; + int rc; + + /* Sanity checks */ + assert ( list_empty ( &ehci->async ) ); + assert ( list_empty ( &ehci->periodic ) ); + + /* Allocate and initialise asynchronous queue head */ + ehci->head = malloc_dma ( sizeof ( *ehci->head ), + ehci_align ( sizeof ( *ehci->head ) ) ); + if ( ! ehci->head ) { + rc = -ENOMEM; + goto err_alloc_head; + } + memset ( ehci->head, 0, sizeof ( *ehci->head ) ); + ehci->head->chr = cpu_to_le32 ( EHCI_CHR_HEAD ); + ehci->head->cache.next = cpu_to_le32 ( EHCI_LINK_TERMINATE ); + ehci->head->cache.status = EHCI_STATUS_HALTED; + ehci_async_schedule ( ehci ); + writel ( virt_to_phys ( ehci->head ), + ehci->op + EHCI_OP_ASYNCLISTADDR ); + + /* Use async queue head to determine control data structure segment */ + ehci->ctrldssegment = + ( ( ( uint64_t ) virt_to_phys ( ehci->head ) ) >> 32 ); + if ( ehci->addr64 ) { + writel ( ehci->ctrldssegment, ehci->op + EHCI_OP_CTRLDSSEGMENT); + } else if ( ehci->ctrldssegment ) { + DBGC ( ehci, "EHCI %s CTRLDSSEGMENT not supported\n", + ehci->name ); + rc = -ENOTSUP; + goto err_ctrldssegment; + } + + /* Allocate periodic frame list */ + frames = EHCI_PERIODIC_FRAMES ( ehci->flsize ); + len = ( frames * sizeof ( ehci->frame[0] ) ); + ehci->frame = malloc_dma ( len, EHCI_PAGE_ALIGN ); + if ( ! 
ehci->frame ) { + rc = -ENOMEM; + goto err_alloc_frame; + } + if ( ( rc = ehci_ctrl_reachable ( ehci, ehci->frame ) ) != 0 ) { + DBGC ( ehci, "EHCI %s frame list unreachable\n", ehci->name ); + goto err_unreachable_frame; + } + ehci_periodic_schedule ( ehci ); + writel ( virt_to_phys ( ehci->frame ), + ehci->op + EHCI_OP_PERIODICLISTBASE ); + + /* Start controller */ + ehci_run ( ehci ); + + return 0; + + ehci_stop ( ehci ); + err_unreachable_frame: + free_dma ( ehci->frame, len ); + err_alloc_frame: + err_ctrldssegment: + free_dma ( ehci->head, sizeof ( *ehci->head ) ); + err_alloc_head: + return rc; +} + +/** + * Close USB bus + * + * @v bus USB bus + */ +static void ehci_bus_close ( struct usb_bus *bus ) { + struct ehci_device *ehci = usb_bus_get_hostdata ( bus ); + unsigned int frames = EHCI_PERIODIC_FRAMES ( ehci->flsize ); + + /* Sanity checks */ + assert ( list_empty ( &ehci->async ) ); + assert ( list_empty ( &ehci->periodic ) ); + + /* Stop controller */ + ehci_stop ( ehci ); + + /* Free periodic frame list */ + free_dma ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) ); + + /* Free asynchronous schedule */ + free_dma ( ehci->head, sizeof ( *ehci->head ) ); +} + +/** + * Poll USB bus + * + * @v bus USB bus + */ +static void ehci_bus_poll ( struct usb_bus *bus ) { + struct ehci_device *ehci = usb_bus_get_hostdata ( bus ); + struct usb_hub *hub = bus->hub; + struct ehci_endpoint *endpoint; + unsigned int i; + uint32_t usbsts; + uint32_t change; + + /* Do nothing unless something has changed */ + usbsts = readl ( ehci->op + EHCI_OP_USBSTS ); + assert ( usbsts & EHCI_USBSTS_ASYNC ); + assert ( usbsts & EHCI_USBSTS_PERIODIC ); + assert ( ! ( usbsts & EHCI_USBSTS_HCH ) ); + change = ( usbsts & EHCI_USBSTS_CHANGE ); + if ( ! 
change ) + return; + + /* Acknowledge changes */ + writel ( usbsts, ehci->op + EHCI_OP_USBSTS ); + + /* Process completions, if applicable */ + if ( change & ( EHCI_USBSTS_USBINT | EHCI_USBSTS_USBERRINT ) ) { + + /* Iterate over all endpoints looking for completed + * descriptors. We trust that completion handlers are + * minimal and will not do anything that could + * plausibly affect the endpoint list itself. + */ + list_for_each_entry ( endpoint, &ehci->endpoints, list ) + ehci_endpoint_poll ( endpoint ); + } + + /* Process port status changes, if applicable */ + if ( change & EHCI_USBSTS_PORT ) { + + /* Iterate over all ports looking for status changes */ + for ( i = 1 ; i <= ehci->ports ; i++ ) + ehci_root_poll ( hub, usb_port ( hub, i ) ); + } + + /* Report fatal errors */ + if ( change & EHCI_USBSTS_SYSERR ) + DBGC ( ehci, "EHCI %s host system error\n", ehci->name ); +} + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** USB host controller operations */ +static struct usb_host_operations ehci_operations = { + .endpoint = { + .open = ehci_endpoint_open, + .close = ehci_endpoint_close, + .reset = ehci_endpoint_reset, + .mtu = ehci_endpoint_mtu, + .message = ehci_endpoint_message, + .stream = ehci_endpoint_stream, + }, + .device = { + .open = ehci_device_open, + .close = ehci_device_close, + .address = ehci_device_address, + }, + .bus = { + .open = ehci_bus_open, + .close = ehci_bus_close, + .poll = ehci_bus_poll, + }, + .hub = { + .open = ehci_hub_open, + .close = ehci_hub_close, + }, + .root = { + .open = ehci_root_open, + .close = ehci_root_close, + .enable = ehci_root_enable, + .disable = ehci_root_disable, + .speed = ehci_root_speed, + .clear_tt = ehci_root_clear_tt, + }, +}; + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int ehci_probe ( struct pci_device 
*pci ) { + struct ehci_device *ehci; + struct usb_port *port; + unsigned long bar_start; + size_t bar_size; + unsigned int i; + int rc; + + /* Allocate and initialise structure */ + ehci = zalloc ( sizeof ( *ehci ) ); + if ( ! ehci ) { + rc = -ENOMEM; + goto err_alloc; + } + ehci->name = pci->dev.name; + INIT_LIST_HEAD ( &ehci->endpoints ); + INIT_LIST_HEAD ( &ehci->async ); + INIT_LIST_HEAD ( &ehci->periodic ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + bar_start = pci_bar_start ( pci, EHCI_BAR ); + bar_size = pci_bar_size ( pci, EHCI_BAR ); + ehci->regs = pci_ioremap ( pci, bar_start, bar_size ); + if ( ! ehci->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Initialise EHCI device */ + ehci_init ( ehci, ehci->regs ); + + /* Initialise USB legacy support and claim ownership */ + ehci_legacy_init ( ehci, pci ); + ehci_legacy_claim ( ehci, pci ); + + /* Reset device */ + if ( ( rc = ehci_reset ( ehci ) ) != 0 ) + goto err_reset; + + /* Allocate USB bus */ + ehci->bus = alloc_usb_bus ( &pci->dev, ehci->ports, EHCI_MTU, + &ehci_operations ); + if ( ! 
ehci->bus ) { + rc = -ENOMEM; + goto err_alloc_bus; + } + usb_bus_set_hostdata ( ehci->bus, ehci ); + usb_hub_set_drvdata ( ehci->bus->hub, ehci ); + + /* Set port protocols */ + for ( i = 1 ; i <= ehci->ports ; i++ ) { + port = usb_port ( ehci->bus->hub, i ); + port->protocol = USB_PROTO_2_0; + } + + /* Register USB bus */ + if ( ( rc = register_usb_bus ( ehci->bus ) ) != 0 ) + goto err_register; + + pci_set_drvdata ( pci, ehci ); + return 0; + + unregister_usb_bus ( ehci->bus ); + err_register: + free_usb_bus ( ehci->bus ); + err_alloc_bus: + ehci_reset ( ehci ); + err_reset: + ehci_legacy_release ( ehci, pci ); + iounmap ( ehci->regs ); + err_ioremap: + free ( ehci ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void ehci_remove ( struct pci_device *pci ) { + struct ehci_device *ehci = pci_get_drvdata ( pci ); + struct usb_bus *bus = ehci->bus; + + unregister_usb_bus ( bus ); + assert ( list_empty ( &ehci->async ) ); + assert ( list_empty ( &ehci->periodic ) ); + free_usb_bus ( bus ); + ehci_reset ( ehci ); + ehci_legacy_release ( ehci, pci ); + iounmap ( ehci->regs ); + free ( ehci ); +} + +/** EHCI PCI device IDs */ +static struct pci_device_id ehci_ids[] = { + PCI_ROM ( 0xffff, 0xffff, "ehci", "EHCI", 0 ), +}; + +/** EHCI PCI driver */ +struct pci_driver ehci_driver __pci_driver = { + .ids = ehci_ids, + .id_count = ( sizeof ( ehci_ids ) / sizeof ( ehci_ids[0] ) ), + .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_EHCI ), + .probe = ehci_probe, + .remove = ehci_remove, +}; + +/** + * Prepare for exit + * + * @v booting System is shutting down for OS boot + */ +static void ehci_shutdown ( int booting ) { + /* If we are shutting down to boot an OS, then prevent the + * release of ownership back to BIOS. 
+ */ + ehci_legacy_prevent_release = booting; +} + +/** Startup/shutdown function */ +struct startup_fn ehci_startup __startup_fn ( STARTUP_LATE ) = { + .name = "ehci", + .shutdown = ehci_shutdown, +}; diff --git a/src/drivers/usb/ehci.h b/src/drivers/usb/ehci.h new file mode 100644 index 00000000..42e282e9 --- /dev/null +++ b/src/drivers/usb/ehci.h @@ -0,0 +1,544 @@ +#ifndef _IPXE_EHCI_H +#define _IPXE_EHCI_H + +/** @file + * + * USB Enhanced Host Controller Interface (EHCI) driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** Minimum alignment required for data structures + * + * With the exception of the periodic frame list (which is + * page-aligned), data structures used by EHCI generally require + * 32-byte alignment and must not cross a 4kB page boundary. We + * simplify this requirement by aligning each structure on its own + * size, with a minimum of a 32 byte alignment. + */ +#define EHCI_MIN_ALIGN 32 + +/** Maximum transfer size + * + * EHCI allows for transfers of up to 20kB with page-alignment, or + * 16kB with arbitrary alignment. 
+ */ +#define EHCI_MTU 16384 + +/** Page-alignment required for some data structures */ +#define EHCI_PAGE_ALIGN 4096 + +/** EHCI PCI BAR */ +#define EHCI_BAR PCI_BASE_ADDRESS_0 + +/** Capability register length */ +#define EHCI_CAP_CAPLENGTH 0x00 + +/** Host controller interface version number */ +#define EHCI_CAP_HCIVERSION 0x02 + +/** Structural parameters */ +#define EHCI_CAP_HCSPARAMS 0x04 + +/** Number of ports */ +#define EHCI_HCSPARAMS_PORTS(params) ( ( (params) >> 0 ) & 0x0f ) + +/** Capability parameters */ +#define EHCI_CAP_HCCPARAMS 0x08 + +/** 64-bit addressing capability */ +#define EHCI_HCCPARAMS_ADDR64(params) ( ( (params) >> 0 ) & 0x1 ) + +/** Programmable frame list flag */ +#define EHCI_HCCPARAMS_FLSIZE(params) ( ( (params) >> 1 ) & 0x1 ) + +/** EHCI extended capabilities pointer */ +#define EHCI_HCCPARAMS_EECP(params) ( ( ( (params) >> 8 ) & 0xff ) ) + +/** EHCI extended capability ID */ +#define EHCI_EECP_ID(eecp) ( ( (eecp) >> 0 ) & 0xff ) + +/** Next EHCI extended capability pointer */ +#define EHCI_EECP_NEXT(eecp) ( ( ( (eecp) >> 8 ) & 0xff ) ) + +/** USB legacy support extended capability */ +#define EHCI_EECP_ID_LEGACY 1 + +/** USB legacy support BIOS owned semaphore */ +#define EHCI_USBLEGSUP_BIOS 0x02 + +/** USB legacy support BIOS ownership flag */ +#define EHCI_USBLEGSUP_BIOS_OWNED 0x01 + +/** USB legacy support OS owned semaphore */ +#define EHCI_USBLEGSUP_OS 0x03 + +/** USB legacy support OS ownership flag */ +#define EHCI_USBLEGSUP_OS_OWNED 0x01 + +/** USB legacy support control/status */ +#define EHCI_USBLEGSUP_CTLSTS 0x04 + +/** USB command register */ +#define EHCI_OP_USBCMD 0x00 + +/** Run/stop */ +#define EHCI_USBCMD_RUN 0x00000001UL + +/** Host controller reset */ +#define EHCI_USBCMD_HCRST 0x00000002UL + +/** Frame list size */ +#define EHCI_USBCMD_FLSIZE(flsize) ( (flsize) << 2 ) + +/** Frame list size mask */ +#define EHCI_USBCMD_FLSIZE_MASK EHCI_USBCMD_FLSIZE ( 3 ) + +/** Default frame list size */ +#define 
EHCI_FLSIZE_DEFAULT 0 + +/** Smallest allowed frame list size */ +#define EHCI_FLSIZE_SMALL 2 + +/** Number of elements in frame list */ +#define EHCI_PERIODIC_FRAMES(flsize) ( 1024 >> (flsize) ) + +/** Periodic schedule enable */ +#define EHCI_USBCMD_PERIODIC 0x00000010UL + +/** Asynchronous schedule enable */ +#define EHCI_USBCMD_ASYNC 0x00000020UL + +/** Asyncchronous schedule advance doorbell */ +#define EHCI_USBCMD_ASYNC_ADVANCE 0x000040UL + +/** USB status register */ +#define EHCI_OP_USBSTS 0x04 + +/** USB interrupt */ +#define EHCI_USBSTS_USBINT 0x00000001UL + +/** USB error interrupt */ +#define EHCI_USBSTS_USBERRINT 0x00000002UL + +/** Port change detect */ +#define EHCI_USBSTS_PORT 0x00000004UL + +/** Frame list rollover */ +#define EHCI_USBSTS_ROLLOVER 0x00000008UL + +/** Host system error */ +#define EHCI_USBSTS_SYSERR 0x00000010UL + +/** Asynchronous schedule advanced */ +#define EHCI_USBSTS_ASYNC_ADVANCE 0x00000020UL + +/** Periodic schedule enabled */ +#define EHCI_USBSTS_PERIODIC 0x00004000UL + +/** Asynchronous schedule enabled */ +#define EHCI_USBSTS_ASYNC 0x00008000UL + +/** Host controller halted */ +#define EHCI_USBSTS_HCH 0x00001000UL + +/** USB status change mask */ +#define EHCI_USBSTS_CHANGE \ + ( EHCI_USBSTS_USBINT | EHCI_USBSTS_USBERRINT | \ + EHCI_USBSTS_PORT | EHCI_USBSTS_ROLLOVER | \ + EHCI_USBSTS_SYSERR | EHCI_USBSTS_ASYNC_ADVANCE ) + +/** USB interrupt enable register */ +#define EHCI_OP_USBINTR 0x08 + +/** Frame index register */ +#define EHCI_OP_FRINDEX 0x0c + +/** Control data structure segment register */ +#define EHCI_OP_CTRLDSSEGMENT 0x10 + +/** Periodic frame list base address register */ +#define EHCI_OP_PERIODICLISTBASE 0x14 + +/** Current asynchronous list address register */ +#define EHCI_OP_ASYNCLISTADDR 0x18 + +/** Configure flag register */ +#define EHCI_OP_CONFIGFLAG 0x40 + +/** Configure flag */ +#define EHCI_CONFIGFLAG_CF 0x00000001UL + +/** Port status and control register */ +#define EHCI_OP_PORTSC(port) ( 0x40 + 
( (port) << 2 ) ) + +/** Current connect status */ +#define EHCI_PORTSC_CCS 0x00000001UL + +/** Connect status change */ +#define EHCI_PORTSC_CSC 0x00000002UL + +/** Port enabled */ +#define EHCI_PORTSC_PED 0x00000004UL + +/** Port enabled/disabled change */ +#define EHCI_PORTSC_PEC 0x00000008UL + +/** Over-current change */ +#define EHCI_PORTSC_OCC 0x00000020UL + +/** Port reset */ +#define EHCI_PORTSC_PR 0x00000100UL + +/** Line status */ +#define EHCI_PORTSC_LINE_STATUS(portsc) ( ( (portsc) >> 10 ) & 0x3 ) + +/** Line status: low-speed device */ +#define EHCI_PORTSC_LINE_STATUS_LOW 0x1 + +/** Port power */ +#define EHCI_PORTSC_PP 0x00001000UL + +/** Port owner */ +#define EHCI_PORTSC_OWNER 0x00002000UL + +/** Port status change mask */ +#define EHCI_PORTSC_CHANGE \ + ( EHCI_PORTSC_CSC | EHCI_PORTSC_PEC | EHCI_PORTSC_OCC ) + +/** List terminator */ +#define EHCI_LINK_TERMINATE 0x00000001UL + +/** Frame list type */ +#define EHCI_LINK_TYPE(type) ( (type) << 1 ) + +/** Queue head type */ +#define EHCI_LINK_TYPE_QH EHCI_LINK_TYPE ( 1 ) + +/** A periodic frame list entry */ +struct ehci_periodic_frame { + /** First queue head */ + uint32_t link; +} __attribute__ (( packed )); + +/** A transfer descriptor */ +struct ehci_transfer_descriptor { + /** Next transfer descriptor */ + uint32_t next; + /** Alternate next transfer descriptor */ + uint32_t alt; + /** Status */ + uint8_t status; + /** Flags */ + uint8_t flags; + /** Transfer length */ + uint16_t len; + /** Buffer pointers (low 32 bits) */ + uint32_t low[5]; + /** Extended buffer pointers (high 32 bits) */ + uint32_t high[5]; + /** Reserved */ + uint8_t reserved[12]; +} __attribute__ (( packed )); + +/** Transaction error */ +#define EHCI_STATUS_XACT_ERR 0x08 + +/** Babble detected */ +#define EHCI_STATUS_BABBLE 0x10 + +/** Data buffer error */ +#define EHCI_STATUS_BUFFER 0x20 + +/** Halted */ +#define EHCI_STATUS_HALTED 0x40 + +/** Active */ +#define EHCI_STATUS_ACTIVE 0x80 + +/** PID code */ +#define 
EHCI_FL_PID(code) ( (code) << 0 ) + +/** OUT token */ +#define EHCI_FL_PID_OUT EHCI_FL_PID ( 0 ) + +/** IN token */ +#define EHCI_FL_PID_IN EHCI_FL_PID ( 1 ) + +/** SETUP token */ +#define EHCI_FL_PID_SETUP EHCI_FL_PID ( 2 ) + +/** Error counter */ +#define EHCI_FL_CERR( count ) ( (count) << 2 ) + +/** Error counter maximum value */ +#define EHCI_FL_CERR_MAX EHCI_FL_CERR ( 3 ) + +/** Interrupt on completion */ +#define EHCI_FL_IOC 0x80 + +/** Length mask */ +#define EHCI_LEN_MASK 0x7fff + +/** Data toggle */ +#define EHCI_LEN_TOGGLE 0x8000 + +/** A queue head */ +struct ehci_queue_head { + /** Horizontal link pointer */ + uint32_t link; + /** Endpoint characteristics */ + uint32_t chr; + /** Endpoint capabilities */ + uint32_t cap; + /** Current transfer descriptor */ + uint32_t current; + /** Transfer descriptor cache */ + struct ehci_transfer_descriptor cache; +} __attribute__ (( packed )); + +/** Device address */ +#define EHCI_CHR_ADDRESS( address ) ( (address) << 0 ) + +/** Endpoint number */ +#define EHCI_CHR_ENDPOINT( address ) ( ( (address) & 0xf ) << 8 ) + +/** Endpoint speed */ +#define EHCI_CHR_EPS( eps ) ( (eps) << 12 ) + +/** Full-speed endpoint */ +#define EHCI_CHR_EPS_FULL EHCI_CHR_EPS ( 0 ) + +/** Low-speed endpoint */ +#define EHCI_CHR_EPS_LOW EHCI_CHR_EPS ( 1 ) + +/** High-speed endpoint */ +#define EHCI_CHR_EPS_HIGH EHCI_CHR_EPS ( 2 ) + +/** Explicit data toggles */ +#define EHCI_CHR_TOGGLE 0x00004000UL + +/** Head of reclamation list flag */ +#define EHCI_CHR_HEAD 0x00008000UL + +/** Maximum packet length */ +#define EHCI_CHR_MAX_LEN( len ) ( (len) << 16 ) + +/** Control endpoint flag */ +#define EHCI_CHR_CONTROL 0x08000000UL + +/** Interrupt schedule mask */ +#define EHCI_CAP_INTR_SCHED( uframe ) ( 1 << ( (uframe) + 0 ) ) + +/** Split completion schedule mask */ +#define EHCI_CAP_SPLIT_SCHED( uframe ) ( 1 << ( (uframe) + 8 ) ) + +/** Default split completion schedule mask + * + * We schedule all split starts in microframe 0, on the assumption + 
* that we will never have to deal with more than sixteen actively + * interrupting devices via the same transaction translator. We + * schedule split completions for all remaining microframes after + * microframe 1 (in which the low-speed or full-speed transaction is + * assumed to execute). This is a very crude approximation designed + * to avoid the need for calculating exactly when low-speed and + * full-speed transactions will execute. Since we only ever deal with + * interrupt endpoints (rather than isochronous endpoints), the volume + * of periodic traffic is extremely low, and this approximation should + * remain valid. + */ +#define EHCI_CAP_SPLIT_SCHED_DEFAULT \ + ( EHCI_CAP_SPLIT_SCHED ( 2 ) | EHCI_CAP_SPLIT_SCHED ( 3 ) | \ + EHCI_CAP_SPLIT_SCHED ( 4 ) | EHCI_CAP_SPLIT_SCHED ( 5 ) | \ + EHCI_CAP_SPLIT_SCHED ( 6 ) | EHCI_CAP_SPLIT_SCHED ( 7 ) ) + +/** Transaction translator hub address */ +#define EHCI_CAP_TT_HUB( address ) ( (address) << 16 ) + +/** Transaction translator port number */ +#define EHCI_CAP_TT_PORT( port ) ( (port) << 23 ) + +/** High-bandwidth pipe multiplier */ +#define EHCI_CAP_MULT( mult ) ( (mult) << 30 ) + +/** A transfer descriptor ring */ +struct ehci_ring { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + + /** Residual untransferred data */ + size_t residual; + + /** I/O buffers */ + struct io_buffer **iobuf; + + /** Queue head */ + struct ehci_queue_head *head; + /** Transfer descriptors */ + struct ehci_transfer_descriptor *desc; +}; + +/** Number of transfer descriptors in a ring + * + * This is a policy decision. 
+ */ +#define EHCI_RING_COUNT 64 + +/** + * Calculate space used in transfer descriptor ring + * + * @v ring Transfer descriptor ring + * @ret fill Number of entries used + */ +static inline __attribute__ (( always_inline )) unsigned int +ehci_ring_fill ( struct ehci_ring *ring ) { + unsigned int fill; + + fill = ( ring->prod - ring->cons ); + assert ( fill <= EHCI_RING_COUNT ); + return fill; +} + +/** + * Calculate space remaining in transfer descriptor ring + * + * @v ring Transfer descriptor ring + * @ret remaining Number of entries remaining + */ +static inline __attribute__ (( always_inline )) unsigned int +ehci_ring_remaining ( struct ehci_ring *ring ) { + unsigned int fill = ehci_ring_fill ( ring ); + + return ( EHCI_RING_COUNT - fill ); +} + +/** Time to delay after enabling power to a port + * + * This is not mandated by EHCI; we use the value given for xHCI. + */ +#define EHCI_PORT_POWER_DELAY_MS 20 + +/** Time to delay after releasing ownership of a port + * + * This is a policy decision. + */ +#define EHCI_DISOWN_DELAY_MS 100 + +/** Maximum time to wait for BIOS to release ownership + * + * This is a policy decision. + */ +#define EHCI_USBLEGSUP_MAX_WAIT_MS 100 + +/** Maximum time to wait for asynchronous schedule to advance + * + * This is a policy decision. + */ +#define EHCI_ASYNC_ADVANCE_MAX_WAIT_MS 100 + +/** Maximum time to wait for host controller to stop + * + * This is a policy decision. + */ +#define EHCI_STOP_MAX_WAIT_MS 100 + +/** Maximum time to wait for reset to complete + * + * This is a policy decision. + */ +#define EHCI_RESET_MAX_WAIT_MS 500 + +/** Maximum time to wait for a port reset to complete + * + * This is a policy decision. + */ +#define EHCI_PORT_RESET_MAX_WAIT_MS 500 + +/** An EHCI transfer */ +struct ehci_transfer { + /** Data buffer */ + void *data; + /** Length */ + size_t len; + /** Flags + * + * This is the bitwise OR of zero or more EHCI_FL_XXX values. 
+ * The low 8 bits are copied to the flags byte within the + * transfer descriptor; the remaining bits hold flags + * meaningful only to our driver code. + */ + unsigned int flags; +}; + +/** Set initial data toggle */ +#define EHCI_FL_TOGGLE 0x8000 + +/** An EHCI device */ +struct ehci_device { + /** Registers */ + void *regs; + /** Name */ + const char *name; + + /** Capability registers */ + void *cap; + /** Operational registers */ + void *op; + + /** Number of ports */ + unsigned int ports; + /** 64-bit addressing capability */ + int addr64; + /** Frame list size */ + unsigned int flsize; + /** EHCI extended capabilities offset */ + unsigned int eecp; + + /** USB legacy support capability (if present and enabled) */ + unsigned int legacy; + + /** Control data structure segment */ + uint32_t ctrldssegment; + /** Asynchronous queue head */ + struct ehci_queue_head *head; + /** Periodic frame list */ + struct ehci_periodic_frame *frame; + + /** List of all endpoints */ + struct list_head endpoints; + /** Asynchronous schedule */ + struct list_head async; + /** Periodic schedule + * + * Listed in decreasing order of endpoint interval. + */ + struct list_head periodic; + + /** USB bus */ + struct usb_bus *bus; +}; + +/** An EHCI endpoint */ +struct ehci_endpoint { + /** EHCI device */ + struct ehci_device *ehci; + /** USB endpoint */ + struct usb_endpoint *ep; + /** List of all endpoints */ + struct list_head list; + /** Endpoint schedule */ + struct list_head schedule; + + /** Transfer descriptor ring */ + struct ehci_ring ring; +}; + +extern unsigned int ehci_companion ( struct pci_device *pci ); + +#endif /* _IPXE_EHCI_H */ diff --git a/src/drivers/usb/uhci.c b/src/drivers/usb/uhci.c new file mode 100644 index 00000000..ce2962d3 --- /dev/null +++ b/src/drivers/usb/uhci.c @@ -0,0 +1,1570 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include "ehci.h" +#include "uhci.h" + +/** @file + * + * USB Universal Host Controller Interface (UHCI) driver + * + */ + +/****************************************************************************** + * + * Register access + * + ****************************************************************************** + */ + +/** + * Check that address is reachable + * + * @v addr Address + * @v len Length + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline)) int +uhci_reachable ( void *addr, size_t len ) { + physaddr_t phys = virt_to_phys ( addr ); + + /* Always reachable in a 32-bit build */ + if ( sizeof ( physaddr_t ) <= sizeof ( uint32_t ) ) + return 0; + + /* Reachable if below 4GB */ + if ( ( ( phys + len - 1 ) & ~0xffffffffULL ) == 0 ) + return 0; + + return -ENOTSUP; +} + +/****************************************************************************** + * + * Run / stop / reset + * 
+ ****************************************************************************** + */ + +/** + * Start UHCI device + * + * @v uhci UHCI device + */ +static void uhci_run ( struct uhci_device *uhci ) { + uint16_t usbcmd; + + /* Set run/stop bit */ + usbcmd = inw ( uhci->regs + UHCI_USBCMD ); + usbcmd |= ( UHCI_USBCMD_RUN | UHCI_USBCMD_MAX64 ); + outw ( usbcmd, uhci->regs + UHCI_USBCMD ); +} + +/** + * Stop UHCI device + * + * @v uhci UHCI device + * @ret rc Return status code + */ +static int uhci_stop ( struct uhci_device *uhci ) { + uint16_t usbcmd; + uint16_t usbsts; + unsigned int i; + + /* Clear run/stop bit */ + usbcmd = inw ( uhci->regs + UHCI_USBCMD ); + usbcmd &= ~UHCI_USBCMD_RUN; + outw ( usbcmd, uhci->regs + UHCI_USBCMD ); + + /* Wait for device to stop */ + for ( i = 0 ; i < UHCI_STOP_MAX_WAIT_MS ; i++ ) { + + /* Check if device is stopped */ + usbsts = inw ( uhci->regs + UHCI_USBSTS ); + if ( usbsts & UHCI_USBSTS_HCHALTED ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( uhci, "UHCI %s timed out waiting for stop\n", uhci->name ); + return -ETIMEDOUT; +} + +/** + * Reset UHCI device + * + * @v uhci UHCI device + * @ret rc Return status code + */ +static int uhci_reset ( struct uhci_device *uhci ) { + uint16_t usbcmd; + unsigned int i; + int rc; + + /* The UHCI specification states that resetting a running + * device may result in undefined behaviour, so try stopping + * it first. + */ + if ( ( rc = uhci_stop ( uhci ) ) != 0 ) { + /* Ignore errors and attempt to reset the device anyway */ + } + + /* Reset device */ + outw ( UHCI_USBCMD_HCRESET, uhci->regs + UHCI_USBCMD ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < UHCI_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if reset is complete */ + usbcmd = inw ( uhci->regs + UHCI_USBCMD ); + if ( ! 
( usbcmd & UHCI_USBCMD_HCRESET ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( uhci, "UHCI %s timed out waiting for reset\n", uhci->name ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Transfer descriptor rings + * + ****************************************************************************** + */ + +/** + * Allocate transfer ring + * + * @v ring Transfer ring + * @ret rc Return status code + */ +static int uhci_ring_alloc ( struct uhci_ring *ring ) { + int rc; + + /* Initialise structure */ + memset ( ring, 0, sizeof ( *ring ) ); + + /* Allocate queue head */ + ring->head = malloc_dma ( sizeof ( *ring->head ), UHCI_ALIGN ); + if ( ! ring->head ) { + rc = -ENOMEM; + goto err_alloc; + } + if ( ( rc = uhci_reachable ( ring->head, + sizeof ( *ring->head ) ) ) != 0 ) + goto err_unreachable; + + /* Initialise queue head */ + ring->head->current = cpu_to_le32 ( UHCI_LINK_TERMINATE ); + + return 0; + + err_unreachable: + free_dma ( ring->head, sizeof ( *ring->head ) ); + err_alloc: + return rc; +} + +/** + * Free transfer ring + * + * @v ring Transfer ring + */ +static void uhci_ring_free ( struct uhci_ring *ring ) { + unsigned int i; + + /* Sanity checks */ + assert ( uhci_ring_fill ( ring ) == 0 ); + for ( i = 0 ; i < UHCI_RING_COUNT ; i++ ) + assert ( ring->xfer[i] == NULL ); + + /* Free queue head */ + free_dma ( ring->head, sizeof ( *ring->head ) ); +} + +/** + * Enqueue new transfer + * + * @v ring Transfer ring + * @v iobuf I/O buffer + * @v count Number of descriptors + * @ret rc Return status code + */ +static int uhci_enqueue ( struct uhci_ring *ring, struct io_buffer *iobuf, + unsigned int count ) { + struct uhci_transfer *xfer; + struct uhci_transfer *end; + struct uhci_transfer_descriptor *desc; + unsigned int index = ( ring->prod % UHCI_RING_COUNT ); + uint32_t link; + size_t len; + int rc; + + /* Sanity check */ + assert ( count > 0 ); + assert ( iobuf != NULL ); + + /* 
Check for space in ring */ + if ( ! uhci_ring_remaining ( ring ) ) { + rc = -ENOBUFS; + goto err_ring_full; + } + + /* Check for reachability of I/O buffer */ + if ( ( rc = uhci_reachable ( iobuf->data, iob_len ( iobuf ) ) ) != 0 ) + goto err_unreachable_iobuf; + + /* Allocate transfer */ + xfer = malloc ( sizeof ( *xfer ) ); + if ( ! xfer ) { + rc = -ENOMEM; + goto err_alloc_xfer; + } + + /* Initialise transfer */ + xfer->prod = 0; + xfer->cons = 0; + xfer->len = 0; + xfer->iobuf = iobuf; + + /* Allocate transfer descriptors */ + len = ( count * sizeof ( xfer->desc[0] ) ); + xfer->desc = malloc_dma ( len, UHCI_ALIGN ); + if ( ! xfer->desc ) { + rc = -ENOMEM; + goto err_alloc_desc; + } + if ( ( rc = uhci_reachable ( xfer->desc, len ) ) != 0 ) + goto err_unreachable_desc; + + /* Initialise transfer descriptors */ + memset ( xfer->desc, 0, len ); + desc = xfer->desc; + for ( ; --count ; desc++ ) { + link = ( virt_to_phys ( desc + 1 ) | UHCI_LINK_DEPTH_FIRST ); + desc->link = cpu_to_le32 ( link ); + desc->flags = ring->flags; + } + desc->link = cpu_to_le32 ( UHCI_LINK_TERMINATE ); + desc->flags = ( ring->flags | UHCI_FL_IOC ); + + /* Add to ring */ + wmb(); + link = virt_to_phys ( xfer->desc ); + if ( uhci_ring_fill ( ring ) > 0 ) { + end = ring->end; + end->desc[ end->prod - 1 ].link = cpu_to_le32 ( link ); + } else { + ring->head->current = cpu_to_le32 ( link ); + } + assert ( ring->xfer[index] == NULL ); + ring->xfer[index] = xfer; + ring->end = xfer; + ring->prod++; + + return 0; + + err_unreachable_desc: + free_dma ( xfer->desc, len ); + err_alloc_desc: + free ( xfer ); + err_alloc_xfer: + err_unreachable_iobuf: + err_ring_full: + return rc; +} + +/** + * Describe transfer + * + * @v ring Transfer ring + * @v data Data + * @v len Length of data + * @v pid Packet ID + */ +static void uhci_describe ( struct uhci_ring *ring, void *data, + size_t len, uint8_t pid ) { + struct uhci_transfer *xfer = ring->end; + struct uhci_transfer_descriptor *desc; + size_t frag_len; 
+ uint32_t control; + + do { + /* Calculate fragment length */ + frag_len = len; + if ( frag_len > ring->mtu ) + frag_len = ring->mtu; + + /* Populate descriptor */ + desc = &xfer->desc[xfer->prod++]; + if ( pid == USB_PID_IN ) + desc->flags |= UHCI_FL_SPD; + control = ( ring->control | UHCI_CONTROL_PID ( pid ) | + UHCI_CONTROL_LEN ( frag_len ) ); + desc->control = cpu_to_le32 ( control ); + if ( data ) + desc->data = virt_to_phys ( data ); + wmb(); + desc->status = UHCI_STATUS_ACTIVE; + + /* Update data toggle */ + ring->control ^= UHCI_CONTROL_TOGGLE; + + /* Move to next descriptor */ + data += frag_len; + len -= frag_len; + + } while ( len ); +} + +/** + * Dequeue transfer + * + * @v ring Transfer ring + * @ret iobuf I/O buffer + */ +static struct io_buffer * uhci_dequeue ( struct uhci_ring *ring ) { + unsigned int index = ( ring->cons % UHCI_RING_COUNT ); + struct io_buffer *iobuf; + struct uhci_transfer *xfer; + size_t len; + + /* Sanity checks */ + assert ( uhci_ring_fill ( ring ) > 0 ); + + /* Consume transfer */ + xfer = ring->xfer[index]; + assert ( xfer != NULL ); + assert ( xfer->desc != NULL ); + iobuf = xfer->iobuf; + assert ( iobuf != NULL ); + ring->xfer[index] = NULL; + ring->cons++; + + /* Free transfer descriptors */ + len = ( xfer->prod * sizeof ( xfer->desc[0] ) ); + free_dma ( xfer->desc, len ); + + /* Free transfer */ + free ( xfer ); + + return iobuf; +} + +/** + * Restart ring + * + * @v ring Transfer ring + * @v toggle Expected data toggle for next descriptor + */ +static void uhci_restart ( struct uhci_ring *ring, uint32_t toggle ) { + struct uhci_transfer *xfer; + struct uhci_transfer_descriptor *desc; + struct uhci_transfer_descriptor *first; + uint32_t link; + unsigned int i; + unsigned int j; + + /* Sanity check */ + assert ( ring->head->current == cpu_to_le32 ( UHCI_LINK_TERMINATE ) ); + + /* If ring is empty, then just update the data toggle for the + * next descriptor. 
+ */ + if ( uhci_ring_fill ( ring ) == 0 ) { + ring->control &= ~UHCI_CONTROL_TOGGLE; + ring->control |= toggle; + return; + } + + /* If expected toggle does not match the toggle in the first + * unconsumed descriptor, then invert all toggles. + */ + xfer = ring->xfer[ ring->cons % UHCI_RING_COUNT ]; + assert ( xfer != NULL ); + assert ( xfer->cons == 0 ); + first = &xfer->desc[0]; + if ( ( le32_to_cpu ( first->control ) ^ toggle ) & UHCI_CONTROL_TOGGLE){ + + /* Invert toggle on all unconsumed transfer descriptors */ + for ( i = ring->cons ; i != ring->prod ; i++ ) { + xfer = ring->xfer[ i % UHCI_RING_COUNT ]; + assert ( xfer != NULL ); + assert ( xfer->cons == 0 ); + for ( j = 0 ; j < xfer->prod ; j++ ) { + desc = &xfer->desc[j]; + desc->control ^= + cpu_to_le32 ( UHCI_CONTROL_TOGGLE ); + } + } + + /* Invert toggle for next descriptor to be enqueued */ + ring->control ^= UHCI_CONTROL_TOGGLE; + } + + /* Restart ring at first unconsumed transfer */ + link = virt_to_phys ( first ); + wmb(); + ring->head->current = cpu_to_le32 ( link ); +} + +/****************************************************************************** + * + * Schedule management + * + ****************************************************************************** + */ + +/** + * Get link value for a queue head + * + * @v queue Queue head + * @ret link Link value + */ +static inline uint32_t uhci_link_qh ( struct uhci_queue_head *queue ) { + + return ( virt_to_phys ( queue ) | UHCI_LINK_TYPE_QH ); +} + +/** + * (Re)build asynchronous schedule + * + * @v uhci UHCI device + */ +static void uhci_async_schedule ( struct uhci_device *uhci ) { + struct uhci_endpoint *endpoint; + struct uhci_queue_head *queue; + uint32_t end; + uint32_t link; + + /* Build schedule in reverse order of execution. Provided + * that we only ever add or remove single endpoints, this can + * safely run concurrently with hardware execution of the + * schedule. 
+ */ + link = end = uhci_link_qh ( uhci->head ); + list_for_each_entry_reverse ( endpoint, &uhci->async, schedule ) { + queue = endpoint->ring.head; + queue->link = cpu_to_le32 ( link ); + wmb(); + link = uhci_link_qh ( queue ); + } + if ( link == end ) + link = UHCI_LINK_TERMINATE; + uhci->head->link = cpu_to_le32 ( link ); + wmb(); +} + +/** + * Add endpoint to asynchronous schedule + * + * @v endpoint Endpoint + */ +static void uhci_async_add ( struct uhci_endpoint *endpoint ) { + struct uhci_device *uhci = endpoint->uhci; + + /* Add to end of schedule */ + list_add_tail ( &endpoint->schedule, &uhci->async ); + + /* Rebuild schedule */ + uhci_async_schedule ( uhci ); +} + +/** + * Remove endpoint from asynchronous schedule + * + * @v endpoint Endpoint + */ +static void uhci_async_del ( struct uhci_endpoint *endpoint ) { + struct uhci_device *uhci = endpoint->uhci; + + /* Remove from schedule */ + list_check_contains_entry ( endpoint, &uhci->async, schedule ); + list_del ( &endpoint->schedule ); + + /* Rebuild schedule */ + uhci_async_schedule ( uhci ); + + /* Delay for a whole USB frame (with a 100% safety margin) */ + mdelay ( 2 ); +} + +/** + * (Re)build periodic schedule + * + * @v uhci UHCI device + */ +static void uhci_periodic_schedule ( struct uhci_device *uhci ) { + struct uhci_endpoint *endpoint; + struct uhci_queue_head *queue; + uint32_t link; + uint32_t end; + unsigned int max_interval; + unsigned int i; + + /* Build schedule in reverse order of execution. Provided + * that we only ever add or remove single endpoints, this can + * safely run concurrently with hardware execution of the + * schedule. + */ + DBGCP ( uhci, "UHCI %s periodic schedule: ", uhci->name ); + link = end = uhci_link_qh ( uhci->head ); + list_for_each_entry_reverse ( endpoint, &uhci->periodic, schedule ) { + queue = endpoint->ring.head; + queue->link = cpu_to_le32 ( link ); + wmb(); + DBGCP ( uhci, "%s%d", ( ( link == end ) ? 
"" : "<-" ), + endpoint->ep->interval ); + link = uhci_link_qh ( queue ); + } + DBGCP ( uhci, "\n" ); + + /* Populate periodic frame list */ + DBGCP ( uhci, "UHCI %s periodic frame list:", uhci->name ); + for ( i = 0 ; i < UHCI_FRAMES ; i++ ) { + + /* Calculate maximum interval (in microframes) which + * may appear as part of this frame list. + */ + if ( i == 0 ) { + /* Start of list: include all endpoints */ + max_interval = -1U; + } else { + /* Calculate highest power-of-two frame interval */ + max_interval = ( 1 << ( ffs ( i ) - 1 ) ); + /* Convert to microframes */ + max_interval <<= 3; + /* Round up to nearest 2^n-1 */ + max_interval = ( ( max_interval << 1 ) - 1 ); + } + + /* Find first endpoint in schedule satisfying this + * maximum interval constraint. + */ + link = uhci_link_qh ( uhci->head ); + list_for_each_entry ( endpoint, &uhci->periodic, schedule ) { + if ( endpoint->ep->interval <= max_interval ) { + queue = endpoint->ring.head; + link = uhci_link_qh ( queue ); + DBGCP ( uhci, " %d:%d", + i, endpoint->ep->interval ); + break; + } + } + uhci->frame->link[i] = cpu_to_le32 ( link ); + } + wmb(); + DBGCP ( uhci, "\n" ); +} + +/** + * Add endpoint to periodic schedule + * + * @v endpoint Endpoint + */ +static void uhci_periodic_add ( struct uhci_endpoint *endpoint ) { + struct uhci_device *uhci = endpoint->uhci; + struct uhci_endpoint *before; + unsigned int interval = endpoint->ep->interval; + + /* Find first endpoint with a smaller interval */ + list_for_each_entry ( before, &uhci->periodic, schedule ) { + if ( before->ep->interval < interval ) + break; + } + list_add_tail ( &endpoint->schedule, &before->schedule ); + + /* Rebuild schedule */ + uhci_periodic_schedule ( uhci ); +} + +/** + * Remove endpoint from periodic schedule + * + * @v endpoint Endpoint + */ +static void uhci_periodic_del ( struct uhci_endpoint *endpoint ) { + struct uhci_device *uhci = endpoint->uhci; + + /* Remove from schedule */ + list_check_contains_entry ( endpoint, 
&uhci->periodic, schedule ); + list_del ( &endpoint->schedule ); + + /* Rebuild schedule */ + uhci_periodic_schedule ( uhci ); + + /* Delay for a whole USB frame (with a 100% safety margin) */ + mdelay ( 2 ); +} + +/** + * Add endpoint to appropriate schedule + * + * @v endpoint Endpoint + */ +static void uhci_schedule_add ( struct uhci_endpoint *endpoint ) { + struct usb_endpoint *ep = endpoint->ep; + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + + if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) { + uhci_periodic_add ( endpoint ); + } else { + uhci_async_add ( endpoint ); + } +} + +/** + * Remove endpoint from appropriate schedule + * + * @v endpoint Endpoint + */ +static void uhci_schedule_del ( struct uhci_endpoint *endpoint ) { + struct usb_endpoint *ep = endpoint->ep; + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + + if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) { + uhci_periodic_del ( endpoint ); + } else { + uhci_async_del ( endpoint ); + } +} + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Open endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int uhci_endpoint_open ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + struct uhci_device *uhci = usb_get_hostdata ( usb ); + struct uhci_endpoint *endpoint; + int rc; + + /* Allocate and initialise structure */ + endpoint = zalloc ( sizeof ( *endpoint ) ); + if ( ! 
endpoint ) { + rc = -ENOMEM; + goto err_alloc; + } + endpoint->uhci = uhci; + endpoint->ep = ep; + usb_endpoint_set_hostdata ( ep, endpoint ); + + /* Initialise descriptor ring */ + if ( ( rc = uhci_ring_alloc ( &endpoint->ring ) ) != 0 ) + goto err_ring_alloc; + endpoint->ring.mtu = ep->mtu; + endpoint->ring.flags = UHCI_FL_CERR_MAX; + if ( usb->speed < USB_SPEED_FULL ) + endpoint->ring.flags |= UHCI_FL_LS; + endpoint->ring.control = ( UHCI_CONTROL_DEVICE ( usb->address ) | + UHCI_CONTROL_ENDPOINT ( ep->address ) ); + + /* Add to list of endpoints */ + list_add_tail ( &endpoint->list, &uhci->endpoints ); + + /* Add to schedule */ + uhci_schedule_add ( endpoint ); + + return 0; + + uhci_ring_free ( &endpoint->ring ); + err_ring_alloc: + free ( endpoint ); + err_alloc: + return rc; +} + +/** + * Close endpoint + * + * @v ep USB endpoint + */ +static void uhci_endpoint_close ( struct usb_endpoint *ep ) { + struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct io_buffer *iobuf; + + /* Remove from schedule */ + uhci_schedule_del ( endpoint ); + + /* Cancel any incomplete transfers */ + while ( uhci_ring_fill ( &endpoint->ring ) ) { + iobuf = uhci_dequeue ( &endpoint->ring ); + if ( iobuf ) + usb_complete_err ( ep, iobuf, -ECANCELED ); + } + + /* Remove from list of endpoints */ + list_del ( &endpoint->list ); + + /* Free descriptor ring */ + uhci_ring_free ( &endpoint->ring ); + + /* Free endpoint */ + free ( endpoint ); +} + +/** + * Reset endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int uhci_endpoint_reset ( struct usb_endpoint *ep ) { + struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct uhci_ring *ring = &endpoint->ring; + + /* Restart ring */ + uhci_restart ( ring, 0 ); + + return 0; +} + +/** + * Update MTU + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int uhci_endpoint_mtu ( struct usb_endpoint *ep ) { + struct uhci_endpoint *endpoint = 
usb_endpoint_get_hostdata ( ep ); + + /* Update endpoint MTU */ + endpoint->ring.mtu = ep->mtu; + + return 0; +} + +/** + * Enqueue message transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int uhci_endpoint_message ( struct usb_endpoint *ep, + struct io_buffer *iobuf ) { + struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct uhci_ring *ring = &endpoint->ring; + struct usb_setup_packet *packet; + unsigned int count; + size_t len; + int input; + int rc; + + /* Calculate number of descriptors */ + assert ( iob_len ( iobuf ) >= sizeof ( *packet ) ); + len = ( iob_len ( iobuf ) - sizeof ( *packet ) ); + count = ( 1 /* setup stage */ + + ( ( len + ring->mtu - 1 ) / ring->mtu ) /* data stage */ + + 1 /* status stage */ ); + + /* Enqueue transfer */ + if ( ( rc = uhci_enqueue ( ring, iobuf, count ) ) != 0 ) + return rc; + + /* Describe setup stage */ + packet = iobuf->data; + ring->control &= ~UHCI_CONTROL_TOGGLE; + uhci_describe ( ring, packet, sizeof ( *packet ), USB_PID_SETUP ); + iob_pull ( iobuf, sizeof ( *packet ) ); + + /* Describe data stage, if applicable */ + assert ( ring->control & UHCI_CONTROL_TOGGLE ); + input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) ); + if ( len ) { + uhci_describe ( ring, iobuf->data, len, + ( input ? USB_PID_IN : USB_PID_OUT ) ); + } + + /* Describe status stage */ + ring->control |= UHCI_CONTROL_TOGGLE; + uhci_describe ( ring, NULL, 0, + ( ( len && input ) ? 
USB_PID_OUT : USB_PID_IN ) ); + + /* Sanity check */ + assert ( ring->end->prod == count ); + + return 0; +} + +/** + * Enqueue stream transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v zlp Append a zero-length packet + * @ret rc Return status code + */ +static int uhci_endpoint_stream ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int zlp ) { + struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct uhci_ring *ring = &endpoint->ring; + unsigned int count; + size_t len; + int input; + int rc; + + /* Calculate number of descriptors */ + len = iob_len ( iobuf ); + count = ( ( ( len + ring->mtu - 1 ) / ring->mtu ) + ( zlp ? 1 : 0 ) ); + + /* Enqueue transfer */ + if ( ( rc = uhci_enqueue ( ring, iobuf, count ) ) != 0 ) + return rc; + + /* Describe data packet */ + input = ( ep->address & USB_DIR_IN ); + uhci_describe ( ring, iobuf->data, len, + ( input ? USB_PID_IN : USB_PID_OUT ) ); + + /* Describe zero-length packet, if applicable */ + if ( zlp ) + uhci_describe ( ring, NULL, 0, USB_PID_OUT ); + + /* Sanity check */ + assert ( ring->end->prod == count ); + + return 0; +} + +/** + * Check if transfer is a message transfer + * + * @v xfer UHCI transfer + * @ret is_message Transfer is a message transfer + */ +static inline int uhci_is_message ( struct uhci_transfer *xfer ) { + struct uhci_transfer_descriptor *desc = &xfer->desc[0]; + + return ( ( desc->control & cpu_to_le32 ( UHCI_CONTROL_PID_MASK ) ) == + cpu_to_le32 ( UHCI_CONTROL_PID ( USB_PID_SETUP ) ) ); +} + +/** + * Poll for completions + * + * @v endpoint Endpoint + */ +static void uhci_endpoint_poll ( struct uhci_endpoint *endpoint ) { + struct uhci_ring *ring = &endpoint->ring; + struct uhci_device *uhci = endpoint->uhci; + struct usb_endpoint *ep = endpoint->ep; + struct usb_device *usb = ep->usb; + struct uhci_transfer *xfer; + struct uhci_transfer_descriptor *desc; + struct io_buffer *iobuf; + unsigned int index; + uint32_t link; + uint32_t toggle; + uint32_t 
control; + uint16_t actual; + size_t len; + + /* Consume all completed descriptors */ + while ( uhci_ring_fill ( ring ) ) { + + /* Stop if we reach an uncompleted descriptor */ + index = ( ring->cons % UHCI_RING_COUNT ); + xfer = ring->xfer[index]; + assert ( xfer != NULL ); + assert ( xfer->cons < xfer->prod ); + desc = &xfer->desc[xfer->cons]; + rmb(); + if ( desc->status & UHCI_STATUS_ACTIVE ) + break; + control = le32_to_cpu ( desc->control ); + actual = le16_to_cpu ( desc->actual ); + + /* Update data length, if applicable */ + if ( UHCI_DATA_PACKET ( control ) ) + xfer->len += UHCI_ACTUAL_LEN ( actual ); + + /* If we have encountered an error, then deactivate + * the queue head (to prevent further hardware + * accesses to this transfer), consume the transfer, + * and report the error to the USB core. + */ + if ( desc->status & UHCI_STATUS_STALLED ) { + DBGC ( uhci, "UHCI %s %s completion %d.%d failed " + "(status %02x)\n", usb->name, + usb_endpoint_name ( ep ), index, + xfer->cons, desc->status ); + link = UHCI_LINK_TERMINATE; + ring->head->current = cpu_to_le32 ( link ); + wmb(); + iobuf = uhci_dequeue ( ring ); + usb_complete_err ( ep, iobuf, -EIO ); + break; + } + + /* Consume this descriptor */ + xfer->cons++; + + /* Check for short packets */ + if ( UHCI_SHORT_PACKET ( control, actual ) ) { + + /* Sanity checks */ + assert ( desc->flags & UHCI_FL_SPD ); + link = virt_to_phys ( desc ); + assert ( ( le32_to_cpu ( ring->head->current ) & + ~( UHCI_ALIGN - 1 ) ) == link ); + + /* If this is a message transfer, then restart + * at the status stage. + */ + if ( uhci_is_message ( xfer ) ) { + xfer->cons = ( xfer->prod - 1 ); + link = virt_to_phys ( &xfer->desc[xfer->cons] ); + ring->head->current = cpu_to_le32 ( link ); + break; + } + + /* Otherwise, this is a stream transfer. + * First, prevent further hardware access to + * this transfer. 
+ */ + link = UHCI_LINK_TERMINATE; + ring->head->current = cpu_to_le32 ( link ); + wmb(); + + /* Determine expected data toggle for next descriptor */ + toggle = ( ( control ^ UHCI_CONTROL_TOGGLE ) & + UHCI_CONTROL_TOGGLE ); + + /* Consume this transfer */ + len = xfer->len; + iobuf = uhci_dequeue ( ring ); + + /* Update packet length */ + assert ( len <= iob_len ( iobuf ) ); + iob_unput ( iobuf, ( iob_len ( iobuf ) - len ) ); + + /* Restart ring */ + uhci_restart ( ring, toggle ); + + } else if ( xfer->cons == xfer->prod ) { + + /* Completed a transfer: consume it */ + len = xfer->len; + iobuf = uhci_dequeue ( ring ); + assert ( len == iob_len ( iobuf ) ); + + } else { + + /* Not a short packet and not yet complete: + * continue processing. + */ + continue; + } + + /* Report completion to USB core */ + usb_complete ( ep, iobuf ); + } +} + +/****************************************************************************** + * + * Device operations + * + ****************************************************************************** + */ + +/** + * Open device + * + * @v usb USB device + * @ret rc Return status code + */ +static int uhci_device_open ( struct usb_device *usb ) { + struct uhci_device *uhci = usb_bus_get_hostdata ( usb->port->hub->bus ); + + usb_set_hostdata ( usb, uhci ); + return 0; +} + +/** + * Close device + * + * @v usb USB device + */ +static void uhci_device_close ( struct usb_device *usb ) { + struct uhci_device *uhci = usb_get_hostdata ( usb ); + struct usb_bus *bus = uhci->bus; + + /* Free device address, if assigned */ + if ( usb->address ) + usb_free_address ( bus, usb->address ); +} + +/** + * Assign device address + * + * @v usb USB device + * @ret rc Return status code + */ +static int uhci_device_address ( struct usb_device *usb ) { + struct uhci_device *uhci = usb_get_hostdata ( usb ); + struct usb_bus *bus = uhci->bus; + struct usb_endpoint *ep0 = usb_endpoint ( usb, USB_EP0_ADDRESS ); + struct uhci_endpoint *endpoint0 = 
usb_endpoint_get_hostdata ( ep0 ); + int address; + int rc; + + /* Sanity checks */ + assert ( usb->address == 0 ); + assert ( ep0 != NULL ); + + /* Allocate device address */ + address = usb_alloc_address ( bus ); + if ( address < 0 ) { + rc = address; + DBGC ( uhci, "UHCI %s could not allocate address: %s\n", + usb->name, strerror ( rc ) ); + goto err_alloc_address; + } + + /* Set address */ + if ( ( rc = usb_set_address ( usb, address ) ) != 0 ) + goto err_set_address; + + /* Update device address */ + usb->address = address; + endpoint0->ring.control |= UHCI_CONTROL_DEVICE ( address ); + + return 0; + + err_set_address: + usb_free_address ( bus, address ); + err_alloc_address: + return rc; +} + +/****************************************************************************** + * + * Hub operations + * + ****************************************************************************** + */ + +/** + * Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int uhci_hub_open ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close hub + * + * @v hub USB hub + */ +static void uhci_hub_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/****************************************************************************** + * + * Root hub operations + * + ****************************************************************************** + */ + +/** + * Open root hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int uhci_root_open ( struct usb_hub *hub __unused) { + + /* Nothing to do */ + return 0; +} + +/** + * Close root hub + * + * @v hub USB hub + */ +static void uhci_root_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/** + * Enable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int uhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) { + struct uhci_device *uhci = usb_hub_get_drvdata ( hub ); + uint16_t 
portsc; + unsigned int i; + + /* Reset port */ + portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) ); + portsc |= UHCI_PORTSC_PR; + outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) ); + mdelay ( USB_RESET_DELAY_MS ); + portsc &= ~UHCI_PORTSC_PR; + outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) ); + mdelay ( USB_RESET_RECOVER_DELAY_MS ); + + /* Enable port */ + portsc |= UHCI_PORTSC_PED; + outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) ); + mdelay ( USB_RESET_RECOVER_DELAY_MS ); + + /* Wait for port to become enabled */ + for ( i = 0 ; i < UHCI_PORT_ENABLE_MAX_WAIT_MS ; i++ ) { + + /* Check port status */ + portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) ); + if ( portsc & UHCI_PORTSC_PED ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( uhci, "UHCI %s-%d timed out waiting for port to enable " + "(status %04x)\n", uhci->name, port->address, portsc ); + return -ETIMEDOUT; +} + +/** + * Disable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int uhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) { + struct uhci_device *uhci = usb_hub_get_drvdata ( hub ); + uint16_t portsc; + + /* Disable port */ + portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) ); + portsc &= ~UHCI_PORTSC_PED; + outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) ); + + return 0; +} + +/** + * Update root hub port speed + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int uhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) { + struct uhci_device *uhci = usb_hub_get_drvdata ( hub ); + struct pci_device pci; + uint16_t portsc; + unsigned int speed; + + /* Read port status */ + portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) ); + if ( ! ( portsc & UHCI_PORTSC_CCS ) ) { + /* Port not connected */ + speed = USB_SPEED_NONE; + } else if ( uhci->companion && + ! 
find_usb_bus_by_location ( BUS_TYPE_PCI, + uhci->companion ) ) { + /* Defer connection detection until companion + * controller has been enumerated. + */ + pci_init ( &pci, uhci->companion ); + DBGC ( uhci, "UHCI %s-%d deferring for companion " PCI_FMT "\n", + uhci->name, port->address, PCI_ARGS ( &pci ) ); + speed = USB_SPEED_NONE; + } else if ( portsc & UHCI_PORTSC_LS ) { + /* Low-speed device */ + speed = USB_SPEED_LOW; + } else { + /* Full-speed device */ + speed = USB_SPEED_FULL; + } + port->speed = speed; + + /* Record disconnections and clear changes */ + port->disconnected |= ( portsc & UHCI_PORTSC_CSC ); + outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) ); + + return 0; +} + +/** + * Clear transaction translator buffer + * + * @v hub USB hub + * @v port USB port + * @v ep USB endpoint + * @ret rc Return status code + */ +static int uhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port, + struct usb_endpoint *ep ) { + struct uhci_device *uhci = usb_hub_get_drvdata ( hub ); + + /* Should never be called; this is a root hub */ + DBGC ( uhci, "UHCI %s-%d nonsensical CLEAR_TT for %s %s\n", uhci->name, + port->address, ep->usb->name, usb_endpoint_name ( ep ) ); + + return -ENOTSUP; +} + +/** + * Poll for port status changes + * + * @v hub USB hub + * @v port USB port + */ +static void uhci_root_poll ( struct usb_hub *hub, struct usb_port *port ) { + struct uhci_device *uhci = usb_hub_get_drvdata ( hub ); + uint16_t portsc; + uint16_t change; + + /* Do nothing unless something has changed */ + portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) ); + change = ( portsc & UHCI_PORTSC_CHANGE ); + if ( ! 
change ) + return; + + /* Record disconnections and clear changes */ + port->disconnected |= ( portsc & UHCI_PORTSC_CSC ); + outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) ); + + /* Report port status change */ + usb_port_changed ( port ); +} + +/****************************************************************************** + * + * Bus operations + * + ****************************************************************************** + */ + +/** + * Open USB bus + * + * @v bus USB bus + * @ret rc Return status code + */ +static int uhci_bus_open ( struct usb_bus *bus ) { + struct uhci_device *uhci = usb_bus_get_hostdata ( bus ); + int rc; + + /* Sanity checks */ + assert ( list_empty ( &uhci->async ) ); + assert ( list_empty ( &uhci->periodic ) ); + + /* Allocate and initialise asynchronous queue head */ + uhci->head = malloc_dma ( sizeof ( *uhci->head ), UHCI_ALIGN ); + if ( ! uhci->head ) { + rc = -ENOMEM; + goto err_alloc_head; + } + if ( ( rc = uhci_reachable ( uhci->head, sizeof ( *uhci->head ) ) ) !=0) + goto err_unreachable_head; + memset ( uhci->head, 0, sizeof ( *uhci->head ) ); + uhci->head->current = cpu_to_le32 ( UHCI_LINK_TERMINATE ); + uhci_async_schedule ( uhci ); + + /* Allocate periodic frame list */ + uhci->frame = malloc_dma ( sizeof ( *uhci->frame ), + sizeof ( *uhci->frame ) ); + if ( ! 
uhci->frame ) { + rc = -ENOMEM; + goto err_alloc_frame; + } + if ( ( rc = uhci_reachable ( uhci->frame, + sizeof ( *uhci->frame ) ) ) != 0 ) + goto err_unreachable_frame; + uhci_periodic_schedule ( uhci ); + outl ( virt_to_phys ( uhci->frame ), uhci->regs + UHCI_FLBASEADD ); + + /* Start controller */ + uhci_run ( uhci ); + + return 0; + + uhci_stop ( uhci ); + err_unreachable_frame: + free_dma ( uhci->frame, sizeof ( *uhci->frame ) ); + err_alloc_frame: + err_unreachable_head: + free_dma ( uhci->head, sizeof ( *uhci->head ) ); + err_alloc_head: + return rc; +} + +/** + * Close USB bus + * + * @v bus USB bus + */ +static void uhci_bus_close ( struct usb_bus *bus ) { + struct uhci_device *uhci = usb_bus_get_hostdata ( bus ); + + /* Sanity checks */ + assert ( list_empty ( &uhci->async ) ); + assert ( list_empty ( &uhci->periodic ) ); + + /* Stop controller */ + uhci_stop ( uhci ); + + /* Free periodic frame list */ + free_dma ( uhci->frame, sizeof ( *uhci->frame ) ); + + /* Free asynchronous schedule */ + free_dma ( uhci->head, sizeof ( *uhci->head ) ); +} + +/** + * Poll USB bus + * + * @v bus USB bus + */ +static void uhci_bus_poll ( struct usb_bus *bus ) { + struct uhci_device *uhci = usb_bus_get_hostdata ( bus ); + struct usb_hub *hub = bus->hub; + struct uhci_endpoint *endpoint; + unsigned int i; + + /* UHCI defers interrupts (including short packet detection) + * until the end of the frame. This can result in bulk IN + * endpoints remaining halted for much of the time, waiting + * for software action to reset the data toggles. We + * therefore ignore USBSTS and unconditionally poll all + * endpoints for completed transfer descriptors. + * + * As with EHCI, we trust that completion handlers are minimal + * and will not do anything that could plausibly affect the + * endpoint list itself. 
+ */ + list_for_each_entry ( endpoint, &uhci->endpoints, list ) + uhci_endpoint_poll ( endpoint ); + + /* UHCI provides no single bit to indicate that a port status + * change has occurred. We therefore unconditionally iterate + * over all ports looking for status changes. + */ + for ( i = 1 ; i <= UHCI_PORTS ; i++ ) + uhci_root_poll ( hub, usb_port ( hub, i ) ); +} + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** USB host controller operations */ +static struct usb_host_operations uhci_operations = { + .endpoint = { + .open = uhci_endpoint_open, + .close = uhci_endpoint_close, + .reset = uhci_endpoint_reset, + .mtu = uhci_endpoint_mtu, + .message = uhci_endpoint_message, + .stream = uhci_endpoint_stream, + }, + .device = { + .open = uhci_device_open, + .close = uhci_device_close, + .address = uhci_device_address, + }, + .bus = { + .open = uhci_bus_open, + .close = uhci_bus_close, + .poll = uhci_bus_poll, + }, + .hub = { + .open = uhci_hub_open, + .close = uhci_hub_close, + }, + .root = { + .open = uhci_root_open, + .close = uhci_root_close, + .enable = uhci_root_enable, + .disable = uhci_root_disable, + .speed = uhci_root_speed, + .clear_tt = uhci_root_clear_tt, + }, +}; + +/** + * Locate EHCI companion controller (when no EHCI support is present) + * + * @v pci PCI device + * @ret busdevfn EHCI companion controller bus:dev.fn (if any) + */ +__weak unsigned int ehci_companion ( struct pci_device *pci __unused ) { + return 0; +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int uhci_probe ( struct pci_device *pci ) { + struct uhci_device *uhci; + struct usb_port *port; + unsigned int i; + int rc; + + /* Allocate and initialise structure */ + uhci = zalloc ( sizeof ( *uhci ) ); + if ( ! 
uhci ) { + rc = -ENOMEM; + goto err_alloc; + } + uhci->name = pci->dev.name; + INIT_LIST_HEAD ( &uhci->endpoints ); + INIT_LIST_HEAD ( &uhci->async ); + INIT_LIST_HEAD ( &uhci->periodic ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Identify EHCI companion controller, if any */ + uhci->companion = ehci_companion ( pci ); + + /* Claim ownership from BIOS. (There is no release mechanism + * for UHCI.) + */ + pci_write_config_word ( pci, UHCI_USBLEGSUP, UHCI_USBLEGSUP_DEFAULT ); + + /* Map registers */ + uhci->regs = pci->ioaddr; + if ( ! uhci->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Reset device */ + if ( ( rc = uhci_reset ( uhci ) ) != 0 ) + goto err_reset; + + /* Allocate USB bus */ + uhci->bus = alloc_usb_bus ( &pci->dev, UHCI_PORTS, UHCI_MTU, + &uhci_operations ); + if ( ! uhci->bus ) { + rc = -ENOMEM; + goto err_alloc_bus; + } + usb_bus_set_hostdata ( uhci->bus, uhci ); + usb_hub_set_drvdata ( uhci->bus->hub, uhci ); + + /* Set port protocols */ + for ( i = 1 ; i <= UHCI_PORTS ; i++ ) { + port = usb_port ( uhci->bus->hub, i ); + port->protocol = USB_PROTO_2_0; + } + + /* Register USB bus */ + if ( ( rc = register_usb_bus ( uhci->bus ) ) != 0 ) + goto err_register; + + pci_set_drvdata ( pci, uhci ); + return 0; + + unregister_usb_bus ( uhci->bus ); + err_register: + free_usb_bus ( uhci->bus ); + err_alloc_bus: + uhci_reset ( uhci ); + err_reset: + err_ioremap: + free ( uhci ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void uhci_remove ( struct pci_device *pci ) { + struct uhci_device *uhci = pci_get_drvdata ( pci ); + struct usb_bus *bus = uhci->bus; + + unregister_usb_bus ( bus ); + assert ( list_empty ( &uhci->async ) ); + assert ( list_empty ( &uhci->periodic ) ); + free_usb_bus ( bus ); + uhci_reset ( uhci ); + free ( uhci ); +} + +/** UHCI PCI device IDs */ +static struct pci_device_id uhci_ids[] = { + PCI_ROM ( 0xffff, 0xffff, "uhci", "UHCI", 0 ), +}; + +/** UHCI PCI 
driver */ +struct pci_driver uhci_driver __pci_driver = { + .ids = uhci_ids, + .id_count = ( sizeof ( uhci_ids ) / sizeof ( uhci_ids[0] ) ), + .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_UHCI ), + .probe = uhci_probe, + .remove = uhci_remove, +}; diff --git a/src/drivers/usb/uhci.h b/src/drivers/usb/uhci.h new file mode 100644 index 00000000..ba4c28f7 --- /dev/null +++ b/src/drivers/usb/uhci.h @@ -0,0 +1,350 @@ +#ifndef _IPXE_UHCI_H +#define _IPXE_UHCI_H + +/** @file + * + * USB Universal Host Controller Interface (UHCI) driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Minimum alignment required for data structures + * + * With the exception of the frame list (which is page-aligned), data + * structures used by UHCI generally require 16-byte alignment. + */ +#define UHCI_ALIGN 16 + +/** Number of ports */ +#define UHCI_PORTS 2 + +/** Maximum transfer size */ +#define UHCI_MTU 1280 + +/** I/O BAR size */ +#define UHCI_BAR_SIZE 0x14 + +/** USB command register */ +#define UHCI_USBCMD 0x00 + +/** Max packet is 64 bytes */ +#define UHCI_USBCMD_MAX64 0x0080 + +/** Host controller reset */ +#define UHCI_USBCMD_HCRESET 0x0002 + +/** Run/stop */ +#define UHCI_USBCMD_RUN 0x0001 + +/** USB status register */ +#define UHCI_USBSTS 0x02 + +/** Host controller halted */ +#define UHCI_USBSTS_HCHALTED 0x0020 + +/** USB interrupt */ +#define UHCI_USBSTS_USBINT 0x0001 + +/** Frame list base address register */ +#define UHCI_FLBASEADD 0x08 + +/** Port status and control register */ +#define UHCI_PORTSC(port) ( 0x0e + ( (port) << 1 ) ) + +/** Port reset */ +#define UHCI_PORTSC_PR 0x0200 + +/** Low-speed device attached */ +#define UHCI_PORTSC_LS 0x0100 + +/** Port enabled/disabled change */ +#define UHCI_PORTSC_PEC 0x0008 + +/** Port enabled */ +#define UHCI_PORTSC_PED 0x0004 + +/** Connect status change */ +#define UHCI_PORTSC_CSC 0x0002 + +/** Current connect status */ +#define 
UHCI_PORTSC_CCS 0x0001 + +/** Port status change mask */ +#define UHCI_PORTSC_CHANGE ( UHCI_PORTSC_CSC | UHCI_PORTSC_PEC ) + +/** Depth-first processing */ +#define UHCI_LINK_DEPTH_FIRST 0x00000004UL + +/** Queue head type */ +#define UHCI_LINK_TYPE_QH 0x00000002UL + +/** List terminator */ +#define UHCI_LINK_TERMINATE 0x00000001UL + +/** Number of frames in frame list */ +#define UHCI_FRAMES 1024 + +/** A frame list */ +struct uhci_frame_list { + /** Link pointer */ + uint32_t link[UHCI_FRAMES]; +} __attribute__ (( packed )); + +/** A transfer descriptor */ +struct uhci_transfer_descriptor { + /** Link pointer */ + uint32_t link; + /** Actual length */ + uint16_t actual; + /** Status */ + uint8_t status; + /** Flags */ + uint8_t flags; + /** Control */ + uint32_t control; + /** Buffer pointer */ + uint32_t data; +} __attribute__ (( packed )); + +/** Length mask */ +#define UHCI_LEN_MASK 0x7ff + +/** Actual length */ +#define UHCI_ACTUAL_LEN( actual ) ( ( (actual) + 1 ) & UHCI_LEN_MASK ) + +/** Active */ +#define UHCI_STATUS_ACTIVE 0x80 + +/** Stalled */ +#define UHCI_STATUS_STALLED 0x40 + +/** Data buffer error */ +#define UHCI_STATUS_BUFFER 0x20 + +/** Babble detected */ +#define UHCI_STATUS_BABBLE 0x10 + +/** NAK received */ +#define UHCI_STATUS_NAK 0x08 + +/** CRC/timeout error */ +#define UHCI_STATUS_CRC_TIMEOUT 0x04 + +/** Bitstuff error */ +#define UHCI_STATUS_BITSTUFF 0x02 + +/** Short packet detect */ +#define UHCI_FL_SPD 0x20 + +/** Error counter */ +#define UHCI_FL_CERR( count ) ( (count) << 3 ) + +/** Error counter maximum value */ +#define UHCI_FL_CERR_MAX UHCI_FL_CERR ( 3 ) + +/** Low speed device */ +#define UHCI_FL_LS 0x04 + +/** Interrupt on completion */ +#define UHCI_FL_IOC 0x01 + +/** Packet ID */ +#define UHCI_CONTROL_PID( pid ) ( (pid) << 0 ) + +/** Packet ID mask */ +#define UHCI_CONTROL_PID_MASK UHCI_CONTROL_PID ( 0xff ) + +/** Device address */ +#define UHCI_CONTROL_DEVICE( address ) ( (address) << 8 ) + +/** Endpoint address */ +#define 
UHCI_CONTROL_ENDPOINT( address ) ( (address) << 15 ) + +/** Data toggle */ +#define UHCI_CONTROL_TOGGLE ( 1 << 19 ) + +/** Data length */ +#define UHCI_CONTROL_LEN( len ) ( ( ( (len) - 1 ) & UHCI_LEN_MASK ) << 21 ) + +/** Check for data packet + * + * This check is based on the fact that only USB_PID_SETUP has bit 2 + * set. + */ +#define UHCI_DATA_PACKET( control ) ( ! ( control & 0x04 ) ) + +/** Check for short packet */ +#define UHCI_SHORT_PACKET( control, actual ) \ + ( ( ( (control) >> 21 ) ^ (actual) ) & UHCI_LEN_MASK ) + +/** USB legacy support register (in PCI configuration space) */ +#define UHCI_USBLEGSUP 0xc0 + +/** USB legacy support default value */ +#define UHCI_USBLEGSUP_DEFAULT 0x2000 + +/** A queue head */ +struct uhci_queue_head { + /** Horizontal link pointer */ + uint32_t link; + /** Current transfer descriptor */ + uint32_t current; +} __attribute__ (( packed )); + +/** A single UHCI transfer + * + * UHCI hardware is extremely simple, and requires software to build + * the entire packet schedule (including manually handling all of the + * data toggles). The hardware requires at least 16 bytes of transfer + * descriptors per 64 bytes of transmitted/received data. We allocate + * the transfer descriptors at the time that the transfer is enqueued, + * to avoid the need to allocate unreasonably large blocks when the + * endpoint is opened. + */ +struct uhci_transfer { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** Completed data length */ + size_t len; + + /** Transfer descriptors */ + struct uhci_transfer_descriptor *desc; + + /** I/O buffer */ + struct io_buffer *iobuf; +}; + +/** Number of transfer descriptors in a ring + * + * This is a policy decision. 
+ */ +#define UHCI_RING_COUNT 16 + +/** A transfer ring */ +struct uhci_ring { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + + /** Maximum packet length */ + size_t mtu; + /** Base flags + * + * This incorporates the CERR and LS bits + */ + uint8_t flags; + /** Base control word + * + * This incorporates the device address, the endpoint address, + * and the data toggle for the next descriptor to be enqueued. + */ + uint32_t control; + + /** Transfers */ + struct uhci_transfer *xfer[UHCI_RING_COUNT]; + /** End of transfer ring (if non-empty) */ + struct uhci_transfer *end; + + /** Queue head */ + struct uhci_queue_head *head; +}; + +/** + * Calculate space used in transfer ring + * + * @v ring Transfer ring + * @ret fill Number of entries used + */ +static inline __attribute__ (( always_inline )) unsigned int +uhci_ring_fill ( struct uhci_ring *ring ) { + unsigned int fill; + + fill = ( ring->prod - ring->cons ); + assert ( fill <= UHCI_RING_COUNT ); + return fill; +} + +/** + * Calculate space remaining in transfer ring + * + * @v ring Transfer ring + * @ret remaining Number of entries remaining + */ +static inline __attribute__ (( always_inline )) unsigned int +uhci_ring_remaining ( struct uhci_ring *ring ) { + unsigned int fill = uhci_ring_fill ( ring ); + + return ( UHCI_RING_COUNT - fill ); +} + +/** Maximum time to wait for host controller to stop + * + * This is a policy decision. + */ +#define UHCI_STOP_MAX_WAIT_MS 100 + +/** Maximum time to wait for reset to complete + * + * This is a policy decision. + */ +#define UHCI_RESET_MAX_WAIT_MS 500 + +/** Maximum time to wait for a port to be enabled + * + * This is a policy decision. 
+ */ +#define UHCI_PORT_ENABLE_MAX_WAIT_MS 500 + +/** A UHCI device */ +struct uhci_device { + /** Registers */ + unsigned long regs; + /** Name */ + const char *name; + + /** EHCI companion controller bus:dev.fn address (if any) */ + unsigned int companion; + + /** Asynchronous queue head */ + struct uhci_queue_head *head; + /** Frame list */ + struct uhci_frame_list *frame; + + /** List of all endpoints */ + struct list_head endpoints; + /** Asynchronous schedule */ + struct list_head async; + /** Periodic schedule + * + * Listed in decreasing order of endpoint interval. + */ + struct list_head periodic; + + /** USB bus */ + struct usb_bus *bus; +}; + +/** A UHCI endpoint */ +struct uhci_endpoint { + /** UHCI device */ + struct uhci_device *uhci; + /** USB endpoint */ + struct usb_endpoint *ep; + /** List of all endpoints */ + struct list_head list; + /** Endpoint schedule */ + struct list_head schedule; + + /** Transfer ring */ + struct uhci_ring ring; +}; + +#endif /* _IPXE_UHCI_H */ diff --git a/src/drivers/usb/usbblk.c b/src/drivers/usb/usbblk.c new file mode 100644 index 00000000..5a086d3f --- /dev/null +++ b/src/drivers/usb/usbblk.c @@ -0,0 +1,912 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "usbblk.h" + +/** @file + * + * USB mass storage driver + * + */ + +static void usbblk_stop ( struct usbblk_device *usbblk, int rc ); + +/** List of USB block devices */ +static LIST_HEAD ( usbblk_devices ); + +/****************************************************************************** + * + * Endpoint management + * + ****************************************************************************** + */ + +/** + * Open endpoints + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_open ( struct usbblk_device *usbblk ) { + struct usb_device *usb = usbblk->func->usb; + unsigned int interface = usbblk->func->interface[0]; + int rc; + + /* Sanity checks */ + assert ( ! usbblk->in.open ); + assert ( ! 
usbblk->out.open ); + + /* Issue reset */ + if ( ( rc = usb_control ( usb, USBBLK_RESET, 0, interface, + NULL, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not issue reset: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_reset; + } + + /* Open bulk OUT endpoint */ + if ( ( rc = usb_endpoint_open ( &usbblk->out ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not open bulk OUT: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_open_out; + } + + /* Clear any bulk OUT halt condition */ + if ( ( rc = usb_endpoint_clear_halt ( &usbblk->out ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not reset bulk OUT: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_clear_out; + } + + /* Open bulk IN endpoint */ + if ( ( rc = usb_endpoint_open ( &usbblk->in ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not open bulk IN: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_open_in; + } + + /* Clear any bulk IN halt condition */ + if ( ( rc = usb_endpoint_clear_halt ( &usbblk->in ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not reset bulk IN: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_clear_in; + } + + return 0; + + err_clear_in: + usb_endpoint_close ( &usbblk->in ); + err_open_in: + err_clear_out: + usb_endpoint_close ( &usbblk->out ); + err_open_out: + err_reset: + return rc; +} + +/** + * Close endpoints + * + * @v usbblk USB block device + */ +static void usbblk_close ( struct usbblk_device *usbblk ) { + + /* Close bulk OUT endpoint */ + if ( usbblk->out.open ) + usb_endpoint_close ( &usbblk->out ); + + /* Close bulk IN endpoint */ + if ( usbblk->in.open ) + usb_endpoint_close ( &usbblk->in ); +} + +/****************************************************************************** + * + * Bulk OUT endpoint + * + ****************************************************************************** + */ + +/** + * Issue bulk OUT command + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_out_command 
( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + struct usbblk_command_wrapper *wrapper; + struct io_buffer *iobuf; + int rc; + + /* Sanity checks */ + assert ( cmd->tag ); + assert ( ! ( cmd->scsi.data_in_len && cmd->scsi.data_out_len ) ); + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( sizeof ( *wrapper ) ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Populate command */ + wrapper = iob_put ( iobuf, sizeof ( *wrapper ) ); + memset ( wrapper, 0, sizeof ( *wrapper ) ); + wrapper->signature = cpu_to_le32 ( USBBLK_COMMAND_SIGNATURE ); + wrapper->tag = cmd->tag; /* non-endian */ + if ( cmd->scsi.data_out_len ) { + wrapper->len = cpu_to_le32 ( cmd->scsi.data_out_len ); + } else { + wrapper->len = cpu_to_le32 ( cmd->scsi.data_in_len ); + wrapper->flags = USB_DIR_IN; + } + wrapper->lun = ntohs ( cmd->scsi.lun.u16[0] ); + wrapper->cblen = sizeof ( wrapper->cb ); + memcpy ( wrapper->cb, &cmd->scsi.cdb, sizeof ( wrapper->cb ) ); + + /* Issue command */ + if ( ( rc = usb_stream ( &usbblk->out, iobuf, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk OUT could not issue command: " + "%s\n", usbblk->func->name, strerror ( rc ) ); + goto err_stream; + } + + return 0; + + err_stream: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Send bulk OUT data block + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_out_data ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + struct io_buffer *iobuf; + size_t len; + int rc; + + /* Calculate length */ + assert ( cmd->tag ); + assert ( cmd->scsi.data_out != UNULL ); + assert ( cmd->offset < cmd->scsi.data_out_len ); + len = ( cmd->scsi.data_out_len - cmd->offset ); + if ( len > USBBLK_MAX_LEN ) + len = USBBLK_MAX_LEN; + assert ( ( len % usbblk->out.mtu ) == 0 ); + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( len ); + if ( ! 
iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Populate I/O buffer */ + copy_from_user ( iob_put ( iobuf, len ), cmd->scsi.data_out, + cmd->offset, len ); + + /* Send data */ + if ( ( rc = usb_stream ( &usbblk->out, iobuf, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk OUT could not send data: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_stream; + } + + /* Consume data */ + cmd->offset += len; + + return 0; + + err_stream: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Refill bulk OUT endpoint + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_out_refill ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + int rc; + + /* Sanity checks */ + assert ( cmd->tag ); + + /* Refill endpoint */ + while ( ( cmd->offset < cmd->scsi.data_out_len ) && + ( usbblk->out.fill < USBBLK_MAX_FILL ) ) { + if ( ( rc = usbblk_out_data ( usbblk ) ) != 0 ) + return rc; + } + + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void usbblk_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usbblk_device *usbblk = + container_of ( ep, struct usbblk_device, out ); + struct usbblk_command *cmd = &usbblk->cmd; + + /* Ignore cancellations after closing endpoint */ + if ( ! 
ep->open ) + goto drop; + + /* Sanity check */ + assert ( cmd->tag ); + + /* Check for failures */ + if ( rc != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk OUT failed: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err; + } + + /* Trigger refill process, if applicable */ + if ( cmd->offset < cmd->scsi.data_out_len ) + process_add ( &usbblk->process ); + + drop: + /* Free I/O buffer */ + free_iob ( iobuf ); + + return; + + err: + free_iob ( iobuf ); + usbblk_stop ( usbblk, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations usbblk_out_operations = { + .complete = usbblk_out_complete, +}; + +/****************************************************************************** + * + * Bulk IN endpoint + * + ****************************************************************************** + */ + +/** + * Handle bulk IN data block + * + * @v usbblk USB block device + * @v data Data block + * @v len Length of data + * @ret rc Return status code + */ +static int usbblk_in_data ( struct usbblk_device *usbblk, const void *data, + size_t len ) { + struct usbblk_command *cmd = &usbblk->cmd; + + /* Sanity checks */ + assert ( cmd->tag ); + assert ( cmd->scsi.data_in != UNULL ); + assert ( cmd->offset <= cmd->scsi.data_in_len ); + assert ( len <= ( cmd->scsi.data_in_len - cmd->offset ) ); + + /* Store data */ + copy_to_user ( cmd->scsi.data_in, cmd->offset, data, len ); + cmd->offset += len; + + return 0; +} + +/** + * Handle bulk IN status + * + * @v usbblk USB block device + * @v data Status data + * @v len Length of status data + * @ret rc Return status code + */ +static int usbblk_in_status ( struct usbblk_device *usbblk, const void *data, + size_t len ) { + struct usbblk_command *cmd = &usbblk->cmd; + const struct usbblk_status_wrapper *stat; + + /* Sanity checks */ + assert ( cmd->tag ); + + /* Validate length */ + if ( len < sizeof ( *stat ) ) { + DBGC ( usbblk, "USBBLK %s bulk IN malformed status:\n", + usbblk->func->name ); + 
DBGC_HDA ( usbblk, 0, data, len ); + return -EIO; + } + stat = data; + + /* Validate signature */ + if ( stat->signature != cpu_to_le32 ( USBBLK_STATUS_SIGNATURE ) ) { + DBGC ( usbblk, "USBBLK %s bulk IN invalid signature %08x:\n", + usbblk->func->name, le32_to_cpu ( stat->signature ) ); + DBGC_HDA ( usbblk, 0, stat, sizeof ( *stat ) ); + return -EIO; + } + + /* Validate tag */ + if ( stat->tag != cmd->tag ) { + DBGC ( usbblk, "USBBLK %s bulk IN tag mismatch (got %08x, " + "expected %08x):\n", + usbblk->func->name, stat->tag, cmd->tag ); + DBGC_HDA ( usbblk, 0, stat, sizeof ( *stat ) ); + return -EIO; + } + + /* Check status */ + if ( stat->status ) { + DBGC ( usbblk, "USBBLK %s bulk IN status %02x:\n", + usbblk->func->name, stat->status ); + DBGC_HDA ( usbblk, 0, stat, sizeof ( *stat ) ); + return -EIO; + } + + /* Check for residual data */ + if ( stat->residue ) { + DBGC ( usbblk, "USBBLK %s bulk IN residue %#x:\n", + usbblk->func->name, le32_to_cpu ( stat->residue ) ); + return -EIO; + } + + /* Mark command as complete */ + usbblk_stop ( usbblk, 0 ); + + return 0; +} + +/** + * Refill bulk IN endpoint + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_in_refill ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + struct usbblk_status_wrapper *stat; + size_t remaining; + unsigned int max; + int rc; + + /* Sanity checks */ + assert ( cmd->tag ); + + /* Calculate maximum required refill */ + remaining = sizeof ( *stat ); + if ( cmd->scsi.data_in_len ) { + assert ( cmd->offset <= cmd->scsi.data_in_len ); + remaining += ( cmd->scsi.data_in_len - cmd->offset ); + } + max = ( ( remaining + USBBLK_MAX_LEN - 1 ) / USBBLK_MAX_LEN ); + + /* Refill bulk IN endpoint */ + if ( ( rc = usb_refill_limit ( &usbblk->in, max ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void 
usbblk_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usbblk_device *usbblk = + container_of ( ep, struct usbblk_device, in ); + struct usbblk_command *cmd = &usbblk->cmd; + size_t remaining; + size_t len; + + /* Ignore cancellations after closing endpoint */ + if ( ! ep->open ) + goto drop; + + /* Sanity check */ + assert ( cmd->tag ); + + /* Handle errors */ + if ( rc != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk IN failed: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err; + } + + /* Trigger refill process */ + process_add ( &usbblk->process ); + + /* Handle data portion, if any */ + if ( cmd->scsi.data_in_len ) { + assert ( cmd->offset <= cmd->scsi.data_in_len ); + remaining = ( cmd->scsi.data_in_len - cmd->offset ); + len = iob_len ( iobuf ); + if ( len > remaining ) + len = remaining; + if ( len ) { + if ( ( rc = usbblk_in_data ( usbblk, iobuf->data, + len ) ) != 0 ) + goto err; + iob_pull ( iobuf, len ); + } + } + + /* Handle status portion, if any */ + len = iob_len ( iobuf ); + if ( len ) { + if ( ( rc = usbblk_in_status ( usbblk, iobuf->data, + len ) ) != 0 ) + goto err; + } + + drop: + /* Free I/O buffer */ + free_iob ( iobuf ); + + return; + + err: + free_iob ( iobuf ); + usbblk_stop ( usbblk, rc ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations usbblk_in_operations = { + .complete = usbblk_in_complete, +}; + +/****************************************************************************** + * + * Refill process + * + ****************************************************************************** + */ + +/** + * Refill endpoints + * + * @v usbblk USB block device + */ +static void usbblk_step ( struct usbblk_device *usbblk ) { + + /* Refill bulk OUT endpoint */ + usbblk_out_refill ( usbblk ); + + /* Refill bulk IN endpoint */ + usbblk_in_refill ( usbblk ); +} + +/** Refill process descriptor */ +static struct process_descriptor usbblk_process_desc = + PROC_DESC ( struct 
usbblk_device, process, usbblk_step ); + +/****************************************************************************** + * + * SCSI command management + * + ****************************************************************************** + */ + +/** Next command tag */ +static uint16_t usbblk_tag; + +/** + * Stop SCSI command + * + * @v usbblk USB block device + * @v rc Reason for stop + */ +static void usbblk_stop ( struct usbblk_device *usbblk, int rc ) { + + /* Stop process */ + process_del ( &usbblk->process ); + + /* Reset command */ + memset ( &usbblk->cmd, 0, sizeof ( usbblk->cmd ) ); + + /* Close endpoints if an error occurred */ + if ( rc != 0 ) { + DBGC ( usbblk, "USBBLK %s closing for error recovery\n", + usbblk->func->name ); + usbblk_close ( usbblk ); + } + + /* Terminate command */ + intf_restart ( &usbblk->data, rc ); +} + +/** + * Start new SCSI command + * + * @v usbblk USB block device + * @v scsicmd SCSI command + * @ret rc Return status code + */ +static int usbblk_start ( struct usbblk_device *usbblk, + struct scsi_cmd *scsicmd ) { + struct usbblk_command *cmd = &usbblk->cmd; + int rc; + + /* Fail if command is in progress */ + if ( cmd->tag ) { + rc = -EBUSY; + DBGC ( usbblk, "USBBLK %s cannot support multiple commands\n", + usbblk->func->name ); + goto err_busy; + } + + /* Refuse bidirectional commands */ + if ( scsicmd->data_in_len && scsicmd->data_out_len ) { + rc = -EOPNOTSUPP; + DBGC ( usbblk, "USBBLK %s cannot support bidirectional " + "commands\n", usbblk->func->name ); + goto err_bidirectional; + } + + /* Sanity checks */ + assert ( ! 
process_running ( &usbblk->process ) ); + assert ( cmd->offset == 0 ); + + /* Initialise command */ + memcpy ( &cmd->scsi, scsicmd, sizeof ( cmd->scsi ) ); + cmd->tag = ( USBBLK_TAG_MAGIC | ++usbblk_tag ); + + /* Issue bulk OUT command */ + if ( ( rc = usbblk_out_command ( usbblk ) ) != 0 ) + goto err_command; + + /* Start refill process */ + process_add ( &usbblk->process ); + + return 0; + + err_command: + memset ( &usbblk->cmd, 0, sizeof ( usbblk->cmd ) ); + err_bidirectional: + err_busy: + return rc; +} + +/****************************************************************************** + * + * SCSI interfaces + * + ****************************************************************************** + */ + +/** SCSI data interface operations */ +static struct interface_operation usbblk_data_operations[] = { + INTF_OP ( intf_close, struct usbblk_device *, usbblk_stop ), +}; + +/** SCSI data interface descriptor */ +static struct interface_descriptor usbblk_data_desc = + INTF_DESC ( struct usbblk_device, data, usbblk_data_operations ); + +/** + * Check SCSI command flow-control window + * + * @v usbblk USB block device + * @ret len Length of window + */ +static size_t usbblk_scsi_window ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + + /* Allow a single command if no command is currently in progress */ + return ( cmd->tag ? 0 : 1 ); +} + +/** + * Issue SCSI command + * + * @v usbblk USB block device + * @v data SCSI data interface + * @v scsicmd SCSI command + * @ret tag Command tag, or negative error + */ +static int usbblk_scsi_command ( struct usbblk_device *usbblk, + struct interface *data, + struct scsi_cmd *scsicmd ) { + struct usbblk_command *cmd = &usbblk->cmd; + int rc; + + /* (Re)open endpoints if needed */ + if ( ( ! 
usbblk->in.open ) && ( ( rc = usbblk_open ( usbblk ) ) != 0 ) ) + goto err_open; + + /* Start new command */ + if ( ( rc = usbblk_start ( usbblk, scsicmd ) ) != 0 ) + goto err_start; + + /* Attach to parent interface and return */ + intf_plug_plug ( &usbblk->data, data ); + return cmd->tag; + + usbblk_stop ( usbblk, rc ); + err_start: + usbblk_close ( usbblk ); + err_open: + return rc; +} + +/** + * Close SCSI interface + * + * @v usbblk USB block device + * @v rc Reason for close + */ +static void usbblk_scsi_close ( struct usbblk_device *usbblk, int rc ) { + + /* Restart interfaces */ + intfs_restart ( rc, &usbblk->scsi, &usbblk->data, NULL ); + + /* Stop any in-progress command */ + usbblk_stop ( usbblk, rc ); + + /* Close endpoints */ + usbblk_close ( usbblk ); + + /* Flag as closed */ + usbblk->opened = 0; +} + +/** + * Describe as an EFI device path + * + * @v usbblk USB block device + * @ret path EFI device path, or NULL on error + */ +static EFI_DEVICE_PATH_PROTOCOL * +usbblk_efi_describe ( struct usbblk_device *usbblk ) { + + return efi_usb_path ( usbblk->func ); +} + +/** SCSI command interface operations */ +static struct interface_operation usbblk_scsi_operations[] = { + INTF_OP ( scsi_command, struct usbblk_device *, usbblk_scsi_command ), + INTF_OP ( xfer_window, struct usbblk_device *, usbblk_scsi_window ), + INTF_OP ( intf_close, struct usbblk_device *, usbblk_scsi_close ), + EFI_INTF_OP ( efi_describe, struct usbblk_device *, + usbblk_efi_describe ), +}; + +/** SCSI command interface descriptor */ +static struct interface_descriptor usbblk_scsi_desc = + INTF_DESC ( struct usbblk_device, scsi, usbblk_scsi_operations ); + +/****************************************************************************** + * + * SAN device interface + * + ****************************************************************************** + */ + +/** + * Find USB block device + * + * @v name USB block device name + * @ret usbblk USB block device, or NULL + */ +static struct 
usbblk_device * usbblk_find ( const char *name ) { + struct usbblk_device *usbblk; + + /* Look for matching device */ + list_for_each_entry ( usbblk, &usbblk_devices, list ) { + if ( strcmp ( usbblk->func->name, name ) == 0 ) + return usbblk; + } + + return NULL; +} + +/** + * Open USB block device URI + * + * @v parent Parent interface + * @v uri URI + * @ret rc Return status code + */ +static int usbblk_open_uri ( struct interface *parent, struct uri *uri ) { + static struct scsi_lun lun; + struct usbblk_device *usbblk; + int rc; + + /* Sanity check */ + if ( ! uri->opaque ) + return -EINVAL; + + /* Find matching device */ + usbblk = usbblk_find ( uri->opaque ); + if ( ! usbblk ) + return -ENOENT; + + /* Fail if device is already open */ + if ( usbblk->opened ) + return -EBUSY; + + /* Open SCSI device */ + if ( ( rc = scsi_open ( parent, &usbblk->scsi, &lun ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not open SCSI device: %s\n", + usbblk->func->name, strerror ( rc ) ); + return rc; + } + + /* Mark as opened */ + usbblk->opened = 1; + + return 0; +} + +/** USB block device URI opener */ +struct uri_opener usbblk_uri_opener __uri_opener = { + .scheme = "usb", + .open = usbblk_open_uri, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int usbblk_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct usbblk_device *usbblk; + struct usb_interface_descriptor *desc; + int rc; + + /* Allocate and initialise structure */ + usbblk = zalloc ( sizeof ( *usbblk ) ); + if ( ! 
usbblk ) { + rc = -ENOMEM; + goto err_alloc; + } + usbblk->func = func; + usb_endpoint_init ( &usbblk->out, usb, &usbblk_out_operations ); + usb_endpoint_init ( &usbblk->in, usb, &usbblk_in_operations ); + usb_refill_init ( &usbblk->in, 0, USBBLK_MAX_LEN, USBBLK_MAX_FILL ); + intf_init ( &usbblk->scsi, &usbblk_scsi_desc, &usbblk->refcnt ); + intf_init ( &usbblk->data, &usbblk_data_desc, &usbblk->refcnt ); + process_init_stopped ( &usbblk->process, &usbblk_process_desc, + &usbblk->refcnt ); + + /* Locate interface descriptor */ + desc = usb_interface_descriptor ( config, func->interface[0], 0 ); + if ( ! desc ) { + DBGC ( usbblk, "USBBLK %s missing interface descriptor\n", + usbblk->func->name ); + rc = -ENOENT; + goto err_desc; + } + + /* Describe endpoints */ + if ( ( rc = usb_endpoint_described ( &usbblk->out, config, desc, + USB_BULK_OUT, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not describe bulk OUT: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_out; + } + if ( ( rc = usb_endpoint_described ( &usbblk->in, config, desc, + USB_BULK_IN, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not describe bulk IN: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_in; + } + + /* Add to list of devices */ + list_add_tail ( &usbblk->list, &usbblk_devices ); + + usb_func_set_drvdata ( func, usbblk ); + return 0; + + err_in: + err_out: + err_desc: + ref_put ( &usbblk->refcnt ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void usbblk_remove ( struct usb_function *func ) { + struct usbblk_device *usbblk = usb_func_get_drvdata ( func ); + + /* Remove from list of devices */ + list_del ( &usbblk->list ); + + /* Close all interfaces */ + usbblk_scsi_close ( usbblk, -ENODEV ); + + /* Shut down interfaces */ + intfs_shutdown ( -ENODEV, &usbblk->scsi, &usbblk->data, NULL ); + + /* Drop reference */ + ref_put ( &usbblk->refcnt ); +} + +/** Mass storage class device IDs */ +static struct usb_device_id 
usbblk_ids[] = { + { + .name = "usbblk", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** Mass storage driver */ +struct usb_driver usbblk_driver __usb_driver = { + .ids = usbblk_ids, + .id_count = ( sizeof ( usbblk_ids ) / sizeof ( usbblk_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_MSC, USB_SUBCLASS_MSC_SCSI, + USB_PROTOCOL_MSC_BULK ), + .score = USB_SCORE_NORMAL, + .probe = usbblk_probe, + .remove = usbblk_remove, +}; diff --git a/src/drivers/usb/usbblk.h b/src/drivers/usb/usbblk.h new file mode 100644 index 00000000..65d0705e --- /dev/null +++ b/src/drivers/usb/usbblk.h @@ -0,0 +1,121 @@ +#ifndef _USBBLK_H +#define _USBBLK_H + +/** @file + * + * USB mass storage driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** Mass storage class code */ +#define USB_CLASS_MSC 0x08 + +/** SCSI command set subclass code */ +#define USB_SUBCLASS_MSC_SCSI 0x06 + +/** Bulk-only transport protocol */ +#define USB_PROTOCOL_MSC_BULK 0x50 + +/** Mass storage reset command */ +#define USBBLK_RESET ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 255 ) ) + +/** Command block wrapper */ +struct usbblk_command_wrapper { + /** Signature */ + uint32_t signature; + /** Tag */ + uint32_t tag; + /** Data transfer length */ + uint32_t len; + /** Flags */ + uint8_t flags; + /** LUN */ + uint8_t lun; + /** Command block length */ + uint8_t cblen; + /** Command block */ + uint8_t cb[16]; +} __attribute__ (( packed )); + +/** Command block wrapper signature */ +#define USBBLK_COMMAND_SIGNATURE 0x43425355UL + +/** Command status wrapper */ +struct usbblk_status_wrapper { + /** Signature */ + uint32_t signature; + /** Tag */ + uint32_t tag; + /** Data residue */ + uint32_t residue; + /** Status */ + uint8_t status; +} __attribute__ (( packed )); + +/** Command status wrapper signature */ +#define USBBLK_STATUS_SIGNATURE 0x53425355UL + +/** A USB mass storage command */ +struct usbblk_command { + 
/** SCSI command */ + struct scsi_cmd scsi; + /** Command tag (0 for no command in progress) */ + uint32_t tag; + /** Offset within data buffer */ + size_t offset; +}; + +/** A USB mass storage device */ +struct usbblk_device { + /** Reference count */ + struct refcnt refcnt; + /** List of devices */ + struct list_head list; + + /** USB function */ + struct usb_function *func; + /** Bulk OUT endpoint */ + struct usb_endpoint out; + /** Bulk IN endpoint */ + struct usb_endpoint in; + + /** SCSI command-issuing interface */ + struct interface scsi; + /** SCSI data interface */ + struct interface data; + /** Command process */ + struct process process; + /** Device opened flag */ + int opened; + + /** Current command (if any) */ + struct usbblk_command cmd; +}; + +/** Command tag magic + * + * This is a policy decision. + */ +#define USBBLK_TAG_MAGIC 0x18ae0000 + +/** Maximum length of USB data block + * + * This is a policy decision. + */ +#define USBBLK_MAX_LEN 2048 + +/** Maximum endpoint fill level + * + * This is a policy decision. + */ +#define USBBLK_MAX_FILL 4 + +#endif /* _USBBLK_H */ diff --git a/src/drivers/usb/usbhid.c b/src/drivers/usb/usbhid.c new file mode 100644 index 00000000..c74535a0 --- /dev/null +++ b/src/drivers/usb/usbhid.c @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * USB human interface devices (HID) + * + */ + +/** + * Open USB human interface device + * + * @v hid USB human interface device + * @ret rc Return status code + */ +int usbhid_open ( struct usb_hid *hid ) { + int rc; + + /* Open interrupt IN endpoint */ + if ( ( rc = usb_endpoint_open ( &hid->in ) ) != 0 ) { + DBGC ( hid, "HID %s could not open interrupt IN: %s\n", + hid->func->name, strerror ( rc ) ); + goto err_open_in; + } + + /* Refill interrupt IN endpoint */ + if ( ( rc = usb_refill ( &hid->in ) ) != 0 ) { + DBGC ( hid, "HID %s could not refill interrupt IN: %s\n", + hid->func->name, strerror ( rc ) ); + goto err_refill_in; + } + + /* Open interrupt OUT endpoint, if applicable */ + if ( hid->out.usb && + ( ( rc = usb_endpoint_open ( &hid->out ) ) != 0 ) ) { + DBGC ( hid, "HID %s could not open interrupt OUT: %s\n", + hid->func->name, strerror ( rc ) ); + goto err_open_out; + } + + return 0; + + usb_endpoint_close ( &hid->out ); + err_open_out: + err_refill_in: + usb_endpoint_close ( &hid->in ); + err_open_in: + return rc; +} + +/** + * Close USB human interface device + * + * @v hid USB human interface device + */ +void usbhid_close ( struct usb_hid *hid ) { + + /* Close interrupt OUT endpoint, if applicable */ + if ( hid->out.usb ) + usb_endpoint_close ( &hid->out ); + + /* Close interrupt IN endpoint */ + usb_endpoint_close ( &hid->in ); +} + +/** + * Refill USB human interface device endpoints + * + * @v hid USB human 
interface device + * @ret rc Return status code + */ +int usbhid_refill ( struct usb_hid *hid ) { + int rc; + + /* Refill interrupt IN endpoint */ + if ( ( rc = usb_refill ( &hid->in ) ) != 0 ) + return rc; + + /* Refill interrupt OUT endpoint, if applicable */ + if ( hid->out.usb && ( ( rc = usb_refill ( &hid->out ) ) != 0 ) ) + return rc; + + return 0; +} + +/** + * Describe USB human interface device + * + * @v hid USB human interface device + * @v config Configuration descriptor + * @ret rc Return status code + */ +int usbhid_describe ( struct usb_hid *hid, + struct usb_configuration_descriptor *config ) { + struct usb_interface_descriptor *desc; + int rc; + + /* Locate interface descriptor */ + desc = usb_interface_descriptor ( config, hid->func->interface[0], 0 ); + if ( ! desc ) { + DBGC ( hid, "HID %s has no interface descriptor\n", + hid->func->name ); + return -EINVAL; + } + + /* Describe interrupt IN endpoint */ + if ( ( rc = usb_endpoint_described ( &hid->in, config, desc, + USB_INTERRUPT_IN, 0 ) ) != 0 ) { + DBGC ( hid, "HID %s could not describe interrupt IN: %s\n", + hid->func->name, strerror ( rc ) ); + return rc; + } + + /* Describe interrupt OUT endpoint, if applicable */ + if ( hid->out.usb && + ( ( rc = usb_endpoint_described ( &hid->out, config, desc, + USB_INTERRUPT_OUT, 0 ) ) != 0 )){ + DBGC ( hid, "HID %s could not describe interrupt OUT: %s\n", + hid->func->name, strerror ( rc ) ); + return rc; + } + + return 0; +} diff --git a/src/drivers/usb/usbhub.c b/src/drivers/usb/usbhub.c new file mode 100644 index 00000000..28d6cb33 --- /dev/null +++ b/src/drivers/usb/usbhub.c @@ -0,0 +1,552 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include "usbhub.h" + +/** @file + * + * USB hub driver + * + */ + +/** + * Refill interrupt ring + * + * @v hubdev Hub device + */ +static void hub_refill ( struct usb_hub_device *hubdev ) { + int rc; + + /* Refill interrupt endpoint */ + if ( ( rc = usb_refill ( &hubdev->intr ) ) != 0 ) { + DBGC ( hubdev, "HUB %s could not refill interrupt: %s\n", + hubdev->name, strerror ( rc ) ); + /* Continue attempting to refill */ + return; + } + + /* Stop refill process */ + process_del ( &hubdev->refill ); +} + +/** Refill process descriptor */ +static struct process_descriptor hub_refill_desc = + PROC_DESC ( struct usb_hub_device, refill, hub_refill ); + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void hub_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usb_hub_device *hubdev = + container_of ( ep, struct usb_hub_device, intr ); + struct usb_hub *hub = hubdev->hub; + uint8_t *data = iobuf->data; + unsigned int bits = ( 8 * iob_len ( iobuf ) ); + unsigned int i; + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto done; + + /* Ignore packets with errors */ + if ( rc != 0 ) { + DBGC ( hubdev, "HUB %s interrupt failed: %s\n", + hubdev->name, strerror ( rc ) ); + DBGC_HDA ( hubdev, 0, iobuf->data, iob_len ( iobuf ) ); + goto done; + } + + /* Report any port status changes */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + + /* Sanity check */ + if ( i > bits ) { + DBGC ( hubdev, "HUB %s underlength interrupt:\n", + hubdev->name ); + DBGC_HDA ( hubdev, 0, iobuf->data, iob_len ( iobuf ) ); + goto done; + } + + /* Report port status change if applicable */ + if ( data[ i / 8 ] & ( 1 << ( i % 8 ) ) ) { + DBGC2 ( hubdev, "HUB %s port %d status changed\n", + hubdev->name, i ); + usb_port_changed ( usb_port ( hub, i ) ); + } + } + + done: + + /* Recycle I/O buffer */ + usb_recycle ( &hubdev->intr, iobuf ); + + /* Start refill process */ + process_add ( &hubdev->refill ); +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations usb_hub_intr_operations = { + .complete = hub_complete, +}; + +/** + * Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int hub_open ( struct usb_hub *hub ) { + struct usb_hub_device *hubdev = usb_hub_get_drvdata ( hub ); + struct usb_device *usb = hubdev->usb; + unsigned int i; + int rc; + + /* Ensure ports are powered */ + for ( i = 1 ; i <= hub->ports ; i++ ) { + if ( ( rc = usb_hub_set_port_feature ( usb, i, + USB_HUB_PORT_POWER, + 0 ) ) != 0 ) { + DBGC ( hubdev, "HUB %s port %d could not apply power: " + "%s\n", hubdev->name, i, strerror ( rc ) ); + goto err_power; + } + } + + /* Open interrupt endpoint */ + if ( ( rc = usb_endpoint_open ( &hubdev->intr ) ) != 0 ) { + DBGC ( hubdev, "HUB %s could not register interrupt: %s\n", + hubdev->name, strerror ( rc ) ); + goto err_open; + } + + /* Start refill process */ + process_add ( &hubdev->refill ); + + /* Refill interrupt ring */ + hub_refill ( hubdev ); + + /* Delay to allow ports to stabilise on out-of-spec hubs */ + if ( hubdev->flags 
& USB_HUB_SLOW_START ) + mdelay ( USB_HUB_SLOW_START_DELAY_MS ); + + return 0; + + usb_endpoint_close ( &hubdev->intr ); + err_open: + err_power: + return rc; +} + +/** + * Close hub + * + * @v hub USB hub + */ +static void hub_close ( struct usb_hub *hub ) { + struct usb_hub_device *hubdev = usb_hub_get_drvdata ( hub ); + + /* Close interrupt endpoint */ + usb_endpoint_close ( &hubdev->intr ); + + /* Stop refill process */ + process_del ( &hubdev->refill ); +} + +/** + * Enable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int hub_enable ( struct usb_hub *hub, struct usb_port *port ) { + struct usb_hub_device *hubdev = usb_hub_get_drvdata ( hub ); + struct usb_device *usb = hubdev->usb; + struct usb_hub_port_status status; + unsigned int current; + unsigned int i; + int rc; + + /* Initiate reset if applicable */ + if ( ( hub->protocol < USB_PROTO_3_0 ) && + ( ( rc = usb_hub_set_port_feature ( usb, port->address, + USB_HUB_PORT_RESET, 0 ) )!=0)){ + DBGC ( hubdev, "HUB %s port %d could not initiate reset: %s\n", + hubdev->name, port->address, strerror ( rc ) ); + return rc; + } + + /* Wait for port to become enabled */ + for ( i = 0 ; i < USB_HUB_ENABLE_MAX_WAIT_MS ; i++ ) { + + /* Check for port being enabled */ + if ( ( rc = usb_hub_get_port_status ( usb, port->address, + &status ) ) != 0 ) { + DBGC ( hubdev, "HUB %s port %d could not get status: " + "%s\n", hubdev->name, port->address, + strerror ( rc ) ); + return rc; + } + current = le16_to_cpu ( status.current ); + if ( current & ( 1 << USB_HUB_PORT_ENABLE ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( hubdev, "HUB %s port %d timed out waiting for enable\n", + hubdev->name, port->address ); + return -ETIMEDOUT; +} + +/** + * Disable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int hub_disable ( struct usb_hub *hub, struct usb_port *port ) { + struct usb_hub_device *hubdev = usb_hub_get_drvdata ( hub 
); + struct usb_device *usb = hubdev->usb; + int rc; + + /* Disable port */ + if ( ( hub->protocol < USB_PROTO_3_0 ) && + ( ( rc = usb_hub_clear_port_feature ( usb, port->address, + USB_HUB_PORT_ENABLE, + 0 ) ) != 0 ) ) { + DBGC ( hubdev, "HUB %s port %d could not disable: %s\n", + hubdev->name, port->address, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Clear port status change bits + * + * @v hubdev USB hub device + * @v port Port number + * @v changed Port status change bits + * @ret rc Return status code + */ +static int hub_clear_changes ( struct usb_hub_device *hubdev, + unsigned int port, uint16_t changed ) { + struct usb_device *usb = hubdev->usb; + unsigned int bit; + unsigned int feature; + int rc; + + /* Clear each set bit */ + for ( bit = 0 ; bit < 16 ; bit++ ) { + + /* Skip unset bits */ + if ( ! ( changed & ( 1 << bit ) ) ) + continue; + + /* Skip unused features */ + feature = USB_HUB_C_FEATURE ( bit ); + if ( ! ( hubdev->features & ( 1 << feature ) ) ) + continue; + + /* Clear bit */ + if ( ( rc = usb_hub_clear_port_feature ( usb, port, + feature, 0 ) ) != 0 ) { + DBGC ( hubdev, "HUB %s port %d could not clear feature " + "%d: %s\n", hubdev->name, port, feature, + strerror ( rc ) ); + return rc; + } + } + + return 0; +} + +/** + * Update port speed + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int hub_speed ( struct usb_hub *hub, struct usb_port *port ) { + struct usb_hub_device *hubdev = usb_hub_get_drvdata ( hub ); + struct usb_device *usb = hubdev->usb; + struct usb_hub_port_status status; + unsigned int current; + unsigned int changed; + int rc; + + /* Get port status */ + if ( ( rc = usb_hub_get_port_status ( usb, port->address, + &status ) ) != 0 ) { + DBGC ( hubdev, "HUB %s port %d could not get status: %s\n", + hubdev->name, port->address, strerror ( rc ) ); + return rc; + } + current = le16_to_cpu ( status.current ); + changed = le16_to_cpu ( status.changed ); + DBGC2 ( hubdev, 
"HUB %s port %d status is %04x:%04x\n", + hubdev->name, port->address, changed, current ); + + /* Update port speed */ + if ( current & ( 1 << USB_HUB_PORT_CONNECTION ) ) { + if ( hub->protocol >= USB_PROTO_3_0 ) { + port->speed = USB_SPEED_SUPER; + } else if ( current & ( 1 << USB_HUB_PORT_LOW_SPEED ) ) { + port->speed = USB_SPEED_LOW; + } else if ( current & ( 1 << USB_HUB_PORT_HIGH_SPEED ) ) { + port->speed = USB_SPEED_HIGH; + } else { + port->speed = USB_SPEED_FULL; + } + } else { + port->speed = USB_SPEED_NONE; + } + + /* Record disconnections */ + port->disconnected |= ( changed & ( 1 << USB_HUB_PORT_CONNECTION ) ); + + /* Clear port status change bits */ + if ( ( rc = hub_clear_changes ( hubdev, port->address, changed ) ) != 0) + return rc; + + return 0; +} + +/** + * Clear transaction translator buffer + * + * @v hub USB hub + * @v port USB port + * @v ep USB endpoint + * @ret rc Return status code + */ +static int hub_clear_tt ( struct usb_hub *hub, struct usb_port *port, + struct usb_endpoint *ep ) { + struct usb_hub_device *hubdev = usb_hub_get_drvdata ( hub ); + struct usb_device *usb = hubdev->usb; + int rc; + + /* Clear transaction translator buffer. All hubs must support + * single-TT operation; we simplify our code by supporting + * only this configuration. 
+ */ + if ( ( rc = usb_hub_clear_tt_buffer ( usb, ep->usb->address, + ep->address, ep->attributes, + USB_HUB_TT_SINGLE ) ) != 0 ) { + DBGC ( hubdev, "HUB %s port %d could not clear TT buffer: %s\n", + hubdev->name, port->address, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** USB hub operations */ +static struct usb_hub_driver_operations hub_operations = { + .open = hub_open, + .close = hub_close, + .enable = hub_enable, + .disable = hub_disable, + .speed = hub_speed, + .clear_tt = hub_clear_tt, +}; + +/** + * Probe USB hub + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int hub_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct usb_bus *bus = usb->port->hub->bus; + struct usb_hub_device *hubdev; + struct usb_interface_descriptor *interface; + union usb_hub_descriptor desc; + unsigned int depth; + unsigned int ports; + int enhanced; + int rc; + + /* Allocate and initialise structure */ + hubdev = zalloc ( sizeof ( *hubdev ) ); + if ( ! hubdev ) { + rc = -ENOMEM; + goto err_alloc; + } + enhanced = ( usb->port->protocol >= USB_PROTO_3_0 ); + hubdev->name = func->name; + hubdev->usb = usb; + hubdev->features = + ( enhanced ? USB_HUB_FEATURES_ENHANCED : USB_HUB_FEATURES ); + hubdev->flags = func->id->driver_data; + usb_endpoint_init ( &hubdev->intr, usb, &usb_hub_intr_operations ); + usb_refill_init ( &hubdev->intr, 0, 0, USB_HUB_INTR_FILL ); + process_init_stopped ( &hubdev->refill, &hub_refill_desc, NULL ); + + /* Locate hub interface descriptor */ + interface = usb_interface_descriptor ( config, func->interface[0], 0 ); + if ( ! 
interface ) { + DBGC ( hubdev, "HUB %s has no interface descriptor\n", + hubdev->name ); + rc = -EINVAL; + goto err_interface; + } + + /* Locate interrupt endpoint descriptor */ + if ( ( rc = usb_endpoint_described ( &hubdev->intr, config, interface, + USB_INTERRUPT_IN, 0 ) ) != 0 ) { + DBGC ( hubdev, "HUB %s could not describe interrupt endpoint: " + "%s\n", hubdev->name, strerror ( rc ) ); + goto err_endpoint; + } + + /* Set hub depth */ + depth = usb_depth ( usb ); + if ( enhanced ) { + if ( ( rc = usb_hub_set_hub_depth ( usb, depth ) ) != 0 ) { + DBGC ( hubdev, "HUB %s could not set hub depth to %d: " + "%s\n", hubdev->name, depth, strerror ( rc ) ); + goto err_set_hub_depth; + } + } + + /* Get hub descriptor */ + if ( ( rc = usb_hub_get_descriptor ( usb, enhanced, &desc ) ) != 0 ) { + DBGC ( hubdev, "HUB %s could not get hub descriptor: %s\n", + hubdev->name, strerror ( rc ) ); + goto err_hub_descriptor; + } + ports = desc.basic.ports; + DBGC ( hubdev, "HUB %s has %d ports at depth %d%s\n", hubdev->name, + ports, depth, ( enhanced ? " (enhanced)" : "" ) ); + + /* Allocate hub */ + hubdev->hub = alloc_usb_hub ( bus, usb, ports, &hub_operations ); + if ( ! 
hubdev->hub ) { + rc = -ENOMEM; + goto err_alloc_hub; + } + usb_hub_set_drvdata ( hubdev->hub, hubdev ); + + /* Register hub */ + if ( ( rc = register_usb_hub ( hubdev->hub ) ) != 0 ) { + DBGC ( hubdev, "HUB %s could not register: %s\n", + hubdev->name, strerror ( rc ) ); + goto err_register_hub; + } + + usb_func_set_drvdata ( func, hubdev ); + return 0; + + unregister_usb_hub ( hubdev->hub ); + err_register_hub: + free_usb_hub ( hubdev->hub ); + err_alloc_hub: + err_hub_descriptor: + err_set_hub_depth: + err_endpoint: + err_interface: + free ( hubdev ); + err_alloc: + return rc; +} + +/** + * Remove USB hub + * + * @v func USB function + * @ret rc Return status code + */ +static void hub_remove ( struct usb_function *func ) { + struct usb_hub_device *hubdev = usb_func_get_drvdata ( func ); + struct usb_hub *hub = hubdev->hub; + struct usb_device *usb = hubdev->usb; + struct usb_port *port; + unsigned int i; + + /* If hub has been unplugged, mark all ports as unplugged */ + if ( usb->port->disconnected ) { + for ( i = 1 ; i <= hub->ports ; i++ ) { + port = usb_port ( hub, i ); + port->disconnected = 1; + port->speed = USB_SPEED_NONE; + } + } + + /* Unregister hub */ + unregister_usb_hub ( hubdev->hub ); + assert ( ! 
process_running ( &hubdev->refill ) ); + + /* Free hub */ + free_usb_hub ( hubdev->hub ); + + /* Free hub device */ + free ( hubdev ); +} + +/** USB hub device IDs */ +static struct usb_device_id hub_ids[] = { + { + .name = "avocent-hub", + .vendor = 0x0624, + .product = 0x0248, + .driver_data = USB_HUB_SLOW_START, + }, + { + .name = "hub", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** USB hub driver */ +struct usb_driver usb_hub_driver __usb_driver = { + .ids = hub_ids, + .id_count = ( sizeof ( hub_ids ) / sizeof ( hub_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_HUB, 0, USB_ANY_ID ), + .score = USB_SCORE_NORMAL, + .probe = hub_probe, + .remove = hub_remove, +}; diff --git a/src/drivers/usb/usbhub.h b/src/drivers/usb/usbhub.h new file mode 100644 index 00000000..a5f123ac --- /dev/null +++ b/src/drivers/usb/usbhub.h @@ -0,0 +1,287 @@ +#ifndef _USBHUB_H +#define _USBHUB_H + +/** @file + * + * USB hubs + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Request recipient is a port */ +#define USB_HUB_RECIP_PORT ( 3 << 0 ) + +/** A basic USB hub descriptor */ +struct usb_hub_descriptor_basic { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Number of ports */ + uint8_t ports; + /** Characteristics */ + uint16_t characteristics; + /** Power-on delay (in 2ms intervals) */ + uint8_t delay; + /** Controller current (in mA) */ + uint8_t current; +} __attribute__ (( packed )); + +/** A basic USB hub descriptor */ +#define USB_HUB_DESCRIPTOR 41 + +/** An enhanced USB hub descriptor */ +struct usb_hub_descriptor_enhanced { + /** Basic USB hub descriptor */ + struct usb_hub_descriptor_basic basic; + /** Header decode latency */ + uint8_t latency; + /** Maximum delay */ + uint16_t delay; + /** Removable device bitmask */ + uint16_t removable; +} __attribute__ (( packed )); + +/** An enhanced USB hub descriptor */ +#define USB_HUB_DESCRIPTOR_ENHANCED 42 + +/** A USB hub descriptor */ +union
usb_hub_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Basic hub descriptor */ + struct usb_hub_descriptor_basic basic; + /** Enhanced hub descriptor */ + struct usb_hub_descriptor_enhanced enhanced; +} __attribute__ (( packed )); + +/** Port status */ +struct usb_hub_port_status { + /** Current status */ + uint16_t current; + /** Changed status */ + uint16_t changed; +} __attribute__ (( packed )); + +/** Current connect status feature */ +#define USB_HUB_PORT_CONNECTION 0 + +/** Port enabled/disabled feature */ +#define USB_HUB_PORT_ENABLE 1 + +/** Port reset feature */ +#define USB_HUB_PORT_RESET 4 + +/** Port power feature */ +#define USB_HUB_PORT_POWER 8 + +/** Low-speed device attached */ +#define USB_HUB_PORT_LOW_SPEED 9 + +/** High-speed device attached */ +#define USB_HUB_PORT_HIGH_SPEED 10 + +/** Connect status changed */ +#define USB_HUB_C_PORT_CONNECTION 16 + +/** Port enable/disable changed */ +#define USB_HUB_C_PORT_ENABLE 17 + +/** Suspend changed */ +#define USB_HUB_C_PORT_SUSPEND 18 + +/** Over-current indicator changed */ +#define USB_HUB_C_PORT_OVER_CURRENT 19 + +/** Reset changed */ +#define USB_HUB_C_PORT_RESET 20 + +/** Link state changed */ +#define USB_HUB_C_PORT_LINK_STATE 25 + +/** Configuration error */ +#define USB_HUB_C_PORT_CONFIG_ERROR 26 + +/** Calculate feature from change bit number */ +#define USB_HUB_C_FEATURE( bit ) ( 16 + (bit) ) + +/** USB features */ +#define USB_HUB_FEATURES \ + ( ( 1 << USB_HUB_C_PORT_CONNECTION ) | \ + ( 1 << USB_HUB_C_PORT_ENABLE ) | \ + ( 1 << USB_HUB_C_PORT_SUSPEND ) | \ + ( 1 << USB_HUB_C_PORT_OVER_CURRENT ) | \ + ( 1 << USB_HUB_C_PORT_RESET ) ) + +/** USB features for enhanced hubs */ +#define USB_HUB_FEATURES_ENHANCED \ + ( ( 1 << USB_HUB_C_PORT_CONNECTION ) | \ + ( 1 << USB_HUB_C_PORT_OVER_CURRENT ) | \ + ( 1 << USB_HUB_C_PORT_RESET ) | \ + ( 1 << USB_HUB_C_PORT_LINK_STATE ) | \ + ( 1 << USB_HUB_C_PORT_CONFIG_ERROR ) ) + +/** Set hub depth */ +#define 
USB_HUB_SET_HUB_DEPTH \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 12 ) ) + +/** Clear transaction translator buffer */ +#define USB_HUB_CLEAR_TT_BUFFER \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_HUB_RECIP_PORT | \ + USB_REQUEST_TYPE ( 8 ) ) + +/** + * Get hub descriptor + * + * @v usb USB device + * @v enhanced Hub is an enhanced hub + * @v data Hub descriptor to fill in + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_hub_get_descriptor ( struct usb_device *usb, int enhanced, + union usb_hub_descriptor *data ) { + unsigned int desc; + size_t len; + + /* Determine descriptor type and length */ + desc = ( enhanced ? USB_HUB_DESCRIPTOR_ENHANCED : USB_HUB_DESCRIPTOR ); + len = ( enhanced ? sizeof ( data->enhanced ) : sizeof ( data->basic ) ); + + return usb_get_descriptor ( usb, USB_TYPE_CLASS, desc, 0, 0, + &data->header, len ); +} + +/** + * Get port status + * + * @v usb USB device + * @v port Port address + * @v status Port status descriptor to fill in + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_hub_get_port_status ( struct usb_device *usb, unsigned int port, + struct usb_hub_port_status *status ) { + + return usb_get_status ( usb, ( USB_TYPE_CLASS | USB_HUB_RECIP_PORT ), + port, status, sizeof ( *status ) ); +} + +/** + * Clear port feature + * + * @v usb USB device + * @v port Port address + * @v feature Feature to clear + * @v index Index (when clearing a port indicator) + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_hub_clear_port_feature ( struct usb_device *usb, unsigned int port, + unsigned int feature, unsigned int index ) { + + return usb_clear_feature ( usb, ( USB_TYPE_CLASS | USB_HUB_RECIP_PORT ), + feature, ( ( index << 8 ) | port ) ); +} + +/** + * Set port feature + * + * @v usb USB device + * @v port Port address + * @v feature Feature to clear + * @v index Index (when clearing 
a port indicator) + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_hub_set_port_feature ( struct usb_device *usb, unsigned int port, + unsigned int feature, unsigned int index ) { + + return usb_set_feature ( usb, ( USB_TYPE_CLASS | USB_HUB_RECIP_PORT ), + feature, ( ( index << 8 ) | port ) ); +} + +/** + * Set hub depth + * + * @v usb USB device + * @v depth Hub depth + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_hub_set_hub_depth ( struct usb_device *usb, unsigned int depth ) { + + return usb_control ( usb, USB_HUB_SET_HUB_DEPTH, depth, 0, NULL, 0 ); +} + +/** + * Clear transaction translator buffer + * + * @v usb USB device + * @v device Device address + * @v endpoint Endpoint address + * @v attributes Endpoint attributes + * @v tt_port Transaction translator port (or 1 for single-TT hubs) + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_hub_clear_tt_buffer ( struct usb_device *usb, unsigned int device, + unsigned int endpoint, unsigned int attributes, + unsigned int tt_port ) { + unsigned int value; + + /* Calculate value */ + value = ( ( ( endpoint & USB_ENDPOINT_MAX ) << 0 ) | ( device << 4 ) | + ( ( attributes & USB_ENDPOINT_ATTR_TYPE_MASK ) << 11 ) | + ( ( endpoint & USB_ENDPOINT_IN ) << 8 ) ); + + return usb_control ( usb, USB_HUB_CLEAR_TT_BUFFER, value, + tt_port, NULL, 0 ); +} + +/** Transaction translator port value for single-TT hubs */ +#define USB_HUB_TT_SINGLE 1 + +/** A USB hub device */ +struct usb_hub_device { + /** Name */ + const char *name; + /** USB device */ + struct usb_device *usb; + /** USB hub */ + struct usb_hub *hub; + /** Features */ + unsigned int features; + /** Flags */ + unsigned int flags; + + /** Interrupt endpoint */ + struct usb_endpoint intr; + /** Interrupt endpoint refill process */ + struct process refill; +}; + +/** Hub requires additional settling delay */ +#define USB_HUB_SLOW_START 
0x0001 + +/** Additional settling delay for out-of-spec hubs */ +#define USB_HUB_SLOW_START_DELAY_MS 500 + +/** Interrupt ring fill level + * + * This is a policy decision. + */ +#define USB_HUB_INTR_FILL 4 + +/** Maximum time to wait for port to become enabled + * + * This is a policy decision. + */ +#define USB_HUB_ENABLE_MAX_WAIT_MS 100 + +#endif /* _USBHUB_H */ diff --git a/src/drivers/usb/usbio.c b/src/drivers/usb/usbio.c new file mode 100644 index 00000000..278b43cd --- /dev/null +++ b/src/drivers/usb/usbio.c @@ -0,0 +1,1730 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "usbio.h" + +/** @file + * + * EFI_USB_IO_PROTOCOL pseudo Host Controller Interface driver + * + * + * The EFI_USB_IO_PROTOCOL is an almost unbelievably poorly designed + * abstraction of a USB device.
It would be just about forgivable for + * an API to support only synchronous operation for bulk OUT + * endpoints. It is imbecilic to support only synchronous operation + * for bulk IN endpoints. This apparently intern-designed API + * throttles a typical NIC down to 1.5% of its maximum throughput. + * That isn't a typo. It really is that slow. + * + * We can't even work around this stupidity by talking to the host + * controller abstraction directly, because an identical limitation + * exists in the EFI_USB2_HC_PROTOCOL. + * + * Unless you derive therapeutic value from watching download progress + * indicators lethargically creep through every single integer from 0 + * to 100, you should use iPXE's native USB host controller drivers + * instead. (Or just upgrade from UEFI to "legacy" BIOS, which will + * produce a similar speed increase.) + * + * + * For added excitement, the EFI_USB_IO_PROTOCOL makes the + * (demonstrably incorrect) assumption that a USB driver needs to + * attach to exactly one interface within a USB device, and provides a + * helper method to retrieve "the" interface descriptor. Since pretty + * much every USB network device requires binding to a pair of + * control+data interfaces, this aspect of EFI_USB_IO_PROTOCOL is of + * no use to us. + * + * We have our own existing code for reading USB descriptors, so we + * don't actually care that the UsbGetInterfaceDescriptor() method + * provided by EFI_USB_IO_PROTOCOL is useless for network devices. We + * can read the descriptors ourselves (via UsbControlTransfer()) and + * get all of the information we need this way. We can even work + * around the fact that EFI_USB_IO_PROTOCOL provides separate handles + * for each of the two interfaces comprising our network device. + * + * However, if we discover that we need to select an alternative + * device configuration (e.g. for devices exposing both RNDIS and + * ECM), then all hell breaks loose. 
EFI_USB_IO_PROTOCOL starts to + * panic because its cached interface and endpoint descriptors will no + * longer be valid. As mentioned above, the cached descriptors are + * useless for network devices anyway so we _really_ don't care about + * this, but EFI_USB_IO_PROTOCOL certainly cares. It prints out a + * manic warning message containing no fewer than six exclamation + * marks and then literally commits seppuku in the middle of the + * UsbControlTransfer() method by attempting to uninstall itself. + * Quite how the caller is supposed to react when asked to stop using + * the EFI_USB_IO_PROTOCOL instance while in the middle of an + * uninterruptible call to said instance is left as an exercise for + * the interested reader. + * + * There is no sensible way to work around this, so we just + * preemptively fail if asked to change the device configuration, on + * the basis that reporting a sarcastic error message is often + * preferable to jumping through a NULL pointer and crashing the + * system. 
+ */ + +/* Disambiguate the various error causes */ +#define ENOTSUP_MORONIC_SPECIFICATION \ + __einfo_error ( EINFO_ENOTSUP_MORONIC_SPECIFICATION ) +#define EINFO_ENOTSUP_MORONIC_SPECIFICATION \ + __einfo_uniqify ( EINFO_ENOTSUP, 0x01, \ + "EFI_USB_IO_PROTOCOL was designed by morons" ) + +/****************************************************************************** + * + * Device model + * + ****************************************************************************** + */ + +/** + * Determine endpoint interface number + * + * @v usbio USB I/O device + * @v ep USB Endpoint + * @ret interface Interface number, or negative error + */ +static int usbio_interface ( struct usbio_device *usbio, + struct usb_endpoint *ep ) { + EFI_HANDLE handle = usbio->handle; + struct usb_device *usb = ep->usb; + struct usb_configuration_descriptor *config; + struct usb_interface_descriptor *interface; + struct usb_endpoint_descriptor *endpoint; + struct usb_function *func; + unsigned int i; + + /* The control endpoint is not part of a described interface */ + if ( ep->address == USB_EP0_ADDRESS ) + return 0; + + /* Iterate over all interface descriptors looking for a match */ + config = usbio->config; + for_each_config_descriptor ( interface, config ) { + + /* Skip non-interface descriptors */ + if ( interface->header.type != USB_INTERFACE_DESCRIPTOR ) + continue; + + /* Iterate over all endpoint descriptors looking for a match */ + for_each_interface_descriptor ( endpoint, config, interface ) { + + /* Skip non-endpoint descriptors */ + if ( endpoint->header.type != USB_ENDPOINT_DESCRIPTOR ) + continue; + + /* Check endpoint address */ + if ( endpoint->endpoint != ep->address ) + continue; + + /* Check interface belongs to this function */ + list_for_each_entry ( func, &usb->functions, list ) { + + /* Skip non-matching functions */ + if ( func->interface[0] != usbio->first ) + continue; + + /* Iterate over all interfaces for a match */ + for ( i = 0 ; i < func->desc.count ; i++ ) { 
+ if ( interface->interface == + func->interface[i] ) + return interface->interface; + } + } + } + } + + DBGC ( usbio, "USBIO %s cannot find interface for %s", + efi_handle_name ( handle ), usb_endpoint_name ( ep ) ); + return -ENOENT; +} + +/** + * Open USB I/O interface + * + * @v usbio USB I/O device + * @v interface Interface number + * @ret rc Return status code + */ +static int usbio_open ( struct usbio_device *usbio, unsigned int interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE handle = usbio->handle; + struct usbio_interface *intf = &usbio->interface[interface]; + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + USB_DEVICE_PATH *usbpath; + union { + void *interface; + EFI_USB_IO_PROTOCOL *io; + } u; + EFI_STATUS efirc; + int rc; + + /* Sanity check */ + assert ( interface < usbio->config->interfaces ); + + /* If interface is already open, just increment the usage count */ + if ( intf->count ) { + intf->count++; + return 0; + } + + /* Construct device path for this interface */ + path = usbio->path; + usbpath = usbio->usbpath; + usbpath->InterfaceNumber = interface; + end = efi_path_end ( path ); + + /* Locate handle for this endpoint's interface */ + if ( ( efirc = bs->LocateDevicePath ( &efi_usb_io_protocol_guid, &path, + &intf->handle ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s could not locate ", + efi_handle_name ( handle ) ); + DBGC ( usbio, "%s: %s\n", + efi_devpath_text ( usbio->path ), strerror ( rc ) ); + return rc; + } + + /* Check that expected path was located */ + if ( path != end ) { + DBGC ( usbio, "USBIO %s located incomplete ", + efi_handle_name ( handle ) ); + DBGC ( usbio, "%s\n", efi_handle_name ( intf->handle ) ); + return -EXDEV; + } + + /* Open USB I/O protocol on this handle */ + if ( ( efirc = bs->OpenProtocol ( intf->handle, + &efi_usb_io_protocol_guid, + &u.interface, efi_image_handle, + intf->handle, + ( EFI_OPEN_PROTOCOL_BY_DRIVER | + EFI_OPEN_PROTOCOL_EXCLUSIVE 
)))!=0){ + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s cannot open ", + efi_handle_name ( handle ) ); + DBGC ( usbio, "%s: %s\n", + efi_handle_name ( intf->handle ), strerror ( rc ) ); + DBGC_EFI_OPENERS ( usbio, intf->handle, + &efi_usb_io_protocol_guid ); + return rc; + } + intf->io = u.io; + + /* Increment usage count */ + intf->count++; + + return 0; +} + +/** + * Close USB I/O interface + * + * @v usbio USB I/O device + * @v interface Interface number + */ +static void usbio_close ( struct usbio_device *usbio, unsigned int interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct usbio_interface *intf = &usbio->interface[interface]; + + /* Sanity checks */ + assert ( interface < usbio->config->interfaces ); + assert ( intf->count > 0 ); + + /* Decrement usage count */ + intf->count--; + + /* Do nothing if interface is still in use */ + if ( intf->count ) + return; + + /* Close USB I/O protocol */ + bs->CloseProtocol ( intf->handle, &efi_usb_io_protocol_guid, + efi_image_handle, intf->handle ); +} + +/****************************************************************************** + * + * Control endpoints + * + ****************************************************************************** + */ + +/** + * Open control endpoint + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int usbio_control_open ( struct usbio_endpoint *endpoint __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close control endpoint + * + * @v endpoint Endpoint + */ +static void usbio_control_close ( struct usbio_endpoint *endpoint __unused ) { + + /* Nothing to do */ +} + +/** + * Poll control endpoint + * + * @v endpoint Endpoint + */ +static void usbio_control_poll ( struct usbio_endpoint *endpoint ) { + struct usbio_device *usbio = endpoint->usbio; + struct usb_endpoint *ep = endpoint->ep; + EFI_HANDLE handle = usbio->handle; + EFI_USB_IO_PROTOCOL *io; + union { + struct usb_setup_packet setup; + EFI_USB_DEVICE_REQUEST efi; + } 
*msg; + EFI_USB_DATA_DIRECTION direction; + struct io_buffer *iobuf; + unsigned int index; + unsigned int flags; + unsigned int recipient; + unsigned int interface; + uint16_t request; + void *data; + size_t len; + UINT32 status; + EFI_STATUS efirc; + int rc; + + /* Do nothing if ring is empty */ + if ( endpoint->cons == endpoint->prod ) + return; + + /* Consume next transfer */ + index = ( endpoint->cons++ % USBIO_RING_COUNT ); + iobuf = endpoint->iobuf[index]; + flags = endpoint->flags[index]; + + /* Sanity check */ + if ( ! ( flags & USBIO_MESSAGE ) ) { + DBGC ( usbio, "USBIO %s %s non-message transfer\n", + efi_handle_name ( handle ), usb_endpoint_name ( ep ) ); + rc = -ENOTSUP; + goto err_not_message; + } + + /* Construct transfer */ + msg = iob_push ( iobuf, sizeof ( *msg ) ); + iob_pull ( iobuf, sizeof ( *msg ) ); + request = le16_to_cpu ( msg->setup.request ); + len = iob_len ( iobuf ); + if ( len ) { + data = iobuf->data; + direction = ( ( request & USB_DIR_IN ) ? + EfiUsbDataIn : EfiUsbDataOut ); + } else { + data = NULL; + direction = EfiUsbNoData; + } + + /* Determine interface for this transfer */ + recipient = ( request & USB_RECIP_MASK ); + if ( recipient == USB_RECIP_INTERFACE ) { + /* Recipient is an interface: use interface number directly */ + interface = le16_to_cpu ( msg->setup.index ); + } else { + /* Route all other requests through the first interface */ + interface = 0; + } + + /* Open interface */ + if ( ( rc = usbio_open ( usbio, interface ) ) != 0 ) + goto err_open; + io = usbio->interface[interface].io; + + /* Due to the design of EFI_USB_IO_PROTOCOL, attempting to set + * the configuration to a non-default value is basically a + * self-destruct button. 
+ */ + if ( ( request == USB_SET_CONFIGURATION ) && + ( le16_to_cpu ( msg->setup.value ) != usbio->config->config ) ) { + rc = -ENOTSUP_MORONIC_SPECIFICATION; + DBGC ( usbio, "USBIO %s cannot change configuration: %s\n", + efi_handle_name ( handle ), strerror ( rc ) ); + goto err_moronic_specification; + } + + /* Submit transfer */ + if ( ( efirc = io->UsbControlTransfer ( io, &msg->efi, direction, 0, + data, len, &status ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s %s could not submit control transfer ", + efi_handle_name ( handle ), usb_endpoint_name ( ep ) ); + DBGC ( usbio, "via %s: %s (status %04x)\n", + efi_handle_name ( usbio->interface[interface].handle ), + strerror ( rc ), status ); + goto err_transfer; + } + + /* Close interface */ + usbio_close ( usbio, interface ); + + /* Complete transfer */ + usb_complete ( ep, iobuf ); + + return; + + err_transfer: + err_moronic_specification: + usbio_close ( usbio, interface ); + err_open: + err_not_message: + usb_complete_err ( ep, iobuf, rc ); +} + +/** Control endpoint operations */ +static struct usbio_operations usbio_control_operations = { + .open = usbio_control_open, + .close = usbio_control_close, + .poll = usbio_control_poll, +}; + +/****************************************************************************** + * + * Bulk IN endpoints + * + ****************************************************************************** + */ + +/** + * Open bulk IN endpoint + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int usbio_bulk_in_open ( struct usbio_endpoint *endpoint __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close bulk IN endpoint + * + * @v endpoint Endpoint + */ +static void usbio_bulk_in_close ( struct usbio_endpoint *endpoint __unused ) { + + /* Nothing to do */ +} + +/** + * Poll bulk IN endpoint + * + * @v endpoint Endpoint + */ +static void usbio_bulk_in_poll ( struct usbio_endpoint *endpoint ) { + struct usbio_device *usbio = 
endpoint->usbio; + struct usb_endpoint *ep = endpoint->ep; + EFI_USB_IO_PROTOCOL *io = endpoint->io; + EFI_HANDLE handle = usbio->handle; + struct io_buffer *iobuf; + unsigned int index; + UINTN len; + UINT32 status; + EFI_STATUS efirc; + int rc; + + /* Do nothing if ring is empty */ + if ( endpoint->cons == endpoint->prod ) + return; + + /* Attempt (but do not yet consume) next transfer */ + index = ( endpoint->cons % USBIO_RING_COUNT ); + iobuf = endpoint->iobuf[index]; + + /* Construct transfer */ + len = iob_len ( iobuf ); + + /* Upon being turned on, the EFI_USB_IO_PROTOCOL did nothing + * for several minutes before firing a small ARP packet a few + * millimetres into the ether. + */ + efirc = io->UsbBulkTransfer ( io, ep->address, iobuf->data, + &len, 1, &status ); + if ( efirc == EFI_TIMEOUT ) + return; + + /* Consume transfer */ + endpoint->cons++; + + /* Check for failure */ + if ( efirc != 0 ) { + rc = -EEFI ( efirc ); + DBGC2 ( usbio, "USBIO %s %s could not submit bulk IN transfer: " + "%s (status %04x)\n", efi_handle_name ( handle ), + usb_endpoint_name ( ep ), strerror ( rc ), status ); + usb_complete_err ( ep, iobuf, rc ); + return; + } + + /* Update length */ + iob_put ( iobuf, ( len - iob_len ( iobuf ) ) ); + + /* Complete transfer */ + usb_complete ( ep, iobuf ); +} + +/** Bulk endpoint operations */ +static struct usbio_operations usbio_bulk_in_operations = { + .open = usbio_bulk_in_open, + .close = usbio_bulk_in_close, + .poll = usbio_bulk_in_poll, +}; + +/****************************************************************************** + * + * Bulk OUT endpoints + * + ****************************************************************************** + */ + +/** + * Open bulk OUT endpoint + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int usbio_bulk_out_open ( struct usbio_endpoint *endpoint __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close bulk OUT endpoint + * + * @v endpoint Endpoint + */ +static void 
usbio_bulk_out_close ( struct usbio_endpoint *endpoint __unused ) { + + /* Nothing to do */ +} + +/** + * Poll bulk OUT endpoint + * + * @v endpoint Endpoint + */ +static void usbio_bulk_out_poll ( struct usbio_endpoint *endpoint ) { + struct usbio_device *usbio = endpoint->usbio; + struct usb_endpoint *ep = endpoint->ep; + EFI_USB_IO_PROTOCOL *io = endpoint->io; + EFI_HANDLE handle = usbio->handle; + struct io_buffer *iobuf; + unsigned int index; + unsigned int flags; + UINTN len; + UINT32 status; + EFI_STATUS efirc; + int rc; + + /* Do nothing if ring is empty */ + if ( endpoint->cons == endpoint->prod ) + return; + + /* Consume next transfer */ + index = ( endpoint->cons++ % USBIO_RING_COUNT ); + iobuf = endpoint->iobuf[index]; + flags = endpoint->flags[index]; + + /* Construct transfer */ + len = iob_len ( iobuf ); + + /* Submit transfer */ + if ( ( efirc = io->UsbBulkTransfer ( io, ep->address, iobuf->data, + &len, 0, &status ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s %s could not submit bulk OUT transfer: " + "%s (status %04x)\n", efi_handle_name ( handle ), + usb_endpoint_name ( ep ), strerror ( rc ), status ); + goto err; + } + + /* Update length */ + iob_put ( iobuf, ( len - iob_len ( iobuf ) ) ); + + /* Submit zero-length transfer if required */ + len = 0; + if ( ( flags & USBIO_ZLEN ) && + ( efirc = io->UsbBulkTransfer ( io, ep->address, NULL, &len, 0, + &status ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s %s could not submit zero-length " + "transfer: %s (status %04x)\n", + efi_handle_name ( handle ), usb_endpoint_name ( ep ), + strerror ( rc ), status ); + goto err; + } + + /* Complete transfer */ + usb_complete ( ep, iobuf ); + + return; + + err: + usb_complete_err ( ep, iobuf, rc ); +} + +/** Bulk endpoint operations */ +static struct usbio_operations usbio_bulk_out_operations = { + .open = usbio_bulk_out_open, + .close = usbio_bulk_out_close, + .poll = usbio_bulk_out_poll, +}; + 
+/****************************************************************************** + * + * Interrupt endpoints + * + ****************************************************************************** + * + * The EFI_USB_IO_PROTOCOL provides two ways to interact with + * interrupt endpoints, neither of which naturally model the hardware + * interaction. The UsbSyncInterruptTransfer() method imposes + * a 1ms overhead for every interrupt transfer (which could result in + * up to a 50% decrease in overall throughput for the device). The + * UsbAsyncInterruptTransfer() method provides no way for us to + * prevent transfers when no I/O buffers are available. + * + * We work around this design by utilising a small, fixed ring buffer + * into which the interrupt callback delivers the data. This aims to + * provide buffer space even if no I/O buffers have yet been enqueued. + * The scheme is not guaranteed since the fixed ring buffer may also + * become full. However: + * + * - devices which send a constant stream of interrupts (and which + * therefore might exhaust the fixed ring buffer) tend to be + * responding to every interrupt request, and can tolerate lost + * packets, and + * + * - devices which cannot tolerate lost interrupt packets tend to send + * only a few small messages. + * + * The scheme should therefore work in practice.
+ */ + +/** + * Interrupt endpoint callback + * + * @v data Received data + * @v len Length of received data + * @v context Callback context + * @v status Transfer status + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI usbio_interrupt_callback ( VOID *data, UINTN len, + VOID *context, + UINT32 status ) { + struct usbio_interrupt_ring *intr = context; + struct usbio_endpoint *endpoint = intr->endpoint; + struct usbio_device *usbio = endpoint->usbio; + struct usb_endpoint *ep = endpoint->ep; + EFI_HANDLE handle = usbio->handle; + unsigned int fill; + unsigned int index; + + /* Sanity check */ + assert ( len <= ep->mtu ); + + /* Do nothing if ring is full */ + fill = ( intr->prod - intr->cons ); + if ( fill >= USBIO_INTR_COUNT ) { + DBGC ( usbio, "USBIO %s %s dropped interrupt completion\n", + efi_handle_name ( handle ), usb_endpoint_name ( ep ) ); + return 0; + } + + /* Do nothing if transfer was unsuccessful */ + if ( status != 0 ) { + DBGC ( usbio, "USBIO %s %s interrupt completion status %04x\n", + efi_handle_name ( handle ), usb_endpoint_name ( ep ), + status ); + return 0; /* Unclear what failure actually means here */ + } + + /* Copy data to buffer and increment producer counter */ + index = ( intr->prod % USBIO_INTR_COUNT ); + memcpy ( intr->data[index], data, len ); + intr->len[index] = len; + intr->prod++; + + return 0; +} + +/** + * Open interrupt endpoint + * + * @v endpoint Endpoint + * @ret rc Return status code + */ +static int usbio_interrupt_open ( struct usbio_endpoint *endpoint ) { + struct usbio_device *usbio = endpoint->usbio; + struct usbio_interrupt_ring *intr; + struct usb_endpoint *ep = endpoint->ep; + EFI_USB_IO_PROTOCOL *io = endpoint->io; + EFI_HANDLE handle = usbio->handle; + unsigned int interval; + unsigned int i; + void *data; + EFI_STATUS efirc; + int rc; + + /* Allocate interrupt ring buffer */ + intr = zalloc ( sizeof ( *intr ) + ( USBIO_INTR_COUNT * ep->mtu ) ); + if ( !
intr ) { + rc = -ENOMEM; + goto err_alloc; + } + endpoint->intr = intr; + intr->endpoint = endpoint; + data = ( ( ( void * ) intr ) + sizeof ( *intr ) ); + for ( i = 0 ; i < USBIO_INTR_COUNT ; i++ ) { + intr->data[i] = data; + data += ep->mtu; + } + + /* Determine polling interval */ + interval = ( ep->interval >> 3 /* microframes -> milliseconds */ ); + if ( ! interval ) + interval = 1; /* May not be zero */ + + /* Add to periodic schedule */ + if ( ( efirc = io->UsbAsyncInterruptTransfer ( io, ep->address, TRUE, + interval, ep->mtu, + usbio_interrupt_callback, + intr ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s %s could not schedule interrupt " + "transfer: %s\n", efi_handle_name ( handle ), + usb_endpoint_name ( ep ), strerror ( rc ) ); + goto err_schedule; + } + + return 0; + + io->UsbAsyncInterruptTransfer ( io, ep->address, FALSE, 0, 0, + NULL, NULL ); + err_schedule: + free ( intr ); + err_alloc: + return rc; +} + +/** + * Close interrupt endpoint + * + * @v endpoint Endpoint + */ +static void usbio_interrupt_close ( struct usbio_endpoint *endpoint ) { + struct usb_endpoint *ep = endpoint->ep; + EFI_USB_IO_PROTOCOL *io = endpoint->io; + + /* Remove from periodic schedule */ + io->UsbAsyncInterruptTransfer ( io, ep->address, FALSE, 0, 0, + NULL, NULL ); + + /* Free interrupt ring buffer */ + free ( endpoint->intr ); +} + +/** + * Poll interrupt endpoint + * + * @v endpoint Endpoint + */ +static void usbio_interrupt_poll ( struct usbio_endpoint *endpoint ) { + struct usbio_interrupt_ring *intr = endpoint->intr; + struct usb_endpoint *ep = endpoint->ep; + struct io_buffer *iobuf; + unsigned int index; + unsigned int intr_index; + size_t len; + + /* Do nothing if ring is empty */ + if ( endpoint->cons == endpoint->prod ) + return; + + /* Do nothing if interrupt ring is empty */ + if ( intr->cons == intr->prod ) + return; + + /* Consume next transfer */ + index = ( endpoint->cons++ % USBIO_RING_COUNT ); + iobuf = endpoint->iobuf[index]; + + /* 
Populate I/O buffer */ + intr_index = ( intr->cons++ % USBIO_INTR_COUNT ); + len = intr->len[intr_index]; + assert ( len <= iob_len ( iobuf ) ); + iob_put ( iobuf, ( len - iob_len ( iobuf ) ) ); + memcpy ( iobuf->data, intr->data[intr_index], len ); + + /* Complete transfer */ + usb_complete ( ep, iobuf ); +} + +/** Interrupt endpoint operations */ +static struct usbio_operations usbio_interrupt_operations = { + .open = usbio_interrupt_open, + .close = usbio_interrupt_close, + .poll = usbio_interrupt_poll, +}; + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Open endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int usbio_endpoint_open ( struct usb_endpoint *ep ) { + struct usb_bus *bus = ep->usb->port->hub->bus; + struct usbio_device *usbio = usb_bus_get_hostdata ( bus ); + struct usbio_endpoint *endpoint; + EFI_HANDLE handle = usbio->handle; + unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + int interface; + int rc; + + /* Allocate and initialise structure */ + endpoint = zalloc ( sizeof ( *endpoint ) ); + if ( ! endpoint ) { + rc = -ENOMEM; + goto err_alloc; + } + usb_endpoint_set_hostdata ( ep, endpoint ); + endpoint->usbio = usbio; + endpoint->ep = ep; + + /* Identify endpoint operations */ + if ( attr == USB_ENDPOINT_ATTR_CONTROL ) { + endpoint->op = &usbio_control_operations; + } else if ( attr == USB_ENDPOINT_ATTR_BULK ) { + endpoint->op = ( ( ep->address & USB_DIR_IN ) ? 
+ &usbio_bulk_in_operations : + &usbio_bulk_out_operations ); + } else if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) { + endpoint->op = &usbio_interrupt_operations; + } else { + rc = -ENOTSUP; + goto err_operations; + } + + /* Identify interface for this endpoint */ + interface = usbio_interface ( usbio, ep ); + if ( interface < 0 ) { + rc = interface; + goto err_interface; + } + endpoint->interface = interface; + + /* Open interface */ + if ( ( rc = usbio_open ( usbio, interface ) ) != 0 ) + goto err_open_interface; + endpoint->handle = usbio->interface[interface].handle; + endpoint->io = usbio->interface[interface].io; + DBGC ( usbio, "USBIO %s %s using ", + efi_handle_name ( handle ), usb_endpoint_name ( ep ) ); + DBGC ( usbio, "%s\n", efi_handle_name ( endpoint->handle ) ); + + /* Open endpoint */ + if ( ( rc = endpoint->op->open ( endpoint ) ) != 0 ) + goto err_open_endpoint; + + /* Add to list of endpoints */ + list_add_tail ( &endpoint->list, &usbio->endpoints ); + + return 0; + + list_del ( &endpoint->list ); + endpoint->op->close ( endpoint ); + err_open_endpoint: + usbio_close ( usbio, interface ); + err_open_interface: + err_interface: + err_operations: + free ( endpoint ); + err_alloc: + return rc; +} + +/** + * Close endpoint + * + * @v ep USB endpoint + */ +static void usbio_endpoint_close ( struct usb_endpoint *ep ) { + struct usbio_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct usbio_device *usbio = endpoint->usbio; + struct io_buffer *iobuf; + unsigned int index; + + /* Remove from list of endpoints */ + list_del ( &endpoint->list ); + + /* Close endpoint */ + endpoint->op->close ( endpoint ); + + /* Close interface */ + usbio_close ( usbio, endpoint->interface ); + + /* Cancel any incomplete transfers */ + while ( endpoint->cons != endpoint->prod ) { + index = ( endpoint->cons++ % USBIO_RING_COUNT ); + iobuf = endpoint->iobuf[index]; + usb_complete_err ( ep, iobuf, -ECANCELED ); + } + + /* Free endpoint */ + free ( endpoint ); +} + 
+/** + * Reset endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int usbio_endpoint_reset ( struct usb_endpoint *ep __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Update MTU + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int usbio_endpoint_mtu ( struct usb_endpoint *ep __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Enqueue transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v flags Transfer flags + * @ret rc Return status code + */ +static int usbio_endpoint_enqueue ( struct usb_endpoint *ep, + struct io_buffer *iobuf, + unsigned int flags ) { + struct usbio_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + unsigned int fill; + unsigned int index; + + /* Fail if shutdown is in progress */ + if ( efi_shutdown_in_progress ) + return -ECANCELED; + + /* Fail if transfer ring is full */ + fill = ( endpoint->prod - endpoint->cons ); + if ( fill >= USBIO_RING_COUNT ) + return -ENOBUFS; + + /* Add to ring */ + index = ( endpoint->prod++ % USBIO_RING_COUNT ); + endpoint->iobuf[index] = iobuf; + endpoint->flags[index] = flags; + + return 0; +} + +/** + * Enqueue message transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int usbio_endpoint_message ( struct usb_endpoint *ep, + struct io_buffer *iobuf ) { + struct usb_setup_packet *setup; + + /* Adjust I/O buffer to start of data payload */ + assert ( iob_len ( iobuf ) >= sizeof ( *setup ) ); + iob_pull ( iobuf, sizeof ( *setup ) ); + + /* Enqueue transfer */ + return usbio_endpoint_enqueue ( ep, iobuf, USBIO_MESSAGE ); +} + +/** + * Enqueue stream transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v zlp Append a zero-length packet + * @ret rc Return status code + */ +static int usbio_endpoint_stream ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int zlp ) { + + /* Enqueue transfer */ + return usbio_endpoint_enqueue ( ep, iobuf, ( zlp ? 
USBIO_ZLEN : 0 ) ); +} + +/** + * Poll for completions + * + * @v endpoint Endpoint + */ +static void usbio_endpoint_poll ( struct usbio_endpoint *endpoint ) { + + /* Do nothing if shutdown is in progress */ + if ( efi_shutdown_in_progress ) + return; + + /* Poll endpoint */ + endpoint->op->poll ( endpoint ); +} + +/****************************************************************************** + * + * Device operations + * + ****************************************************************************** + */ + +/** + * Open device + * + * @v usb USB device + * @ret rc Return status code + */ +static int usbio_device_open ( struct usb_device *usb ) { + struct usbio_device *usbio = + usb_bus_get_hostdata ( usb->port->hub->bus ); + + usb_set_hostdata ( usb, usbio ); + return 0; +} + +/** + * Close device + * + * @v usb USB device + */ +static void usbio_device_close ( struct usb_device *usb __unused ) { + + /* Nothing to do */ +} + +/** + * Assign device address + * + * @v usb USB device + * @ret rc Return status code + */ +static int usbio_device_address ( struct usb_device *usb __unused ) { + + /* Nothing to do */ + return 0; +} + +/****************************************************************************** + * + * Hub operations + * + ****************************************************************************** + */ + +/** + * Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int usbio_hub_open ( struct usb_hub *hub ) { + + /* Disallow non-root hubs */ + if ( hub->usb ) + return -ENOTSUP; + + /* Nothing to do */ + return 0; +} + +/** + * Close hub + * + * @v hub USB hub + */ +static void usbio_hub_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/****************************************************************************** + * + * Root hub operations + * + ****************************************************************************** + */ + +/** + * Open root hub + * + * @v hub USB hub + * @ret rc Return status 
code + */ +static int usbio_root_open ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close root hub + * + * @v hub USB hub + */ +static void usbio_root_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/** + * Enable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int usbio_root_enable ( struct usb_hub *hub __unused, + struct usb_port *port __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Disable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int usbio_root_disable ( struct usb_hub *hub __unused, + struct usb_port *port __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Update root hub port speed + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int usbio_root_speed ( struct usb_hub *hub __unused, + struct usb_port *port ) { + + /* Not actually exposed via EFI_USB_IO_PROTOCOL */ + port->speed = USB_SPEED_HIGH; + return 0; +} + +/** + * Clear transaction translator buffer + * + * @v hub USB hub + * @v port USB port + * @v ep USB endpoint + * @ret rc Return status code + */ +static int usbio_root_clear_tt ( struct usb_hub *hub __unused, + struct usb_port *port __unused, + struct usb_endpoint *ep __unused ) { + + /* Should never be called; this is a root hub */ + return -ENOTSUP; +} + +/****************************************************************************** + * + * Bus operations + * + ****************************************************************************** + */ + +/** + * Open USB bus + * + * @v bus USB bus + * @ret rc Return status code + */ +static int usbio_bus_open ( struct usb_bus *bus __unused ) { + + /* Nothing to do */ + return 0; +} + +/** + * Close USB bus + * + * @v bus USB bus + */ +static void usbio_bus_close ( struct usb_bus *bus __unused ) { + + /* Nothing to do */ +} + +/** + * Poll USB bus + * + * @v bus USB bus + */ +static 
void usbio_bus_poll ( struct usb_bus *bus ) { + struct usbio_device *usbio = usb_bus_get_hostdata ( bus ); + struct usbio_endpoint *endpoint; + + /* Poll all endpoints. We trust that completion handlers are + * minimal and will not do anything that could plausibly + * affect the endpoint list itself. + */ + list_for_each_entry ( endpoint, &usbio->endpoints, list ) + usbio_endpoint_poll ( endpoint ); +} + +/****************************************************************************** + * + * EFI driver interface + * + ****************************************************************************** + */ + +/** USB I/O host controller driver operations */ +static struct usb_host_operations usbio_operations = { + .endpoint = { + .open = usbio_endpoint_open, + .close = usbio_endpoint_close, + .reset = usbio_endpoint_reset, + .mtu = usbio_endpoint_mtu, + .message = usbio_endpoint_message, + .stream = usbio_endpoint_stream, + }, + .device = { + .open = usbio_device_open, + .close = usbio_device_close, + .address = usbio_device_address, + }, + .bus = { + .open = usbio_bus_open, + .close = usbio_bus_close, + .poll = usbio_bus_poll, + }, + .hub = { + .open = usbio_hub_open, + .close = usbio_hub_close, + }, + .root = { + .open = usbio_root_open, + .close = usbio_root_close, + .enable = usbio_root_enable, + .disable = usbio_root_disable, + .speed = usbio_root_speed, + .clear_tt = usbio_root_clear_tt, + }, +}; + +/** + * Check to see if driver supports a device + * + * @v handle EFI device handle + * @ret rc Return status code + */ +static int usbio_supported ( EFI_HANDLE handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_USB_DEVICE_DESCRIPTOR device; + EFI_USB_INTERFACE_DESCRIPTOR interface; + struct usb_function_descriptor desc; + struct usb_driver *driver; + struct usb_device_id *id; + union { + void *interface; + EFI_USB_IO_PROTOCOL *io; + } usb; + EFI_STATUS efirc; + int rc; + + /* Get protocol */ + if ( ( efirc = bs->OpenProtocol ( handle, 
&efi_usb_io_protocol_guid, + &usb.interface, efi_image_handle, + handle, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGCP ( handle, "USB %s is not a USB device\n", + efi_handle_name ( handle ) ); + goto err_open_protocol; + } + + /* Get device descriptor */ + if ( ( efirc = usb.io->UsbGetDeviceDescriptor ( usb.io, + &device ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( handle, "USB %s could not get device descriptor: " + "%s\n", efi_handle_name ( handle ), strerror ( rc ) ); + goto err_get_device_descriptor; + } + memset ( &desc, 0, sizeof ( desc ) ); + desc.vendor = device.IdVendor; + desc.product = device.IdProduct; + + /* Get interface descriptor */ + if ( ( efirc = usb.io->UsbGetInterfaceDescriptor ( usb.io, + &interface ) ) !=0){ + rc = -EEFI ( efirc ); + DBGC ( handle, "USB %s could not get interface descriptor: " + "%s\n", efi_handle_name ( handle ), strerror ( rc ) ); + goto err_get_interface_descriptor; + } + desc.class.class.class = interface.InterfaceClass; + desc.class.class.subclass = interface.InterfaceSubClass; + desc.class.class.protocol = interface.InterfaceProtocol; + + /* Look for a driver for this interface */ + driver = usb_find_driver ( &desc, &id ); + if ( ! 
driver ) { + rc = -ENOTSUP; + goto err_unsupported; + } + + /* Success */ + rc = 0; + + err_unsupported: + err_get_interface_descriptor: + err_get_device_descriptor: + bs->CloseProtocol ( handle, &efi_usb_io_protocol_guid, + efi_image_handle, handle ); + err_open_protocol: + return rc; +} + +/** + * Fetch configuration descriptor + * + * @v usbio USB I/O device + * @ret rc Return status code + */ +static int usbio_config ( struct usbio_device *usbio ) { + EFI_HANDLE handle = usbio->handle; + EFI_USB_IO_PROTOCOL *io = usbio->io; + EFI_USB_DEVICE_DESCRIPTOR device; + EFI_USB_CONFIG_DESCRIPTOR partial; + union { + struct usb_setup_packet setup; + EFI_USB_DEVICE_REQUEST efi; + } msg; + UINT32 status; + size_t len; + unsigned int count; + unsigned int value; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Get device descriptor */ + if ( ( efirc = io->UsbGetDeviceDescriptor ( io, &device ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USB %s could not get device descriptor: " + "%s\n", efi_handle_name ( handle ), strerror ( rc ) ); + goto err_get_device_descriptor; + } + count = device.NumConfigurations; + + /* Get current partial configuration descriptor */ + if ( ( efirc = io->UsbGetConfigDescriptor ( io, &partial ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USB %s could not get partial configuration " + "descriptor: %s\n", efi_handle_name ( handle ), + strerror ( rc ) ); + goto err_get_configuration_descriptor; + } + len = le16_to_cpu ( partial.TotalLength ); + + /* Allocate configuration descriptor */ + usbio->config = malloc ( len ); + if ( ! usbio->config ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* There is, naturally, no way to retrieve the entire device + * configuration descriptor via EFI_USB_IO_PROTOCOL. Worse, + * there is no way to even retrieve the index of the current + * configuration descriptor. 
We have to iterate over all + * possible configuration descriptors looking for the + * descriptor that matches the current configuration value. + */ + for ( i = 0 ; i < count ; i++ ) { + + /* Construct request */ + msg.setup.request = cpu_to_le16 ( USB_GET_DESCRIPTOR ); + value = ( ( USB_CONFIGURATION_DESCRIPTOR << 8 ) | i ); + msg.setup.value = cpu_to_le16 ( value ); + msg.setup.index = 0; + msg.setup.len = cpu_to_le16 ( len ); + + /* Get full configuration descriptor */ + if ( ( efirc = io->UsbControlTransfer ( io, &msg.efi, + EfiUsbDataIn, 0, + usbio->config, len, + &status ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbio, "USB %s could not get configuration %d " + "descriptor: %s\n", efi_handle_name ( handle ), + i, strerror ( rc ) ); + goto err_control_transfer; + } + + /* Ignore unless this is the current configuration */ + if ( usbio->config->config != partial.ConfigurationValue ) + continue; + + /* Check length */ + if ( le16_to_cpu ( usbio->config->len ) != len ) { + DBGC ( usbio, "USB %s configuration descriptor length " + "mismatch\n", efi_handle_name ( handle ) ); + rc = -EINVAL; + goto err_len; + } + + return 0; + } + + /* No match found */ + DBGC ( usbio, "USB %s could not find current configuration " + "descriptor\n", efi_handle_name ( handle ) ); + rc = -ENOENT; + + err_len: + err_control_transfer: + free ( usbio->config ); + err_alloc: + err_get_configuration_descriptor: + err_get_device_descriptor: + return rc; +} + +/** + * Construct device path for opening other interfaces + * + * @v usbio USB I/O device + * @ret rc Return status code + */ +static int usbio_path ( struct usbio_device *usbio ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE handle = usbio->handle; + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + USB_DEVICE_PATH *usbpath; + union { + void *interface; + EFI_DEVICE_PATH_PROTOCOL *path; + } u; + size_t len; + EFI_STATUS efirc; + int rc; + + /* Open device path protocol */ + if ( ( efirc = 
bs->OpenProtocol ( handle, + &efi_device_path_protocol_guid, + &u.interface, efi_image_handle, + handle, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s cannot open device path protocol: " + "%s\n", efi_handle_name ( handle ), strerror ( rc ) ); + goto err_open_protocol; + } + path = u.interface; + + /* Locate end of device path and sanity check */ + len = efi_path_len ( path ); + if ( len < sizeof ( *usbpath ) ) { + DBGC ( usbio, "USBIO %s underlength device path\n", + efi_handle_name ( handle ) ); + rc = -EINVAL; + goto err_underlength; + } + usbpath = ( ( ( void * ) path ) + len - sizeof ( *usbpath ) ); + if ( ! ( ( usbpath->Header.Type == MESSAGING_DEVICE_PATH ) && + ( usbpath->Header.SubType == MSG_USB_DP ) ) ) { + DBGC ( usbio, "USBIO %s not a USB device path: ", + efi_handle_name ( handle ) ); + DBGC ( usbio, "%s\n", efi_devpath_text ( path ) ); + rc = -EINVAL; + goto err_non_usb; + } + + /* Allocate copy of device path */ + usbio->path = malloc ( len + sizeof ( *end ) ); + if ( ! 
usbio->path ) { + rc = -ENOMEM; + goto err_alloc; + } + memcpy ( usbio->path, path, ( len + sizeof ( *end ) ) ); + usbio->usbpath = ( ( ( void * ) usbio->path ) + len - + sizeof ( *usbpath ) ); + + /* Close protocol */ + bs->CloseProtocol ( handle, &efi_device_path_protocol_guid, + efi_image_handle, handle ); + + return 0; + + free ( usbio->path ); + err_alloc: + err_non_usb: + err_underlength: + bs->CloseProtocol ( handle, &efi_device_path_protocol_guid, + efi_image_handle, handle ); + err_open_protocol: + return rc; +} + +/** + * Construct interface list + * + * @v usbio USB I/O device + * @ret rc Return status code + */ +static int usbio_interfaces ( struct usbio_device *usbio ) { + EFI_HANDLE handle = usbio->handle; + EFI_USB_IO_PROTOCOL *io = usbio->io; + EFI_USB_INTERFACE_DESCRIPTOR interface; + unsigned int first; + unsigned int count; + EFI_STATUS efirc; + int rc; + + /* Get interface descriptor */ + if ( ( efirc = io->UsbGetInterfaceDescriptor ( io, &interface ) ) != 0){ + rc = -EEFI ( efirc ); + DBGC ( usbio, "USB %s could not get interface descriptor: " + "%s\n", efi_handle_name ( handle ), strerror ( rc ) ); + goto err_get_interface_descriptor; + } + + /* Record first interface number */ + first = interface.InterfaceNumber; + count = usbio->config->interfaces; + assert ( first < count ); + usbio->first = first; + + /* Allocate interface list */ + usbio->interface = zalloc ( count * sizeof ( usbio->interface[0] ) ); + if ( ! usbio->interface ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Use already-opened protocol for control transfers and for + * the first interface. 
+ */ + usbio->interface[0].handle = handle; + usbio->interface[0].io = io; + usbio->interface[0].count = 1; + usbio->interface[first].handle = handle; + usbio->interface[first].io = io; + usbio->interface[first].count = 1; + + return 0; + + free ( usbio->interface ); + err_alloc: + err_get_interface_descriptor: + return rc; +} + +/** + * Attach driver to device + * + * @v efidev EFI device + * @ret rc Return status code + */ +static int usbio_start ( struct efi_device *efidev ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE handle = efidev->device; + struct usbio_device *usbio; + struct usb_port *port; + union { + void *interface; + EFI_USB_IO_PROTOCOL *io; + } u; + EFI_STATUS efirc; + int rc; + + /* Allocate and initialise structure */ + usbio = zalloc ( sizeof ( *usbio ) ); + if ( ! usbio ) { + rc = -ENOMEM; + goto err_alloc; + } + efidev_set_drvdata ( efidev, usbio ); + usbio->handle = handle; + INIT_LIST_HEAD ( &usbio->endpoints ); + + /* Open USB I/O protocol */ + if ( ( efirc = bs->OpenProtocol ( handle, &efi_usb_io_protocol_guid, + &u.interface, efi_image_handle, + handle, + ( EFI_OPEN_PROTOCOL_BY_DRIVER | + EFI_OPEN_PROTOCOL_EXCLUSIVE )))!=0){ + rc = -EEFI ( efirc ); + DBGC ( usbio, "USBIO %s cannot open USB I/O protocol: %s\n", + efi_handle_name ( handle ), strerror ( rc ) ); + DBGC_EFI_OPENERS ( usbio, handle, &efi_usb_io_protocol_guid ); + goto err_open_usbio; + } + usbio->io = u.io; + + /* Describe generic device */ + efi_device_info ( handle, "USB", &usbio->dev ); + usbio->dev.parent = &efidev->dev; + list_add ( &usbio->dev.siblings, &efidev->dev.children ); + INIT_LIST_HEAD ( &usbio->dev.children ); + + /* Fetch configuration descriptor */ + if ( ( rc = usbio_config ( usbio ) ) != 0 ) + goto err_config; + + /* Construct device path */ + if ( ( rc = usbio_path ( usbio ) ) != 0 ) + goto err_path; + + /* Construct interface list */ + if ( ( rc = usbio_interfaces ( usbio ) ) != 0 ) + goto err_interfaces; + + /* Allocate USB bus */ + 
usbio->bus = alloc_usb_bus ( &usbio->dev, 1 /* single "port" */, + USBIO_MTU, &usbio_operations ); + if ( ! usbio->bus ) { + rc = -ENOMEM; + goto err_alloc_bus; + } + usb_bus_set_hostdata ( usbio->bus, usbio ); + usb_hub_set_drvdata ( usbio->bus->hub, usbio ); + + /* Set port protocol */ + port = usb_port ( usbio->bus->hub, 1 ); + port->protocol = USB_PROTO_2_0; + + /* Register USB bus */ + if ( ( rc = register_usb_bus ( usbio->bus ) ) != 0 ) + goto err_register; + + return 0; + + unregister_usb_bus ( usbio->bus ); + err_register: + free_usb_bus ( usbio->bus ); + err_alloc_bus: + free ( usbio->interface ); + err_interfaces: + free ( usbio->path ); + err_path: + free ( usbio->config ); + err_config: + list_del ( &usbio->dev.siblings ); + bs->CloseProtocol ( handle, &efi_usb_io_protocol_guid, + efi_image_handle, handle ); + err_open_usbio: + free ( usbio ); + err_alloc: + return rc; +} + +/** + * Detach driver from device + * + * @v efidev EFI device + */ +static void usbio_stop ( struct efi_device *efidev ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE handle = efidev->device; + struct usbio_device *usbio = efidev_get_drvdata ( efidev ); + + unregister_usb_bus ( usbio->bus ); + free_usb_bus ( usbio->bus ); + free ( usbio->interface ); + free ( usbio->path ); + free ( usbio->config ); + list_del ( &usbio->dev.siblings ); + bs->CloseProtocol ( handle, &efi_usb_io_protocol_guid, + efi_image_handle, handle ); + free ( usbio ); +} + +/** EFI USB I/O driver */ +struct efi_driver usbio_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { + .name = "USBIO", + .supported = usbio_supported, + .start = usbio_start, + .stop = usbio_stop, +}; diff --git a/src/drivers/usb/usbio.h b/src/drivers/usb/usbio.h new file mode 100644 index 00000000..1d02876f --- /dev/null +++ b/src/drivers/usb/usbio.h @@ -0,0 +1,153 @@ +#ifndef _USBIO_H +#define _USBIO_H + +/** @file + * + * EFI_USB_IO_PROTOCOL pseudo Host Controller Interface driver + * + */ + +FILE_LICENCE ( 
GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** USB I/O maximum transfer size + * + * The API provides no way to discover the maximum transfer size. + * Assume the 16kB supported by EHCI. + */ +#define USBIO_MTU 16384 + +/** USB I/O interrupt ring buffer size + * + * This is a policy decision. + */ +#define USBIO_INTR_COUNT 4 + +/** A USB interrupt ring buffer */ +struct usbio_interrupt_ring { + /** USB I/O endpoint */ + struct usbio_endpoint *endpoint; + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** Data buffers */ + void *data[USBIO_INTR_COUNT]; + /** Lengths */ + size_t len[USBIO_INTR_COUNT]; +}; + +/** USB I/O ring buffer size + * + * This is a policy decision. + */ +#define USBIO_RING_COUNT 64 + +/** A USB I/O endpoint */ +struct usbio_endpoint { + /** USB I/O device */ + struct usbio_device *usbio; + /** USB endpoint */ + struct usb_endpoint *ep; + /** List of endpoints */ + struct list_head list; + /** USB I/O endpoint operations */ + struct usbio_operations *op; + + /** Containing interface number */ + unsigned int interface; + /** EFI handle */ + EFI_HANDLE handle; + /** USB I/O protocol */ + EFI_USB_IO_PROTOCOL *io; + + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** I/O buffers */ + struct io_buffer *iobuf[USBIO_RING_COUNT]; + /** Flags */ + uint8_t flags[USBIO_RING_COUNT]; + + /** Interrupt ring buffer (if applicable) */ + struct usbio_interrupt_ring *intr; +}; + +/** USB I/O transfer flags */ +enum usbio_flags { + /** This is a message transfer */ + USBIO_MESSAGE = 0x01, + /** This transfer requires zero-length packet termination */ + USBIO_ZLEN = 0x02, +}; + +/** USB I/O endpoint operations */ +struct usbio_operations { + /** Open endpoint + * + * @v endpoint Endpoint + * @ret rc Return status code + */ + int ( * open ) ( struct usbio_endpoint *endpoint ); + /** Close endpoint + * + * @v endpoint Endpoint 
+ */ + void ( * close ) ( struct usbio_endpoint *endpoint ); + /** Poll endpoint + * + * @v endpoint Endpoint + */ + void ( * poll ) ( struct usbio_endpoint *endpoint ); +}; + +/** A USB I/O protocol interface */ +struct usbio_interface { + /** EFI device handle */ + EFI_HANDLE handle; + /** USB I/O protocol */ + EFI_USB_IO_PROTOCOL *io; + /** Usage count */ + unsigned int count; +}; + +/** A USB I/O protocol device + * + * We model each externally-provided USB I/O protocol device as a host + * controller containing a root hub with a single port. + */ +struct usbio_device { + /** EFI device handle */ + EFI_HANDLE handle; + /** USB I/O protocol */ + EFI_USB_IO_PROTOCOL *io; + /** Generic device */ + struct device dev; + + /** Configuration descriptor */ + struct usb_configuration_descriptor *config; + + /** Device path */ + EFI_DEVICE_PATH_PROTOCOL *path; + /** Final component of USB device path */ + USB_DEVICE_PATH *usbpath; + + /** First interface number */ + uint8_t first; + /** USB I/O protocol interfaces */ + struct usbio_interface *interface; + + /** USB bus */ + struct usb_bus *bus; + /** List of endpoints */ + struct list_head endpoints; +}; + +#endif /* _USBIO_H */ diff --git a/src/drivers/usb/usbkbd.c b/src/drivers/usb/usbkbd.c new file mode 100644 index 00000000..a8ab6ab7 --- /dev/null +++ b/src/drivers/usb/usbkbd.c @@ -0,0 +1,583 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include "usbkbd.h" + +/** @file + * + * USB keyboard driver + * + */ + +/** List of USB keyboards */ +static LIST_HEAD ( usb_keyboards ); + +/****************************************************************************** + * + * Keyboard map + * + ****************************************************************************** + */ + +/** + * Map USB keycode to iPXE key + * + * @v keycode Keycode + * @v modifiers Modifiers + * @v leds LED state + * @ret key iPXE key + * + * Key codes are defined in the USB HID Usage Tables Keyboard/Keypad + * page. 
+ */ +static unsigned int usbkbd_map ( unsigned int keycode, unsigned int modifiers, + unsigned int leds ) { + unsigned int key; + + if ( keycode < USBKBD_KEY_A ) { + /* Not keys */ + key = 0; + } else if ( keycode <= USBKBD_KEY_Z ) { + /* Alphabetic keys */ + key = ( keycode - USBKBD_KEY_A + 'a' ); + if ( modifiers & USBKBD_CTRL ) { + key -= ( 'a' - CTRL_A ); + } else if ( ( modifiers & USBKBD_SHIFT ) || + ( leds & USBKBD_LED_CAPS_LOCK ) ) { + key -= ( 'a' - 'A' ); + } + } else if ( keycode <= USBKBD_KEY_0 ) { + /* Numeric key row */ + if ( modifiers & USBKBD_SHIFT ) { + key = "!@#$%^&*()" [ keycode - USBKBD_KEY_1 ]; + } else { + key = ( ( ( keycode - USBKBD_KEY_1 + 1 ) % 10 ) + '0' ); + } + } else if ( keycode <= USBKBD_KEY_SPACE ) { + /* Unmodifiable keys */ + static const uint8_t unmodifable[] = + { LF, ESC, BACKSPACE, TAB, ' ' }; + key = unmodifable[ keycode - USBKBD_KEY_ENTER ]; + } else if ( keycode <= USBKBD_KEY_SLASH ) { + /* Punctuation keys */ + if ( modifiers & USBKBD_SHIFT ) { + key = "_+{}|~:\"~<>?" [ keycode - USBKBD_KEY_MINUS ]; + } else { + key = "-=[]\\#;'`,./" [ keycode - USBKBD_KEY_MINUS ]; + } + } else if ( keycode <= USBKBD_KEY_UP ) { + /* Special keys */ + static const uint16_t special[] = { + 0, 0, 0, 0, 0, KEY_F5, KEY_F6, KEY_F7, KEY_F8, KEY_F9, + KEY_F10, KEY_F11, KEY_F12, 0, 0, 0, KEY_IC, KEY_HOME, + KEY_PPAGE, KEY_DC, KEY_END, KEY_NPAGE, KEY_RIGHT, + KEY_LEFT, KEY_DOWN, KEY_UP + }; + key = special[ keycode - USBKBD_KEY_CAPS_LOCK ]; + } else if ( keycode <= USBKBD_KEY_PAD_ENTER ) { + /* Keypad (unaffected by Num Lock) */ + key = "\0/*-+\n" [ keycode - USBKBD_KEY_NUM_LOCK ]; + } else if ( keycode <= USBKBD_KEY_PAD_DOT ) { + /* Keypad (affected by Num Lock) */ + if ( leds & USBKBD_LED_NUM_LOCK ) { + key = "1234567890." 
[ keycode - USBKBD_KEY_PAD_1 ]; + } else { + static const uint16_t keypad[] = { + KEY_END, KEY_DOWN, KEY_NPAGE, KEY_LEFT, 0, + KEY_RIGHT, KEY_HOME, KEY_UP, KEY_PPAGE, + KEY_IC, KEY_DC + }; + key = keypad[ keycode - USBKBD_KEY_PAD_1 ]; + }; + } else { + key = 0; + } + + return key; +} + +/****************************************************************************** + * + * Keyboard buffer + * + ****************************************************************************** + */ + +/** + * Insert keypress into keyboard buffer + * + * @v kbd USB keyboard + * @v keycode Keycode + * @v modifiers Modifiers + */ +static void usbkbd_produce ( struct usb_keyboard *kbd, unsigned int keycode, + unsigned int modifiers ) { + unsigned int leds = 0; + unsigned int key; + + /* Check for LED-modifying keys */ + if ( keycode == USBKBD_KEY_CAPS_LOCK ) { + leds = USBKBD_LED_CAPS_LOCK; + } else if ( keycode == USBKBD_KEY_NUM_LOCK ) { + leds = USBKBD_LED_NUM_LOCK; + } + + /* Handle LED-modifying keys */ + if ( leds ) { + kbd->leds ^= leds; + kbd->leds_changed = 1; + return; + } + + /* Map to iPXE key */ + key = usbkbd_map ( keycode, modifiers, kbd->leds ); + + /* Do nothing if this keycode has no corresponding iPXE key */ + if ( ! 
key ) { + DBGC ( kbd, "KBD %s has no key for keycode %#02x:%#02x\n", + kbd->name, modifiers, keycode ); + return; + } + + /* Check for buffer overrun */ + if ( usbkbd_fill ( kbd ) >= USBKBD_BUFSIZE ) { + DBGC ( kbd, "KBD %s buffer overrun (key %#02x)\n", + kbd->name, key ); + return; + } + + /* Insert into buffer */ + kbd->key[ ( kbd->prod++ ) % USBKBD_BUFSIZE ] = key; + DBGC2 ( kbd, "KBD %s key %#02x produced\n", kbd->name, key ); +} + +/** + * Consume character from keyboard buffer + * + * @v kbd USB keyboard + * @ret character Character + */ +static unsigned int usbkbd_consume ( struct usb_keyboard *kbd ) { + static char buf[] = "\x1b[xx~"; + char *tmp = &buf[2]; + unsigned int key; + unsigned int character; + unsigned int ansi_n; + unsigned int len; + + /* Sanity check */ + assert ( usbkbd_fill ( kbd ) > 0 ); + + /* Get current keypress */ + key = kbd->key[ kbd->cons % USBKBD_BUFSIZE ]; + + /* If this is a straightforward key, just consume and return it */ + if ( key < KEY_MIN ) { + kbd->cons++; + DBGC2 ( kbd, "KBD %s key %#02x consumed\n", kbd->name, key ); + return key; + } + + /* Construct ANSI sequence */ + ansi_n = KEY_ANSI_N ( key ); + if ( ansi_n ) + tmp += sprintf ( tmp, "%d", ansi_n ); + *(tmp++) = KEY_ANSI_TERMINATOR ( key ); + *tmp = '\0'; + len = ( tmp - buf ); + assert ( len < sizeof ( buf ) ); + if ( kbd->subcons == 0 ) { + DBGC2 ( kbd, "KBD %s key %#02x consumed as ^[%s\n", + kbd->name, key, &buf[1] ); + } + + /* Extract character from ANSI sequence */ + assert ( kbd->subcons < len ); + character = buf[ kbd->subcons++ ]; + + /* Consume key if applicable */ + if ( kbd->subcons == len ) { + kbd->cons++; + kbd->subcons = 0; + } + + return character; +} + +/****************************************************************************** + * + * Keyboard report + * + ****************************************************************************** + */ + +/** + * Check for presence of keycode in report + * + * @v report Keyboard report + * @v keycode 
Keycode (must be non-zero) + * @ret has_keycode Keycode is present in report + */ +static int usbkbd_has_keycode ( struct usb_keyboard_report *report, + unsigned int keycode ) { + unsigned int i; + + /* Check for keycode */ + for ( i = 0 ; i < ( sizeof ( report->keycode ) / + sizeof ( report->keycode[0] ) ) ; i++ ) { + if ( report->keycode[i] == keycode ) + return keycode; + } + + return 0; +} + +/** + * Handle keyboard report + * + * @v kbd USB keyboard + * @v new New keyboard report + */ +static void usbkbd_report ( struct usb_keyboard *kbd, + struct usb_keyboard_report *new ) { + struct usb_keyboard_report *old = &kbd->report; + unsigned int keycode; + unsigned int i; + + /* Check if current key has been released */ + if ( kbd->keycode && ! usbkbd_has_keycode ( new, kbd->keycode ) ) { + DBGC2 ( kbd, "KBD %s keycode %#02x released\n", + kbd->name, kbd->keycode ); + kbd->keycode = 0; + } + + /* Decrement auto-repeat hold-off timer, if applicable */ + if ( kbd->holdoff ) + kbd->holdoff--; + + /* Check if a new key has been pressed */ + for ( i = 0 ; i < ( sizeof ( new->keycode ) / + sizeof ( new->keycode[0] ) ) ; i++ ) { + + /* Ignore keys present in the previous report */ + keycode = new->keycode[i]; + if ( ( keycode == 0 ) || usbkbd_has_keycode ( old, keycode ) ) + continue; + DBGC2 ( kbd, "KBD %s keycode %#02x pressed\n", + kbd->name, keycode ); + + /* Insert keypress into keyboard buffer */ + usbkbd_produce ( kbd, keycode, new->modifiers ); + + /* Record as most recent keycode */ + kbd->keycode = keycode; + + /* Start auto-repeat hold-off timer */ + kbd->holdoff = USBKBD_HOLDOFF; + } + + /* Insert auto-repeated keypress into keyboard buffer, if applicable */ + if ( kbd->keycode && ! 
kbd->holdoff ) + usbkbd_produce ( kbd, kbd->keycode, new->modifiers ); + + /* Record report */ + memcpy ( old, new, sizeof ( *old ) ); +} + +/****************************************************************************** + * + * Interrupt endpoint + * + ****************************************************************************** + */ + +/** + * Complete interrupt transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void usbkbd_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usb_keyboard *kbd = container_of ( ep, struct usb_keyboard, + hid.in ); + struct usb_keyboard_report *report; + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) + goto drop; + + /* Ignore packets with errors */ + if ( rc != 0 ) { + DBGC ( kbd, "KBD %s interrupt IN failed: %s\n", + kbd->name, strerror ( rc ) ); + goto drop; + } + + /* Ignore underlength packets */ + if ( iob_len ( iobuf ) < sizeof ( *report ) ) { + DBGC ( kbd, "KBD %s underlength report:\n", kbd->name ); + DBGC_HDA ( kbd, 0, iobuf->data, iob_len ( iobuf ) ); + goto drop; + } + report = iobuf->data; + + /* Handle keyboard report */ + usbkbd_report ( kbd, report ); + + drop: + /* Recycle I/O buffer */ + usb_recycle ( &kbd->hid.in, iobuf ); +} + +/** Interrupt endpoint operations */ +static struct usb_endpoint_driver_operations usbkbd_operations = { + .complete = usbkbd_complete, +}; + +/****************************************************************************** + * + * Keyboard LEDs + * + ****************************************************************************** + */ + +/** + * Set keyboard LEDs + * + * @v kbd USB keyboard + * @ret rc Return status code + */ +static int usbkbd_set_leds ( struct usb_keyboard *kbd ) { + struct usb_function *func = kbd->hid.func; + int rc; + + DBGC2 ( kbd, "KBD %s setting LEDs to %#02x\n", kbd->name, kbd->leds ); + + /* Set keyboard LEDs */ + if ( ( rc = usbhid_set_report ( 
func->usb, func->interface[0], + USBHID_REPORT_OUTPUT, 0, &kbd->leds, + sizeof ( kbd->leds ) ) ) != 0 ) { + DBGC ( kbd, "KBD %s could not set LEDs to %#02x: %s\n", + kbd->name, kbd->leds, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int usbkbd_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct usb_keyboard *kbd; + int rc; + + /* Allocate and initialise structure */ + kbd = zalloc ( sizeof ( *kbd ) ); + if ( ! kbd ) { + rc = -ENOMEM; + goto err_alloc; + } + kbd->name = func->name; + kbd->bus = usb->port->hub->bus; + usbhid_init ( &kbd->hid, func, &usbkbd_operations, NULL ); + usb_refill_init ( &kbd->hid.in, 0, sizeof ( kbd->report ), + USBKBD_INTR_MAX_FILL ); + + /* Describe USB human interface device */ + if ( ( rc = usbhid_describe ( &kbd->hid, config ) ) != 0 ) { + DBGC ( kbd, "KBD %s could not describe: %s\n", + kbd->name, strerror ( rc ) ); + goto err_describe; + } + DBGC ( kbd, "KBD %s using %s (len %zd)\n", + kbd->name, usb_endpoint_name ( &kbd->hid.in ), kbd->hid.in.mtu ); + + /* Set boot protocol */ + if ( ( rc = usbhid_set_protocol ( usb, func->interface[0], + USBHID_PROTOCOL_BOOT ) ) != 0 ) { + DBGC ( kbd, "KBD %s could not set boot protocol: %s\n", + kbd->name, strerror ( rc ) ); + goto err_set_protocol; + } + + /* Set idle time */ + if ( ( rc = usbhid_set_idle ( usb, func->interface[0], 0, + USBKBD_IDLE_DURATION ) ) != 0 ) { + DBGC ( kbd, "KBD %s could not set idle time: %s\n", + kbd->name, strerror ( rc ) ); + goto err_set_idle; + } + + /* Open USB human interface device */ + if ( ( rc = usbhid_open ( &kbd->hid ) ) != 0 ) { + DBGC ( kbd, 
"KBD %s could not open: %s\n", + kbd->name, strerror ( rc ) ); + goto err_open; + } + + /* Add to list of USB keyboards */ + list_add_tail ( &kbd->list, &usb_keyboards ); + + /* Set initial LED state */ + usbkbd_set_leds ( kbd ); + + usb_func_set_drvdata ( func, kbd ); + return 0; + + usbhid_close ( &kbd->hid ); + err_open: + err_set_idle: + err_set_protocol: + err_describe: + free ( kbd ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void usbkbd_remove ( struct usb_function *func ) { + struct usb_keyboard *kbd = usb_func_get_drvdata ( func ); + + /* Remove from list of USB keyboards */ + list_del ( &kbd->list ); + + /* Close USB human interface device */ + usbhid_close ( &kbd->hid ); + + /* Free device */ + free ( kbd ); +} + +/** USB keyboard device IDs */ +static struct usb_device_id usbkbd_ids[] = { + { + .name = "kbd", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** USB keyboard driver */ +struct usb_driver usbkbd_driver __usb_driver = { + .ids = usbkbd_ids, + .id_count = ( sizeof ( usbkbd_ids ) / sizeof ( usbkbd_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_HID, USB_SUBCLASS_HID_BOOT, + USBKBD_PROTOCOL ), + .score = USB_SCORE_NORMAL, + .probe = usbkbd_probe, + .remove = usbkbd_remove, +}; + +/****************************************************************************** + * + * Console interface + * + ****************************************************************************** + */ + +/** + * Read a character from the console + * + * @ret character Character read + */ +static int usbkbd_getchar ( void ) { + struct usb_keyboard *kbd; + + /* Consume first available key */ + list_for_each_entry ( kbd, &usb_keyboards, list ) { + if ( usbkbd_fill ( kbd ) ) + return usbkbd_consume ( kbd ); + } + + return 0; +} + +/** + * Check for available input + * + * @ret is_available Input is available + */ +static int usbkbd_iskey ( void ) { + struct usb_keyboard *kbd; + unsigned int fill; + + /* Poll USB 
keyboards, refill endpoints, and set LEDs if applicable */ + list_for_each_entry ( kbd, &usb_keyboards, list ) { + + /* Poll keyboard */ + usb_poll ( kbd->bus ); + + /* Refill endpoints */ + usb_refill ( &kbd->hid.in ); + + /* Update keyboard LEDs, if applicable */ + if ( kbd->leds_changed ) { + usbkbd_set_leds ( kbd ); + kbd->leds_changed = 0; + } + } + + /* Check for a non-empty keyboard buffer */ + list_for_each_entry ( kbd, &usb_keyboards, list ) { + fill = usbkbd_fill ( kbd ); + if ( fill ) + return fill; + } + + return 0; +} + +/** USB keyboard console */ +struct console_driver usbkbd_console __console_driver = { + .getchar = usbkbd_getchar, + .iskey = usbkbd_iskey, +}; diff --git a/src/drivers/usb/usbkbd.h b/src/drivers/usb/usbkbd.h new file mode 100644 index 00000000..cedebfe7 --- /dev/null +++ b/src/drivers/usb/usbkbd.h @@ -0,0 +1,171 @@ +#ifndef _USBKBD_H +#define _USBKBD_H + +/** @file + * + * USB keyboard driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Keyboard protocol */ +#define USBKBD_PROTOCOL 1 + +/** A USB keyboard report */ +struct usb_keyboard_report { + /** Modifier keys */ + uint8_t modifiers; + /** Reserved */ + uint8_t reserved; + /** Keycodes */ + uint8_t keycode[6]; +} __attribute__ (( packed )); + +/** USB modifier keys */ +enum usb_keyboard_modifier { + /** Left Ctrl key */ + USBKBD_CTRL_LEFT = 0x01, + /** Left Shift key */ + USBKBD_SHIFT_LEFT = 0x02, + /** Left Alt key */ + USBKBD_ALT_LEFT = 0x04, + /** Left GUI key */ + USBKBD_GUI_LEFT = 0x08, + /** Right Ctrl key */ + USBKBD_CTRL_RIGHT = 0x10, + /** Right Shift key */ + USBKBD_SHIFT_RIGHT = 0x20, + /** Right Alt key */ + USBKBD_ALT_RIGHT = 0x40, + /** Right GUI key */ + USBKBD_GUI_RIGHT = 0x80, +}; + +/** Either Ctrl key */ +#define USBKBD_CTRL ( USBKBD_CTRL_LEFT | USBKBD_CTRL_RIGHT ) + +/** Either Shift key */ +#define USBKBD_SHIFT ( USBKBD_SHIFT_LEFT | USBKBD_SHIFT_RIGHT ) + +/** Either Alt key */ +#define USBKBD_ALT ( 
USBKBD_ALT_LEFT | USBKBD_ALT_RIGHT ) + +/** Either GUI key */ +#define USBKBD_GUI ( USBKBD_GUI_LEFT | USBKBD_GUI_RIGHT ) + +/** USB keycodes */ +enum usb_keycode { + USBKBD_KEY_A = 0x04, + USBKBD_KEY_Z = 0x1d, + USBKBD_KEY_1 = 0x1e, + USBKBD_KEY_0 = 0x27, + USBKBD_KEY_ENTER = 0x28, + USBKBD_KEY_SPACE = 0x2c, + USBKBD_KEY_MINUS = 0x2d, + USBKBD_KEY_SLASH = 0x38, + USBKBD_KEY_CAPS_LOCK = 0x39, + USBKBD_KEY_F1 = 0x3a, + USBKBD_KEY_UP = 0x52, + USBKBD_KEY_NUM_LOCK = 0x53, + USBKBD_KEY_PAD_ENTER = 0x58, + USBKBD_KEY_PAD_1 = 0x59, + USBKBD_KEY_PAD_DOT = 0x63, +}; + +/** USB keyboard LEDs */ +enum usb_keyboard_led { + USBKBD_LED_NUM_LOCK = 0x01, + USBKBD_LED_CAPS_LOCK = 0x02, + USBKBD_LED_SCROLL_LOCK = 0x04, +}; + +/** Keyboard idle duration (in 4ms units) + * + * This is a policy decision. We choose to use an autorepeat rate of + * approximately 40ms. + */ +#define USBKBD_IDLE_DURATION 10 /* 10 x 4ms = 40ms */ + +/** Keyboard auto-repeat hold-off (in units of USBKBD_IDLE_DURATION) + * + * This is a policy decision. We choose to use an autorepeat delay of + * approximately 500ms. + */ +#define USBKBD_HOLDOFF 12 /* 12 x 40ms = 480ms */ + +/** Interrupt endpoint maximum fill level + * + * When idling, we are likely to poll the USB endpoint at only the + * 18.2Hz system timer tick rate. With a typical observed bInterval + * of 10ms (which will be rounded down to 8ms by the HCI drivers), + * this gives approximately 7 completions per poll. + */ +#define USBKBD_INTR_MAX_FILL 8 + +/** Keyboard buffer size + * + * Must be a power of two. 
+ */ +#define USBKBD_BUFSIZE 8 + +/** A USB keyboard device */ +struct usb_keyboard { + /** Name */ + const char *name; + /** List of all USB keyboards */ + struct list_head list; + + /** USB bus */ + struct usb_bus *bus; + /** USB human interface device */ + struct usb_hid hid; + + /** Most recent keyboard report */ + struct usb_keyboard_report report; + /** Most recently pressed non-modifier key (if any) */ + unsigned int keycode; + /** Autorepeat hold-off time (in number of completions reported) */ + unsigned int holdoff; + + /** Keyboard LED state */ + uint8_t leds; + /** Keyboard LEDs changed */ + uint8_t leds_changed; + + /** Keyboard buffer + * + * This stores iPXE key values. + */ + unsigned int key[USBKBD_BUFSIZE]; + /** Keyboard buffer producer counter */ + unsigned int prod; + /** Keyboard buffer consumer counter */ + unsigned int cons; + /** Keyboard buffer sub-consumer counter + * + * This represents the index within the ANSI escape sequence + * corresponding to an iPXE key value. + */ + unsigned int subcons; +}; + +/** + * Calculate keyboard buffer fill level + * + * @v kbd USB keyboard + * @ret fill Keyboard buffer fill level + */ +static inline __attribute__ (( always_inline )) unsigned int +usbkbd_fill ( struct usb_keyboard *kbd ) { + unsigned int fill = ( kbd->prod - kbd->cons ); + + assert ( fill <= USBKBD_BUFSIZE ); + return fill; +} + +#endif /* _USBKBD_H */ diff --git a/src/drivers/usb/usbnet.c b/src/drivers/usb/usbnet.c new file mode 100644 index 00000000..0fac00b5 --- /dev/null +++ b/src/drivers/usb/usbnet.c @@ -0,0 +1,292 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * USB network devices + * + * USB network devices use a variety of packet formats and interface + * descriptors, but tend to have several features in common: + * + * - a single bulk OUT endpoint + * + * - a single bulk IN endpoint using the generic refill mechanism + * + * - an optional interrupt endpoint using the generic refill mechanism + * + * - optional use of an alternate setting to enable the data interface + * + */ + +/** + * Open USB network device + * + * @v usbnet USB network device + * @ret rc Return status code + */ +int usbnet_open ( struct usbnet_device *usbnet ) { + struct usb_device *usb = usbnet->func->usb; + int rc; + + /* Open interrupt endpoint, if applicable */ + if ( usbnet_has_intr ( usbnet ) && + ( rc = usb_endpoint_open ( &usbnet->intr ) ) != 0 ) { + DBGC ( usbnet, "USBNET %s could not open interrupt: %s\n", + usbnet->func->name, strerror ( rc ) ); + goto err_open_intr; + } + + /* Refill interrupt endpoint, if applicable */ + if ( usbnet_has_intr ( usbnet ) && + ( rc = usb_refill ( &usbnet->intr ) ) != 0 ) { + DBGC ( usbnet, "USBNET %s could not refill interrupt: %s\n", + usbnet->func->name, strerror ( rc ) ); + goto err_refill_intr; + } + + /* Select 
alternate setting for data interface, if applicable */ + if ( usbnet->alternate && + ( ( rc = usb_set_interface ( usb, usbnet->data, + usbnet->alternate ) ) != 0 ) ) { + DBGC ( usbnet, "USBNET %s could not set alternate interface " + "%d: %s\n", usbnet->func->name, usbnet->alternate, + strerror ( rc ) ); + goto err_set_interface; + } + + /* Open bulk IN endpoint */ + if ( ( rc = usb_endpoint_open ( &usbnet->in ) ) != 0 ) { + DBGC ( usbnet, "USBNET %s could not open bulk IN: %s\n", + usbnet->func->name, strerror ( rc ) ); + goto err_open_in; + } + + /* Open bulk OUT endpoint */ + if ( ( rc = usb_endpoint_open ( &usbnet->out ) ) != 0 ) { + DBGC ( usbnet, "USBNET %s could not open bulk OUT: %s\n", + usbnet->func->name, strerror ( rc ) ); + goto err_open_out; + } + + /* Refill bulk IN endpoint */ + if ( ( rc = usb_refill ( &usbnet->in ) ) != 0 ) { + DBGC ( usbnet, "USBNET %s could not refill bulk IN: %s\n", + usbnet->func->name, strerror ( rc ) ); + goto err_refill_in; + } + + return 0; + + err_refill_in: + usb_endpoint_close ( &usbnet->out ); + err_open_out: + usb_endpoint_close ( &usbnet->in ); + err_open_in: + if ( usbnet->alternate ) + usb_set_interface ( usb, usbnet->data, 0 ); + err_set_interface: + err_refill_intr: + if ( usbnet_has_intr ( usbnet ) ) + usb_endpoint_close ( &usbnet->intr ); + err_open_intr: + return rc; +} + +/** + * Close USB network device + * + * @v usbnet USB network device + */ +void usbnet_close ( struct usbnet_device *usbnet ) { + struct usb_device *usb = usbnet->func->usb; + + /* Close bulk OUT endpoint */ + usb_endpoint_close ( &usbnet->out ); + + /* Close bulk IN endpoint */ + usb_endpoint_close ( &usbnet->in ); + + /* Reset alternate setting for data interface, if applicable */ + if ( usbnet->alternate ) + usb_set_interface ( usb, usbnet->data, 0 ); + + /* Close interrupt endpoint, if applicable */ + if ( usbnet_has_intr ( usbnet ) ) + usb_endpoint_close ( &usbnet->intr ); +} + +/** + * Refill USB network device bulk IN and interrupt 
endpoints + * + * @v usbnet USB network device + * @ret rc Return status code + */ +int usbnet_refill ( struct usbnet_device *usbnet ) { + int rc; + + /* Refill bulk IN endpoint */ + if ( ( rc = usb_refill ( &usbnet->in ) ) != 0 ) + return rc; + + /* Refill interrupt endpoint, if applicable */ + if ( usbnet_has_intr ( usbnet ) && + ( rc = usb_refill ( &usbnet->intr ) ) != 0 ) { + return rc; + } + + return 0; +} + +/** + * Describe communications interface and interrupt endpoint + * + * @v usbnet USB network device + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int usbnet_comms_describe ( struct usbnet_device *usbnet, + struct usb_configuration_descriptor *config){ + struct usb_interface_descriptor *desc; + unsigned int comms; + unsigned int i; + int rc; + + /* Iterate over all available interfaces */ + for ( i = 0 ; i < usbnet->func->desc.count ; i++ ) { + + /* Get interface number */ + comms = usbnet->func->interface[i]; + + /* Locate interface descriptor */ + desc = usb_interface_descriptor ( config, comms, 0 ); + if ( ! 
desc ) + continue; + + /* Describe interrupt endpoint */ + if ( ( rc = usb_endpoint_described ( &usbnet->intr, config, + desc, USB_INTERRUPT_IN, + 0 ) ) != 0 ) + continue; + + /* Record communications interface */ + usbnet->comms = comms; + DBGC ( usbnet, "USBNET %s found communications interface %d\n", + usbnet->func->name, comms ); + return 0; + } + + DBGC ( usbnet, "USBNET %s found no communications interface\n", + usbnet->func->name ); + return -ENOENT; +} + +/** + * Describe data interface and bulk endpoints + * + * @v usbnet USB network device + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int usbnet_data_describe ( struct usbnet_device *usbnet, + struct usb_configuration_descriptor *config ){ + struct usb_interface_descriptor *desc; + unsigned int data; + unsigned int alt; + unsigned int i; + int rc; + + /* Iterate over all available interfaces */ + for ( i = 0 ; i < usbnet->func->desc.count ; i++ ) { + + /* Get interface number */ + data = usbnet->func->interface[i]; + + /* Iterate over all existent alternate settings */ + for ( alt = 0 ; ; alt++ ) { + + /* Locate interface descriptor */ + desc = usb_interface_descriptor ( config, data, alt ); + if ( ! 
desc ) + break; + + /* Describe bulk IN endpoint */ + if ( ( rc = usb_endpoint_described ( &usbnet->in, + config, desc, + USB_BULK_IN, + 0 ) ) != 0 ) + continue; + + /* Describe bulk OUT endpoint */ + if ( ( rc = usb_endpoint_described ( &usbnet->out, + config, desc, + USB_BULK_OUT, + 0 ) ) != 0 ) + continue; + + /* Record data interface and alternate setting */ + usbnet->data = data; + usbnet->alternate = alt; + DBGC ( usbnet, "USBNET %s found data interface %d", + usbnet->func->name, data ); + if ( alt ) + DBGC ( usbnet, " using alternate %d", alt ); + DBGC ( usbnet, "\n" ); + return 0; + } + } + + DBGC ( usbnet, "USBNET %s found no data interface\n", + usbnet->func->name ); + return -ENOENT; +} + +/** + * Describe USB network device interfaces + * + * @v usbnet USB network device + * @v config Configuration descriptor + * @ret rc Return status code + */ +int usbnet_describe ( struct usbnet_device *usbnet, + struct usb_configuration_descriptor *config ) { + int rc; + + /* Describe communications interface, if applicable */ + if ( usbnet_has_intr ( usbnet ) && + ( rc = usbnet_comms_describe ( usbnet, config ) ) != 0 ) { + return rc; + } + + /* Describe data interface */ + if ( ( rc = usbnet_data_describe ( usbnet, config ) ) != 0 ) + return rc; + + return 0; +} diff --git a/src/drivers/usb/xhci.c b/src/drivers/usb/xhci.c new file mode 100644 index 00000000..c4a1dc33 --- /dev/null +++ b/src/drivers/usb/xhci.c @@ -0,0 +1,3376 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xhci.h" + +/** @file + * + * USB eXtensible Host Controller Interface (xHCI) driver + * + */ + +/** Message transfer profiler */ +static struct profiler xhci_message_profiler __profiler = + { .name = "xhci.message" }; + +/** Stream transfer profiler */ +static struct profiler xhci_stream_profiler __profiler = + { .name = "xhci.stream" }; + +/** Event ring profiler */ +static struct profiler xhci_event_profiler __profiler = + { .name = "xhci.event" }; + +/** Transfer event profiler */ +static struct profiler xhci_transfer_profiler __profiler = + { .name = "xhci.transfer" }; + +/* Disambiguate the various error causes */ +#define EIO_DATA \ + __einfo_error ( EINFO_EIO_DATA ) +#define EINFO_EIO_DATA \ + __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \ + "Data buffer error" ) +#define EIO_BABBLE \ + __einfo_error ( EINFO_EIO_BABBLE ) +#define EINFO_EIO_BABBLE \ + __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \ + "Babble detected" ) +#define EIO_USB \ + __einfo_error ( EINFO_EIO_USB ) +#define EINFO_EIO_USB \ + __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \ + "USB transaction error" ) +#define EIO_TRB \ + __einfo_error ( EINFO_EIO_TRB ) +#define EINFO_EIO_TRB \ + __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \ + "TRB error" ) +#define EIO_STALL \ + __einfo_error ( EINFO_EIO_STALL ) +#define EINFO_EIO_STALL \ + 
__einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \ + "Stall error" ) +#define EIO_RESOURCE \ + __einfo_error ( EINFO_EIO_RESOURCE ) +#define EINFO_EIO_RESOURCE \ + __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \ + "Resource error" ) +#define EIO_BANDWIDTH \ + __einfo_error ( EINFO_EIO_BANDWIDTH ) +#define EINFO_EIO_BANDWIDTH \ + __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \ + "Bandwidth error" ) +#define EIO_NO_SLOTS \ + __einfo_error ( EINFO_EIO_NO_SLOTS ) +#define EINFO_EIO_NO_SLOTS \ + __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \ + "No slots available" ) +#define EIO_STREAM_TYPE \ + __einfo_error ( EINFO_EIO_STREAM_TYPE ) +#define EINFO_EIO_STREAM_TYPE \ + __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \ + "Invalid stream type" ) +#define EIO_SLOT \ + __einfo_error ( EINFO_EIO_SLOT ) +#define EINFO_EIO_SLOT \ + __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \ + "Slot not enabled" ) +#define EIO_ENDPOINT \ + __einfo_error ( EINFO_EIO_ENDPOINT ) +#define EINFO_EIO_ENDPOINT \ + __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \ + "Endpoint not enabled" ) +#define EIO_SHORT \ + __einfo_error ( EINFO_EIO_SHORT ) +#define EINFO_EIO_SHORT \ + __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \ + "Short packet" ) +#define EIO_UNDERRUN \ + __einfo_error ( EINFO_EIO_UNDERRUN ) +#define EINFO_EIO_UNDERRUN \ + __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \ + "Ring underrun" ) +#define EIO_OVERRUN \ + __einfo_error ( EINFO_EIO_OVERRUN ) +#define EINFO_EIO_OVERRUN \ + __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \ + "Ring overrun" ) +#define EIO_VF_RING_FULL \ + __einfo_error ( EINFO_EIO_VF_RING_FULL ) +#define EINFO_EIO_VF_RING_FULL \ + __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \ + "Virtual function event ring full" ) +#define EIO_PARAMETER \ + __einfo_error ( EINFO_EIO_PARAMETER ) +#define EINFO_EIO_PARAMETER \ + __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \ + "Parameter error" ) +#define EIO_BANDWIDTH_OVERRUN \ + __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN ) +#define EINFO_EIO_BANDWIDTH_OVERRUN \ + __einfo_uniqify ( EINFO_EIO, ( 18 - 
0 ), \ + "Bandwidth overrun" ) +#define EIO_CONTEXT \ + __einfo_error ( EINFO_EIO_CONTEXT ) +#define EINFO_EIO_CONTEXT \ + __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \ + "Context state error" ) +#define EIO_NO_PING \ + __einfo_error ( EINFO_EIO_NO_PING ) +#define EINFO_EIO_NO_PING \ + __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \ + "No ping response" ) +#define EIO_RING_FULL \ + __einfo_error ( EINFO_EIO_RING_FULL ) +#define EINFO_EIO_RING_FULL \ + __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \ + "Event ring full" ) +#define EIO_INCOMPATIBLE \ + __einfo_error ( EINFO_EIO_INCOMPATIBLE ) +#define EINFO_EIO_INCOMPATIBLE \ + __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \ + "Incompatible device" ) +#define EIO_MISSED \ + __einfo_error ( EINFO_EIO_MISSED ) +#define EINFO_EIO_MISSED \ + __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \ + "Missed service error" ) +#define EIO_CMD_STOPPED \ + __einfo_error ( EINFO_EIO_CMD_STOPPED ) +#define EINFO_EIO_CMD_STOPPED \ + __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \ + "Command ring stopped" ) +#define EIO_CMD_ABORTED \ + __einfo_error ( EINFO_EIO_CMD_ABORTED ) +#define EINFO_EIO_CMD_ABORTED \ + __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \ + "Command aborted" ) +#define EIO_STOP \ + __einfo_error ( EINFO_EIO_STOP ) +#define EINFO_EIO_STOP \ + __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \ + "Stopped" ) +#define EIO_STOP_LEN \ + __einfo_error ( EINFO_EIO_STOP_LEN ) +#define EINFO_EIO_STOP_LEN \ + __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \ + "Stopped - length invalid" ) +#define EIO_STOP_SHORT \ + __einfo_error ( EINFO_EIO_STOP_SHORT ) +#define EINFO_EIO_STOP_SHORT \ + __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \ + "Stopped - short packet" ) +#define EIO_LATENCY \ + __einfo_error ( EINFO_EIO_LATENCY ) +#define EINFO_EIO_LATENCY \ + __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \ + "Maximum exit latency too large" ) +#define EIO_ISOCH \ + __einfo_error ( EINFO_EIO_ISOCH ) +#define EINFO_EIO_ISOCH \ + __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \ + "Isochronous buffer 
overrun" ) +#define EPROTO_LOST \ + __einfo_error ( EINFO_EPROTO_LOST ) +#define EINFO_EPROTO_LOST \ + __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \ + "Event lost" ) +#define EPROTO_UNDEFINED \ + __einfo_error ( EINFO_EPROTO_UNDEFINED ) +#define EINFO_EPROTO_UNDEFINED \ + __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \ + "Undefined error" ) +#define EPROTO_STREAM_ID \ + __einfo_error ( EINFO_EPROTO_STREAM_ID ) +#define EINFO_EPROTO_STREAM_ID \ + __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \ + "Invalid stream ID" ) +#define EPROTO_SECONDARY \ + __einfo_error ( EINFO_EPROTO_SECONDARY ) +#define EINFO_EPROTO_SECONDARY \ + __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \ + "Secondary bandwidth error" ) +#define EPROTO_SPLIT \ + __einfo_error ( EINFO_EPROTO_SPLIT ) +#define EINFO_EPROTO_SPLIT \ + __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \ + "Split transaction error" ) +#define ECODE(code) \ + ( ( (code) < 32 ) ? \ + EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \ + EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \ + EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \ + EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \ + EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \ + EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \ + EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \ + EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \ + EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \ + EIO_ISOCH ) : \ + ( (code) < 64 ) ? 
\ + EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \ + EPROTO_UNDEFINED, EPROTO_STREAM_ID, \ + EPROTO_SECONDARY, EPROTO_SPLIT ) : \ + EFAULT ) + +/****************************************************************************** + * + * Register access + * + ****************************************************************************** + */ + +/** + * Initialise device + * + * @v xhci xHCI device + * @v regs MMIO registers + */ +static void xhci_init ( struct xhci_device *xhci, void *regs ) { + uint32_t hcsparams1; + uint32_t hcsparams2; + uint32_t hccparams1; + uint32_t pagesize; + size_t caplength; + size_t rtsoff; + size_t dboff; + + /* Locate capability, operational, runtime, and doorbell registers */ + xhci->cap = regs; + caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH ); + rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF ); + dboff = readl ( xhci->cap + XHCI_CAP_DBOFF ); + xhci->op = ( xhci->cap + caplength ); + xhci->run = ( xhci->cap + rtsoff ); + xhci->db = ( xhci->cap + dboff ); + DBGC2 ( xhci, "XHCI %s cap %08lx op %08lx run %08lx db %08lx\n", + xhci->name, virt_to_phys ( xhci->cap ), + virt_to_phys ( xhci->op ), virt_to_phys ( xhci->run ), + virt_to_phys ( xhci->db ) ); + + /* Read structural parameters 1 */ + hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 ); + xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 ); + xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 ); + xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 ); + DBGC ( xhci, "XHCI %s has %d slots %d intrs %d ports\n", + xhci->name, xhci->slots, xhci->intrs, xhci->ports ); + + /* Read structural parameters 2 */ + hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 ); + xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 ); + DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n", + xhci->name, xhci->scratchpads ); + + /* Read capability parameters 1 */ + hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 ); + xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 ); + xhci->csz_shift = 
XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 ); + xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 ); + + /* Read page size */ + pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE ); + xhci->pagesize = XHCI_PAGESIZE ( pagesize ); + assert ( xhci->pagesize != 0 ); + assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 ); + DBGC2 ( xhci, "XHCI %s page size %zd bytes\n", + xhci->name, xhci->pagesize ); +} + +/** + * Find extended capability + * + * @v xhci xHCI device + * @v id Capability ID + * @v offset Offset to previous extended capability instance, or zero + * @ret offset Offset to extended capability, or zero if not found + */ +static unsigned int xhci_extended_capability ( struct xhci_device *xhci, + unsigned int id, + unsigned int offset ) { + uint32_t xecp; + unsigned int next; + + /* Locate the extended capability */ + while ( 1 ) { + + /* Locate first or next capability as applicable */ + if ( offset ) { + xecp = readl ( xhci->cap + offset ); + next = XHCI_XECP_NEXT ( xecp ); + } else { + next = xhci->xecp; + } + if ( ! next ) + return 0; + offset += next; + + /* Check if this is the requested capability */ + xecp = readl ( xhci->cap + offset ); + if ( XHCI_XECP_ID ( xecp ) == id ) + return offset; + } +} + +/** + * Write potentially 64-bit register + * + * @v xhci xHCI device + * @v value Value + * @v reg Register address + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) { + + /* If this is a 32-bit build, then this can never fail + * (allowing the compiler to optimise out the error path). + */ + if ( sizeof ( value ) <= sizeof ( uint32_t ) ) { + writel ( value, reg ); + writel ( 0, ( reg + sizeof ( uint32_t ) ) ); + return 0; + } + + /* If the device does not support 64-bit addresses and this + * address is outside the 32-bit address space, then fail. + */ + if ( ( value & ~0xffffffffULL ) && ! 
xhci->addr64 ) { + DBGC ( xhci, "XHCI %s cannot access address %lx\n", + xhci->name, value ); + return -ENOTSUP; + } + + /* If this is a 64-bit build, then writeq() is available */ + writeq ( value, reg ); + return 0; +} + +/** + * Calculate buffer alignment + * + * @v len Length + * @ret align Buffer alignment + * + * Determine alignment required for a buffer which must be aligned to + * at least XHCI_MIN_ALIGN and which must not cross a page boundary. + */ +static inline size_t xhci_align ( size_t len ) { + size_t align; + + /* Align to own length (rounded up to a power of two) */ + align = ( 1 << fls ( len - 1 ) ); + + /* Round up to XHCI_MIN_ALIGN if needed */ + if ( align < XHCI_MIN_ALIGN ) + align = XHCI_MIN_ALIGN; + + return align; +} + +/** + * Calculate device context offset + * + * @v xhci xHCI device + * @v ctx Context index + */ +static inline size_t xhci_device_context_offset ( struct xhci_device *xhci, + unsigned int ctx ) { + + return ( XHCI_DCI ( ctx ) << xhci->csz_shift ); +} + +/** + * Calculate input context offset + * + * @v xhci xHCI device + * @v ctx Context index + */ +static inline size_t xhci_input_context_offset ( struct xhci_device *xhci, + unsigned int ctx ) { + + return ( XHCI_ICI ( ctx ) << xhci->csz_shift ); +} + +/****************************************************************************** + * + * Diagnostics + * + ****************************************************************************** + */ + +/** + * Dump host controller registers + * + * @v xhci xHCI device + */ +static inline void xhci_dump ( struct xhci_device *xhci ) { + uint32_t usbcmd; + uint32_t usbsts; + uint32_t pagesize; + uint32_t dnctrl; + uint32_t config; + + /* Do nothing unless debugging is enabled */ + if ( ! DBG_LOG ) + return; + + /* Dump USBCMD */ + usbcmd = readl ( xhci->op + XHCI_OP_USBCMD ); + DBGC ( xhci, "XHCI %s USBCMD %08x%s%s\n", xhci->name, usbcmd, + ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ), + ( ( usbcmd & XHCI_USBCMD_HCRST ) ? 
" hcrst" : "" ) ); + + /* Dump USBSTS */ + usbsts = readl ( xhci->op + XHCI_OP_USBSTS ); + DBGC ( xhci, "XHCI %s USBSTS %08x%s\n", xhci->name, usbsts, + ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) ); + + /* Dump PAGESIZE */ + pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE ); + DBGC ( xhci, "XHCI %s PAGESIZE %08x\n", xhci->name, pagesize ); + + /* Dump DNCTRL */ + dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL ); + DBGC ( xhci, "XHCI %s DNCTRL %08x\n", xhci->name, dnctrl ); + + /* Dump CONFIG */ + config = readl ( xhci->op + XHCI_OP_CONFIG ); + DBGC ( xhci, "XHCI %s CONFIG %08x\n", xhci->name, config ); +} + +/** + * Dump port registers + * + * @v xhci xHCI device + * @v port Port number + */ +static inline void xhci_dump_port ( struct xhci_device *xhci, + unsigned int port ) { + uint32_t portsc; + uint32_t portpmsc; + uint32_t portli; + uint32_t porthlpmc; + + /* Do nothing unless debugging is enabled */ + if ( ! DBG_LOG ) + return; + + /* Dump PORTSC */ + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) ); + DBGC ( xhci, "XHCI %s-%d PORTSC %08x%s%s%s%s psiv=%d\n", + xhci->name, port, portsc, + ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ), + ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ), + ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ), + ( ( portsc & XHCI_PORTSC_PP ) ? 
" pp" : "" ), + XHCI_PORTSC_PSIV ( portsc ) ); + + /* Dump PORTPMSC */ + portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) ); + DBGC ( xhci, "XHCI %s-%d PORTPMSC %08x\n", xhci->name, port, portpmsc ); + + /* Dump PORTLI */ + portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) ); + DBGC ( xhci, "XHCI %s-%d PORTLI %08x\n", xhci->name, port, portli ); + + /* Dump PORTHLPMC */ + porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) ); + DBGC ( xhci, "XHCI %s-%d PORTHLPMC %08x\n", + xhci->name, port, porthlpmc ); +} + +/****************************************************************************** + * + * USB legacy support + * + ****************************************************************************** + */ + +/** Prevent the release of ownership back to BIOS */ +static int xhci_legacy_prevent_release; + +/** + * Initialise USB legacy support + * + * @v xhci xHCI device + */ +static void xhci_legacy_init ( struct xhci_device *xhci ) { + unsigned int legacy; + uint8_t bios; + + /* Locate USB legacy support capability (if present) */ + legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 ); + if ( ! legacy ) { + /* Not an error; capability may not be present */ + DBGC ( xhci, "XHCI %s has no USB legacy support capability\n", + xhci->name ); + return; + } + + /* Check if legacy USB support is enabled */ + bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS ); + if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) { + /* Not an error; already owned by OS */ + DBGC ( xhci, "XHCI %s USB legacy support already disabled\n", + xhci->name ); + return; + } + + /* Record presence of USB legacy support capability */ + xhci->legacy = legacy; +} + +/** + * Claim ownership from BIOS + * + * @v xhci xHCI device + */ +static void xhci_legacy_claim ( struct xhci_device *xhci ) { + uint32_t ctlsts; + uint8_t bios; + unsigned int i; + + /* Do nothing unless legacy support capability is present */ + if ( ! 
xhci->legacy ) + return; + + /* Claim ownership */ + writeb ( XHCI_USBLEGSUP_OS_OWNED, + xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS ); + + /* Wait for BIOS to release ownership */ + for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) { + + /* Check if BIOS has released ownership */ + bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS ); + if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) { + DBGC ( xhci, "XHCI %s claimed ownership from BIOS\n", + xhci->name ); + ctlsts = readl ( xhci->cap + xhci->legacy + + XHCI_USBLEGSUP_CTLSTS ); + if ( ctlsts ) { + DBGC ( xhci, "XHCI %s warning: BIOS retained " + "SMIs: %08x\n", xhci->name, ctlsts ); + } + return; + } + + /* Delay */ + mdelay ( 1 ); + } + + /* BIOS did not release ownership. Claim it forcibly by + * disabling all SMIs. + */ + DBGC ( xhci, "XHCI %s could not claim ownership from BIOS: forcibly " + "disabling SMIs\n", xhci->name ); + writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS ); +} + +/** + * Release ownership back to BIOS + * + * @v xhci xHCI device + */ +static void xhci_legacy_release ( struct xhci_device *xhci ) { + + /* Do nothing unless legacy support capability is present */ + if ( ! 
xhci->legacy ) + return; + + /* Do nothing if releasing ownership is prevented */ + if ( xhci_legacy_prevent_release ) { + DBGC ( xhci, "XHCI %s not releasing ownership to BIOS\n", + xhci->name ); + return; + } + + /* Release ownership */ + writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS ); + DBGC ( xhci, "XHCI %s released ownership to BIOS\n", xhci->name ); +} + +/****************************************************************************** + * + * Supported protocols + * + ****************************************************************************** + */ + +/** + * Transcribe port speed (for debugging) + * + * @v psi Protocol speed ID + * @ret speed Transcribed speed + */ +static inline const char * xhci_speed_name ( uint32_t psi ) { + static const char *exponents[4] = { "", "k", "M", "G" }; + static char buf[ 10 /* "xxxxxXbps" + NUL */ ]; + unsigned int mantissa; + unsigned int exponent; + + /* Extract mantissa and exponent */ + mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi ); + exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi ); + + /* Transcribe speed */ + snprintf ( buf, sizeof ( buf ), "%d%sbps", + mantissa, exponents[exponent] ); + return buf; +} + +/** + * Find supported protocol extended capability for a port + * + * @v xhci xHCI device + * @v port Port number + * @ret supported Offset to extended capability, or zero if not found + */ +static unsigned int xhci_supported_protocol ( struct xhci_device *xhci, + unsigned int port ) { + unsigned int supported = 0; + unsigned int offset; + unsigned int count; + uint32_t ports; + + /* Iterate over all supported protocol structures */ + while ( ( supported = xhci_extended_capability ( xhci, + XHCI_XECP_ID_SUPPORTED, + supported ) ) ) { + + /* Determine port range */ + ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS ); + offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports ); + count = XHCI_SUPPORTED_PORTS_COUNT ( ports ); + + /* Check if port lies within this range */ + if ( ( port - offset ) < count ) 
+ return supported; + } + + DBGC ( xhci, "XHCI %s-%d has no supported protocol\n", + xhci->name, port ); + return 0; +} + +/** + * Find port protocol + * + * @v xhci xHCI device + * @v port Port number + * @ret protocol USB protocol, or zero if not found + */ +static unsigned int xhci_port_protocol ( struct xhci_device *xhci, + unsigned int port ) { + unsigned int supported = xhci_supported_protocol ( xhci, port ); + union { + uint32_t raw; + char text[5]; + } name; + unsigned int protocol; + unsigned int type; + unsigned int psic; + unsigned int psiv; + unsigned int i; + uint32_t revision; + uint32_t ports; + uint32_t slot; + uint32_t psi; + + /* Fail if there is no supported protocol */ + if ( ! supported ) + return 0; + + /* Determine protocol version */ + revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION ); + protocol = XHCI_SUPPORTED_REVISION_VER ( revision ); + + /* Describe port protocol */ + if ( DBG_EXTRA ) { + name.raw = cpu_to_le32 ( readl ( xhci->cap + supported + + XHCI_SUPPORTED_NAME ) ); + name.text[4] = '\0'; + slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT ); + type = XHCI_SUPPORTED_SLOT_TYPE ( slot ); + DBGC2 ( xhci, "XHCI %s-%d %sv%04x type %d", + xhci->name, port, name.text, protocol, type ); + ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS ); + psic = XHCI_SUPPORTED_PORTS_PSIC ( ports ); + if ( psic ) { + DBGC2 ( xhci, " speeds" ); + for ( i = 0 ; i < psic ; i++ ) { + psi = readl ( xhci->cap + supported + + XHCI_SUPPORTED_PSI ( i ) ); + psiv = XHCI_SUPPORTED_PSI_VALUE ( psi ); + DBGC2 ( xhci, " %d:%s", psiv, + xhci_speed_name ( psi ) ); + } + } + if ( xhci->quirks & XHCI_BAD_PSIV ) + DBGC2 ( xhci, " (ignored)" ); + DBGC2 ( xhci, "\n" ); + } + + return protocol; +} + +/** + * Find port slot type + * + * @v xhci xHCI device + * @v port Port number + * @ret type Slot type, or negative error + */ +static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) { + unsigned int supported = 
xhci_supported_protocol ( xhci, port ); + unsigned int type; + uint32_t slot; + + /* Fail if there is no supported protocol */ + if ( ! supported ) + return -ENOTSUP; + + /* Get slot type */ + slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT ); + type = XHCI_SUPPORTED_SLOT_TYPE ( slot ); + + return type; +} + +/** + * Find port speed + * + * @v xhci xHCI device + * @v port Port number + * @v psiv Protocol speed ID value + * @ret speed Port speed, or negative error + */ +static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port, + unsigned int psiv ) { + unsigned int supported = xhci_supported_protocol ( xhci, port ); + unsigned int psic; + unsigned int mantissa; + unsigned int exponent; + unsigned int speed; + unsigned int i; + uint32_t ports; + uint32_t psi; + + /* Fail if there is no supported protocol */ + if ( ! supported ) + return -ENOTSUP; + + /* Get protocol speed ID count */ + ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS ); + psic = XHCI_SUPPORTED_PORTS_PSIC ( ports ); + + /* Use protocol speed ID table unless device is known to be faulty */ + if ( ! 
( xhci->quirks & XHCI_BAD_PSIV ) ) { + + /* Iterate over PSI dwords looking for a match */ + for ( i = 0 ; i < psic ; i++ ) { + psi = readl ( xhci->cap + supported + + XHCI_SUPPORTED_PSI ( i ) ); + if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) { + mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi ); + exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi ); + speed = USB_SPEED ( mantissa, exponent ); + return speed; + } + } + + /* Record device as faulty if no match is found */ + if ( psic != 0 ) { + DBGC ( xhci, "XHCI %s-%d spurious PSI value %d: " + "assuming PSI table is invalid\n", + xhci->name, port, psiv ); + xhci->quirks |= XHCI_BAD_PSIV; + } + } + + /* Use the default mappings */ + switch ( psiv ) { + case XHCI_SPEED_LOW : return USB_SPEED_LOW; + case XHCI_SPEED_FULL : return USB_SPEED_FULL; + case XHCI_SPEED_HIGH : return USB_SPEED_HIGH; + case XHCI_SPEED_SUPER : return USB_SPEED_SUPER; + default: + DBGC ( xhci, "XHCI %s-%d unrecognised PSI value %d\n", + xhci->name, port, psiv ); + return -ENOTSUP; + } +} + +/** + * Find protocol speed ID value + * + * @v xhci xHCI device + * @v port Port number + * @v speed USB speed + * @ret psiv Protocol speed ID value, or negative error + */ +static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port, + unsigned int speed ) { + unsigned int supported = xhci_supported_protocol ( xhci, port ); + unsigned int psic; + unsigned int mantissa; + unsigned int exponent; + unsigned int psiv; + unsigned int i; + uint32_t ports; + uint32_t psi; + + /* Fail if there is no supported protocol */ + if ( ! 
supported ) + return -ENOTSUP; + + /* Get protocol speed ID count */ + ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS ); + psic = XHCI_SUPPORTED_PORTS_PSIC ( ports ); + + /* Use the default mappings if applicable */ + if ( ( psic == 0 ) || ( xhci->quirks & XHCI_BAD_PSIV ) ) { + switch ( speed ) { + case USB_SPEED_LOW : return XHCI_SPEED_LOW; + case USB_SPEED_FULL : return XHCI_SPEED_FULL; + case USB_SPEED_HIGH : return XHCI_SPEED_HIGH; + case USB_SPEED_SUPER : return XHCI_SPEED_SUPER; + default: + DBGC ( xhci, "XHCI %s-%d non-standard speed %d\n", + xhci->name, port, speed ); + return -ENOTSUP; + } + } + + /* Iterate over PSI dwords looking for a match */ + for ( i = 0 ; i < psic ; i++ ) { + psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i )); + mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi ); + exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi ); + if ( speed == USB_SPEED ( mantissa, exponent ) ) { + psiv = XHCI_SUPPORTED_PSI_VALUE ( psi ); + return psiv; + } + } + + DBGC ( xhci, "XHCI %s-%d unrepresentable speed %#x\n", + xhci->name, port, speed ); + return -ENOENT; +} + +/****************************************************************************** + * + * Device context base address array + * + ****************************************************************************** + */ + +/** + * Allocate device context base address array + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) { + size_t len; + physaddr_t dcbaap; + int rc; + + /* Allocate and initialise structure. Must be at least + * 64-byte aligned and must not cross a page boundary, so + * align on its own size (rounded up to a power of two and + * with a minimum of 64 bytes). + */ + len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) ); + xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) ); + if ( ! 
xhci->dcbaa ) { + DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name ); + rc = -ENOMEM; + goto err_alloc; + } + memset ( xhci->dcbaa, 0, len ); + + /* Program DCBAA pointer */ + dcbaap = virt_to_phys ( xhci->dcbaa ); + if ( ( rc = xhci_writeq ( xhci, dcbaap, + xhci->op + XHCI_OP_DCBAAP ) ) != 0 ) + goto err_writeq; + + DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n", + xhci->name, dcbaap, ( dcbaap + len ) ); + return 0; + + err_writeq: + free_dma ( xhci->dcbaa, len ); + err_alloc: + return rc; +} + +/** + * Free device context base address array + * + * @v xhci xHCI device + */ +static void xhci_dcbaa_free ( struct xhci_device *xhci ) { + size_t len; + unsigned int i; + + /* Sanity check */ + for ( i = 0 ; i <= xhci->slots ; i++ ) + assert ( xhci->dcbaa[i] == 0 ); + + /* Clear DCBAA pointer */ + xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP ); + + /* Free DCBAA */ + len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) ); + free_dma ( xhci->dcbaa, len ); +} + +/****************************************************************************** + * + * Scratchpad buffers + * + ****************************************************************************** + */ + +/** + * Allocate scratchpad buffers + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) { + size_t array_len; + size_t len; + physaddr_t phys; + unsigned int i; + int rc; + + /* Do nothing if no scratchpad buffers are used */ + if ( ! xhci->scratchpads ) + return 0; + + /* Allocate scratchpads */ + len = ( xhci->scratchpads * xhci->pagesize ); + xhci->scratchpad = umalloc ( len ); + if ( ! 
xhci->scratchpad ) { + DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n", + xhci->name ); + rc = -ENOMEM; + goto err_alloc; + } + memset_user ( xhci->scratchpad, 0, 0, len ); + + /* Allocate scratchpad array */ + array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] )); + xhci->scratchpad_array = + malloc_dma ( array_len, xhci_align ( array_len ) ); + if ( ! xhci->scratchpad_array ) { + DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer " + "array\n", xhci->name ); + rc = -ENOMEM; + goto err_alloc_array; + } + + /* Populate scratchpad array */ + for ( i = 0 ; i < xhci->scratchpads ; i++ ) { + phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize )); + xhci->scratchpad_array[i] = phys; + } + + /* Set scratchpad array pointer */ + assert ( xhci->dcbaa != NULL ); + xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array )); + + DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n", + xhci->name, user_to_phys ( xhci->scratchpad, 0 ), + user_to_phys ( xhci->scratchpad, len ), + virt_to_phys ( xhci->scratchpad_array ), + ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) ); + return 0; + + free_dma ( xhci->scratchpad_array, array_len ); + err_alloc_array: + ufree ( xhci->scratchpad ); + err_alloc: + return rc; +} + +/** + * Free scratchpad buffers + * + * @v xhci xHCI device + */ +static void xhci_scratchpad_free ( struct xhci_device *xhci ) { + size_t array_len; + + /* Do nothing if no scratchpad buffers are used */ + if ( ! 
xhci->scratchpads ) + return; + + /* Clear scratchpad array pointer */ + assert ( xhci->dcbaa != NULL ); + xhci->dcbaa[0] = 0; + + /* Free scratchpad array */ + array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] )); + free_dma ( xhci->scratchpad_array, array_len ); + + /* Free scratchpads */ + ufree ( xhci->scratchpad ); +} + +/****************************************************************************** + * + * Run / stop / reset + * + ****************************************************************************** + */ + +/** + * Start xHCI device + * + * @v xhci xHCI device + */ +static void xhci_run ( struct xhci_device *xhci ) { + uint32_t config; + uint32_t usbcmd; + + /* Configure number of device slots */ + config = readl ( xhci->op + XHCI_OP_CONFIG ); + config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK; + config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots ); + writel ( config, xhci->op + XHCI_OP_CONFIG ); + + /* Set run/stop bit */ + usbcmd = readl ( xhci->op + XHCI_OP_USBCMD ); + usbcmd |= XHCI_USBCMD_RUN; + writel ( usbcmd, xhci->op + XHCI_OP_USBCMD ); +} + +/** + * Stop xHCI device + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static int xhci_stop ( struct xhci_device *xhci ) { + uint32_t usbcmd; + uint32_t usbsts; + unsigned int i; + + /* Clear run/stop bit */ + usbcmd = readl ( xhci->op + XHCI_OP_USBCMD ); + usbcmd &= ~XHCI_USBCMD_RUN; + writel ( usbcmd, xhci->op + XHCI_OP_USBCMD ); + + /* Wait for device to stop */ + for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) { + + /* Check if device is stopped */ + usbsts = readl ( xhci->op + XHCI_OP_USBSTS ); + if ( usbsts & XHCI_USBSTS_HCH ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( xhci, "XHCI %s timed out waiting for stop\n", xhci->name ); + return -ETIMEDOUT; +} + +/** + * Reset xHCI device + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static int xhci_reset ( struct xhci_device *xhci ) { + uint32_t usbcmd; + unsigned int i; + int rc; + + /* The 
xHCI specification states that resetting a running + * device may result in undefined behaviour, so try stopping + * it first. + */ + if ( ( rc = xhci_stop ( xhci ) ) != 0 ) { + /* Ignore errors and attempt to reset the device anyway */ + } + + /* Reset device */ + writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check if reset is complete */ + usbcmd = readl ( xhci->op + XHCI_OP_USBCMD ); + if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( xhci, "XHCI %s timed out waiting for reset\n", xhci->name ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Transfer request blocks + * + ****************************************************************************** + */ + +/** + * Allocate transfer request block ring + * + * @v xhci xHCI device + * @v ring TRB ring + * @v shift Ring size (log2) + * @v slot Device slot + * @v target Doorbell target + * @v stream Doorbell stream ID + * @ret rc Return status code + */ +static int xhci_ring_alloc ( struct xhci_device *xhci, + struct xhci_trb_ring *ring, + unsigned int shift, unsigned int slot, + unsigned int target, unsigned int stream ) { + struct xhci_trb_link *link; + unsigned int count; + int rc; + + /* Sanity check */ + assert ( shift > 0 ); + + /* Initialise structure */ + memset ( ring, 0, sizeof ( *ring ) ); + ring->shift = shift; + count = ( 1U << shift ); + ring->mask = ( count - 1 ); + ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) ); + ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) ); + ring->dbval = XHCI_DBVAL ( target, stream ); + + /* Allocate I/O buffers */ + ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) ); + if ( ! 
ring->iobuf ) { + rc = -ENOMEM; + goto err_alloc_iobuf; + } + + /* Allocate TRBs */ + ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) ); + if ( ! ring->trb ) { + rc = -ENOMEM; + goto err_alloc_trb; + } + memset ( ring->trb, 0, ring->len ); + + /* Initialise Link TRB */ + link = &ring->trb[count].link; + link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) ); + link->flags = XHCI_TRB_TC; + link->type = XHCI_TRB_LINK; + ring->link = link; + + return 0; + + free_dma ( ring->trb, ring->len ); + err_alloc_trb: + free ( ring->iobuf ); + err_alloc_iobuf: + return rc; +} + +/** + * Reset transfer request block ring + * + * @v ring TRB ring + */ +static void xhci_ring_reset ( struct xhci_trb_ring *ring ) { + unsigned int count = ( 1U << ring->shift ); + + /* Reset producer and consumer counters */ + ring->prod = 0; + ring->cons = 0; + + /* Reset TRBs (except Link TRB) */ + memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) ); +} + +/** + * Free transfer request block ring + * + * @v ring TRB ring + */ +static void xhci_ring_free ( struct xhci_trb_ring *ring ) { + unsigned int count = ( 1U << ring->shift ); + unsigned int i; + + /* Sanity checks */ + assert ( ring->cons == ring->prod ); + for ( i = 0 ; i < count ; i++ ) + assert ( ring->iobuf[i] == NULL ); + + /* Free TRBs */ + free_dma ( ring->trb, ring->len ); + + /* Free I/O buffers */ + free ( ring->iobuf ); +} + +/** + * Enqueue a transfer request block + * + * @v ring TRB ring + * @v iobuf I/O buffer (if any) + * @v trb Transfer request block (with empty Cycle flag) + * @ret rc Return status code + * + * This operation does not implicitly ring the doorbell register. + */ +static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf, + const union xhci_trb *trb ) { + union xhci_trb *dest; + unsigned int prod; + unsigned int mask; + unsigned int index; + unsigned int cycle; + + /* Sanity check */ + assert ( ! 
( trb->common.flags & XHCI_TRB_C ) ); + + /* Fail if ring is full */ + if ( ! xhci_ring_remaining ( ring ) ) + return -ENOBUFS; + + /* Update producer counter (and link TRB, if applicable) */ + prod = ring->prod++; + mask = ring->mask; + cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C ); + index = ( prod & mask ); + if ( index == 0 ) + ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) ); + + /* Record I/O buffer */ + ring->iobuf[index] = iobuf; + + /* Enqueue TRB */ + dest = &ring->trb[index]; + dest->template.parameter = trb->template.parameter; + dest->template.status = trb->template.status; + wmb(); + dest->template.control = ( trb->template.control | + cpu_to_le32 ( cycle ) ); + + return 0; +} + +/** + * Dequeue a transfer request block + * + * @v ring TRB ring + * @ret iobuf I/O buffer + */ +static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) { + struct io_buffer *iobuf; + unsigned int cons; + unsigned int mask; + unsigned int index; + + /* Sanity check */ + assert ( xhci_ring_fill ( ring ) != 0 ); + + /* Update consumer counter */ + cons = ring->cons++; + mask = ring->mask; + index = ( cons & mask ); + + /* Retrieve I/O buffer */ + iobuf = ring->iobuf[index]; + ring->iobuf[index] = NULL; + + return iobuf; +} + +/** + * Enqueue multiple transfer request blocks + * + * @v ring TRB ring + * @v iobuf I/O buffer + * @v trbs Transfer request blocks (with empty Cycle flag) + * @v count Number of transfer request blocks + * @ret rc Return status code + * + * This operation does not implicitly ring the doorbell register. 
+ */ +static int xhci_enqueue_multi ( struct xhci_trb_ring *ring, + struct io_buffer *iobuf, + const union xhci_trb *trbs, + unsigned int count ) { + const union xhci_trb *trb = trbs; + int rc; + + /* Sanity check */ + assert ( iobuf != NULL ); + + /* Fail if ring does not have sufficient space */ + if ( xhci_ring_remaining ( ring ) < count ) + return -ENOBUFS; + + /* Enqueue each TRB, recording the I/O buffer with the final TRB */ + while ( count-- ) { + rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ ); + assert ( rc == 0 ); /* Should never be able to fail */ + } + + return 0; +} + +/** + * Dequeue multiple transfer request blocks + * + * @v ring TRB ring + * @ret iobuf I/O buffer + */ +static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) { + struct io_buffer *iobuf; + + /* Dequeue TRBs until we reach the final TRB for an I/O buffer */ + do { + iobuf = xhci_dequeue ( ring ); + } while ( iobuf == NULL ); + + return iobuf; +} + +/** + * Ring doorbell register + * + * @v ring TRB ring + */ +static inline __attribute__ (( always_inline )) void +xhci_doorbell ( struct xhci_trb_ring *ring ) { + + wmb(); + writel ( ring->dbval, ring->db ); +} + +/****************************************************************************** + * + * Command and event rings + * + ****************************************************************************** + */ + +/** + * Allocate command ring + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static int xhci_command_alloc ( struct xhci_device *xhci ) { + physaddr_t crp; + int rc; + + /* Allocate TRB ring */ + if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2, + 0, 0, 0 ) ) != 0 ) + goto err_ring_alloc; + + /* Program command ring control register */ + crp = virt_to_phys ( xhci->command.trb ); + if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), + xhci->op + XHCI_OP_CRCR ) ) != 0 ) + goto err_writeq; + + DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n", + 
xhci->name, crp, ( crp + xhci->command.len ) ); + return 0; + + err_writeq: + xhci_ring_free ( &xhci->command ); + err_ring_alloc: + return rc; +} + +/** + * Free command ring + * + * @v xhci xHCI device + */ +static void xhci_command_free ( struct xhci_device *xhci ) { + + /* Sanity check */ + assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 ); + + /* Clear command ring control register */ + xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR ); + + /* Free TRB ring */ + xhci_ring_free ( &xhci->command ); +} + +/** + * Allocate event ring + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static int xhci_event_alloc ( struct xhci_device *xhci ) { + struct xhci_event_ring *event = &xhci->event; + unsigned int count; + size_t len; + int rc; + + /* Allocate event ring */ + count = ( 1 << XHCI_EVENT_TRBS_LOG2 ); + len = ( count * sizeof ( event->trb[0] ) ); + event->trb = malloc_dma ( len, xhci_align ( len ) ); + if ( ! event->trb ) { + rc = -ENOMEM; + goto err_alloc_trb; + } + memset ( event->trb, 0, len ); + + /* Allocate event ring segment table */ + event->segment = malloc_dma ( sizeof ( event->segment[0] ), + xhci_align ( sizeof (event->segment[0]))); + if ( ! 
event->segment ) {
+		rc = -ENOMEM;
+		goto err_alloc_segment;
+	}
+	memset ( event->segment, 0, sizeof ( event->segment[0] ) );
+	event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
+	event->segment[0].count = cpu_to_le32 ( count );
+
+	/* Program event ring registers */
+	writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
+	if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
+				  xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
+		goto err_writeq_erdp;
+	if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
+				  xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
+		goto err_writeq_erstba;
+
+	DBGC2 ( xhci, "XHCI %s event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
+		xhci->name, virt_to_phys ( event->trb ),
+		( virt_to_phys ( event->trb ) + len ),
+		virt_to_phys ( event->segment ),
+		( virt_to_phys ( event->segment ) +
+		  sizeof (event->segment[0] ) ) );
+	return 0;
+
+	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
+ err_writeq_erstba:
+	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
+ err_writeq_erdp:
+	free_dma ( event->segment, sizeof ( event->segment[0] ) );
+ err_alloc_segment:
+	free_dma ( event->trb, len );
+ err_alloc_trb:
+	return rc;
+}
+
+/**
+ * Free event ring
+ *
+ * @v xhci		xHCI device
+ */
+static void xhci_event_free ( struct xhci_device *xhci ) {
+	struct xhci_event_ring *event = &xhci->event;
+	unsigned int count;
+	size_t len;
+
+	/* Clear event ring registers */
+	writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
+	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
+	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
+
+	/* Free event ring segment table */
+	free_dma ( event->segment, sizeof ( event->segment[0] ) );
+
+	/* Free event ring */
+	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
+	len = ( count * sizeof ( event->trb[0] ) );
+	free_dma ( event->trb, len );
+}
+
+/**
+ * Handle transfer event
+ *
+ * @v xhci		xHCI device
+ * @v trb		Transfer event TRB
+ */
+static void xhci_transfer ( struct 
xhci_device *xhci, + struct xhci_trb_transfer *trb ) { + struct xhci_slot *slot; + struct xhci_endpoint *endpoint; + struct io_buffer *iobuf; + int rc; + + /* Profile transfer events */ + profile_start ( &xhci_transfer_profiler ); + + /* Identify slot */ + if ( ( trb->slot > xhci->slots ) || + ( ( slot = xhci->slot[trb->slot] ) == NULL ) ) { + DBGC ( xhci, "XHCI %s transfer event invalid slot %d:\n", + xhci->name, trb->slot ); + DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) ); + return; + } + + /* Identify endpoint */ + if ( ( trb->endpoint >= XHCI_CTX_END ) || + ( ( endpoint = slot->endpoint[trb->endpoint] ) == NULL ) ) { + DBGC ( xhci, "XHCI %s slot %d transfer event invalid epid " + "%d:\n", xhci->name, slot->id, trb->endpoint ); + DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) ); + return; + } + + /* Dequeue TRB(s) */ + iobuf = xhci_dequeue_multi ( &endpoint->ring ); + assert ( iobuf != NULL ); + + /* Check for errors */ + if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) || + ( trb->code == XHCI_CMPLT_SHORT ) ) ) { + + /* Construct error */ + rc = -ECODE ( trb->code ); + DBGC ( xhci, "XHCI %s slot %d ctx %d failed (code %d): %s\n", + xhci->name, slot->id, endpoint->ctx, trb->code, + strerror ( rc ) ); + DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) ); + + /* Sanity check */ + assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK ) + != XHCI_ENDPOINT_RUNNING ); + + /* Report failure to USB core */ + usb_complete_err ( endpoint->ep, iobuf, rc ); + return; + } + + /* Record actual transfer size */ + iob_unput ( iobuf, le16_to_cpu ( trb->residual ) ); + + /* Sanity check (for successful completions only) */ + assert ( xhci_ring_consumed ( &endpoint->ring ) == + le64_to_cpu ( trb->transfer ) ); + + /* Report completion to USB core */ + usb_complete ( endpoint->ep, iobuf ); + profile_stop ( &xhci_transfer_profiler ); +} + +/** + * Handle command completion event + * + * @v xhci xHCI device + * @v trb Command completion event + */ +static void xhci_complete ( struct xhci_device 
*xhci, + struct xhci_trb_complete *trb ) { + int rc; + + /* Ignore "command ring stopped" notifications */ + if ( trb->code == XHCI_CMPLT_CMD_STOPPED ) { + DBGC2 ( xhci, "XHCI %s command ring stopped\n", xhci->name ); + return; + } + + /* Ignore unexpected completions */ + if ( ! xhci->pending ) { + rc = -ECODE ( trb->code ); + DBGC ( xhci, "XHCI %s unexpected completion (code %d): %s\n", + xhci->name, trb->code, strerror ( rc ) ); + DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) ); + return; + } + + /* Dequeue command TRB */ + xhci_dequeue ( &xhci->command ); + + /* Sanity check */ + assert ( xhci_ring_consumed ( &xhci->command ) == + le64_to_cpu ( trb->command ) ); + + /* Record completion */ + memcpy ( xhci->pending, trb, sizeof ( *xhci->pending ) ); + xhci->pending = NULL; +} + +/** + * Handle port status event + * + * @v xhci xHCI device + * @v trb Port status event + */ +static void xhci_port_status ( struct xhci_device *xhci, + struct xhci_trb_port_status *trb ) { + struct usb_port *port = usb_port ( xhci->bus->hub, trb->port ); + uint32_t portsc; + + /* Sanity check */ + assert ( ( trb->port > 0 ) && ( trb->port <= xhci->ports ) ); + + /* Record disconnections and clear changes */ + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( trb->port ) ); + port->disconnected |= ( portsc & XHCI_PORTSC_CSC ); + portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE ); + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( trb->port ) ); + + /* Report port status change */ + usb_port_changed ( port ); +} + +/** + * Handle host controller event + * + * @v xhci xHCI device + * @v trb Host controller event + */ +static void xhci_host_controller ( struct xhci_device *xhci, + struct xhci_trb_host_controller *trb ) { + int rc; + + /* Construct error */ + rc = -ECODE ( trb->code ); + DBGC ( xhci, "XHCI %s host controller event (code %d): %s\n", + xhci->name, trb->code, strerror ( rc ) ); +} + +/** + * Poll event ring + * + * @v xhci xHCI device + */ +static void xhci_event_poll ( struct 
xhci_device *xhci ) {
+	struct xhci_event_ring *event = &xhci->event;
+	union xhci_trb *trb;
+	unsigned int shift = XHCI_EVENT_TRBS_LOG2;
+	unsigned int count = ( 1 << shift );
+	unsigned int mask = ( count - 1 );
+	unsigned int consumed;
+	unsigned int type;
+
+	/* Poll for events */
+	profile_start ( &xhci_event_profiler );
+	for ( consumed = 0 ; ; consumed++ ) {
+
+		/* Stop if we reach an empty TRB */
+		rmb();
+		trb = &event->trb[ event->cons & mask ];
+		if ( ! ( ( trb->common.flags ^
+			   ( event->cons >> shift ) ) & XHCI_TRB_C ) )
+			break;
+
+		/* Consume this TRB */
+		event->cons++;
+
+		/* Handle TRB */
+		type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
+		switch ( type ) {
+
+		case XHCI_TRB_TRANSFER :
+			xhci_transfer ( xhci, &trb->transfer );
+			break;
+
+		case XHCI_TRB_COMPLETE :
+			xhci_complete ( xhci, &trb->complete );
+			break;
+
+		case XHCI_TRB_PORT_STATUS:
+			xhci_port_status ( xhci, &trb->port );
+			break;
+
+		case XHCI_TRB_HOST_CONTROLLER:
+			xhci_host_controller ( xhci, &trb->host );
+			break;
+
+		default:
+			DBGC ( xhci, "XHCI %s unrecognised event %#x:\n",
+			       xhci->name, ( event->cons - 1 ) );
+			DBGC_HDA ( xhci, virt_to_phys ( trb ),
+				   trb, sizeof ( *trb ) );
+			break;
+		}
+	}
+
+	/* Update dequeue pointer if applicable */
+	if ( consumed ) {
+		xhci_writeq ( xhci, virt_to_phys ( trb ),
+			      xhci->run + XHCI_RUN_ERDP ( 0 ) );
+		profile_stop ( &xhci_event_profiler );
+	}
+}
+
+/**
+ * Abort command
+ *
+ * @v xhci		xHCI device
+ */
+static void xhci_abort ( struct xhci_device *xhci ) {
+	physaddr_t crp;
+
+	/* Abort the command */
+	DBGC2 ( xhci, "XHCI %s aborting command\n", xhci->name );
+	xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );
+
+	/* Allow time for command to abort */
+	mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );
+
+	/* Sanity check */
+	assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
+
+	/* Consume (and ignore) any final command status */
+	xhci_event_poll ( xhci );
+
+	/* Reset the command ring control register */
+	
xhci_ring_reset ( &xhci->command ); + crp = virt_to_phys ( xhci->command.trb ); + xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR ); +} + +/** + * Issue command and wait for completion + * + * @v xhci xHCI device + * @v trb Transfer request block (with empty Cycle flag) + * @ret rc Return status code + * + * On a successful completion, the TRB will be overwritten with the + * completion. + */ +static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) { + struct xhci_trb_complete *complete = &trb->complete; + unsigned int i; + int rc; + + /* Record the pending command */ + xhci->pending = trb; + + /* Enqueue the command */ + if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 ) + goto err_enqueue; + + /* Ring the command doorbell */ + xhci_doorbell ( &xhci->command ); + + /* Wait for the command to complete */ + for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) { + + /* Poll event ring */ + xhci_event_poll ( xhci ); + + /* Check for completion */ + if ( ! 
xhci->pending ) { + if ( complete->code != XHCI_CMPLT_SUCCESS ) { + rc = -ECODE ( complete->code ); + DBGC ( xhci, "XHCI %s command failed (code " + "%d): %s\n", xhci->name, complete->code, + strerror ( rc ) ); + DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) ); + return rc; + } + return 0; + } + + /* Delay */ + mdelay ( 1 ); + } + + /* Timeout */ + DBGC ( xhci, "XHCI %s timed out waiting for completion\n", xhci->name ); + rc = -ETIMEDOUT; + + /* Abort command */ + xhci_abort ( xhci ); + + err_enqueue: + xhci->pending = NULL; + return rc; +} + +/** + * Issue NOP and wait for completion + * + * @v xhci xHCI device + * @ret rc Return status code + */ +static inline int xhci_nop ( struct xhci_device *xhci ) { + union xhci_trb trb; + struct xhci_trb_common *nop = &trb.common; + int rc; + + /* Construct command */ + memset ( nop, 0, sizeof ( *nop ) ); + nop->flags = XHCI_TRB_IOC; + nop->type = XHCI_TRB_NOP_CMD; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Enable slot + * + * @v xhci xHCI device + * @v type Slot type + * @ret slot Device slot ID, or negative error + */ +static inline int xhci_enable_slot ( struct xhci_device *xhci, + unsigned int type ) { + union xhci_trb trb; + struct xhci_trb_enable_slot *enable = &trb.enable; + struct xhci_trb_complete *enabled = &trb.complete; + unsigned int slot; + int rc; + + /* Construct command */ + memset ( enable, 0, sizeof ( *enable ) ); + enable->slot = type; + enable->type = XHCI_TRB_ENABLE_SLOT; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) { + DBGC ( xhci, "XHCI %s could not enable new slot: %s\n", + xhci->name, strerror ( rc ) ); + return rc; + } + + /* Extract slot number */ + slot = enabled->slot; + + DBGC2 ( xhci, "XHCI %s slot %d enabled\n", xhci->name, slot ); + return slot; +} + +/** + * Disable slot + * + * @v xhci xHCI device + * @v slot Device slot + * @ret rc Return status 
code + */ +static inline int xhci_disable_slot ( struct xhci_device *xhci, + unsigned int slot ) { + union xhci_trb trb; + struct xhci_trb_disable_slot *disable = &trb.disable; + int rc; + + /* Construct command */ + memset ( disable, 0, sizeof ( *disable ) ); + disable->type = XHCI_TRB_DISABLE_SLOT; + disable->slot = slot; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) { + DBGC ( xhci, "XHCI %s could not disable slot %d: %s\n", + xhci->name, slot, strerror ( rc ) ); + return rc; + } + + DBGC2 ( xhci, "XHCI %s slot %d disabled\n", xhci->name, slot ); + return 0; +} + +/** + * Issue context-based command and wait for completion + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @v type TRB type + * @v populate Input context populater + * @ret rc Return status code + */ +static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot, + struct xhci_endpoint *endpoint, unsigned int type, + void ( * populate ) ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint, + void *input ) ) { + union xhci_trb trb; + struct xhci_trb_context *context = &trb.context; + size_t len; + void *input; + int rc; + + /* Allocate an input context */ + len = xhci_input_context_offset ( xhci, XHCI_CTX_END ); + input = malloc_dma ( len, xhci_align ( len ) ); + if ( ! 
input ) { + rc = -ENOMEM; + goto err_alloc; + } + memset ( input, 0, len ); + + /* Populate input context */ + populate ( xhci, slot, endpoint, input ); + + /* Construct command */ + memset ( context, 0, sizeof ( *context ) ); + context->type = type; + context->input = cpu_to_le64 ( virt_to_phys ( input ) ); + context->slot = slot->id; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) + goto err_command; + + err_command: + free_dma ( input, len ); + err_alloc: + return rc; +} + +/** + * Populate address device input context + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @v input Input context + */ +static void xhci_address_device_input ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint, + void *input ) { + struct xhci_control_context *control_ctx; + struct xhci_slot_context *slot_ctx; + struct xhci_endpoint_context *ep_ctx; + + /* Sanity checks */ + assert ( endpoint->ctx == XHCI_CTX_EP0 ); + + /* Populate control context */ + control_ctx = input; + control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) | + ( 1 << XHCI_CTX_EP0 ) ); + + /* Populate slot context */ + slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT )); + slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv, + slot->route ) ); + slot_ctx->port = slot->port; + slot_ctx->tt_id = slot->tt_id; + slot_ctx->tt_port = slot->tt_port; + + /* Populate control endpoint context */ + ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) ); + ep_ctx->type = XHCI_EP_TYPE_CONTROL; + ep_ctx->burst = endpoint->ep->burst; + ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu ); + ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) | + XHCI_EP_DCS ); + ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN ); +} + +/** + * Address device + * + * @v xhci xHCI device + * @v slot Device slot + * @ret rc Return status code + */ +static inline int 
xhci_address_device ( struct xhci_device *xhci, + struct xhci_slot *slot ) { + struct usb_device *usb = slot->usb; + struct xhci_slot_context *slot_ctx; + int rc; + + /* Assign device address */ + if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0], + XHCI_TRB_ADDRESS_DEVICE, + xhci_address_device_input ) ) != 0 ) + return rc; + + /* Get assigned address */ + slot_ctx = ( slot->context + + xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) ); + usb->address = slot_ctx->address; + DBGC2 ( xhci, "XHCI %s assigned address %d to %s\n", + xhci->name, usb->address, usb->name ); + + return 0; +} + +/** + * Populate configure endpoint input context + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @v input Input context + */ +static void xhci_configure_endpoint_input ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint, + void *input ) { + struct xhci_control_context *control_ctx; + struct xhci_slot_context *slot_ctx; + struct xhci_endpoint_context *ep_ctx; + + /* Populate control context */ + control_ctx = input; + control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) | + ( 1 << endpoint->ctx ) ); + + /* Populate slot context */ + slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT )); + slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ), + ( slot->ports ? 
1 : 0 ), + slot->psiv, 0 ) ); + slot_ctx->ports = slot->ports; + + /* Populate endpoint context */ + ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) ); + ep_ctx->interval = endpoint->interval; + ep_ctx->type = endpoint->type; + ep_ctx->burst = endpoint->ep->burst; + ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu ); + ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) | + XHCI_EP_DCS ); + ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */ +} + +/** + * Configure endpoint + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @ret rc Return status code + */ +static inline int xhci_configure_endpoint ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint ) { + int rc; + + /* Configure endpoint */ + if ( ( rc = xhci_context ( xhci, slot, endpoint, + XHCI_TRB_CONFIGURE_ENDPOINT, + xhci_configure_endpoint_input ) ) != 0 ) + return rc; + + DBGC2 ( xhci, "XHCI %s slot %d ctx %d configured\n", + xhci->name, slot->id, endpoint->ctx ); + return 0; +} + +/** + * Populate deconfigure endpoint input context + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @v input Input context + */ +static void +xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused, + struct xhci_slot *slot __unused, + struct xhci_endpoint *endpoint, + void *input ) { + struct xhci_control_context *control_ctx; + struct xhci_slot_context *slot_ctx; + + /* Populate control context */ + control_ctx = input; + control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT ); + control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx ); + + /* Populate slot context */ + slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT )); + slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ), + 0, 0, 0 ) ); +} + +/** + * Deconfigure endpoint + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @ret rc Return status code + */ 
+static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint ) { + int rc; + + /* Deconfigure endpoint */ + if ( ( rc = xhci_context ( xhci, slot, endpoint, + XHCI_TRB_CONFIGURE_ENDPOINT, + xhci_deconfigure_endpoint_input ) ) != 0 ) + return rc; + + DBGC2 ( xhci, "XHCI %s slot %d ctx %d deconfigured\n", + xhci->name, slot->id, endpoint->ctx ); + return 0; +} + +/** + * Populate evaluate context input context + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @v input Input context + */ +static void xhci_evaluate_context_input ( struct xhci_device *xhci, + struct xhci_slot *slot __unused, + struct xhci_endpoint *endpoint, + void *input ) { + struct xhci_control_context *control_ctx; + struct xhci_slot_context *slot_ctx; + struct xhci_endpoint_context *ep_ctx; + + /* Populate control context */ + control_ctx = input; + control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) | + ( 1 << endpoint->ctx ) ); + + /* Populate slot context */ + slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT )); + slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ), + 0, 0, 0 ) ); + + /* Populate endpoint context */ + ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) ); + ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu ); +} + +/** + * Evaluate context + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @ret rc Return status code + */ +static inline int xhci_evaluate_context ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint ) { + int rc; + + /* Configure endpoint */ + if ( ( rc = xhci_context ( xhci, slot, endpoint, + XHCI_TRB_EVALUATE_CONTEXT, + xhci_evaluate_context_input ) ) != 0 ) + return rc; + + DBGC2 ( xhci, "XHCI %s slot %d ctx %d (re-)evaluated\n", + xhci->name, slot->id, endpoint->ctx ); + return 0; +} + +/** + * Reset endpoint + * + * @v xhci xHCI device + * @v 
slot Device slot + * @v endpoint Endpoint + * @ret rc Return status code + */ +static inline int xhci_reset_endpoint ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint ) { + union xhci_trb trb; + struct xhci_trb_reset_endpoint *reset = &trb.reset; + int rc; + + /* Construct command */ + memset ( reset, 0, sizeof ( *reset ) ); + reset->slot = slot->id; + reset->endpoint = endpoint->ctx; + reset->type = XHCI_TRB_RESET_ENDPOINT; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d ctx %d could not reset endpoint " + "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx, + endpoint->context->state, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Stop endpoint + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @ret rc Return status code + */ +static inline int xhci_stop_endpoint ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint ) { + union xhci_trb trb; + struct xhci_trb_stop_endpoint *stop = &trb.stop; + int rc; + + /* Construct command */ + memset ( stop, 0, sizeof ( *stop ) ); + stop->slot = slot->id; + stop->endpoint = endpoint->ctx; + stop->type = XHCI_TRB_STOP_ENDPOINT; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d ctx %d could not stop endpoint " + "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx, + endpoint->context->state, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Set transfer ring dequeue pointer + * + * @v xhci xHCI device + * @v slot Device slot + * @v endpoint Endpoint + * @ret rc Return status code + */ +static inline int +xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci, + struct xhci_slot *slot, + struct xhci_endpoint *endpoint ) { + union xhci_trb trb; + struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue; + struct 
xhci_trb_ring *ring = &endpoint->ring; + unsigned int cons; + unsigned int mask; + unsigned int index; + unsigned int dcs; + int rc; + + /* Construct command */ + memset ( dequeue, 0, sizeof ( *dequeue ) ); + cons = ring->cons; + mask = ring->mask; + dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS ); + index = ( cons & mask ); + dequeue->dequeue = + cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs ); + dequeue->slot = slot->id; + dequeue->endpoint = endpoint->ctx; + dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER; + + /* Issue command and wait for completion */ + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d ctx %d could not set TR dequeue " + "pointer in state %d: %s\n", xhci->name, slot->id, + endpoint->ctx, endpoint->context->state, strerror ( rc)); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * Endpoint operations + * + ****************************************************************************** + */ + +/** + * Open endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int xhci_endpoint_open ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + struct xhci_slot *slot = usb_get_hostdata ( usb ); + struct xhci_device *xhci = slot->xhci; + struct xhci_endpoint *endpoint; + unsigned int ctx; + unsigned int type; + unsigned int interval; + int rc; + + /* Calculate context index */ + ctx = XHCI_CTX ( ep->address ); + assert ( slot->endpoint[ctx] == NULL ); + + /* Calculate endpoint type */ + type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) ) + type = XHCI_EP_TYPE_CONTROL; + if ( ep->address & USB_DIR_IN ) + type |= XHCI_EP_TYPE_IN; + + /* Calculate interval */ + if ( type & XHCI_EP_TYPE_PERIODIC ) { + interval = ( fls ( ep->interval ) - 1 ); + } else { + interval = ep->interval; + } + + /* Allocate and initialise structure */ 
+ endpoint = zalloc ( sizeof ( *endpoint ) ); + if ( ! endpoint ) { + rc = -ENOMEM; + goto err_alloc; + } + usb_endpoint_set_hostdata ( ep, endpoint ); + slot->endpoint[ctx] = endpoint; + endpoint->xhci = xhci; + endpoint->slot = slot; + endpoint->ep = ep; + endpoint->ctx = ctx; + endpoint->type = type; + endpoint->interval = interval; + endpoint->context = ( ( ( void * ) slot->context ) + + xhci_device_context_offset ( xhci, ctx ) ); + + /* Allocate transfer ring */ + if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring, + XHCI_TRANSFER_TRBS_LOG2, + slot->id, ctx, 0 ) ) != 0 ) + goto err_ring_alloc; + + /* Configure endpoint, if applicable */ + if ( ( ctx != XHCI_CTX_EP0 ) && + ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 )) + goto err_configure_endpoint; + + DBGC2 ( xhci, "XHCI %s slot %d ctx %d ring [%08lx,%08lx)\n", + xhci->name, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ), + ( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) ); + return 0; + + xhci_deconfigure_endpoint ( xhci, slot, endpoint ); + err_configure_endpoint: + xhci_ring_free ( &endpoint->ring ); + err_ring_alloc: + slot->endpoint[ctx] = NULL; + free ( endpoint ); + err_alloc: + return rc; +} + +/** + * Close endpoint + * + * @v ep USB endpoint + */ +static void xhci_endpoint_close ( struct usb_endpoint *ep ) { + struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct xhci_slot *slot = endpoint->slot; + struct xhci_device *xhci = slot->xhci; + struct io_buffer *iobuf; + unsigned int ctx = endpoint->ctx; + + /* Deconfigure endpoint, if applicable */ + if ( ctx != XHCI_CTX_EP0 ) + xhci_deconfigure_endpoint ( xhci, slot, endpoint ); + + /* Cancel any incomplete transfers */ + while ( xhci_ring_fill ( &endpoint->ring ) ) { + iobuf = xhci_dequeue_multi ( &endpoint->ring ); + usb_complete_err ( ep, iobuf, -ECANCELED ); + } + + /* Free endpoint */ + xhci_ring_free ( &endpoint->ring ); + slot->endpoint[ctx] = NULL; + free ( endpoint ); +} + +/** + * 
Reset endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int xhci_endpoint_reset ( struct usb_endpoint *ep ) { + struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct xhci_slot *slot = endpoint->slot; + struct xhci_device *xhci = slot->xhci; + int rc; + + /* Reset endpoint context */ + if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 ) + return rc; + + /* Set transfer ring dequeue pointer */ + if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0) + return rc; + + /* Ring doorbell to resume processing */ + xhci_doorbell ( &endpoint->ring ); + + DBGC ( xhci, "XHCI %s slot %d ctx %d reset\n", + xhci->name, slot->id, endpoint->ctx ); + return 0; +} + +/** + * Update MTU + * + * @v ep USB endpoint + * @ret rc Return status code + */ +static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) { + struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct xhci_slot *slot = endpoint->slot; + struct xhci_device *xhci = slot->xhci; + int rc; + + /* Evalulate context */ + if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Enqueue message transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int xhci_endpoint_message ( struct usb_endpoint *ep, + struct io_buffer *iobuf ) { + struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct usb_setup_packet *packet; + unsigned int input; + size_t len; + union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ + + 1 /* status */ ]; + union xhci_trb *trb = trbs; + struct xhci_trb_setup *setup; + struct xhci_trb_data *data; + struct xhci_trb_status *status; + int rc; + + /* Profile message transfers */ + profile_start ( &xhci_message_profiler ); + + /* Construct setup stage TRB */ + memset ( trbs, 0, sizeof ( trbs ) ); + assert ( iob_len ( iobuf ) >= sizeof ( *packet ) ); + packet = iobuf->data; + iob_pull ( iobuf, 
sizeof ( *packet ) ); + setup = &(trb++)->setup; + memcpy ( &setup->packet, packet, sizeof ( setup->packet ) ); + setup->len = cpu_to_le32 ( sizeof ( *packet ) ); + setup->flags = XHCI_TRB_IDT; + setup->type = XHCI_TRB_SETUP; + len = iob_len ( iobuf ); + input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) ); + if ( len ) + setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT ); + + /* Construct data stage TRB, if applicable */ + if ( len ) { + data = &(trb++)->data; + data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) ); + data->len = cpu_to_le32 ( len ); + data->type = XHCI_TRB_DATA; + data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT ); + } + + /* Construct status stage TRB */ + status = &(trb++)->status; + status->flags = XHCI_TRB_IOC; + status->type = XHCI_TRB_STATUS; + status->direction = + ( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN ); + + /* Enqueue TRBs */ + if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs, + ( trb - trbs ) ) ) != 0 ) + return rc; + + /* Ring the doorbell */ + xhci_doorbell ( &endpoint->ring ); + + profile_stop ( &xhci_message_profiler ); + return 0; +} + +/** + * Calculate number of TRBs + * + * @v len Length of data + * @v zlp Append a zero-length packet + * @ret count Number of transfer descriptors + */ +static unsigned int xhci_endpoint_count ( size_t len, int zlp ) { + unsigned int count; + + /* Split into 64kB TRBs */ + count = ( ( len + XHCI_MTU - 1 ) / XHCI_MTU ); + + /* Append a zero-length TRB if applicable */ + if ( zlp || ( count == 0 ) ) + count++; + + return count; +} + +/** + * Enqueue stream transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v zlp Append a zero-length packet + * @ret rc Return status code + */ +static int xhci_endpoint_stream ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int zlp ) { + struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + void *data = iobuf->data; + size_t len = iob_len ( iobuf ); + unsigned int count = 
xhci_endpoint_count ( len, zlp ); + union xhci_trb trbs[count]; + union xhci_trb *trb = trbs; + struct xhci_trb_normal *normal; + unsigned int i; + size_t trb_len; + int rc; + + /* Profile stream transfers */ + profile_start ( &xhci_stream_profiler ); + + /* Construct normal TRBs */ + memset ( &trbs, 0, sizeof ( trbs ) ); + for ( i = 0 ; i < count ; i ++ ) { + + /* Calculate TRB length */ + trb_len = XHCI_MTU; + if ( trb_len > len ) + trb_len = len; + + /* Construct normal TRB */ + normal = &trb->normal; + normal->data = cpu_to_le64 ( virt_to_phys ( data ) ); + normal->len = cpu_to_le32 ( trb_len ); + normal->type = XHCI_TRB_NORMAL; + normal->flags = XHCI_TRB_CH; + + /* Move to next TRB */ + data += trb_len; + len -= trb_len; + trb++; + } + + /* Mark zero-length packet (if present) as a separate transfer */ + if ( zlp && ( count > 1 ) ) + trb[-2].normal.flags = 0; + + /* Generate completion for final TRB */ + trb[-1].normal.flags = XHCI_TRB_IOC; + + /* Enqueue TRBs */ + if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs, + count ) ) != 0 ) + return rc; + + /* Ring the doorbell */ + xhci_doorbell ( &endpoint->ring ); + + profile_stop ( &xhci_stream_profiler ); + return 0; +} + +/****************************************************************************** + * + * Device operations + * + ****************************************************************************** + */ + +/** + * Open device + * + * @v usb USB device + * @ret rc Return status code + */ +static int xhci_device_open ( struct usb_device *usb ) { + struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus ); + struct usb_port *tt = usb_transaction_translator ( usb ); + struct xhci_slot *slot; + struct xhci_slot *tt_slot; + size_t len; + int type; + int id; + int rc; + + /* Determine applicable slot type */ + type = xhci_port_slot_type ( xhci, usb->port->address ); + if ( type < 0 ) { + rc = type; + DBGC ( xhci, "XHCI %s-%d has no slot type\n", + xhci->name, usb->port->address ); 
+ goto err_type; + } + + /* Allocate a device slot number */ + id = xhci_enable_slot ( xhci, type ); + if ( id < 0 ) { + rc = id; + goto err_enable_slot; + } + assert ( ( id > 0 ) && ( ( unsigned int ) id <= xhci->slots ) ); + assert ( xhci->slot[id] == NULL ); + + /* Allocate and initialise structure */ + slot = zalloc ( sizeof ( *slot ) ); + if ( ! slot ) { + rc = -ENOMEM; + goto err_alloc; + } + usb_set_hostdata ( usb, slot ); + xhci->slot[id] = slot; + slot->xhci = xhci; + slot->usb = usb; + slot->id = id; + if ( tt ) { + tt_slot = usb_get_hostdata ( tt->hub->usb ); + slot->tt_id = tt_slot->id; + slot->tt_port = tt->address; + } + + /* Allocate a device context */ + len = xhci_device_context_offset ( xhci, XHCI_CTX_END ); + slot->context = malloc_dma ( len, xhci_align ( len ) ); + if ( ! slot->context ) { + rc = -ENOMEM; + goto err_alloc_context; + } + memset ( slot->context, 0, len ); + + /* Set device context base address */ + assert ( xhci->dcbaa[id] == 0 ); + xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) ); + + DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n", + xhci->name, slot->id, virt_to_phys ( slot->context ), + ( virt_to_phys ( slot->context ) + len ), usb->name ); + return 0; + + xhci->dcbaa[id] = 0; + free_dma ( slot->context, len ); + err_alloc_context: + xhci->slot[id] = NULL; + free ( slot ); + err_alloc: + xhci_disable_slot ( xhci, id ); + err_enable_slot: + err_type: + return rc; +} + +/** + * Close device + * + * @v usb USB device + */ +static void xhci_device_close ( struct usb_device *usb ) { + struct xhci_slot *slot = usb_get_hostdata ( usb ); + struct xhci_device *xhci = slot->xhci; + size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END ); + unsigned int id = slot->id; + int rc; + + /* Disable slot */ + if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) { + /* Slot is still enabled. 
Leak the slot context, + * since the controller may still write to this + * memory, and leave the DCBAA entry intact. + * + * If the controller later reports that this same slot + * has been re-enabled, then some assertions will be + * triggered. + */ + DBGC ( xhci, "XHCI %s slot %d leaking context memory\n", + xhci->name, slot->id ); + slot->context = NULL; + } + + /* Free slot */ + if ( slot->context ) { + free_dma ( slot->context, len ); + xhci->dcbaa[id] = 0; + } + xhci->slot[id] = NULL; + free ( slot ); +} + +/** + * Assign device address + * + * @v usb USB device + * @ret rc Return status code + */ +static int xhci_device_address ( struct usb_device *usb ) { + struct xhci_slot *slot = usb_get_hostdata ( usb ); + struct xhci_device *xhci = slot->xhci; + struct usb_port *root_port; + int psiv; + int rc; + + /* Calculate route string */ + slot->route = usb_route_string ( usb ); + + /* Calculate root hub port number */ + root_port = usb_root_hub_port ( usb ); + slot->port = root_port->address; + + /* Calculate protocol speed ID */ + psiv = xhci_port_psiv ( xhci, slot->port, usb->speed ); + if ( psiv < 0 ) { + rc = psiv; + return rc; + } + slot->psiv = psiv; + + /* Address device */ + if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 ) + return rc; + + return 0; +} + +/****************************************************************************** + * + * Bus operations + * + ****************************************************************************** + */ + +/** + * Open USB bus + * + * @v bus USB bus + * @ret rc Return status code + */ +static int xhci_bus_open ( struct usb_bus *bus ) { + struct xhci_device *xhci = usb_bus_get_hostdata ( bus ); + int rc; + + /* Allocate device slot array */ + xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) ); + if ( ! 
xhci->slot ) { + rc = -ENOMEM; + goto err_slot_alloc; + } + + /* Allocate device context base address array */ + if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 ) + goto err_dcbaa_alloc; + + /* Allocate scratchpad buffers */ + if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 ) + goto err_scratchpad_alloc; + + /* Allocate command ring */ + if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 ) + goto err_command_alloc; + + /* Allocate event ring */ + if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 ) + goto err_event_alloc; + + /* Start controller */ + xhci_run ( xhci ); + + return 0; + + xhci_stop ( xhci ); + xhci_event_free ( xhci ); + err_event_alloc: + xhci_command_free ( xhci ); + err_command_alloc: + xhci_scratchpad_free ( xhci ); + err_scratchpad_alloc: + xhci_dcbaa_free ( xhci ); + err_dcbaa_alloc: + free ( xhci->slot ); + err_slot_alloc: + return rc; +} + +/** + * Close USB bus + * + * @v bus USB bus + */ +static void xhci_bus_close ( struct usb_bus *bus ) { + struct xhci_device *xhci = usb_bus_get_hostdata ( bus ); + unsigned int i; + + /* Sanity checks */ + assert ( xhci->slot != NULL ); + for ( i = 0 ; i <= xhci->slots ; i++ ) + assert ( xhci->slot[i] == NULL ); + + xhci_stop ( xhci ); + xhci_event_free ( xhci ); + xhci_command_free ( xhci ); + xhci_scratchpad_free ( xhci ); + xhci_dcbaa_free ( xhci ); + free ( xhci->slot ); +} + +/** + * Poll USB bus + * + * @v bus USB bus + */ +static void xhci_bus_poll ( struct usb_bus *bus ) { + struct xhci_device *xhci = usb_bus_get_hostdata ( bus ); + + /* Poll event ring */ + xhci_event_poll ( xhci ); +} + +/****************************************************************************** + * + * Hub operations + * + ****************************************************************************** + */ + +/** + * Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int xhci_hub_open ( struct usb_hub *hub ) { + struct xhci_slot *slot; + + /* Do nothing if this is the root hub */ + if ( ! 
hub->usb ) + return 0; + + /* Get device slot */ + slot = usb_get_hostdata ( hub->usb ); + + /* Update device slot hub parameters. We don't inform the + * hardware of this information until the hub's interrupt + * endpoint is opened, since the only mechanism for so doing + * provided by the xHCI specification is a Configure Endpoint + * command, and we can't issue that command until we have a + * non-EP0 endpoint to configure. + */ + slot->ports = hub->ports; + + return 0; +} + +/** + * Close hub + * + * @v hub USB hub + */ +static void xhci_hub_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/****************************************************************************** + * + * Root hub operations + * + ****************************************************************************** + */ + +/** + * Open root hub + * + * @v hub USB hub + * @ret rc Return status code + */ +static int xhci_root_open ( struct usb_hub *hub ) { + struct xhci_device *xhci = usb_hub_get_drvdata ( hub ); + struct usb_port *port; + uint32_t portsc; + unsigned int i; + + /* Enable power to all ports */ + for ( i = 1 ; i <= xhci->ports ; i++ ) { + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) ); + portsc &= XHCI_PORTSC_PRESERVE; + portsc |= XHCI_PORTSC_PP; + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) ); + } + + /* xHCI spec requires us to potentially wait 20ms after + * enabling power to a port. 
+ */ + mdelay ( XHCI_PORT_POWER_DELAY_MS ); + + /* USB3 ports may power up as Disabled */ + for ( i = 1 ; i <= xhci->ports ; i++ ) { + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) ); + port = usb_port ( hub, i ); + if ( ( port->protocol >= USB_PROTO_3_0 ) && + ( ( portsc & XHCI_PORTSC_PLS_MASK ) == + XHCI_PORTSC_PLS_DISABLED ) ) { + /* Force link state to RxDetect */ + portsc &= XHCI_PORTSC_PRESERVE; + portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS); + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) ); + } + } + + /* Some xHCI cards seem to require an additional delay after + * setting the link state to RxDetect. + */ + mdelay ( XHCI_LINK_STATE_DELAY_MS ); + + return 0; +} + +/** + * Close root hub + * + * @v hub USB hub + */ +static void xhci_root_close ( struct usb_hub *hub __unused ) { + + /* Nothing to do */ +} + +/** + * Enable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) { + struct xhci_device *xhci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + unsigned int i; + + /* Reset port */ + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) ); + portsc &= XHCI_PORTSC_PRESERVE; + portsc |= XHCI_PORTSC_PR; + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) ); + + /* Wait for port to become enabled */ + for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check port status */ + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) ); + if ( portsc & XHCI_PORTSC_PED ) + return 0; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( xhci, "XHCI %s-%d timed out waiting for port to enable\n", + xhci->name, port->address ); + return -ETIMEDOUT; +} + +/** + * Disable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) { + struct xhci_device *xhci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + + /* 
Disable port */ + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) ); + portsc &= XHCI_PORTSC_PRESERVE; + portsc |= XHCI_PORTSC_PED; + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) ); + + /* Allow time for link state to stabilise */ + mdelay ( XHCI_LINK_STATE_DELAY_MS ); + + /* Set link state to RxDetect for USB3 ports */ + if ( port->protocol >= USB_PROTO_3_0 ) { + portsc &= XHCI_PORTSC_PRESERVE; + portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS ); + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) ); + } + + /* Allow time for link state to stabilise */ + mdelay ( XHCI_LINK_STATE_DELAY_MS ); + + return 0; +} + +/** + * Update root hub port speed + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ +static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) { + struct xhci_device *xhci = usb_hub_get_drvdata ( hub ); + uint32_t portsc; + unsigned int psiv; + int ccs; + int ped; + int csc; + int speed; + int rc; + + /* Read port status */ + portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) ); + DBGC2 ( xhci, "XHCI %s-%d status is %08x\n", + xhci->name, port->address, portsc ); + ccs = ( portsc & XHCI_PORTSC_CCS ); + ped = ( portsc & XHCI_PORTSC_PED ); + csc = ( portsc & XHCI_PORTSC_CSC ); + psiv = XHCI_PORTSC_PSIV ( portsc ); + + /* Record disconnections and clear changes */ + port->disconnected |= csc; + portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE ); + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) ); + + /* Port speed is not valid unless port is connected */ + if ( ! ccs ) { + port->speed = USB_SPEED_NONE; + return 0; + } + + /* For USB2 ports, the PSIV field is not valid until the port + * completes reset and becomes enabled. + */ + if ( ( port->protocol < USB_PROTO_3_0 ) && ! 
ped ) { + port->speed = USB_SPEED_FULL; + return 0; + } + + /* Get port speed and map to generic USB speed */ + speed = xhci_port_speed ( xhci, port->address, psiv ); + if ( speed < 0 ) { + rc = speed; + return rc; + } + + port->speed = speed; + return 0; +} + +/** + * Clear transaction translator buffer + * + * @v hub USB hub + * @v port USB port + * @v ep USB endpoint + * @ret rc Return status code + */ +static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port, + struct usb_endpoint *ep ) { + struct xhci_device *xhci = usb_hub_get_drvdata ( hub ); + + /* Should never be called; this is a root hub */ + DBGC ( xhci, "XHCI %s-%d nonsensical CLEAR_TT for %s %s\n", xhci->name, + port->address, ep->usb->name, usb_endpoint_name ( ep ) ); + + return -ENOTSUP; +} + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** USB host controller operations */ +static struct usb_host_operations xhci_operations = { + .endpoint = { + .open = xhci_endpoint_open, + .close = xhci_endpoint_close, + .reset = xhci_endpoint_reset, + .mtu = xhci_endpoint_mtu, + .message = xhci_endpoint_message, + .stream = xhci_endpoint_stream, + }, + .device = { + .open = xhci_device_open, + .close = xhci_device_close, + .address = xhci_device_address, + }, + .bus = { + .open = xhci_bus_open, + .close = xhci_bus_close, + .poll = xhci_bus_poll, + }, + .hub = { + .open = xhci_hub_open, + .close = xhci_hub_close, + }, + .root = { + .open = xhci_root_open, + .close = xhci_root_close, + .enable = xhci_root_enable, + .disable = xhci_root_disable, + .speed = xhci_root_speed, + .clear_tt = xhci_root_clear_tt, + }, +}; + +/** + * Fix Intel PCH-specific quirks + * + * @v xhci xHCI device + * @v pci PCI device + */ +static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) { + struct xhci_pch *pch = &xhci->pch; + uint32_t xusb2pr; + 
uint32_t xusb2prm; + uint32_t usb3pssen; + uint32_t usb3prm; + + /* Enable SuperSpeed capability. Do this before rerouting + * USB2 ports, so that USB3 devices connect at SuperSpeed. + */ + pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen ); + pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm ); + if ( usb3prm & ~usb3pssen ) { + DBGC ( xhci, "XHCI %s enabling SuperSpeed on ports %08x\n", + xhci->name, ( usb3prm & ~usb3pssen ) ); + } + pch->usb3pssen = usb3pssen; + usb3pssen |= usb3prm; + pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen ); + + /* Route USB2 ports from EHCI to xHCI */ + pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr ); + pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm ); + if ( xusb2prm & ~xusb2pr ) { + DBGC ( xhci, "XHCI %s routing ports %08x from EHCI to xHCI\n", + xhci->name, ( xusb2prm & ~xusb2pr ) ); + } + pch->xusb2pr = xusb2pr; + xusb2pr |= xusb2prm; + pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr ); +} + +/** + * Undo Intel PCH-specific quirk fixes + * + * @v xhci xHCI device + * @v pci PCI device + */ +static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) { + struct xhci_pch *pch = &xhci->pch; + + /* Restore USB2 port routing to original state */ + pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr ); + + /* Restore SuperSpeed capability to original state */ + pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen ); +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int xhci_probe ( struct pci_device *pci ) { + struct xhci_device *xhci; + struct usb_port *port; + unsigned long bar_start; + size_t bar_size; + unsigned int i; + int rc; + + /* Allocate and initialise structure */ + xhci = zalloc ( sizeof ( *xhci ) ); + if ( ! 
xhci ) { + rc = -ENOMEM; + goto err_alloc; + } + xhci->name = pci->dev.name; + xhci->quirks = pci->id->driver_data; + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + bar_start = pci_bar_start ( pci, XHCI_BAR ); + bar_size = pci_bar_size ( pci, XHCI_BAR ); + xhci->regs = pci_ioremap ( pci, bar_start, bar_size ); + if ( ! xhci->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Initialise xHCI device */ + xhci_init ( xhci, xhci->regs ); + + /* Initialise USB legacy support and claim ownership */ + xhci_legacy_init ( xhci ); + xhci_legacy_claim ( xhci ); + + /* Fix Intel PCH-specific quirks, if applicable */ + if ( xhci->quirks & XHCI_PCH ) + xhci_pch_fix ( xhci, pci ); + + /* Reset device */ + if ( ( rc = xhci_reset ( xhci ) ) != 0 ) + goto err_reset; + + /* Allocate USB bus */ + xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU, + &xhci_operations ); + if ( ! xhci->bus ) { + rc = -ENOMEM; + goto err_alloc_bus; + } + usb_bus_set_hostdata ( xhci->bus, xhci ); + usb_hub_set_drvdata ( xhci->bus->hub, xhci ); + + /* Set port protocols */ + for ( i = 1 ; i <= xhci->ports ; i++ ) { + port = usb_port ( xhci->bus->hub, i ); + port->protocol = xhci_port_protocol ( xhci, i ); + } + + /* Register USB bus */ + if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 ) + goto err_register; + + pci_set_drvdata ( pci, xhci ); + return 0; + + unregister_usb_bus ( xhci->bus ); + err_register: + free_usb_bus ( xhci->bus ); + err_alloc_bus: + xhci_reset ( xhci ); + err_reset: + if ( xhci->quirks & XHCI_PCH ) + xhci_pch_undo ( xhci, pci ); + xhci_legacy_release ( xhci ); + iounmap ( xhci->regs ); + err_ioremap: + free ( xhci ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void xhci_remove ( struct pci_device *pci ) { + struct xhci_device *xhci = pci_get_drvdata ( pci ); + struct usb_bus *bus = xhci->bus; + + unregister_usb_bus ( bus ); + free_usb_bus ( bus ); + xhci_reset ( xhci ); + if ( 
xhci->quirks & XHCI_PCH ) + xhci_pch_undo ( xhci, pci ); + xhci_legacy_release ( xhci ); + iounmap ( xhci->regs ); + free ( xhci ); +} + +/** XHCI PCI device IDs */ +static struct pci_device_id xhci_ids[] = { + PCI_ROM ( 0x8086, 0x9d2f, "xhci-skylake", "xHCI (Skylake)", ( XHCI_PCH | XHCI_BAD_PSIV ) ), + PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ), + PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ), +}; + +/** XHCI PCI driver */ +struct pci_driver xhci_driver __pci_driver = { + .ids = xhci_ids, + .id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ), + .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_XHCI ), + .probe = xhci_probe, + .remove = xhci_remove, +}; + +/** + * Prepare for exit + * + * @v booting System is shutting down for OS boot + */ +static void xhci_shutdown ( int booting ) { + /* If we are shutting down to boot an OS, then prevent the + * release of ownership back to BIOS. + */ + xhci_legacy_prevent_release = booting; +} + +/** Startup/shutdown function */ +struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = { + .name = "xhci", + .shutdown = xhci_shutdown, +}; diff --git a/src/drivers/usb/xhci.h b/src/drivers/usb/xhci.h new file mode 100644 index 00000000..e996363e --- /dev/null +++ b/src/drivers/usb/xhci.h @@ -0,0 +1,1150 @@ +#ifndef _IPXE_XHCI_H +#define _IPXE_XHCI_H + +/** @file + * + * USB eXtensible Host Controller Interface (xHCI) driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** Minimum alignment required for data structures + * + * With the exception of the scratchpad buffer pages (which are + * page-aligned), data structures used by xHCI generally require from + * 16 to 64 byte alignment and must not cross an (xHCI) page boundary. + * We simplify this requirement by aligning each structure on its own + * size, with a minimum of a 64 byte alignment. 
+ */ +#define XHCI_MIN_ALIGN 64 + +/** Maximum transfer size */ +#define XHCI_MTU 65536 + +/** xHCI PCI BAR */ +#define XHCI_BAR PCI_BASE_ADDRESS_0 + +/** Capability register length */ +#define XHCI_CAP_CAPLENGTH 0x00 + +/** Host controller interface version number */ +#define XHCI_CAP_HCIVERSION 0x02 + +/** Structural parameters 1 */ +#define XHCI_CAP_HCSPARAMS1 0x04 + +/** Number of device slots */ +#define XHCI_HCSPARAMS1_SLOTS(params) ( ( (params) >> 0 ) & 0xff ) + +/** Number of interrupters */ +#define XHCI_HCSPARAMS1_INTRS(params) ( ( (params) >> 8 ) & 0x3ff ) + +/** Number of ports */ +#define XHCI_HCSPARAMS1_PORTS(params) ( ( (params) >> 24 ) & 0xff ) + +/** Structural parameters 2 */ +#define XHCI_CAP_HCSPARAMS2 0x08 + +/** Number of page-sized scratchpad buffers */ +#define XHCI_HCSPARAMS2_SCRATCHPADS(params) \ + ( ( ( (params) >> 16 ) & 0x3e0 ) | ( ( (params) >> 27 ) & 0x1f ) ) + +/** Capability parameters */ +#define XHCI_CAP_HCCPARAMS1 0x10 + +/** 64-bit addressing capability */ +#define XHCI_HCCPARAMS1_ADDR64(params) ( ( (params) >> 0 ) & 0x1 ) + +/** Context size shift */ +#define XHCI_HCCPARAMS1_CSZ_SHIFT(params) ( 5 + ( ( (params) >> 2 ) & 0x1 ) ) + +/** xHCI extended capabilities pointer */ +#define XHCI_HCCPARAMS1_XECP(params) ( ( ( (params) >> 16 ) & 0xffff ) << 2 ) + +/** Doorbell offset */ +#define XHCI_CAP_DBOFF 0x14 + +/** Runtime register space offset */ +#define XHCI_CAP_RTSOFF 0x18 + +/** xHCI extended capability ID */ +#define XHCI_XECP_ID(xecp) ( ( (xecp) >> 0 ) & 0xff ) + +/** Next xHCI extended capability pointer */ +#define XHCI_XECP_NEXT(xecp) ( ( ( (xecp) >> 8 ) & 0xff ) << 2 ) + +/** USB legacy support extended capability */ +#define XHCI_XECP_ID_LEGACY 1 + +/** USB legacy support BIOS owned semaphore */ +#define XHCI_USBLEGSUP_BIOS 0x02 + +/** USB legacy support BIOS ownership flag */ +#define XHCI_USBLEGSUP_BIOS_OWNED 0x01 + +/** USB legacy support OS owned semaphore */ +#define XHCI_USBLEGSUP_OS 0x03 + +/** USB legacy support 
OS ownership flag */ +#define XHCI_USBLEGSUP_OS_OWNED 0x01 + +/** USB legacy support control/status */ +#define XHCI_USBLEGSUP_CTLSTS 0x04 + +/** Supported protocol extended capability */ +#define XHCI_XECP_ID_SUPPORTED 2 + +/** Supported protocol revision */ +#define XHCI_SUPPORTED_REVISION 0x00 + +/** Supported protocol minor revision */ +#define XHCI_SUPPORTED_REVISION_VER(revision) ( ( (revision) >> 16 ) & 0xffff ) + +/** Supported protocol name */ +#define XHCI_SUPPORTED_NAME 0x04 + +/** Supported protocol ports */ +#define XHCI_SUPPORTED_PORTS 0x08 + +/** Supported protocol port offset */ +#define XHCI_SUPPORTED_PORTS_OFFSET(ports) ( ( (ports) >> 0 ) & 0xff ) + +/** Supported protocol port count */ +#define XHCI_SUPPORTED_PORTS_COUNT(ports) ( ( (ports) >> 8 ) & 0xff ) + +/** Supported protocol PSI count */ +#define XHCI_SUPPORTED_PORTS_PSIC(ports) ( ( (ports) >> 28 ) & 0x0f ) + +/** Supported protocol slot */ +#define XHCI_SUPPORTED_SLOT 0x0c + +/** Supported protocol slot type */ +#define XHCI_SUPPORTED_SLOT_TYPE(slot) ( ( (slot) >> 0 ) & 0x1f ) + +/** Supported protocol PSI */ +#define XHCI_SUPPORTED_PSI(index) ( 0x10 + ( (index) * 4 ) ) + +/** Supported protocol PSI value */ +#define XHCI_SUPPORTED_PSI_VALUE(psi) ( ( (psi) >> 0 ) & 0x0f ) + +/** Supported protocol PSI mantissa */ +#define XHCI_SUPPORTED_PSI_MANTISSA(psi) ( ( (psi) >> 16 ) & 0xffff ) + +/** Supported protocol PSI exponent */ +#define XHCI_SUPPORTED_PSI_EXPONENT(psi) ( ( (psi) >> 4 ) & 0x03 ) + +/** Default PSI values */ +enum xhci_default_psi_value { + /** Full speed (12Mbps) */ + XHCI_SPEED_FULL = 1, + /** Low speed (1.5Mbps) */ + XHCI_SPEED_LOW = 2, + /** High speed (480Mbps) */ + XHCI_SPEED_HIGH = 3, + /** Super speed */ + XHCI_SPEED_SUPER = 4, +}; + +/** USB command register */ +#define XHCI_OP_USBCMD 0x00 + +/** Run/stop */ +#define XHCI_USBCMD_RUN 0x00000001UL + +/** Host controller reset */ +#define XHCI_USBCMD_HCRST 0x00000002UL + +/** USB status register */ +#define XHCI_OP_USBSTS 
0x04 + +/** Host controller halted */ +#define XHCI_USBSTS_HCH 0x00000001UL + +/** Page size register */ +#define XHCI_OP_PAGESIZE 0x08 + +/** Page size */ +#define XHCI_PAGESIZE(pagesize) ( (pagesize) << 12 ) + +/** Device notifcation control register */ +#define XHCI_OP_DNCTRL 0x14 + +/** Command ring control register */ +#define XHCI_OP_CRCR 0x18 + +/** Command ring cycle state */ +#define XHCI_CRCR_RCS 0x00000001UL + +/** Command abort */ +#define XHCI_CRCR_CA 0x00000004UL + +/** Command ring running */ +#define XHCI_CRCR_CRR 0x00000008UL + +/** Device context base address array pointer */ +#define XHCI_OP_DCBAAP 0x30 + +/** Configure register */ +#define XHCI_OP_CONFIG 0x38 + +/** Maximum device slots enabled */ +#define XHCI_CONFIG_MAX_SLOTS_EN(slots) ( (slots) << 0 ) + +/** Maximum device slots enabled mask */ +#define XHCI_CONFIG_MAX_SLOTS_EN_MASK \ + XHCI_CONFIG_MAX_SLOTS_EN ( 0xff ) + +/** Port status and control register */ +#define XHCI_OP_PORTSC(port) ( 0x400 - 0x10 + ( (port) << 4 ) ) + +/** Current connect status */ +#define XHCI_PORTSC_CCS 0x00000001UL + +/** Port enabled */ +#define XHCI_PORTSC_PED 0x00000002UL + +/** Port reset */ +#define XHCI_PORTSC_PR 0x00000010UL + +/** Port link state */ +#define XHCI_PORTSC_PLS(pls) ( (pls) << 5 ) + +/** Disabled port link state */ +#define XHCI_PORTSC_PLS_DISABLED XHCI_PORTSC_PLS ( 4 ) + +/** RxDetect port link state */ +#define XHCI_PORTSC_PLS_RXDETECT XHCI_PORTSC_PLS ( 5 ) + +/** Port link state mask */ +#define XHCI_PORTSC_PLS_MASK XHCI_PORTSC_PLS ( 0xf ) + +/** Port power */ +#define XHCI_PORTSC_PP 0x00000200UL + +/** Time to delay after enabling power to a port */ +#define XHCI_PORT_POWER_DELAY_MS 20 + +/** Port speed ID value */ +#define XHCI_PORTSC_PSIV(portsc) ( ( (portsc) >> 10 ) & 0xf ) + +/** Port indicator control */ +#define XHCI_PORTSC_PIC(indicators) ( (indicators) << 14 ) + +/** Port indicator control mask */ +#define XHCI_PORTSC_PIC_MASK XHCI_PORTSC_PIC ( 3 ) + +/** Port link state write 
strobe */ +#define XHCI_PORTSC_LWS 0x00010000UL + +/** Time to delay after writing the port link state */ +#define XHCI_LINK_STATE_DELAY_MS 100 + +/** Connect status change */ +#define XHCI_PORTSC_CSC 0x00020000UL + +/** Port enabled/disabled change */ +#define XHCI_PORTSC_PEC 0x00040000UL + +/** Warm port reset change */ +#define XHCI_PORTSC_WRC 0x00080000UL + +/** Over-current change */ +#define XHCI_PORTSC_OCC 0x00100000UL + +/** Port reset change */ +#define XHCI_PORTSC_PRC 0x00200000UL + +/** Port link state change */ +#define XHCI_PORTSC_PLC 0x00400000UL + +/** Port config error change */ +#define XHCI_PORTSC_CEC 0x00800000UL + +/** Port status change mask */ +#define XHCI_PORTSC_CHANGE \ + ( XHCI_PORTSC_CSC | XHCI_PORTSC_PEC | XHCI_PORTSC_WRC | \ + XHCI_PORTSC_OCC | XHCI_PORTSC_PRC | XHCI_PORTSC_PLC | \ + XHCI_PORTSC_CEC ) + +/** Port status and control bits which should be preserved + * + * The port status and control register is a horrendous mix of + * differing semantics. Some bits are written to only when a separate + * write strobe bit is set. Some bits should be preserved when + * modifying other bits. Some bits will be cleared if written back as + * a one. Most excitingly, the "port enabled" bit has the semantics + * that 1=enabled, 0=disabled, yet writing a 1 will disable the port. 
+ */ +#define XHCI_PORTSC_PRESERVE ( XHCI_PORTSC_PP | XHCI_PORTSC_PIC_MASK ) + +/** Port power management status and control register */ +#define XHCI_OP_PORTPMSC(port) ( 0x404 - 0x10 + ( (port) << 4 ) ) + +/** Port link info register */ +#define XHCI_OP_PORTLI(port) ( 0x408 - 0x10 + ( (port) << 4 ) ) + +/** Port hardware link power management control register */ +#define XHCI_OP_PORTHLPMC(port) ( 0x40c - 0x10 + ( (port) << 4 ) ) + +/** Event ring segment table size register */ +#define XHCI_RUN_ERSTSZ(intr) ( 0x28 + ( (intr) << 5 ) ) + +/** Event ring segment table base address register */ +#define XHCI_RUN_ERSTBA(intr) ( 0x30 + ( (intr) << 5 ) ) + +/** Event ring dequeue pointer register */ +#define XHCI_RUN_ERDP(intr) ( 0x38 + ( (intr) << 5 ) ) + +/** A transfer request block template */ +struct xhci_trb_template { + /** Parameter */ + uint64_t parameter; + /** Status */ + uint32_t status; + /** Control */ + uint32_t control; +}; + +/** A transfer request block */ +struct xhci_trb_common { + /** Reserved */ + uint64_t reserved_a; + /** Reserved */ + uint32_t reserved_b; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Reserved */ + uint16_t reserved_c; +} __attribute__ (( packed )); + +/** Transfer request block cycle bit flag */ +#define XHCI_TRB_C 0x01 + +/** Transfer request block toggle cycle bit flag */ +#define XHCI_TRB_TC 0x02 + +/** Transfer request block chain flag */ +#define XHCI_TRB_CH 0x10 + +/** Transfer request block interrupt on completion flag */ +#define XHCI_TRB_IOC 0x20 + +/** Transfer request block immediate data flag */ +#define XHCI_TRB_IDT 0x40 + +/** Transfer request block type */ +#define XHCI_TRB_TYPE(type) ( (type) << 2 ) + +/** Transfer request block type mask */ +#define XHCI_TRB_TYPE_MASK XHCI_TRB_TYPE ( 0x3f ) + +/** A normal transfer request block */ +struct xhci_trb_normal { + /** Data buffer */ + uint64_t data; + /** Length */ + uint32_t len; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** 
Reserved */ + uint16_t reserved; +} __attribute__ (( packed )); + +/** A normal transfer request block */ +#define XHCI_TRB_NORMAL XHCI_TRB_TYPE ( 1 ) + +/** Construct TD size field */ +#define XHCI_TD_SIZE(remaining) \ + ( ( ( (remaining) <= 0xf ) ? remaining : 0xf ) << 17 ) + +/** A setup stage transfer request block */ +struct xhci_trb_setup { + /** Setup packet */ + struct usb_setup_packet packet; + /** Length */ + uint32_t len; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Transfer direction */ + uint8_t direction; + /** Reserved */ + uint8_t reserved; +} __attribute__ (( packed )); + +/** A setup stage transfer request block */ +#define XHCI_TRB_SETUP XHCI_TRB_TYPE ( 2 ) + +/** Setup stage input data direction */ +#define XHCI_SETUP_IN 3 + +/** Setup stage output data direction */ +#define XHCI_SETUP_OUT 2 + +/** A data stage transfer request block */ +struct xhci_trb_data { + /** Data buffer */ + uint64_t data; + /** Length */ + uint32_t len; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Transfer direction */ + uint8_t direction; + /** Reserved */ + uint8_t reserved; +} __attribute__ (( packed )); + +/** A data stage transfer request block */ +#define XHCI_TRB_DATA XHCI_TRB_TYPE ( 3 ) + +/** Input data direction */ +#define XHCI_DATA_IN 0x01 + +/** Output data direction */ +#define XHCI_DATA_OUT 0x00 + +/** A status stage transfer request block */ +struct xhci_trb_status { + /** Reserved */ + uint64_t reserved_a; + /** Reserved */ + uint32_t reserved_b; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Direction */ + uint8_t direction; + /** Reserved */ + uint8_t reserved_c; +} __attribute__ (( packed )); + +/** A status stage transfer request block */ +#define XHCI_TRB_STATUS XHCI_TRB_TYPE ( 4 ) + +/** Input status direction */ +#define XHCI_STATUS_IN 0x01 + +/** Output status direction */ +#define XHCI_STATUS_OUT 0x00 + +/** A link transfer request block */ +struct xhci_trb_link { + /** Next 
ring segment */ + uint64_t next; + /** Reserved */ + uint32_t reserved_a; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Reserved */ + uint16_t reserved_c; +} __attribute__ (( packed )); + +/** A link transfer request block */ +#define XHCI_TRB_LINK XHCI_TRB_TYPE ( 6 ) + +/** A no-op transfer request block */ +#define XHCI_TRB_NOP XHCI_TRB_TYPE ( 8 ) + +/** An enable slot transfer request block */ +struct xhci_trb_enable_slot { + /** Reserved */ + uint64_t reserved_a; + /** Reserved */ + uint32_t reserved_b; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Slot type */ + uint8_t slot; + /** Reserved */ + uint8_t reserved_c; +} __attribute__ (( packed )); + +/** An enable slot transfer request block */ +#define XHCI_TRB_ENABLE_SLOT XHCI_TRB_TYPE ( 9 ) + +/** A disable slot transfer request block */ +struct xhci_trb_disable_slot { + /** Reserved */ + uint64_t reserved_a; + /** Reserved */ + uint32_t reserved_b; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Reserved */ + uint8_t reserved_c; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** A disable slot transfer request block */ +#define XHCI_TRB_DISABLE_SLOT XHCI_TRB_TYPE ( 10 ) + +/** A context transfer request block */ +struct xhci_trb_context { + /** Input context */ + uint64_t input; + /** Reserved */ + uint32_t reserved_a; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Reserved */ + uint8_t reserved_b; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** An address device transfer request block */ +#define XHCI_TRB_ADDRESS_DEVICE XHCI_TRB_TYPE ( 11 ) + +/** A configure endpoint transfer request block */ +#define XHCI_TRB_CONFIGURE_ENDPOINT XHCI_TRB_TYPE ( 12 ) + +/** An evaluate context transfer request block */ +#define XHCI_TRB_EVALUATE_CONTEXT XHCI_TRB_TYPE ( 13 ) + +/** A reset endpoint transfer request block */ +struct xhci_trb_reset_endpoint { + /** Reserved */ + uint64_t reserved_a; + 
/** Reserved */ + uint32_t reserved_b; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Endpoint ID */ + uint8_t endpoint; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** A reset endpoint transfer request block */ +#define XHCI_TRB_RESET_ENDPOINT XHCI_TRB_TYPE ( 14 ) + +/** A stop endpoint transfer request block */ +struct xhci_trb_stop_endpoint { + /** Reserved */ + uint64_t reserved_a; + /** Reserved */ + uint32_t reserved_b; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Endpoint ID */ + uint8_t endpoint; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** A stop endpoint transfer request block */ +#define XHCI_TRB_STOP_ENDPOINT XHCI_TRB_TYPE ( 15 ) + +/** A set transfer ring dequeue pointer transfer request block */ +struct xhci_trb_set_tr_dequeue_pointer { + /** Dequeue pointer */ + uint64_t dequeue; + /** Reserved */ + uint32_t reserved; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Endpoint ID */ + uint8_t endpoint; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** A set transfer ring dequeue pointer transfer request block */ +#define XHCI_TRB_SET_TR_DEQUEUE_POINTER XHCI_TRB_TYPE ( 16 ) + +/** A no-op command transfer request block */ +#define XHCI_TRB_NOP_CMD XHCI_TRB_TYPE ( 23 ) + +/** A transfer event transfer request block */ +struct xhci_trb_transfer { + /** Transfer TRB pointer */ + uint64_t transfer; + /** Residual transfer length */ + uint16_t residual; + /** Reserved */ + uint8_t reserved; + /** Completion code */ + uint8_t code; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Endpoint ID */ + uint8_t endpoint; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** A transfer event transfer request block */ +#define XHCI_TRB_TRANSFER XHCI_TRB_TYPE ( 32 ) + +/** A command completion event transfer request block */ +struct xhci_trb_complete { + /** Command TRB pointer */ + uint64_t command; + 
/** Parameter */ + uint8_t parameter[3]; + /** Completion code */ + uint8_t code; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Virtual function ID */ + uint8_t vf; + /** Slot ID */ + uint8_t slot; +} __attribute__ (( packed )); + +/** A command completion event transfer request block */ +#define XHCI_TRB_COMPLETE XHCI_TRB_TYPE ( 33 ) + +/** xHCI completion codes */ +enum xhci_completion_code { + /** Success */ + XHCI_CMPLT_SUCCESS = 1, + /** Short packet */ + XHCI_CMPLT_SHORT = 13, + /** Command ring stopped */ + XHCI_CMPLT_CMD_STOPPED = 24, +}; + +/** A port status change transfer request block */ +struct xhci_trb_port_status { + /** Reserved */ + uint8_t reserved_a[3]; + /** Port ID */ + uint8_t port; + /** Reserved */ + uint8_t reserved_b[7]; + /** Completion code */ + uint8_t code; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Reserved */ + uint16_t reserved_c; +} __attribute__ (( packed )); + +/** A port status change transfer request block */ +#define XHCI_TRB_PORT_STATUS XHCI_TRB_TYPE ( 34 ) + +/** A port status change transfer request block */ +struct xhci_trb_host_controller { + /** Reserved */ + uint64_t reserved_a; + /** Reserved */ + uint8_t reserved_b[3]; + /** Completion code */ + uint8_t code; + /** Flags */ + uint8_t flags; + /** Type */ + uint8_t type; + /** Reserved */ + uint16_t reserved_c; +} __attribute__ (( packed )); + +/** A port status change transfer request block */ +#define XHCI_TRB_HOST_CONTROLLER XHCI_TRB_TYPE ( 37 ) + +/** A transfer request block */ +union xhci_trb { + /** Template */ + struct xhci_trb_template template; + /** Common fields */ + struct xhci_trb_common common; + /** Normal TRB */ + struct xhci_trb_normal normal; + /** Setup stage TRB */ + struct xhci_trb_setup setup; + /** Data stage TRB */ + struct xhci_trb_data data; + /** Status stage TRB */ + struct xhci_trb_status status; + /** Link TRB */ + struct xhci_trb_link link; + /** Enable slot TRB */ + struct 
xhci_trb_enable_slot enable; + /** Disable slot TRB */ + struct xhci_trb_disable_slot disable; + /** Input context TRB */ + struct xhci_trb_context context; + /** Reset endpoint TRB */ + struct xhci_trb_reset_endpoint reset; + /** Stop endpoint TRB */ + struct xhci_trb_stop_endpoint stop; + /** Set transfer ring dequeue pointer TRB */ + struct xhci_trb_set_tr_dequeue_pointer dequeue; + /** Transfer event */ + struct xhci_trb_transfer transfer; + /** Command completion event */ + struct xhci_trb_complete complete; + /** Port status changed event */ + struct xhci_trb_port_status port; + /** Host controller event */ + struct xhci_trb_host_controller host; +} __attribute__ (( packed )); + +/** An input control context */ +struct xhci_control_context { + /** Drop context flags */ + uint32_t drop; + /** Add context flags */ + uint32_t add; + /** Reserved */ + uint32_t reserved_a[5]; + /** Configuration value */ + uint8_t config; + /** Interface number */ + uint8_t intf; + /** Alternate setting */ + uint8_t alt; + /** Reserved */ + uint8_t reserved_b; +} __attribute__ (( packed )); + +/** A slot context */ +struct xhci_slot_context { + /** Device info */ + uint32_t info; + /** Maximum exit latency */ + uint16_t latency; + /** Root hub port number */ + uint8_t port; + /** Number of downstream ports */ + uint8_t ports; + /** TT hub slot ID */ + uint8_t tt_id; + /** TT port number */ + uint8_t tt_port; + /** Interrupter target */ + uint16_t intr; + /** USB address */ + uint8_t address; + /** Reserved */ + uint16_t reserved_a; + /** Slot state */ + uint8_t state; + /** Reserved */ + uint32_t reserved_b[4]; +} __attribute__ (( packed )); + +/** Construct slot context device info */ +#define XHCI_SLOT_INFO( entries, hub, speed, route ) \ + ( ( (entries) << 27 ) | ( (hub) << 26 ) | ( (speed) << 20 ) | (route) ) + +/** An endpoint context */ +struct xhci_endpoint_context { + /** Endpoint state */ + uint8_t state; + /** Stream configuration */ + uint8_t stream; + /** Polling 
interval */ + uint8_t interval; + /** Max ESIT payload high */ + uint8_t esit_high; + /** Endpoint type */ + uint8_t type; + /** Maximum burst size */ + uint8_t burst; + /** Maximum packet size */ + uint16_t mtu; + /** Transfer ring dequeue pointer */ + uint64_t dequeue; + /** Average TRB length */ + uint16_t trb_len; + /** Max ESIT payload low */ + uint16_t esit_low; + /** Reserved */ + uint32_t reserved[3]; +} __attribute__ (( packed )); + +/** Endpoint states */ +enum xhci_endpoint_state { + /** Endpoint is disabled */ + XHCI_ENDPOINT_DISABLED = 0, + /** Endpoint is running */ + XHCI_ENDPOINT_RUNNING = 1, + /** Endpoint is halted due to a USB Halt condition */ + XHCI_ENDPOINT_HALTED = 2, + /** Endpoint is stopped */ + XHCI_ENDPOINT_STOPPED = 3, + /** Endpoint is halted due to a TRB error */ + XHCI_ENDPOINT_ERROR = 4, +}; + +/** Endpoint state mask */ +#define XHCI_ENDPOINT_STATE_MASK 0x07 + +/** Endpoint type */ +#define XHCI_EP_TYPE(type) ( (type) << 3 ) + +/** Control endpoint type */ +#define XHCI_EP_TYPE_CONTROL XHCI_EP_TYPE ( 4 ) + +/** Input endpoint type */ +#define XHCI_EP_TYPE_IN XHCI_EP_TYPE ( 4 ) + +/** Periodic endpoint type */ +#define XHCI_EP_TYPE_PERIODIC XHCI_EP_TYPE ( 1 ) + +/** Endpoint dequeue cycle state */ +#define XHCI_EP_DCS 0x00000001UL + +/** Control endpoint average TRB length */ +#define XHCI_EP0_TRB_LEN 8 + +/** An event ring segment */ +struct xhci_event_ring_segment { + /** Base address */ + uint64_t base; + /** Number of TRBs */ + uint32_t count; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** A transfer request block command/transfer ring */ +struct xhci_trb_ring { + /** Producer counter */ + unsigned int prod; + /** Consumer counter */ + unsigned int cons; + /** Ring size (log2) */ + unsigned int shift; + /** Ring counter mask */ + unsigned int mask; + + /** I/O buffers */ + struct io_buffer **iobuf; + + /** Transfer request blocks */ + union xhci_trb *trb; + /** Length of transfer request blocks */ + 
size_t len; + /** Link TRB (if applicable) */ + struct xhci_trb_link *link; + + /** Doorbell register */ + void *db; + /** Doorbell register value */ + uint32_t dbval; +}; + +/** An event ring */ +struct xhci_event_ring { + /** Consumer counter */ + unsigned int cons; + /** Event ring segment table */ + struct xhci_event_ring_segment *segment; + /** Transfer request blocks */ + union xhci_trb *trb; +}; + +/** + * Calculate doorbell register value + * + * @v target Doorbell target + * @v stream Doorbell stream ID + * @ret dbval Doorbell register value + */ +#define XHCI_DBVAL( target, stream ) ( (target) | ( (stream) << 16 ) ) + +/** + * Calculate space used in TRB ring + * + * @v ring TRB ring + * @ret fill Number of entries used + */ +static inline __attribute__ (( always_inline )) unsigned int +xhci_ring_fill ( struct xhci_trb_ring *ring ) { + + return ( ring->prod - ring->cons ); +} + +/** + * Calculate space remaining in TRB ring + * + * @v ring TRB ring + * @ret remaining Number of entries remaining + * + * xHCI does not allow us to completely fill a ring; there must be at + * least one free entry (excluding the Link TRB). + */ +static inline __attribute__ (( always_inline )) unsigned int +xhci_ring_remaining ( struct xhci_trb_ring *ring ) { + unsigned int fill = xhci_ring_fill ( ring ); + + /* We choose to utilise rings with ( 2^n + 1 ) entries, with + * the final entry being a Link TRB. The maximum fill level + * is therefore + * + * ( ( 2^n + 1 ) - 1 (Link TRB) - 1 (one slot always empty) + * == ( 2^n - 1 ) + * + * which is therefore equal to the ring mask. 
+ */ + assert ( fill <= ring->mask ); + return ( ring->mask - fill ); +} + +/** + * Calculate physical address of most recently consumed TRB + * + * @v ring TRB ring + * @ret trb TRB physical address + */ +static inline __attribute__ (( always_inline )) physaddr_t +xhci_ring_consumed ( struct xhci_trb_ring *ring ) { + unsigned int index = ( ( ring->cons - 1 ) & ring->mask ); + + return virt_to_phys ( &ring->trb[index] ); +} + +/** Slot context index */ +#define XHCI_CTX_SLOT 0 + +/** Calculate context index from USB endpoint address */ +#define XHCI_CTX(address) \ + ( (address) ? ( ( ( (address) & 0x0f ) << 1 ) | \ + ( ( (address) & 0x80 ) >> 7 ) ) : 1 ) + +/** Endpoint zero context index */ +#define XHCI_CTX_EP0 XHCI_CTX ( 0x00 ) + +/** End of contexts */ +#define XHCI_CTX_END 32 + +/** Device context index */ +#define XHCI_DCI(ctx) ( (ctx) + 0 ) + +/** Input context index */ +#define XHCI_ICI(ctx) ( (ctx) + 1 ) + +/** Number of TRBs (excluding Link TRB) in the command ring + * + * This is a policy decision. + */ +#define XHCI_CMD_TRBS_LOG2 2 + +/** Number of TRBs in the event ring + * + * This is a policy decision. + */ +#define XHCI_EVENT_TRBS_LOG2 6 + +/** Number of TRBs in a transfer ring + * + * This is a policy decision. + */ +#define XHCI_TRANSFER_TRBS_LOG2 6 + +/** Maximum time to wait for BIOS to release ownership + * + * This is a policy decision. + */ +#define XHCI_USBLEGSUP_MAX_WAIT_MS 100 + +/** Maximum time to wait for host controller to stop + * + * This is a policy decision. + */ +#define XHCI_STOP_MAX_WAIT_MS 100 + +/** Maximum time to wait for reset to complete + * + * This is a policy decision. + */ +#define XHCI_RESET_MAX_WAIT_MS 500 + +/** Maximum time to wait for a command to complete + * + * The "address device" command involves waiting for a response to a + * USB control transaction, and so we must wait for up to the 5000ms + * that USB allows for devices to respond to control transactions. 
+ */ +#define XHCI_COMMAND_MAX_WAIT_MS USB_CONTROL_MAX_WAIT_MS + +/** Time to delay after aborting a command + * + * This is a policy decision + */ +#define XHCI_COMMAND_ABORT_DELAY_MS 500 + +/** Maximum time to wait for a port reset to complete + * + * This is a policy decision. + */ +#define XHCI_PORT_RESET_MAX_WAIT_MS 500 + +/** Intel PCH quirk */ +struct xhci_pch { + /** USB2 port routing register original value */ + uint32_t xusb2pr; + /** USB3 port SuperSpeed enable register original value */ + uint32_t usb3pssen; +}; + +/** Intel PCH quirk flag */ +#define XHCI_PCH 0x0001 + +/** Intel PCH USB2 port routing register */ +#define XHCI_PCH_XUSB2PR 0xd0 + +/** Intel PCH USB2 port routing mask register */ +#define XHCI_PCH_XUSB2PRM 0xd4 + +/** Intel PCH SuperSpeed enable register */ +#define XHCI_PCH_USB3PSSEN 0xd8 + +/** Intel PCH USB3 port routing mask register */ +#define XHCI_PCH_USB3PRM 0xdc + +/** Invalid protocol speed ID values quirk */ +#define XHCI_BAD_PSIV 0x0002 + +/** An xHCI device */ +struct xhci_device { + /** Registers */ + void *regs; + /** Name */ + const char *name; + /** Quirks */ + unsigned int quirks; + + /** Capability registers */ + void *cap; + /** Operational registers */ + void *op; + /** Runtime registers */ + void *run; + /** Doorbell registers */ + void *db; + + /** Number of device slots */ + unsigned int slots; + /** Number of interrupters */ + unsigned int intrs; + /** Number of ports */ + unsigned int ports; + + /** Number of page-sized scratchpad buffers */ + unsigned int scratchpads; + + /** 64-bit addressing capability */ + int addr64; + /** Context size shift */ + unsigned int csz_shift; + /** xHCI extended capabilities offset */ + unsigned int xecp; + + /** Page size */ + size_t pagesize; + + /** USB legacy support capability (if present and enabled) */ + unsigned int legacy; + + /** Device context base address array */ + uint64_t *dcbaa; + + /** Scratchpad buffer area */ + userptr_t scratchpad; + /** Scratchpad buffer array 
*/ + uint64_t *scratchpad_array; + + /** Command ring */ + struct xhci_trb_ring command; + /** Event ring */ + struct xhci_event_ring event; + /** Current command (if any) */ + union xhci_trb *pending; + + /** Device slots, indexed by slot ID */ + struct xhci_slot **slot; + + /** USB bus */ + struct usb_bus *bus; + + /** Intel PCH quirk */ + struct xhci_pch pch; +}; + +/** An xHCI device slot */ +struct xhci_slot { + /** xHCI device */ + struct xhci_device *xhci; + /** USB device */ + struct usb_device *usb; + /** Slot ID */ + unsigned int id; + /** Slot context */ + struct xhci_slot_context *context; + /** Route string */ + unsigned int route; + /** Root hub port number */ + unsigned int port; + /** Protocol speed ID */ + unsigned int psiv; + /** Number of ports (if this device is a hub) */ + unsigned int ports; + /** Transaction translator slot ID */ + unsigned int tt_id; + /** Transaction translator port */ + unsigned int tt_port; + /** Endpoints, indexed by context ID */ + struct xhci_endpoint *endpoint[XHCI_CTX_END]; +}; + +/** An xHCI endpoint */ +struct xhci_endpoint { + /** xHCI device */ + struct xhci_device *xhci; + /** xHCI slot */ + struct xhci_slot *slot; + /** USB endpoint */ + struct usb_endpoint *ep; + /** Context index */ + unsigned int ctx; + /** Endpoint type */ + unsigned int type; + /** Endpoint interval */ + unsigned int interval; + /** Endpoint context */ + struct xhci_endpoint_context *context; + /** Transfer ring */ + struct xhci_trb_ring ring; +}; + +#endif /* _IPXE_XHCI_H */ diff --git a/src/hci/commands/cert_cmd.c b/src/hci/commands/cert_cmd.c new file mode 100644 index 00000000..24b18bf5 --- /dev/null +++ b/src/hci/commands/cert_cmd.c @@ -0,0 +1,304 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Certificate management commands + * + */ + +/** "cert" options */ +struct cert_options { + /** Certificate subject name */ + char *name; + /** Keep certificate file after parsing */ + int keep; +}; + +/** "cert" option list */ +static union { + /* "certstore" takes both options */ + struct option_descriptor certstore[2]; + /* "certstat" takes only --subject */ + struct option_descriptor certstat[1]; + /* "certfree" takes only --subject */ + struct option_descriptor certfree[1]; +} opts = { + .certstore = { + OPTION_DESC ( "subject", 's', required_argument, + struct cert_options, name, parse_string ), + OPTION_DESC ( "keep", 'k', no_argument, + struct cert_options, keep, parse_flag ), + }, +}; + +/** A "cert" command descriptor */ +struct cert_command_descriptor { + /** Command descriptor */ + struct command_descriptor cmd; + /** Payload + * + * @v cert X.509 certificate + * @ret rc Return status code + */ + int ( * payload ) ( struct x509_certificate *cert ); +}; + +/** + * Construct "cert" command descriptor + * + * @v _struct Options structure type + * @v _options Option descriptor array + * @v _min_args Minimum 
number of non-option arguments + * @v _max_args Maximum number of non-option arguments + * @v _usage Command usage + * @v _payload Payload method + * @ret _command Command descriptor + */ +#define CERT_COMMAND_DESC( _struct, _options, _min_args, _max_args, \ + _usage, _payload ) \ + { \ + .cmd = COMMAND_DESC ( _struct, _options, _min_args, \ + _max_args, _usage ), \ + .payload = _payload, \ + } + +/** + * Execute "cert" command + * + * @v argc Argument count + * @v argv Argument list + * @v certcmd Command descriptor + * @ret rc Return status code + */ +static int cert_exec ( int argc, char **argv, + struct cert_command_descriptor *certcmd ) { + struct command_descriptor *cmd = &certcmd->cmd; + struct cert_options opts; + struct image *image = NULL; + struct x509_certificate *cert; + struct x509_certificate *tmp; + unsigned int count = 0; + size_t offset = 0; + int next; + int rc; + + /* Parse options */ + if ( ( rc = parse_options ( argc, argv, cmd, &opts ) ) != 0 ) + goto err_parse; + + /* Acquire image, if applicable */ + if ( ( optind < argc ) && + ( ( rc = imgacquire ( argv[optind], 0, &image ) ) != 0 ) ) + goto err_acquire; + + /* Get first entry in certificate store */ + tmp = list_first_entry ( &certstore.links, struct x509_certificate, + store.list ); + + /* Iterate over certificates */ + while ( 1 ) { + + /* Get next certificate from image or store as applicable */ + if ( image ) { + + /* Get next certificate from image */ + if ( offset >= image->len ) + break; + next = image_x509 ( image, offset, &cert ); + if ( next < 0 ) { + rc = next; + printf ( "Could not parse certificate: %s\n", + strerror ( rc ) ); + goto err_x509; + } + offset = next; + + } else { + + /* Get next certificate from store */ + cert = tmp; + if ( ! 
cert ) + break; + tmp = list_next_entry ( tmp, &certstore.links, + store.list ); + x509_get ( cert ); + } + + /* Skip non-matching names, if a name was specified */ + if ( opts.name && ( x509_check_name ( cert, opts.name ) != 0 )){ + x509_put ( cert ); + continue; + } + + /* Execute payload */ + if ( ( rc = certcmd->payload ( cert ) ) != 0 ) { + x509_put ( cert ); + goto err_payload; + } + + /* Count number of certificates processed */ + count++; + + /* Drop reference to certificate */ + x509_put ( cert ); + } + + /* Fail if a name was specified and no matching certificates + * were found. + */ + if ( opts.name && ( count == 0 ) ) { + printf ( "\"%s\" : no such certificate\n", opts.name ); + rc = -ENOENT; + goto err_none; + } + + err_none: + err_payload: + err_x509: + if ( image && ( ! opts.keep ) ) + unregister_image ( image ); + err_acquire: + err_parse: + return rc; +} + +/** + * "certstat" payload + * + * @v cert X.509 certificate + * @ret rc Return status code + */ +static int certstat_payload ( struct x509_certificate *cert ) { + + certstat ( cert ); + return 0; +} + +/** "certstat" command descriptor */ +static struct cert_command_descriptor certstat_cmd = + CERT_COMMAND_DESC ( struct cert_options, opts.certstat, 0, 0, NULL, + certstat_payload ); + +/** + * The "certstat" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int certstat_exec ( int argc, char **argv ) { + + return cert_exec ( argc, argv, &certstat_cmd ); +} + +/** + * "certstore" payload + * + * @v cert X.509 certificate + * @ret rc Return status code + */ +static int certstore_payload ( struct x509_certificate *cert ) { + + /* Mark certificate as having been added explicitly */ + cert->flags |= X509_FL_EXPLICIT; + + return 0; +} + +/** "certstore" command descriptor */ +static struct cert_command_descriptor certstore_cmd = + CERT_COMMAND_DESC ( struct cert_options, opts.certstore, 0, 1, + "[]", certstore_payload ); + +/** + * The 
"certstore" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int certstore_exec ( int argc, char **argv ) { + + return cert_exec ( argc, argv, &certstore_cmd ); +} + +/** + * "certfree" payload + * + * @v cert X.509 certificate + * @ret rc Return status code + */ +static int certfree_payload ( struct x509_certificate *cert ) { + + /* Remove from certificate store */ + certstore_del ( cert ); + + return 0; +} + +/** "certfree" command descriptor */ +static struct cert_command_descriptor certfree_cmd = + CERT_COMMAND_DESC ( struct cert_options, opts.certfree, 0, 0, NULL, + certfree_payload ); + +/** + * The "certfree" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int certfree_exec ( int argc, char **argv ) { + + return cert_exec ( argc, argv, &certfree_cmd ); +} + +/** Certificate management commands */ +struct command certmgmt_commands[] __command = { + { + .name = "certstat", + .exec = certstat_exec, + }, + { + .name = "certstore", + .exec = certstore_exec, + }, + { + .name = "certfree", + .exec = certfree_exec, + }, +}; diff --git a/src/hci/commands/ibmgmt_cmd.c b/src/hci/commands/ibmgmt_cmd.c new file mode 100644 index 00000000..1154d749 --- /dev/null +++ b/src/hci/commands/ibmgmt_cmd.c @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Infiniband device management commands + * + */ + +/** "ibstat" options */ +struct ibstat_options {}; + +/** "ibstat" option list */ +static struct option_descriptor ibstat_opts[] = {}; + +/** "ibstat" command descriptor */ +static struct command_descriptor ibstat_cmd = + COMMAND_DESC ( struct ibstat_options, ibstat_opts, 0, 0, "" ); + +/** + * The "ibstat" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int ibstat_exec ( int argc, char **argv ) { + struct ibstat_options opts; + struct ib_device *ibdev; + int rc; + + /* Parse options */ + if ( ( rc = parse_options ( argc, argv, &ibstat_cmd, &opts ) ) != 0 ) + return rc; + + /* Show all Infiniband devices */ + for_each_ibdev ( ibdev ) + ibstat ( ibdev ); + + return 0; +} + +/** Infiniband commands */ +struct command ibmgmt_commands[] __command = { + { + .name = "ibstat", + .exec = ibstat_exec, + }, +}; diff --git a/src/hci/commands/ntp_cmd.c b/src/hci/commands/ntp_cmd.c new file mode 100644 index 00000000..8f741a51 --- /dev/null +++ b/src/hci/commands/ntp_cmd.c @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * NTP commands + * + */ + +/** "ntp" options */ +struct ntp_options {}; + +/** "ntp" option list */ +static struct option_descriptor ntp_opts[] = {}; + +/** "ntp" command descriptor */ +static struct command_descriptor ntp_cmd = + COMMAND_DESC ( struct ntp_options, ntp_opts, 1, 1, "" ); + +/** + * "ntp" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int ntp_exec ( int argc, char **argv ) { + struct ntp_options opts; + const char *hostname; + int rc; + + /* Parse options */ + if ( ( rc = parse_options ( argc, argv, &ntp_cmd, &opts ) ) != 0 ) + return rc; + + /* Parse hostname */ + hostname = argv[optind]; + + /* Get time and date via NTP */ + if ( ( rc = ntp ( hostname ) ) != 0 ) { + printf ( "Could not get time and date: %s\n", strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** NTP command */ +struct command ntp_command __command = { + .name = "ntp", + .exec = ntp_exec, +}; diff --git a/src/hci/jumpscroll.c b/src/hci/jumpscroll.c new file mode 100644 index 00000000..dd6bcac2 --- /dev/null +++ b/src/hci/jumpscroll.c @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * Jump scrolling + * + */ + +#include +#include +#include + +/** + * Handle keypress + * + * @v scroll Jump scroller + * @v key Key pressed by user + * @ret move Scroller movement, or zero + */ +int jump_scroll_key ( struct jump_scroller *scroll, int key ) { + + /* Sanity checks */ + assert ( scroll->rows != 0 ); + assert ( scroll->count != 0 ); + assert ( scroll->current < scroll->count ); + assert ( scroll->first < scroll->count ); + assert ( scroll->first <= scroll->current ); + assert ( scroll->current < ( scroll->first + scroll->rows ) ); + + /* Handle key, if applicable */ + switch ( key ) { + case KEY_UP: + return -1; + case KEY_DOWN: + return +1; + case KEY_PPAGE: + return ( scroll->first - scroll->current - 1 ); + case KEY_NPAGE: + return ( scroll->first - scroll->current + scroll->rows ); + case KEY_HOME: + return -( scroll->count ); + case KEY_END: + return +( scroll->count ); + default: + return 0; + } +} + +/** + * Move scroller + * + * @v scroll Jump scroller + * 
@v move Scroller movement + * @ret move Continuing scroller movement (if applicable) + */ +int jump_scroll_move ( struct jump_scroller *scroll, int move ) { + int current = scroll->current; + int last = ( scroll->count - 1 ); + + /* Sanity checks */ + assert ( move != 0 ); + assert ( scroll->count != 0 ); + + /* Move to the new current item */ + current += move; + + /* Check for start/end of list */ + if ( current < 0 ) { + /* We have attempted to move before the start of the + * list. Move to the start of the list and continue + * moving forwards (if applicable). + */ + scroll->current = 0; + return +1; + } else if ( current > last ) { + /* We have attempted to move after the end of the + * list. Move to the end of the list and continue + * moving backwards (if applicable). + */ + scroll->current = last; + return -1; + } else { + /* Update the current item and continue moving in the + * same direction (if applicable). + */ + scroll->current = current; + return ( ( move > 0 ) ? +1 : -1 ); + } +} + +/** + * Jump scroll to new page (if applicable) + * + * @v scroll Jump scroller + * @ret jumped Jumped to a new page + */ +int jump_scroll ( struct jump_scroller *scroll ) { + unsigned int index; + + /* Sanity checks */ + assert ( scroll->rows != 0 ); + assert ( scroll->count != 0 ); + assert ( scroll->current < scroll->count ); + assert ( scroll->first < scroll->count ); + + /* Do nothing if we are already on the correct page */ + index = ( scroll->current - scroll->first ); + if ( index < scroll->rows ) + return 0; + + /* Move to required page */ + while ( scroll->first < scroll->current ) + scroll->first += scroll->rows; + while ( scroll->first > scroll->current ) + scroll->first -= scroll->rows; + + return 1; +} diff --git a/src/image/der.c b/src/image/der.c new file mode 100644 index 00000000..fa17e565 --- /dev/null +++ b/src/image/der.c @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2016 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * DER-encoded ASN.1 data + * + */ + +/** + * Extract ASN.1 object from image + * + * @v image DER image + * @v offset Offset within image + * @v cursor ASN.1 cursor to fill in + * @ret next Offset to next image, or negative error + * + * The caller is responsible for eventually calling free() on the + * allocated ASN.1 cursor. + */ +static int der_asn1 ( struct image *image, size_t offset __unused, + struct asn1_cursor **cursor ) { + void *data; + + /* Allocate cursor and data buffer */ + *cursor = malloc ( sizeof ( **cursor ) + image->len ); + if ( ! 
*cursor ) + return -ENOMEM; + data = ( ( ( void * ) *cursor ) + sizeof ( **cursor ) ); + + /* Populate cursor and data buffer */ + (*cursor)->data = data; + (*cursor)->len = image->len; + copy_from_user ( data, image->data, 0, image->len ); + + return image->len; +} + +/** + * Probe DER image + * + * @v image DER image + * @ret rc Return status code + */ +static int der_probe ( struct image *image ) { + struct asn1_cursor cursor; + uint8_t buf[8]; + size_t extra; + size_t total; + int len; + int rc; + + /* Sanity check: no realistic DER image can be smaller than this */ + if ( image->len < sizeof ( buf ) ) + return -ENOEXEC; + + /* Prepare partial cursor */ + cursor.data = buf; + cursor.len = sizeof ( buf ); + copy_from_user ( buf, image->data, 0, sizeof ( buf ) ); + extra = ( image->len - sizeof ( buf ) ); + + /* Get length of ASN.1 sequence */ + len = asn1_start ( &cursor, ASN1_SEQUENCE, extra ); + if ( len < 0 ) { + rc = len; + DBGC ( image, "DER %s is not valid ASN.1: %s\n", + image->name, strerror ( rc ) ); + return rc; + } + + /* Add length of tag and length bytes consumed by asn1_start() */ + total = ( len + ( cursor.data - ( ( void * ) buf ) ) ); + assert ( total <= image->len ); + + /* Check that image comprises a single well-formed ASN.1 object */ + if ( total != image->len ) { + DBGC ( image, "DER %s is not single ASN.1\n", image->name ); + return -ENOEXEC; + } + + return 0; +} + +/** DER image type */ +struct image_type der_image_type __image_type ( PROBE_NORMAL ) = { + .name = "DER", + .probe = der_probe, + .asn1 = der_asn1, +}; diff --git a/src/image/pem.c b/src/image/pem.c new file mode 100644 index 00000000..2dcc3644 --- /dev/null +++ b/src/image/pem.c @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2016 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * PEM-encoded ASN.1 data + * + */ + +/** + * Locate next line + * + * @v data PEM data + * @v len Length of PEM data + * @v offset Starting offset + * @ret next Offset to next line + */ +static size_t pem_next ( userptr_t data, size_t len, size_t offset ) { + off_t eol; + + /* Find and skip next newline character, if any */ + eol = memchr_user ( data, offset, '\n', ( len - offset ) ); + if ( eol < 0 ) + return len; + return ( eol + 1 ); +} + +/** + * Locate boundary marker line + * + * @v data PEM data + * @v len Length of PEM data + * @v offset Starting offset + * @v marker Boundary marker + * @ret offset Offset to boundary marker line, or negative error + */ +static int pem_marker ( userptr_t data, size_t len, size_t offset, + const char *marker ) { + char buf[ strlen ( marker ) ]; + + /* Sanity check */ + assert ( offset <= len ); + + /* Scan for marker at start of line */ + while ( offset < len ) { 
+ + /* Check for marker */ + if ( ( len - offset ) < sizeof ( buf ) ) + break; + copy_from_user ( buf, data, offset, sizeof ( buf ) ); + if ( memcmp ( buf, marker, sizeof ( buf ) ) == 0 ) + return offset; + + /* Move to next line */ + offset = pem_next ( data, len, offset ); + assert ( offset <= len ); + } + + return -ENOENT; +} + +/** + * Extract ASN.1 object from PEM data + * + * @v data PEM data + * @v len Length of PEM data + * @v offset Offset within data + * @v cursor ASN.1 cursor to fill in + * @ret next Offset to next object, or negative error + * + * The caller is responsible for eventually calling free() on the + * allocated ASN.1 cursor. + */ +int pem_asn1 ( userptr_t data, size_t len, size_t offset, + struct asn1_cursor **cursor ) { + size_t encoded_len; + size_t decoded_max_len; + char *encoded; + void *decoded; + int decoded_len; + int begin; + int end; + int rc; + + /* Locate and skip BEGIN marker */ + begin = pem_marker ( data, len, offset, PEM_BEGIN ); + if ( begin < 0 ) { + rc = begin; + DBGC ( data, "PEM [%#zx,%#zx) missing BEGIN marker: %s\n", + offset, len, strerror ( rc ) ); + goto err_begin; + } + begin = pem_next ( data, len, begin ); + + /* Locate and skip END marker */ + end = pem_marker ( data, len, begin, PEM_END ); + if ( end < 0 ) { + rc = end; + DBGC ( data, "PEM [%#zx,%#zx) missing END marker: %s\n", + offset, len, strerror ( rc ) ); + goto err_end; + } + encoded_len = ( end - begin ); + end = pem_next ( data, len, end ); + + /* Extract Base64-encoded data */ + encoded = malloc ( encoded_len + 1 /* NUL */ ); + if ( ! encoded ) { + rc = -ENOMEM; + goto err_alloc_encoded; + } + copy_from_user ( encoded, data, begin, encoded_len ); + encoded[encoded_len] = '\0'; + + /* Allocate cursor and data buffer */ + decoded_max_len = base64_decoded_max_len ( encoded ); + *cursor = malloc ( sizeof ( **cursor ) + decoded_max_len ); + if ( ! 
*cursor ) { + rc = -ENOMEM; + goto err_alloc_cursor; + } + decoded = ( ( ( void * ) *cursor ) + sizeof ( **cursor ) ); + + /* Decode Base64-encoded data */ + decoded_len = base64_decode ( encoded, decoded, decoded_max_len ); + if ( decoded_len < 0 ) { + rc = decoded_len; + DBGC ( data, "PEM could not decode: %s\n", strerror ( rc ) ); + goto err_decode; + } + (*cursor)->data = decoded; + (*cursor)->len = decoded_len; + assert ( (*cursor)->len <= decoded_max_len ); + + /* Free Base64-encoded data */ + free ( encoded ); + + /* Update offset and skip any unencapsulated trailer */ + offset = end; + if ( pem_marker ( data, len, offset, PEM_BEGIN ) < 0 ) + offset = len; + + return offset; + + err_decode: + free ( *cursor ); + *cursor = NULL; + err_alloc_cursor: + free ( encoded ); + err_alloc_encoded: + err_end: + err_begin: + return rc; +} + +/** + * Probe PEM image + * + * @v image PEM image + * @ret rc Return status code + */ +static int pem_image_probe ( struct image *image ) { + int offset; + int rc; + + /* Check that image contains a BEGIN marker */ + if ( ( offset = pem_marker ( image->data, image->len, 0, + PEM_BEGIN ) ) < 0 ) { + rc = offset; + DBGC ( image, "PEM %s has no BEGIN marker: %s\n", + image->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Extract ASN.1 object from image + * + * @v image PEM image + * @v offset Offset within image + * @v cursor ASN.1 cursor to fill in + * @ret next Offset to next image, or negative error + * + * The caller is responsible for eventually calling free() on the + * allocated ASN.1 cursor. 
+ */ +static int pem_image_asn1 ( struct image *image, size_t offset, + struct asn1_cursor **cursor ) { + int next; + int rc; + + /* Extract ASN.1 object */ + if ( ( next = pem_asn1 ( image->data, image->len, offset, + cursor ) ) < 0 ) { + rc = next; + DBGC ( image, "PEM %s could not extract ASN.1: %s\n", + image->name, strerror ( rc ) ); + return rc; + } + + return next; +} + +/** PEM image type */ +struct image_type pem_image_type __image_type ( PROBE_NORMAL ) = { + .name = "PEM", + .probe = pem_image_probe, + .asn1 = pem_image_asn1, +}; diff --git a/src/include/ipxe/blocktrans.h b/src/include/ipxe/blocktrans.h new file mode 100644 index 00000000..fee71b96 --- /dev/null +++ b/src/include/ipxe/blocktrans.h @@ -0,0 +1,38 @@ +#ifndef _IPXE_BLOCKTRANS_H +#define _IPXE_BLOCKTRANS_H + +/** @file + * + * Block device translator + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** A block device translator */ +struct block_translator { + /** Reference count */ + struct refcnt refcnt; + /** Block device interface */ + struct interface block; + /** Data transfer interface */ + struct interface xfer; + + /** Data transfer buffer */ + struct xfer_buffer xferbuf; + /** Data buffer */ + userptr_t buffer; + /** Block size */ + size_t blksize; +}; + +extern int block_translate ( struct interface *block, + userptr_t buffer, size_t size ); + +#endif /* _IPXE_BLOCKTRANS_H */ diff --git a/src/include/ipxe/cdc.h b/src/include/ipxe/cdc.h new file mode 100644 index 00000000..b8b4a59d --- /dev/null +++ b/src/include/ipxe/cdc.h @@ -0,0 +1,104 @@ +#ifndef _IPXE_CDC_H +#define _IPXE_CDC_H + +/** @file + * + * USB Communications Device Class (CDC) + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** Class code for communications devices */ +#define USB_CLASS_CDC 2 + +/** Send encapsulated command */ +#define CDC_SEND_ENCAPSULATED_COMMAND \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE 
( 0x00 ) ) + +/** Get encapsulated response */ +#define CDC_GET_ENCAPSULATED_RESPONSE \ + ( USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x01 ) ) + +/** Union functional descriptor */ +struct cdc_union_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Descriptor subtype */ + uint8_t subtype; + /** Interfaces (variable-length) */ + uint8_t interface[1]; +} __attribute__ (( packed )); + +/** Union functional descriptor subtype */ +#define CDC_SUBTYPE_UNION 6 + +/** Ethernet descriptor subtype */ +#define CDC_SUBTYPE_ETHERNET 15 + +/** Response available */ +#define CDC_RESPONSE_AVAILABLE \ + ( USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x01 ) ) + +/** Network connection notification */ +#define CDC_NETWORK_CONNECTION \ + ( USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x00 ) ) + +/** Connection speed change notification */ +#define CDC_CONNECTION_SPEED_CHANGE \ + ( USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x2a ) ) + +/** Connection speed change notification */ +struct cdc_connection_speed_change { + /** Downlink bit rate, in bits per second */ + uint32_t down; + /** Uplink bit rate, in bits per second */ + uint32_t up; +} __attribute__ (( packed )); + +extern struct cdc_union_descriptor * +cdc_union_descriptor ( struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface ); + +/** + * Send encapsulated command + * + * @v usb USB device + * @v interface Interface number + * @v data Command + * @v len Length of command + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +cdc_send_encapsulated_command ( struct usb_device *usb, unsigned int interface, + void *data, size_t len ) { + + return usb_control ( usb, CDC_SEND_ENCAPSULATED_COMMAND, 0, interface, + data, len ); +} + +/** +* Get encapsulated response +* +* @v usb USB device +* @v interface 
Interface number +* @v data Response buffer +* @v len Length of response buffer +* @ret rc Return status code +*/ +static inline __attribute__ (( always_inline )) int +cdc_get_encapsulated_response ( struct usb_device *usb, unsigned int interface, + void *data, size_t len ) { + + return usb_control ( usb, CDC_GET_ENCAPSULATED_RESPONSE, 0, interface, + data, len ); +} + +#endif /* _IPXE_CDC_H */ diff --git a/src/include/ipxe/der.h b/src/include/ipxe/der.h new file mode 100644 index 00000000..c63bd975 --- /dev/null +++ b/src/include/ipxe/der.h @@ -0,0 +1,16 @@ +#ifndef _IPXE_DER_H +#define _IPXE_DER_H + +/** @file + * + * DER image format + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern struct image_type der_image_type __image_type ( PROBE_NORMAL ); + +#endif /* _IPXE_DER_H */ diff --git a/src/include/ipxe/dummy_sanboot.h b/src/include/ipxe/dummy_sanboot.h new file mode 100644 index 00000000..9c9d942a --- /dev/null +++ b/src/include/ipxe/dummy_sanboot.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_DUMMY_SANBOOT_H +#define _IPXE_DUMMY_SANBOOT_H + +/** @file + * + * Dummy SAN device + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef SANBOOT_DUMMY +#define SANBOOT_PREFIX_dummy +#else +#define SANBOOT_PREFIX_dummy __dummy_ +#endif + +#endif /* _IPXE_DUMMY_SANBOOT_H */ diff --git a/src/include/ipxe/ecb.h b/src/include/ipxe/ecb.h new file mode 100644 index 00000000..4e6aa3c8 --- /dev/null +++ b/src/include/ipxe/ecb.h @@ -0,0 +1,55 @@ +#ifndef _IPXE_ECB_H +#define _IPXE_ECB_H + +/** @file + * + * Electronic codebook (ECB) + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern void ecb_encrypt ( void *ctx, const void *src, void *dst, + size_t len, struct cipher_algorithm *raw_cipher ); +extern void ecb_decrypt ( void *ctx, const void *src, void *dst, + size_t len, struct cipher_algorithm *raw_cipher ); + +/** + * Create a cipher-block chaining mode of behaviour of an existing cipher + * + * @v _ecb_name Name for the new ECB cipher + 
* @v _ecb_cipher New cipher algorithm + * @v _raw_cipher Underlying cipher algorithm + * @v _raw_context Context structure for the underlying cipher + * @v _blocksize Cipher block size + */ +#define ECB_CIPHER( _ecb_name, _ecb_cipher, _raw_cipher, _raw_context, \ + _blocksize ) \ +static int _ecb_name ## _setkey ( void *ctx, const void *key, \ + size_t keylen ) { \ + return cipher_setkey ( &_raw_cipher, ctx, key, keylen ); \ +} \ +static void _ecb_name ## _setiv ( void *ctx, const void *iv ) { \ + cipher_setiv ( &_raw_cipher, ctx, iv ); \ +} \ +static void _ecb_name ## _encrypt ( void *ctx, const void *src, \ + void *dst, size_t len ) { \ + ecb_encrypt ( ctx, src, dst, len, &_raw_cipher ); \ +} \ +static void _ecb_name ## _decrypt ( void *ctx, const void *src, \ + void *dst, size_t len ) { \ + ecb_decrypt ( ctx, src, dst, len, &_raw_cipher ); \ +} \ +struct cipher_algorithm _ecb_cipher = { \ + .name = #_ecb_name, \ + .ctxsize = sizeof ( _raw_context ), \ + .blocksize = _blocksize, \ + .setkey = _ecb_name ## _setkey, \ + .setiv = _ecb_name ## _setiv, \ + .encrypt = _ecb_name ## _encrypt, \ + .decrypt = _ecb_name ## _decrypt, \ +}; + +#endif /* _IPXE_ECB_H */ diff --git a/src/include/ipxe/efi/AArch64/ProcessorBind.h b/src/include/ipxe/efi/AArch64/ProcessorBind.h new file mode 100644 index 00000000..909b5cde --- /dev/null +++ b/src/include/ipxe/efi/AArch64/ProcessorBind.h @@ -0,0 +1,156 @@ +/** @file + Processor or Compiler specific defines and types for AArch64. + + Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+ Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.
+ + This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __PROCESSOR_BIND_H__ +#define __PROCESSOR_BIND_H__ + +FILE_LICENCE ( BSD3 ); + +/// +/// Define the processor type so other code can make processor based choices +/// +#define MDE_CPU_AARCH64 + +// +// Make sure we are using the correct packing rules per EFI specification +// +#ifndef __GNUC__ +#pragma pack() +#endif + +#if _MSC_EXTENSIONS + // + // use Microsoft* C compiler dependent integer width types + // + typedef unsigned __int64 UINT64; + typedef __int64 INT64; + typedef unsigned __int32 UINT32; + typedef __int32 INT32; + typedef unsigned short UINT16; + typedef unsigned short CHAR16; + typedef short INT16; + typedef unsigned char BOOLEAN; + typedef unsigned char UINT8; + typedef char CHAR8; + typedef signed char INT8; +#else + // + // Assume standard AARCH64 alignment. + // + typedef unsigned long long UINT64; + typedef long long INT64; + typedef unsigned int UINT32; + typedef int INT32; + typedef unsigned short UINT16; + typedef unsigned short CHAR16; + typedef short INT16; + typedef unsigned char BOOLEAN; + typedef unsigned char UINT8; + typedef char CHAR8; + typedef signed char INT8; +#endif + +/// +/// Unsigned value of native width. (4 bytes on supported 32-bit processor instructions, +/// 8 bytes on supported 64-bit processor instructions) +/// +typedef UINT64 UINTN; + +/// +/// Signed value of native width. 
(4 bytes on supported 32-bit processor instructions, +/// 8 bytes on supported 64-bit processor instructions) +/// +typedef INT64 INTN; + +// +// Processor specific defines +// + +/// +/// A value of native width with the highest bit set. +/// +#define MAX_BIT 0x8000000000000000ULL + +/// +/// A value of native width with the two highest bits set. +/// +#define MAX_2_BITS 0xC000000000000000ULL + +/// +/// Maximum legal AARCH64 address +/// +#define MAX_ADDRESS 0xFFFFFFFFFFFFFFFFULL + +/// +/// Maximum legal AArch64 INTN and UINTN values. +/// +#define MAX_INTN ((INTN)0x7FFFFFFFFFFFFFFFULL) +#define MAX_UINTN ((UINTN)0xFFFFFFFFFFFFFFFFULL) + +/// +/// The stack alignment required for AARCH64 +/// +#define CPU_STACK_ALIGNMENT 16 + +/// +/// Page allocation granularity for AARCH64 +/// +#define DEFAULT_PAGE_ALLOCATION_GRANULARITY (0x1000) +#define RUNTIME_PAGE_ALLOCATION_GRANULARITY (0x10000) + +// +// Modifier to ensure that all protocol member functions and EFI intrinsics +// use the correct C calling convention. All protocol member functions and +// EFI intrinsics are required to modify their member functions with EFIAPI. +// +#define EFIAPI + +// When compiling with Clang, we still use GNU as for the assembler, so we still +// need to define the GCC_ASM* macros. +#if defined(__GNUC__) || defined(__clang__) + /// + /// For GNU assembly code, .global or .globl can declare global symbols. + /// Define this macro to unify the usage. + /// + #define ASM_GLOBAL .globl + + #define GCC_ASM_EXPORT(func__) \ + .global _CONCATENATE (__USER_LABEL_PREFIX__, func__) ;\ + .type ASM_PFX(func__), %function + + #define GCC_ASM_IMPORT(func__) \ + .extern _CONCATENATE (__USER_LABEL_PREFIX__, func__) + +#endif + +/** + Return the pointer to the first instruction of a function given a function pointer. + On ARM CPU architectures, these two pointer values are the same, + so the implementation of this macro is very simple. + + @param FunctionPointer A pointer to a function. 
+ + @return The pointer to the first instruction of a function given a function pointer. + +**/ +#define FUNCTION_ENTRY_POINT(FunctionPointer) (VOID *)(UINTN)(FunctionPointer) + +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ +#endif + +#endif diff --git a/src/include/ipxe/efi/Arm/ProcessorBind.h b/src/include/ipxe/efi/Arm/ProcessorBind.h new file mode 100644 index 00000000..efe3bf17 --- /dev/null +++ b/src/include/ipxe/efi/Arm/ProcessorBind.h @@ -0,0 +1,184 @@ +/** @file + Processor or Compiler specific defines and types for ARM. + + Copyright (c) 2006 - 2013, Intel Corporation. All rights reserved.
+ Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __PROCESSOR_BIND_H__ +#define __PROCESSOR_BIND_H__ + +FILE_LICENCE ( BSD3 ); + +/// +/// Define the processor type so other code can make processor based choices +/// +#define MDE_CPU_ARM + +// +// Make sure we are using the correct packing rules per EFI specification +// +#ifndef __GNUC__ +#pragma pack() +#endif + +// +// RVCT does not support the __builtin_unreachable() macro +// +#ifdef __ARMCC_VERSION +#define UNREACHABLE() +#endif + +#if _MSC_EXTENSIONS + // + // use Microsoft* C compiler dependent integer width types + // + typedef unsigned __int64 UINT64; + typedef __int64 INT64; + typedef unsigned __int32 UINT32; + typedef __int32 INT32; + typedef unsigned short UINT16; + typedef unsigned short CHAR16; + typedef short INT16; + typedef unsigned char BOOLEAN; + typedef unsigned char UINT8; + typedef char CHAR8; + typedef signed char INT8; +#else + // + // Assume standard ARM alignment. + // Need to check portability of long long + // + typedef unsigned long long UINT64; + typedef long long INT64; + typedef unsigned int UINT32; + typedef int INT32; + typedef unsigned short UINT16; + typedef unsigned short CHAR16; + typedef short INT16; + typedef unsigned char BOOLEAN; + typedef unsigned char UINT8; + typedef char CHAR8; + typedef signed char INT8; +#endif + +/// +/// Unsigned value of native width. (4 bytes on supported 32-bit processor instructions, +/// 8 bytes on supported 64-bit processor instructions) +/// +typedef UINT32 UINTN; + +/// +/// Signed value of native width. 
(4 bytes on supported 32-bit processor instructions, +/// 8 bytes on supported 64-bit processor instructions) +/// +typedef INT32 INTN; + +// +// Processor specific defines +// + +/// +/// A value of native width with the highest bit set. +/// +#define MAX_BIT 0x80000000 + +/// +/// A value of native width with the two highest bits set. +/// +#define MAX_2_BITS 0xC0000000 + +/// +/// Maximum legal ARM address +/// +#define MAX_ADDRESS 0xFFFFFFFF + +/// +/// Maximum legal ARM INTN and UINTN values. +/// +#define MAX_INTN ((INTN)0x7FFFFFFF) +#define MAX_UINTN ((UINTN)0xFFFFFFFF) + +/// +/// The stack alignment required for ARM +/// +#define CPU_STACK_ALIGNMENT sizeof(UINT64) + +/// +/// Page allocation granularity for ARM +/// +#define DEFAULT_PAGE_ALLOCATION_GRANULARITY (0x1000) +#define RUNTIME_PAGE_ALLOCATION_GRANULARITY (0x1000) + +// +// Modifier to ensure that all protocol member functions and EFI intrinsics +// use the correct C calling convention. All protocol member functions and +// EFI intrinsics are required to modify their member functions with EFIAPI. +// +#define EFIAPI + +// When compiling with Clang, we still use GNU as for the assembler, so we still +// need to define the GCC_ASM* macros. +#if defined(__GNUC__) || defined(__clang__) + /// + /// For GNU assembly code, .global or .globl can declare global symbols. + /// Define this macro to unify the usage. + /// + #define ASM_GLOBAL .globl + + #if !defined(__APPLE__) + /// + /// ARM EABI defines that the linker should not manipulate call relocations + /// (do bl/blx conversion) unless the target symbol has function type. 
+ /// CodeSourcery 2010.09 started requiring the .type to function properly + /// + #define INTERWORK_FUNC(func__) .type ASM_PFX(func__), %function + + #define GCC_ASM_EXPORT(func__) \ + .global _CONCATENATE (__USER_LABEL_PREFIX__, func__) ;\ + .type ASM_PFX(func__), %function + + #define GCC_ASM_IMPORT(func__) \ + .extern _CONCATENATE (__USER_LABEL_PREFIX__, func__) + + #else + // + // .type not supported by Apple Xcode tools + // + #define INTERWORK_FUNC(func__) + + #define GCC_ASM_EXPORT(func__) \ + .globl _CONCATENATE (__USER_LABEL_PREFIX__, func__) \ + + #define GCC_ASM_IMPORT(name) + + #endif +#endif + +/** + Return the pointer to the first instruction of a function given a function pointer. + On ARM CPU architectures, these two pointer values are the same, + so the implementation of this macro is very simple. + + @param FunctionPointer A pointer to a function. + + @return The pointer to the first instruction of a function given a function pointer. + +**/ +#define FUNCTION_ENTRY_POINT(FunctionPointer) (VOID *)(UINTN)(FunctionPointer) + +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ +#endif + +#endif + + diff --git a/src/include/ipxe/efi/Guid/Acpi.h b/src/include/ipxe/efi/Guid/Acpi.h new file mode 100644 index 00000000..c4169c5f --- /dev/null +++ b/src/include/ipxe/efi/Guid/Acpi.h @@ -0,0 +1,48 @@ +/** @file + GUIDs used for ACPI entries in the EFI system table + + These GUIDs point the ACPI tables as defined in the ACPI specifications. + ACPI 2.0 specification defines the ACPI 2.0 GUID. UEFI 2.0 defines the + ACPI 2.0 Table GUID and ACPI Table GUID. + + Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + GUIDs defined in UEFI 2.0 spec. + +**/ + +#ifndef __ACPI_GUID_H__ +#define __ACPI_GUID_H__ + +FILE_LICENCE ( BSD3 ); + +#define ACPI_TABLE_GUID \ + { \ + 0xeb9d2d30, 0x2d88, 0x11d3, {0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d } \ + } + +#define EFI_ACPI_TABLE_GUID \ + { \ + 0x8868e871, 0xe4f1, 0x11d3, {0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 } \ + } + +#define ACPI_10_TABLE_GUID ACPI_TABLE_GUID + +// +// ACPI 2.0 or newer tables should use EFI_ACPI_TABLE_GUID. +// +#define EFI_ACPI_20_TABLE_GUID EFI_ACPI_TABLE_GUID + +extern EFI_GUID gEfiAcpiTableGuid; +extern EFI_GUID gEfiAcpi10TableGuid; +extern EFI_GUID gEfiAcpi20TableGuid; + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi10.h b/src/include/ipxe/efi/IndustryStandard/Acpi10.h new file mode 100644 index 00000000..78570479 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi10.h @@ -0,0 +1,663 @@ +/** @file + ACPI 1.0b definitions from the ACPI Specification, revision 1.0b + +Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_1_0_H_ +#define _ACPI_1_0_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure. +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_COMMON_HEADER; + +#pragma pack(1) +/// +/// The common ACPI description table header. This structure prefaces most ACPI tables. +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT8 Revision; + UINT8 Checksum; + UINT8 OemId[6]; + UINT64 OemTableId; + UINT32 OemRevision; + UINT32 CreatorId; + UINT32 CreatorRevision; +} EFI_ACPI_DESCRIPTION_HEADER; +#pragma pack() + +// +// Define for Desriptor +// +#define ACPI_SMALL_ITEM_FLAG 0x00 +#define ACPI_LARGE_ITEM_FLAG 0x01 + +// +// Small Item Descriptor Name +// +#define ACPI_SMALL_IRQ_DESCRIPTOR_NAME 0x04 +#define ACPI_SMALL_DMA_DESCRIPTOR_NAME 0x05 +#define ACPI_SMALL_START_DEPENDENT_DESCRIPTOR_NAME 0x06 +#define ACPI_SMALL_END_DEPENDENT_DESCRIPTOR_NAME 0x07 +#define ACPI_SMALL_IO_PORT_DESCRIPTOR_NAME 0x08 +#define ACPI_SMALL_FIXED_IO_PORT_DESCRIPTOR_NAME 0x09 +#define ACPI_SMALL_VENDOR_DEFINED_DESCRIPTOR_NAME 0x0E +#define ACPI_SMALL_END_TAG_DESCRIPTOR_NAME 0x0F + +// +// Large Item Descriptor Name +// +#define ACPI_LARGE_24_BIT_MEMORY_RANGE_DESCRIPTOR_NAME 0x01 +#define ACPI_LARGE_VENDOR_DEFINED_DESCRIPTOR_NAME 0x04 +#define ACPI_LARGE_32_BIT_MEMORY_RANGE_DESCRIPTOR_NAME 0x05 +#define ACPI_LARGE_32_BIT_FIXED_MEMORY_RANGE_DESCRIPTOR_NAME 0x06 +#define ACPI_LARGE_DWORD_ADDRESS_SPACE_DESCRIPTOR_NAME 0x07 +#define 
ACPI_LARGE_WORD_ADDRESS_SPACE_DESCRIPTOR_NAME 0x08 +#define ACPI_LARGE_EXTENDED_IRQ_DESCRIPTOR_NAME 0x09 +#define ACPI_LARGE_QWORD_ADDRESS_SPACE_DESCRIPTOR_NAME 0x0A + +// +// Small Item Descriptor Value +// +#define ACPI_IRQ_NOFLAG_DESCRIPTOR 0x22 +#define ACPI_IRQ_DESCRIPTOR 0x23 +#define ACPI_DMA_DESCRIPTOR 0x2A +#define ACPI_START_DEPENDENT_DESCRIPTOR 0x30 +#define ACPI_START_DEPENDENT_EX_DESCRIPTOR 0x31 +#define ACPI_END_DEPENDENT_DESCRIPTOR 0x38 +#define ACPI_IO_PORT_DESCRIPTOR 0x47 +#define ACPI_FIXED_LOCATION_IO_PORT_DESCRIPTOR 0x4B +#define ACPI_END_TAG_DESCRIPTOR 0x79 + +// +// Large Item Descriptor Value +// +#define ACPI_24_BIT_MEMORY_RANGE_DESCRIPTOR 0x81 +#define ACPI_32_BIT_MEMORY_RANGE_DESCRIPTOR 0x85 +#define ACPI_32_BIT_FIXED_MEMORY_RANGE_DESCRIPTOR 0x86 +#define ACPI_DWORD_ADDRESS_SPACE_DESCRIPTOR 0x87 +#define ACPI_WORD_ADDRESS_SPACE_DESCRIPTOR 0x88 +#define ACPI_EXTENDED_INTERRUPT_DESCRIPTOR 0x89 +#define ACPI_QWORD_ADDRESS_SPACE_DESCRIPTOR 0x8A +#define ACPI_ADDRESS_SPACE_DESCRIPTOR 0x8A + +// +// Resource Type +// +#define ACPI_ADDRESS_SPACE_TYPE_MEM 0x00 +#define ACPI_ADDRESS_SPACE_TYPE_IO 0x01 +#define ACPI_ADDRESS_SPACE_TYPE_BUS 0x02 + +/// +/// Power Management Timer frequency is fixed at 3.579545MHz. +/// +#define ACPI_TIMER_FREQUENCY 3579545 + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// The commond definition of QWORD, DWORD, and WORD +/// Address Space Descriptors. 
+/// +typedef PACKED struct { + UINT8 Desc; + UINT16 Len; + UINT8 ResType; + UINT8 GenFlag; + UINT8 SpecificFlag; + UINT64 AddrSpaceGranularity; + UINT64 AddrRangeMin; + UINT64 AddrRangeMax; + UINT64 AddrTranslationOffset; + UINT64 AddrLen; +} EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR; + +typedef PACKED union { + UINT8 Byte; + PACKED struct { + UINT8 Length : 3; + UINT8 Name : 4; + UINT8 Type : 1; + } Bits; +} ACPI_SMALL_RESOURCE_HEADER; + +typedef PACKED struct { + PACKED union { + UINT8 Byte; + PACKED struct { + UINT8 Name : 7; + UINT8 Type : 1; + }Bits; + } Header; + UINT16 Length; +} ACPI_LARGE_RESOURCE_HEADER; + +/// +/// IRQ Descriptor. +/// +typedef PACKED struct { + ACPI_SMALL_RESOURCE_HEADER Header; + UINT16 Mask; +} EFI_ACPI_IRQ_NOFLAG_DESCRIPTOR; + +/// +/// IRQ Descriptor. +/// +typedef PACKED struct { + ACPI_SMALL_RESOURCE_HEADER Header; + UINT16 Mask; + UINT8 Information; +} EFI_ACPI_IRQ_DESCRIPTOR; + +/// +/// DMA Descriptor. +/// +typedef PACKED struct { + ACPI_SMALL_RESOURCE_HEADER Header; + UINT8 ChannelMask; + UINT8 Information; +} EFI_ACPI_DMA_DESCRIPTOR; + +/// +/// I/O Port Descriptor +/// +typedef PACKED struct { + ACPI_SMALL_RESOURCE_HEADER Header; + UINT8 Information; + UINT16 BaseAddressMin; + UINT16 BaseAddressMax; + UINT8 Alignment; + UINT8 Length; +} EFI_ACPI_IO_PORT_DESCRIPTOR; + +/// +/// Fixed Location I/O Port Descriptor. 
+/// +typedef PACKED struct { + ACPI_SMALL_RESOURCE_HEADER Header; + UINT16 BaseAddress; + UINT8 Length; +} EFI_ACPI_FIXED_LOCATION_IO_PORT_DESCRIPTOR; + +/// +/// 24-Bit Memory Range Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 Information; + UINT16 BaseAddressMin; + UINT16 BaseAddressMax; + UINT16 Alignment; + UINT16 Length; +} EFI_ACPI_24_BIT_MEMORY_RANGE_DESCRIPTOR; + +/// +/// 32-Bit Memory Range Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 Information; + UINT32 BaseAddressMin; + UINT32 BaseAddressMax; + UINT32 Alignment; + UINT32 Length; +} EFI_ACPI_32_BIT_MEMORY_RANGE_DESCRIPTOR; + +/// +/// Fixed 32-Bit Fixed Memory Range Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 Information; + UINT32 BaseAddress; + UINT32 Length; +} EFI_ACPI_32_BIT_FIXED_MEMORY_RANGE_DESCRIPTOR; + +/// +/// QWORD Address Space Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 ResType; + UINT8 GenFlag; + UINT8 SpecificFlag; + UINT64 AddrSpaceGranularity; + UINT64 AddrRangeMin; + UINT64 AddrRangeMax; + UINT64 AddrTranslationOffset; + UINT64 AddrLen; +} EFI_ACPI_QWORD_ADDRESS_SPACE_DESCRIPTOR; + +/// +/// DWORD Address Space Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 ResType; + UINT8 GenFlag; + UINT8 SpecificFlag; + UINT32 AddrSpaceGranularity; + UINT32 AddrRangeMin; + UINT32 AddrRangeMax; + UINT32 AddrTranslationOffset; + UINT32 AddrLen; +} EFI_ACPI_DWORD_ADDRESS_SPACE_DESCRIPTOR; + +/// +/// WORD Address Space Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 ResType; + UINT8 GenFlag; + UINT8 SpecificFlag; + UINT16 AddrSpaceGranularity; + UINT16 AddrRangeMin; + UINT16 AddrRangeMax; + UINT16 AddrTranslationOffset; + UINT16 AddrLen; +} EFI_ACPI_WORD_ADDRESS_SPACE_DESCRIPTOR; + +/// +/// Extended Interrupt Descriptor +/// +typedef PACKED struct { + 
ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 InterruptVectorFlags; + UINT8 InterruptTableLength; + UINT32 InterruptNumber[1]; +} EFI_ACPI_EXTENDED_INTERRUPT_DESCRIPTOR; + +#pragma pack() + +/// +/// The End tag identifies an end of resource data. +/// +typedef struct { + UINT8 Desc; + UINT8 Checksum; +} EFI_ACPI_END_TAG_DESCRIPTOR; + +// +// General use definitions +// +#define EFI_ACPI_RESERVED_BYTE 0x00 +#define EFI_ACPI_RESERVED_WORD 0x0000 +#define EFI_ACPI_RESERVED_DWORD 0x00000000 +#define EFI_ACPI_RESERVED_QWORD 0x0000000000000000 + +// +// Resource Type Specific Flags +// Ref ACPI specification 6.4.3.5.5 +// +// Bit [0] : Write Status, _RW +// +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_READ_WRITE (1 << 0) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_READ_ONLY (0 << 0) +// +// Bit [2:1] : Memory Attributes, _MEM +// +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_NON_CACHEABLE (0 << 1) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_CACHEABLE (1 << 1) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_CACHEABLE_WRITE_COMBINING (2 << 1) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_CACHEABLE_PREFETCHABLE (3 << 1) +// +// Bit [4:3] : Memory Attributes, _MTP +// +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_ADDRESS_RANGE_MEMORY (0 << 3) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_ADDRESS_RANGE_RESERVED (1 << 3) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_ADDRESS_RANGE_ACPI (2 << 3) +#define EFI_APCI_MEMORY_RESOURCE_SPECIFIC_FLAG_ADDRESS_RANGE_NVS (3 << 3) +// +// Bit [5] : Memory to I/O Translation, _TTP +// +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_TYPE_TRANSLATION (1 << 5) +#define EFI_ACPI_MEMORY_RESOURCE_SPECIFIC_FLAG_TYPE_STATIC (0 << 5) + +// +// IRQ Information +// Ref ACPI specification 6.4.2.1 +// +#define EFI_ACPI_IRQ_SHARABLE_MASK 0x10 +#define EFI_ACPI_IRQ_SHARABLE 0x10 + +#define EFI_ACPI_IRQ_POLARITY_MASK 0x08 +#define EFI_ACPI_IRQ_HIGH_TRUE 0x00 +#define EFI_ACPI_IRQ_LOW_FALSE 0x08 + +#define EFI_ACPI_IRQ_MODE 0x01 +#define 
EFI_ACPI_IRQ_LEVEL_TRIGGERED 0x00 +#define EFI_ACPI_IRQ_EDGE_TRIGGERED 0x01 + +// +// DMA Information +// Ref ACPI specification 6.4.2.2 +// +#define EFI_ACPI_DMA_SPEED_TYPE_MASK 0x60 +#define EFI_ACPI_DMA_SPEED_TYPE_COMPATIBILITY 0x00 +#define EFI_ACPI_DMA_SPEED_TYPE_A 0x20 +#define EFI_ACPI_DMA_SPEED_TYPE_B 0x40 +#define EFI_ACPI_DMA_SPEED_TYPE_F 0x60 + +#define EFI_ACPI_DMA_BUS_MASTER_MASK 0x04 +#define EFI_ACPI_DMA_BUS_MASTER 0x04 + +#define EFI_ACPI_DMA_TRANSFER_TYPE_MASK 0x03 +#define EFI_ACPI_DMA_TRANSFER_TYPE_8_BIT 0x00 +#define EFI_ACPI_DMA_TRANSFER_TYPE_8_BIT_AND_16_BIT 0x01 +#define EFI_ACPI_DMA_TRANSFER_TYPE_16_BIT 0x10 + +// +// IO Information +// Ref ACPI specification 6.4.2.5 +// +#define EFI_ACPI_IO_DECODE_MASK 0x01 +#define EFI_ACPI_IO_DECODE_16_BIT 0x01 +#define EFI_ACPI_IO_DECODE_10_BIT 0x00 + +// +// Memory Information +// Ref ACPI specification 6.4.3.4 +// +#define EFI_ACPI_MEMORY_WRITE_STATUS_MASK 0x01 +#define EFI_ACPI_MEMORY_WRITABLE 0x01 +#define EFI_ACPI_MEMORY_NON_WRITABLE 0x00 + +// +// Ensure proper structure formats +// +#pragma pack(1) +// +// ACPI 1.0b table structures +// + +/// +/// Root System Description Pointer Structure. +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Reserved; + UINT32 RsdtAddress; +} EFI_ACPI_1_0_ROOT_SYSTEM_DESCRIPTION_POINTER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 1.0b specification). +/// +#define EFI_ACPI_1_0_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT). 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 IntModel; + UINT8 Reserved1; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 Reserved2; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 Reserved3; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT8 Reserved4; + UINT8 Reserved5; + UINT8 Reserved6; + UINT32 Flags; +} EFI_ACPI_1_0_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 1.0b specification). +/// +#define EFI_ACPI_1_0_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x01 + +#define EFI_ACPI_1_0_INT_MODE_DUAL_PIC 0 +#define EFI_ACPI_1_0_INT_MODE_MULTIPLE_APIC 1 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_1_0_WBINVD BIT0 +#define EFI_ACPI_1_0_WBINVD_FLUSH BIT1 +#define EFI_ACPI_1_0_PROC_C1 BIT2 +#define EFI_ACPI_1_0_P_LVL2_UP BIT3 +#define EFI_ACPI_1_0_PWR_BUTTON BIT4 +#define EFI_ACPI_1_0_SLP_BUTTON BIT5 +#define EFI_ACPI_1_0_FIX_RTC BIT6 +#define EFI_ACPI_1_0_RTC_S4 BIT7 +#define EFI_ACPI_1_0_TMR_VAL_EXT BIT8 +#define EFI_ACPI_1_0_DCK_CAP BIT9 + +/// +/// Firmware ACPI Control Structure. +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; + UINT8 Reserved[40]; +} EFI_ACPI_1_0_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// Firmware Control Structure Feature Flags. +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_1_0_S4BIOS_F BIT0 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform-specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_1_0_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 1.0b specification). +/// +#define EFI_ACPI_1_0_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_1_0_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x05 an 0xFF are reserved and +// will be ignored by OSPM. +// +#define EFI_ACPI_1_0_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_1_0_IO_APIC 0x01 +#define EFI_ACPI_1_0_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_1_0_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_1_0_LOCAL_APIC_NMI 0x04 + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition. +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_1_0_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_1_0_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure. +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 SystemVectorBase; +} EFI_ACPI_1_0_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure. +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterruptVector; + UINT16 Flags; +} EFI_ACPI_1_0_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Non-Maskable Interrupt Source Structure. 
+/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterruptVector; +} EFI_ACPI_1_0_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure. +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT16 Flags; + UINT8 LocalApicInti; +} EFI_ACPI_1_0_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_1_0_SMART_BATTERY_DESCRIPTION_TABLE; + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer. +/// +#define EFI_ACPI_1_0_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "APIC" Multiple APIC Description Table. +/// +#define EFI_ACPI_1_0_APIC_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "DSDT" Differentiated System Description Table. +/// +#define EFI_ACPI_1_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "FACS" Firmware ACPI Control Structure. +/// +#define EFI_ACPI_1_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "FACP" Fixed ACPI Description Table. +/// +#define EFI_ACPI_1_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "PSDT" Persistent System Description Table. +/// +#define EFI_ACPI_1_0_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RSDT" Root System Description Table. +/// +#define EFI_ACPI_1_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table. +/// +#define EFI_ACPI_1_0_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SSDT" Secondary System Description Table. 
+/// +#define EFI_ACPI_1_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('S', 'S', 'D', 'T') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi20.h b/src/include/ipxe/efi/IndustryStandard/Acpi20.h new file mode 100644 index 00000000..f5ff44c9 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi20.h @@ -0,0 +1,547 @@ +/** @file + ACPI 2.0 definitions from the ACPI Specification, revision 2.0 + + Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_2_0_H_ +#define _ACPI_2_0_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Define for Desriptor +// +#define ACPI_LARGE_GENERIC_REGISTER_DESCRIPTOR_NAME 0x02 + +#define ACPI_GENERIC_REGISTER_DESCRIPTOR 0x82 + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// Generic Register Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 AddressSize; + UINT64 RegisterAddress; +} EFI_ACPI_GENERIC_REGISTER_DESCRIPTOR; + +#pragma pack() + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// ACPI 2.0 Generic Address Space definition +/// +typedef struct { + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 Reserved; + UINT64 Address; +} EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE; + +// +// Generic Address Space Address IDs +// +#define EFI_ACPI_2_0_SYSTEM_MEMORY 0 +#define EFI_ACPI_2_0_SYSTEM_IO 1 +#define EFI_ACPI_2_0_PCI_CONFIGURATION_SPACE 2 +#define EFI_ACPI_2_0_EMBEDDED_CONTROLLER 3 +#define EFI_ACPI_2_0_SMBUS 4 +#define EFI_ACPI_2_0_FUNCTIONAL_FIXED_HARDWARE 0x7F + +// +// ACPI 2.0 table structures +// + +/// +/// Root System Description Pointer Structure +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Revision; + UINT32 RsdtAddress; + UINT32 Length; + UINT64 XsdtAddress; + UINT8 ExtendedChecksum; + UINT8 Reserved[3]; +} EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER; + +/// +/// RSD_PTR Revision (as defined in ACPI 2.0 spec.) 
+/// +#define EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER_REVISION 0x02 + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_2_0_COMMON_HEADER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +// +// Extended System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT64 table pointers. +// + +/// +/// XSDT Revision (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 Reserved0; + UINT8 PreferredPmProfile; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 PstateCnt; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmrLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 CstCnt; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT16 IaPcBootArch; + UINT8 Reserved1; + UINT32 Flags; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE ResetReg; + UINT8 ResetValue; + UINT8 Reserved2[3]; + UINT64 XFirmwareCtrl; + UINT64 XDsdt; + 
EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XPm1aEvtBlk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XPm1bEvtBlk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XPm1aCntBlk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XPm1bCntBlk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XPm2CntBlk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XPmTmrBlk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XGpe0Blk; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE XGpe1Blk; +} EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x03 + +// +// Fixed ACPI Description Table Preferred Power Management Profile +// +#define EFI_ACPI_2_0_PM_PROFILE_UNSPECIFIED 0 +#define EFI_ACPI_2_0_PM_PROFILE_DESKTOP 1 +#define EFI_ACPI_2_0_PM_PROFILE_MOBILE 2 +#define EFI_ACPI_2_0_PM_PROFILE_WORKSTATION 3 +#define EFI_ACPI_2_0_PM_PROFILE_ENTERPRISE_SERVER 4 +#define EFI_ACPI_2_0_PM_PROFILE_SOHO_SERVER 5 +#define EFI_ACPI_2_0_PM_PROFILE_APPLIANCE_PC 6 + +// +// Fixed ACPI Description Table Boot Architecture Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_2_0_LEGACY_DEVICES BIT0 +#define EFI_ACPI_2_0_8042 BIT1 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_2_0_WBINVD BIT0 +#define EFI_ACPI_2_0_WBINVD_FLUSH BIT1 +#define EFI_ACPI_2_0_PROC_C1 BIT2 +#define EFI_ACPI_2_0_P_LVL2_UP BIT3 +#define EFI_ACPI_2_0_PWR_BUTTON BIT4 +#define EFI_ACPI_2_0_SLP_BUTTON BIT5 +#define EFI_ACPI_2_0_FIX_RTC BIT6 +#define EFI_ACPI_2_0_RTC_S4 BIT7 +#define EFI_ACPI_2_0_TMR_VAL_EXT BIT8 +#define EFI_ACPI_2_0_DCK_CAP BIT9 +#define EFI_ACPI_2_0_RESET_REG_SUP BIT10 +#define EFI_ACPI_2_0_SEALED_CASE BIT11 +#define EFI_ACPI_2_0_HEADLESS BIT12 +#define EFI_ACPI_2_0_CPU_SW_SLP BIT13 + +/// +/// Firmware ACPI Control Structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; + UINT64 XFirmwareWakingVector; + UINT8 Version; + UINT8 Reserved[31]; +} EFI_ACPI_2_0_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// FACS Version (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_VERSION 0x01 + +/// +/// Firmware Control Structure Feature Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_2_0_S4BIOS_F BIT0 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_2_0_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_2_0_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x09 an 0xFF are reserved and +// will be ignored by OSPM. 
+// +#define EFI_ACPI_2_0_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_2_0_IO_APIC 0x01 +#define EFI_ACPI_2_0_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_2_0_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_2_0_LOCAL_APIC_NMI 0x04 +#define EFI_ACPI_2_0_LOCAL_APIC_ADDRESS_OVERRIDE 0x05 +#define EFI_ACPI_2_0_IO_SAPIC 0x06 +#define EFI_ACPI_2_0_PROCESSOR_LOCAL_SAPIC 0x07 +#define EFI_ACPI_2_0_PLATFORM_INTERRUPT_SOURCES 0x08 + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_2_0_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_2_0_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 GlobalSystemInterruptBase; +} EFI_ACPI_2_0_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterrupt; + UINT16 Flags; +} EFI_ACPI_2_0_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Non-Maskable Interrupt Source Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterrupt; +} EFI_ACPI_2_0_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT16 Flags; + UINT8 LocalApicLint; +} EFI_ACPI_2_0_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Local APIC Address Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 LocalApicAddress; +} EFI_ACPI_2_0_LOCAL_APIC_ADDRESS_OVERRIDE_STRUCTURE; + +/// +/// IO SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 
GlobalSystemInterruptBase; + UINT64 IoSapicAddress; +} EFI_ACPI_2_0_IO_SAPIC_STRUCTURE; + +/// +/// Local SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 LocalSapicId; + UINT8 LocalSapicEid; + UINT8 Reserved[3]; + UINT32 Flags; +} EFI_ACPI_2_0_PROCESSOR_LOCAL_SAPIC_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 Reserved; +} EFI_ACPI_2_0_PLATFORM_INTERRUPT_SOURCES_STRUCTURE; + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_2_0_SMART_BATTERY_DESCRIPTION_TABLE; + +/// +/// SBST Version (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_SMART_BATTERY_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Embedded Controller Boot Resources Table (ECDT) +/// The table is followed by a null terminated ASCII string that contains +/// a fully qualified reference to the name space object. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE EcControl; + EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE EcData; + UINT32 Uid; + UINT8 GpeBit; +} EFI_ACPI_2_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE; + +/// +/// ECDT Version (as defined in ACPI 2.0 spec.) +/// +#define EFI_ACPI_2_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_REVISION 0x01 + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer +/// +#define EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "SPIC" Multiple SAPIC Description Table +/// +/// BUGBUG: Don't know where this came from except SR870BN4 uses it. 
+/// #define EFI_ACPI_2_0_MULTIPLE_SAPIC_DESCRIPTION_TABLE_SIGNATURE 0x43495053 +/// +#define EFI_ACPI_2_0_MULTIPLE_SAPIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "BOOT" MS Simple Boot Spec +/// +#define EFI_ACPI_2_0_SIMPLE_BOOT_FLAG_TABLE_SIGNATURE SIGNATURE_32('B', 'O', 'O', 'T') + +/// +/// "DBGP" MS Bebug Port Spec +/// +#define EFI_ACPI_2_0_DEBUG_PORT_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', 'P') + +/// +/// "DSDT" Differentiated System Description Table +/// +#define EFI_ACPI_2_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "ECDT" Embedded Controller Boot Resources Table +/// +#define EFI_ACPI_2_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_SIGNATURE SIGNATURE_32('E', 'C', 'D', 'T') + +/// +/// "ETDT" Event Timer Description Table +/// +#define EFI_ACPI_2_0_EVENT_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('E', 'T', 'D', 'T') + +/// +/// "FACS" Firmware ACPI Control Structure +/// +#define EFI_ACPI_2_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "FACP" Fixed ACPI Description Table +/// +#define EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "APIC" Multiple APIC Description Table +/// +#define EFI_ACPI_2_0_MULTIPLE_APIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "PSDT" Persistent System Description Table +/// +#define EFI_ACPI_2_0_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RSDT" Root System Description Table +/// +#define EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table +/// +#define EFI_ACPI_2_0_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SLIT" System Locality Information Table +/// +#define EFI_ACPI_2_0_SYSTEM_LOCALITY_INFORMATION_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 
'I', 'T') + +/// +/// "SPCR" Serial Port Concole Redirection Table +/// +#define EFI_ACPI_2_0_SERIAL_PORT_CONSOLE_REDIRECTION_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'C', 'R') + +/// +/// "SRAT" Static Resource Affinity Table +/// +#define EFI_ACPI_2_0_STATIC_RESOURCE_AFFINITY_TABLE_SIGNATURE SIGNATURE_32('S', 'R', 'A', 'T') + +/// +/// "SSDT" Secondary System Description Table +/// +#define EFI_ACPI_2_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('S', 'S', 'D', 'T') + +/// +/// "SPMI" Server Platform Management Interface Table +/// +#define EFI_ACPI_2_0_SERVER_PLATFORM_MANAGEMENT_INTERFACE_SIGNATURE SIGNATURE_32('S', 'P', 'M', 'I') + +/// +/// "XSDT" Extended System Description Table +/// +#define EFI_ACPI_2_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('X', 'S', 'D', 'T') + +/// +/// "MCFG" PCI Express Memory Mapped Configuration Space Base Address Description Table +/// +#define EFI_ACPI_2_0_MEMORY_MAPPED_CONFIGURATION_BASE_ADDRESS_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'F', 'G') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi30.h b/src/include/ipxe/efi/IndustryStandard/Acpi30.h new file mode 100644 index 00000000..abaa7212 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi30.h @@ -0,0 +1,731 @@ +/** @file + ACPI 3.0 definitions from the ACPI Specification Revision 3.0b October 10, 2006 + + Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_3_0_H_ +#define _ACPI_3_0_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Define for Desriptor +// +#define ACPI_LARGE_EXTENDED_ADDRESS_SPACE_DESCRIPTOR_NAME 0x0B + +#define ACPI_EXTENDED_ADDRESS_SPACE_DESCRIPTOR 0x8B + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// Extended Address Space Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 ResType; + UINT8 GenFlag; + UINT8 SpecificFlag; + UINT8 RevisionId; + UINT8 Reserved; + UINT64 AddrSpaceGranularity; + UINT64 AddrRangeMin; + UINT64 AddrRangeMax; + UINT64 AddrTranslationOffset; + UINT64 AddrLen; + UINT64 TypeSpecificAttribute; +} EFI_ACPI_EXTENDED_ADDRESS_SPACE_DESCRIPTOR; + +#pragma pack() + +// +// Memory Type Specific Flags +// +#define EFI_ACPI_MEMORY_TYPE_SPECIFIC_ATTRIBUTES_UC 0x0000000000000001 +#define EFI_ACPI_MEMORY_TYPE_SPECIFIC_ATTRIBUTES_WC 0x0000000000000002 +#define EFI_ACPI_MEMORY_TYPE_SPECIFIC_ATTRIBUTES_WT 0x0000000000000004 +#define EFI_ACPI_MEMORY_TYPE_SPECIFIC_ATTRIBUTES_WB 0x0000000000000008 +#define EFI_ACPI_MEMORY_TYPE_SPECIFIC_ATTRIBUTES_UCE 0x0000000000000010 +#define EFI_ACPI_MEMORY_TYPE_SPECIFIC_ATTRIBUTES_NV 0x0000000000008000 + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// ACPI 3.0 Generic Address Space definition +/// +typedef struct { + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 AccessSize; + UINT64 Address; +} EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE; + +// +// Generic Address Space Address IDs +// +#define 
EFI_ACPI_3_0_SYSTEM_MEMORY 0 +#define EFI_ACPI_3_0_SYSTEM_IO 1 +#define EFI_ACPI_3_0_PCI_CONFIGURATION_SPACE 2 +#define EFI_ACPI_3_0_EMBEDDED_CONTROLLER 3 +#define EFI_ACPI_3_0_SMBUS 4 +#define EFI_ACPI_3_0_FUNCTIONAL_FIXED_HARDWARE 0x7F + +// +// Generic Address Space Access Sizes +// +#define EFI_ACPI_3_0_UNDEFINED 0 +#define EFI_ACPI_3_0_BYTE 1 +#define EFI_ACPI_3_0_WORD 2 +#define EFI_ACPI_3_0_DWORD 3 +#define EFI_ACPI_3_0_QWORD 4 + +// +// ACPI 3.0 table structures +// + +/// +/// Root System Description Pointer Structure +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Revision; + UINT32 RsdtAddress; + UINT32 Length; + UINT64 XsdtAddress; + UINT8 ExtendedChecksum; + UINT8 Reserved[3]; +} EFI_ACPI_3_0_ROOT_SYSTEM_DESCRIPTION_POINTER; + +/// +/// RSD_PTR Revision (as defined in ACPI 3.0b spec.) +/// +#define EFI_ACPI_3_0_ROOT_SYSTEM_DESCRIPTION_POINTER_REVISION 0x02 ///< ACPISpec (Revision 3.0b) says current value is 2 + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_3_0_COMMON_HEADER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 3.0 spec.) +/// +#define EFI_ACPI_3_0_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +// +// Extended System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT64 table pointers. +// + +/// +/// XSDT Revision (as defined in ACPI 3.0 spec.) 
+/// +#define EFI_ACPI_3_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 Reserved0; + UINT8 PreferredPmProfile; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 PstateCnt; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmrLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 CstCnt; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT16 IaPcBootArch; + UINT8 Reserved1; + UINT32 Flags; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE ResetReg; + UINT8 ResetValue; + UINT8 Reserved2[3]; + UINT64 XFirmwareCtrl; + UINT64 XDsdt; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XPm1aEvtBlk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XPm1bEvtBlk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XPm1aCntBlk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XPm1bCntBlk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XPm2CntBlk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XPmTmrBlk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XGpe0Blk; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE XGpe1Blk; +} EFI_ACPI_3_0_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 3.0 spec.) 
+/// +#define EFI_ACPI_3_0_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x04 + +// +// Fixed ACPI Description Table Preferred Power Management Profile +// +#define EFI_ACPI_3_0_PM_PROFILE_UNSPECIFIED 0 +#define EFI_ACPI_3_0_PM_PROFILE_DESKTOP 1 +#define EFI_ACPI_3_0_PM_PROFILE_MOBILE 2 +#define EFI_ACPI_3_0_PM_PROFILE_WORKSTATION 3 +#define EFI_ACPI_3_0_PM_PROFILE_ENTERPRISE_SERVER 4 +#define EFI_ACPI_3_0_PM_PROFILE_SOHO_SERVER 5 +#define EFI_ACPI_3_0_PM_PROFILE_APPLIANCE_PC 6 +#define EFI_ACPI_3_0_PM_PROFILE_PERFORMANCE_SERVER 7 + +// +// Fixed ACPI Description Table Boot Architecture Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_3_0_LEGACY_DEVICES BIT0 +#define EFI_ACPI_3_0_8042 BIT1 +#define EFI_ACPI_3_0_VGA_NOT_PRESENT BIT2 +#define EFI_ACPI_3_0_MSI_NOT_SUPPORTED BIT3 +#define EFI_ACPI_3_0_PCIE_ASPM_CONTROLS BIT4 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_3_0_WBINVD BIT0 +#define EFI_ACPI_3_0_WBINVD_FLUSH BIT1 +#define EFI_ACPI_3_0_PROC_C1 BIT2 +#define EFI_ACPI_3_0_P_LVL2_UP BIT3 +#define EFI_ACPI_3_0_PWR_BUTTON BIT4 +#define EFI_ACPI_3_0_SLP_BUTTON BIT5 +#define EFI_ACPI_3_0_FIX_RTC BIT6 +#define EFI_ACPI_3_0_RTC_S4 BIT7 +#define EFI_ACPI_3_0_TMR_VAL_EXT BIT8 +#define EFI_ACPI_3_0_DCK_CAP BIT9 +#define EFI_ACPI_3_0_RESET_REG_SUP BIT10 +#define EFI_ACPI_3_0_SEALED_CASE BIT11 +#define EFI_ACPI_3_0_HEADLESS BIT12 +#define EFI_ACPI_3_0_CPU_SW_SLP BIT13 +#define EFI_ACPI_3_0_PCI_EXP_WAK BIT14 +#define EFI_ACPI_3_0_USE_PLATFORM_CLOCK BIT15 +#define EFI_ACPI_3_0_S4_RTC_STS_VALID BIT16 +#define EFI_ACPI_3_0_REMOTE_POWER_ON_CAPABLE BIT17 +#define EFI_ACPI_3_0_FORCE_APIC_CLUSTER_MODEL BIT18 +#define EFI_ACPI_3_0_FORCE_APIC_PHYSICAL_DESTINATION_MODE BIT19 + +/// +/// Firmware ACPI Control Structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; 
+ UINT64 XFirmwareWakingVector; + UINT8 Version; + UINT8 Reserved[31]; +} EFI_ACPI_3_0_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// FACS Version (as defined in ACPI 3.0 spec.) +/// +#define EFI_ACPI_3_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_VERSION 0x01 + +/// +/// Firmware Control Structure Feature Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_3_0_S4BIOS_F BIT0 + +// +// Differentiated System Description Table, +// Secondary System Description Table +// and Persistent System Description Table, +// no definition needed as they are common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a definition block. +// +#define EFI_ACPI_3_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 +#define EFI_ACPI_3_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_3_0_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 3.0 spec.) +/// +#define EFI_ACPI_3_0_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_3_0_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x09 an 0xFF are reserved and +// will be ignored by OSPM. 
+// +#define EFI_ACPI_3_0_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_3_0_IO_APIC 0x01 +#define EFI_ACPI_3_0_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_3_0_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_3_0_LOCAL_APIC_NMI 0x04 +#define EFI_ACPI_3_0_LOCAL_APIC_ADDRESS_OVERRIDE 0x05 +#define EFI_ACPI_3_0_IO_SAPIC 0x06 +#define EFI_ACPI_3_0_LOCAL_SAPIC 0x07 +#define EFI_ACPI_3_0_PLATFORM_INTERRUPT_SOURCES 0x08 + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_3_0_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_3_0_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 GlobalSystemInterruptBase; +} EFI_ACPI_3_0_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterrupt; + UINT16 Flags; +} EFI_ACPI_3_0_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; + UINT8 CpeiProcessorOverride; + UINT8 Reserved[31]; +} EFI_ACPI_3_0_PLATFORM_INTERRUPT_APIC_STRUCTURE; + +// +// MPS INTI flags. +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_3_0_POLARITY (3 << 0) +#define EFI_ACPI_3_0_TRIGGER_MODE (3 << 2) + +/// +/// Non-Maskable Interrupt Source Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterrupt; +} EFI_ACPI_3_0_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT16 Flags; + UINT8 LocalApicLint; +} EFI_ACPI_3_0_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Local APIC Address Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 LocalApicAddress; +} EFI_ACPI_3_0_LOCAL_APIC_ADDRESS_OVERRIDE_STRUCTURE; + +/// +/// IO SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 GlobalSystemInterruptBase; + UINT64 IoSapicAddress; +} EFI_ACPI_3_0_IO_SAPIC_STRUCTURE; + +/// +/// Local SAPIC Structure +/// This struct followed by a null-terminated ASCII string - ACPI Processor UID String +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 LocalSapicId; + UINT8 LocalSapicEid; + UINT8 Reserved[3]; + UINT32 Flags; + UINT32 ACPIProcessorUIDValue; +} EFI_ACPI_3_0_PROCESSOR_LOCAL_SAPIC_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; +} EFI_ACPI_3_0_PLATFORM_INTERRUPT_SOURCES_STRUCTURE; + +/// +/// Platform Interrupt Source Flags. +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_3_0_CPEI_PROCESSOR_OVERRIDE BIT0 + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_3_0_SMART_BATTERY_DESCRIPTION_TABLE; + +/// +/// SBST Version (as defined in ACPI 3.0 spec.) +/// +#define EFI_ACPI_3_0_SMART_BATTERY_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Embedded Controller Boot Resources Table (ECDT) +/// The table is followed by a null terminated ASCII string that contains +/// a fully qualified reference to the name space object. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE EcControl; + EFI_ACPI_3_0_GENERIC_ADDRESS_STRUCTURE EcData; + UINT32 Uid; + UINT8 GpeBit; +} EFI_ACPI_3_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE; + +/// +/// ECDT Version (as defined in ACPI 3.0 spec.) +/// +#define EFI_ACPI_3_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_REVISION 0x01 + +/// +/// System Resource Affinity Table (SRAT. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved1; ///< Must be set to 1 + UINT64 Reserved2; +} EFI_ACPI_3_0_SYSTEM_RESOURCE_AFFINITY_TABLE_HEADER; + +/// +/// SRAT Version (as defined in ACPI 3.0 spec.) +/// +#define EFI_ACPI_3_0_SYSTEM_RESOURCE_AFFINITY_TABLE_REVISION 0x02 + +// +// SRAT structure types. +// All other values between 0x02 an 0xFF are reserved and +// will be ignored by OSPM. 
+// +#define EFI_ACPI_3_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY 0x00 +#define EFI_ACPI_3_0_MEMORY_AFFINITY 0x01 + +/// +/// Processor Local APIC/SAPIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProximityDomain7To0; + UINT8 ApicId; + UINT32 Flags; + UINT8 LocalSapicEid; + UINT8 ProximityDomain31To8[3]; + UINT8 Reserved[4]; +} EFI_ACPI_3_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY_STRUCTURE; + +/// +/// Local APIC/SAPIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_3_0_PROCESSOR_LOCAL_APIC_SAPIC_ENABLED (1 << 0) + +/// +/// Memory Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT16 Reserved1; + UINT32 AddressBaseLow; + UINT32 AddressBaseHigh; + UINT32 LengthLow; + UINT32 LengthHigh; + UINT32 Reserved2; + UINT32 Flags; + UINT64 Reserved3; +} EFI_ACPI_3_0_MEMORY_AFFINITY_STRUCTURE; + +// +// Memory Flags. All other bits are reserved and must be 0. +// +#define EFI_ACPI_3_0_MEMORY_ENABLED (1 << 0) +#define EFI_ACPI_3_0_MEMORY_HOT_PLUGGABLE (1 << 1) +#define EFI_ACPI_3_0_MEMORY_NONVOLATILE (1 << 2) + +/// +/// System Locality Distance Information Table (SLIT). +/// The rest of the table is a matrix. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 NumberOfSystemLocalities; +} EFI_ACPI_3_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_HEADER; + +/// +/// SLIT Version (as defined in ACPI 3.0 spec.) 
+/// +#define EFI_ACPI_3_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_REVISION 0x01 + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer +/// +#define EFI_ACPI_3_0_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "APIC" Multiple APIC Description Table +/// +#define EFI_ACPI_3_0_MULTIPLE_APIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "DSDT" Differentiated System Description Table +/// +#define EFI_ACPI_3_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "ECDT" Embedded Controller Boot Resources Table +/// +#define EFI_ACPI_3_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_SIGNATURE SIGNATURE_32('E', 'C', 'D', 'T') + +/// +/// "FACP" Fixed ACPI Description Table +/// +#define EFI_ACPI_3_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "FACS" Firmware ACPI Control Structure +/// +#define EFI_ACPI_3_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "PSDT" Persistent System Description Table +/// +#define EFI_ACPI_3_0_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RSDT" Root System Description Table +/// +#define EFI_ACPI_3_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table +/// +#define EFI_ACPI_3_0_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SLIT" System Locality Information Table +/// +#define EFI_ACPI_3_0_SYSTEM_LOCALITY_INFORMATION_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'T') + +/// +/// "SRAT" System Resource Affinity Table +/// +#define EFI_ACPI_3_0_SYSTEM_RESOURCE_AFFINITY_TABLE_SIGNATURE SIGNATURE_32('S', 'R', 'A', 'T') + +/// +/// "SSDT" Secondary System Description Table +/// +#define EFI_ACPI_3_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE 
SIGNATURE_32('S', 'S', 'D', 'T') + +/// +/// "XSDT" Extended System Description Table +/// +#define EFI_ACPI_3_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('X', 'S', 'D', 'T') + +/// +/// "BOOT" MS Simple Boot Spec +/// +#define EFI_ACPI_3_0_SIMPLE_BOOT_FLAG_TABLE_SIGNATURE SIGNATURE_32('B', 'O', 'O', 'T') + +/// +/// "CPEP" Corrected Platform Error Polling Table +/// +#define EFI_ACPI_3_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_SIGNATURE SIGNATURE_32('C', 'P', 'E', 'P') + +/// +/// "DBGP" MS Debug Port Spec +/// +#define EFI_ACPI_3_0_DEBUG_PORT_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', 'P') + +/// +/// "ETDT" Event Timer Description Table +/// +#define EFI_ACPI_3_0_EVENT_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('E', 'T', 'D', 'T') + +/// +/// "HPET" IA-PC High Precision Event Timer Table +/// +#define EFI_ACPI_3_0_HIGH_PRECISION_EVENT_TIMER_TABLE_SIGNATURE SIGNATURE_32('H', 'P', 'E', 'T') + +/// +/// "MCFG" PCI Express Memory Mapped Configuration Space Base Address Description Table +/// +#define EFI_ACPI_3_0_PCI_EXPRESS_MEMORY_MAPPED_CONFIGURATION_SPACE_BASE_ADDRESS_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'F', 'G') + +/// +/// "SPCR" Serial Port Concole Redirection Table +/// +#define EFI_ACPI_3_0_SERIAL_PORT_CONSOLE_REDIRECTION_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'C', 'R') + +/// +/// "SPMI" Server Platform Management Interface Table +/// +#define EFI_ACPI_3_0_SERVER_PLATFORM_MANAGEMENT_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'M', 'I') + +/// +/// "TCPA" Trusted Computing Platform Alliance Capabilities Table +/// +#define EFI_ACPI_3_0_TRUSTED_COMPUTING_PLATFORM_ALLIANCE_CAPABILITIES_TABLE_SIGNATURE SIGNATURE_32('T', 'C', 'P', 'A') + +/// +/// "WDRT" Watchdog Resource Table +/// +#define EFI_ACPI_3_0_WATCHDOG_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'R', 'T') + +/// +/// "WDAT" Watchdog Action Table +/// +#define EFI_ACPI_3_0_WATCHDOG_ACTION_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'A', 'T') + +/// +/// "WSPT" 
Windows Specific Properties Table +/// +#define EFI_ACPI_3_0_WINDOWS_SPECIFIC_PROPERTIES_TABLE_SIGNATURE SIGNATURE_32('W', 'S', 'P', 'T') + +/// +/// "iBFT" iSCSI Boot Firmware Table +/// +#define EFI_ACPI_3_0_ISCSI_BOOT_FIRMWARE_TABLE_SIGNATURE SIGNATURE_32('i', 'B', 'F', 'T') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi40.h b/src/include/ipxe/efi/IndustryStandard/Acpi40.h new file mode 100644 index 00000000..5fcad3e4 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi40.h @@ -0,0 +1,1311 @@ +/** @file + ACPI 4.0 definitions from the ACPI Specification Revision 4.0a April 5, 2010 + + Copyright (c) 2010 - 2011, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_4_0_H_ +#define _ACPI_4_0_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// ACPI 4.0 Generic Address Space definition +/// +typedef struct { + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 AccessSize; + UINT64 Address; +} EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE; + +// +// Generic Address Space Address IDs +// +#define EFI_ACPI_4_0_SYSTEM_MEMORY 0 +#define EFI_ACPI_4_0_SYSTEM_IO 1 +#define EFI_ACPI_4_0_PCI_CONFIGURATION_SPACE 2 +#define EFI_ACPI_4_0_EMBEDDED_CONTROLLER 3 +#define EFI_ACPI_4_0_SMBUS 4 +#define EFI_ACPI_4_0_FUNCTIONAL_FIXED_HARDWARE 0x7F + +// +// Generic Address Space Access Sizes +// +#define EFI_ACPI_4_0_UNDEFINED 0 +#define EFI_ACPI_4_0_BYTE 1 +#define EFI_ACPI_4_0_WORD 2 +#define EFI_ACPI_4_0_DWORD 3 +#define EFI_ACPI_4_0_QWORD 4 + +// +// ACPI 4.0 table structures +// + +/// +/// Root System Description Pointer Structure +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Revision; + UINT32 RsdtAddress; + UINT32 Length; + UINT64 XsdtAddress; + UINT8 ExtendedChecksum; + UINT8 Reserved[3]; +} EFI_ACPI_4_0_ROOT_SYSTEM_DESCRIPTION_POINTER; + +/// +/// RSD_PTR Revision (as defined in ACPI 4.0b spec.) 
+/// +#define EFI_ACPI_4_0_ROOT_SYSTEM_DESCRIPTION_POINTER_REVISION 0x02 ///< ACPISpec (Revision 4.0a) says current value is 2 + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_4_0_COMMON_HEADER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +// +// Extended System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT64 table pointers. +// + +/// +/// XSDT Revision (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 Reserved0; + UINT8 PreferredPmProfile; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 PstateCnt; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmrLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 CstCnt; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT16 IaPcBootArch; + UINT8 Reserved1; + UINT32 Flags; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE ResetReg; + UINT8 ResetValue; + UINT8 Reserved2[3]; + UINT64 
XFirmwareCtrl; + UINT64 XDsdt; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XPm1aEvtBlk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XPm1bEvtBlk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XPm1aCntBlk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XPm1bCntBlk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XPm2CntBlk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XPmTmrBlk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XGpe0Blk; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE XGpe1Blk; +} EFI_ACPI_4_0_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x04 + +// +// Fixed ACPI Description Table Preferred Power Management Profile +// +#define EFI_ACPI_4_0_PM_PROFILE_UNSPECIFIED 0 +#define EFI_ACPI_4_0_PM_PROFILE_DESKTOP 1 +#define EFI_ACPI_4_0_PM_PROFILE_MOBILE 2 +#define EFI_ACPI_4_0_PM_PROFILE_WORKSTATION 3 +#define EFI_ACPI_4_0_PM_PROFILE_ENTERPRISE_SERVER 4 +#define EFI_ACPI_4_0_PM_PROFILE_SOHO_SERVER 5 +#define EFI_ACPI_4_0_PM_PROFILE_APPLIANCE_PC 6 +#define EFI_ACPI_4_0_PM_PROFILE_PERFORMANCE_SERVER 7 + +// +// Fixed ACPI Description Table Boot Architecture Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_4_0_LEGACY_DEVICES BIT0 +#define EFI_ACPI_4_0_8042 BIT1 +#define EFI_ACPI_4_0_VGA_NOT_PRESENT BIT2 +#define EFI_ACPI_4_0_MSI_NOT_SUPPORTED BIT3 +#define EFI_ACPI_4_0_PCIE_ASPM_CONTROLS BIT4 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_4_0_WBINVD BIT0 +#define EFI_ACPI_4_0_WBINVD_FLUSH BIT1 +#define EFI_ACPI_4_0_PROC_C1 BIT2 +#define EFI_ACPI_4_0_P_LVL2_UP BIT3 +#define EFI_ACPI_4_0_PWR_BUTTON BIT4 +#define EFI_ACPI_4_0_SLP_BUTTON BIT5 +#define EFI_ACPI_4_0_FIX_RTC BIT6 +#define EFI_ACPI_4_0_RTC_S4 BIT7 +#define EFI_ACPI_4_0_TMR_VAL_EXT BIT8 +#define EFI_ACPI_4_0_DCK_CAP BIT9 +#define EFI_ACPI_4_0_RESET_REG_SUP BIT10 +#define EFI_ACPI_4_0_SEALED_CASE BIT11 +#define EFI_ACPI_4_0_HEADLESS BIT12 +#define EFI_ACPI_4_0_CPU_SW_SLP BIT13 +#define EFI_ACPI_4_0_PCI_EXP_WAK BIT14 +#define EFI_ACPI_4_0_USE_PLATFORM_CLOCK BIT15 +#define EFI_ACPI_4_0_S4_RTC_STS_VALID BIT16 +#define EFI_ACPI_4_0_REMOTE_POWER_ON_CAPABLE BIT17 +#define EFI_ACPI_4_0_FORCE_APIC_CLUSTER_MODEL BIT18 +#define EFI_ACPI_4_0_FORCE_APIC_PHYSICAL_DESTINATION_MODE BIT19 + +/// +/// Firmware ACPI Control Structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; + UINT64 XFirmwareWakingVector; + UINT8 Version; + UINT8 Reserved0[3]; + UINT32 OspmFlags; + UINT8 Reserved1[24]; +} EFI_ACPI_4_0_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// FACS Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_VERSION 0x02 + +/// +/// Firmware Control Structure Feature Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_4_0_S4BIOS_F BIT0 +#define EFI_ACPI_4_0_64BIT_WAKE_SUPPORTED_F BIT1 + +/// +/// OSPM Enabled Firmware Control Structure Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_4_0_OSPM_64BIT_WAKE__F BIT0 + +// +// Differentiated System Description Table, +// Secondary System Description Table +// and Persistent System Description Table, +// no definition needed as they are common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a definition block. 
+// +#define EFI_ACPI_4_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 +#define EFI_ACPI_4_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_4_0_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x03 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_4_0_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x0B an 0xFF are reserved and +// will be ignored by OSPM. +// +#define EFI_ACPI_4_0_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_4_0_IO_APIC 0x01 +#define EFI_ACPI_4_0_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_4_0_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_4_0_LOCAL_APIC_NMI 0x04 +#define EFI_ACPI_4_0_LOCAL_APIC_ADDRESS_OVERRIDE 0x05 +#define EFI_ACPI_4_0_IO_SAPIC 0x06 +#define EFI_ACPI_4_0_LOCAL_SAPIC 0x07 +#define EFI_ACPI_4_0_PLATFORM_INTERRUPT_SOURCES 0x08 +#define EFI_ACPI_4_0_PROCESSOR_LOCAL_X2APIC 0x09 +#define EFI_ACPI_4_0_LOCAL_X2APIC_NMI 0x0A + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_4_0_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. 
+/// +#define EFI_ACPI_4_0_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 GlobalSystemInterruptBase; +} EFI_ACPI_4_0_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterrupt; + UINT16 Flags; +} EFI_ACPI_4_0_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; + UINT8 CpeiProcessorOverride; + UINT8 Reserved[31]; +} EFI_ACPI_4_0_PLATFORM_INTERRUPT_APIC_STRUCTURE; + +// +// MPS INTI flags. +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_4_0_POLARITY (3 << 0) +#define EFI_ACPI_4_0_TRIGGER_MODE (3 << 2) + +/// +/// Non-Maskable Interrupt Source Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterrupt; +} EFI_ACPI_4_0_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT16 Flags; + UINT8 LocalApicLint; +} EFI_ACPI_4_0_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Local APIC Address Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 LocalApicAddress; +} EFI_ACPI_4_0_LOCAL_APIC_ADDRESS_OVERRIDE_STRUCTURE; + +/// +/// IO SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 GlobalSystemInterruptBase; + UINT64 IoSapicAddress; +} EFI_ACPI_4_0_IO_SAPIC_STRUCTURE; + +/// +/// Local SAPIC Structure +/// This struct followed by a null-terminated ASCII string - ACPI Processor UID 
String +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 LocalSapicId; + UINT8 LocalSapicEid; + UINT8 Reserved[3]; + UINT32 Flags; + UINT32 ACPIProcessorUIDValue; +} EFI_ACPI_4_0_PROCESSOR_LOCAL_SAPIC_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; +} EFI_ACPI_4_0_PLATFORM_INTERRUPT_SOURCES_STRUCTURE; + +/// +/// Platform Interrupt Source Flags. +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_4_0_CPEI_PROCESSOR_OVERRIDE BIT0 + +/// +/// Processor Local x2APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[2]; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 AcpiProcessorUid; +} EFI_ACPI_4_0_PROCESSOR_LOCAL_X2APIC_STRUCTURE; + +/// +/// Local x2APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 AcpiProcessorUid; + UINT8 LocalX2ApicLint; + UINT8 Reserved[3]; +} EFI_ACPI_4_0_LOCAL_X2APIC_NMI_STRUCTURE; + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_4_0_SMART_BATTERY_DESCRIPTION_TABLE; + +/// +/// SBST Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_SMART_BATTERY_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Embedded Controller Boot Resources Table (ECDT) +/// The table is followed by a null terminated ASCII string that contains +/// a fully qualified reference to the name space object. 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE EcControl; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE EcData; + UINT32 Uid; + UINT8 GpeBit; +} EFI_ACPI_4_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE; + +/// +/// ECDT Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_REVISION 0x01 + +/// +/// System Resource Affinity Table (SRAT. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved1; ///< Must be set to 1 + UINT64 Reserved2; +} EFI_ACPI_4_0_SYSTEM_RESOURCE_AFFINITY_TABLE_HEADER; + +/// +/// SRAT Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_SYSTEM_RESOURCE_AFFINITY_TABLE_REVISION 0x03 + +// +// SRAT structure types. +// All other values between 0x03 an 0xFF are reserved and +// will be ignored by OSPM. +// +#define EFI_ACPI_4_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY 0x00 +#define EFI_ACPI_4_0_MEMORY_AFFINITY 0x01 +#define EFI_ACPI_4_0_PROCESSOR_LOCAL_X2APIC_AFFINITY 0x02 + +/// +/// Processor Local APIC/SAPIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProximityDomain7To0; + UINT8 ApicId; + UINT32 Flags; + UINT8 LocalSapicEid; + UINT8 ProximityDomain31To8[3]; + UINT32 ClockDomain; +} EFI_ACPI_4_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY_STRUCTURE; + +/// +/// Local APIC/SAPIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_4_0_PROCESSOR_LOCAL_APIC_SAPIC_ENABLED (1 << 0) + +/// +/// Memory Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT16 Reserved1; + UINT32 AddressBaseLow; + UINT32 AddressBaseHigh; + UINT32 LengthLow; + UINT32 LengthHigh; + UINT32 Reserved2; + UINT32 Flags; + UINT64 Reserved3; +} EFI_ACPI_4_0_MEMORY_AFFINITY_STRUCTURE; + +// +// Memory Flags. All other bits are reserved and must be 0. 
+// +#define EFI_ACPI_4_0_MEMORY_ENABLED (1 << 0) +#define EFI_ACPI_4_0_MEMORY_HOT_PLUGGABLE (1 << 1) +#define EFI_ACPI_4_0_MEMORY_NONVOLATILE (1 << 2) + +/// +/// Processor Local x2APIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved1[2]; + UINT32 ProximityDomain; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 ClockDomain; + UINT8 Reserved2[4]; +} EFI_ACPI_4_0_PROCESSOR_LOCAL_X2APIC_AFFINITY_STRUCTURE; + +/// +/// System Locality Distance Information Table (SLIT). +/// The rest of the table is a matrix. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 NumberOfSystemLocalities; +} EFI_ACPI_4_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_HEADER; + +/// +/// SLIT Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_REVISION 0x01 + +/// +/// Corrected Platform Error Polling Table (CPEP) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 Reserved[8]; +} EFI_ACPI_4_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_HEADER; + +/// +/// CPEP Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_REVISION 0x01 + +// +// CPEP processor structure types. +// +#define EFI_ACPI_4_0_CPEP_PROCESSOR_APIC_SAPIC 0x00 + +/// +/// Corrected Platform Error Polling Processor Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT32 PollingInterval; +} EFI_ACPI_4_0_CPEP_PROCESSOR_APIC_SAPIC_STRUCTURE; + +/// +/// Maximum System Characteristics Table (MSCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 OffsetProxDomInfo; + UINT32 MaximumNumberOfProximityDomains; + UINT32 MaximumNumberOfClockDomains; + UINT64 MaximumPhysicalAddress; +} EFI_ACPI_4_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_HEADER; + +/// +/// MSCT Version (as defined in ACPI 4.0 spec.) 
+/// +#define EFI_ACPI_4_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_REVISION 0x01 + +/// +/// Maximum Proximity Domain Information Structure Definition +/// +typedef struct { + UINT8 Revision; + UINT8 Length; + UINT32 ProximityDomainRangeLow; + UINT32 ProximityDomainRangeHigh; + UINT32 MaximumProcessorCapacity; + UINT64 MaximumMemoryCapacity; +} EFI_ACPI_4_0_MAXIMUM_PROXIMITY_DOMAIN_INFORMATION_STRUCTURE; + +/// +/// Boot Error Record Table (BERT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 BootErrorRegionLength; + UINT64 BootErrorRegion; +} EFI_ACPI_4_0_BOOT_ERROR_RECORD_TABLE_HEADER; + +/// +/// BERT Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_BOOT_ERROR_RECORD_TABLE_REVISION 0x01 + +/// +/// Boot Error Region Block Status Definition +/// +typedef struct { + UINT32 UncorrectableErrorValid:1; + UINT32 CorrectableErrorValid:1; + UINT32 MultipleUncorrectableErrors:1; + UINT32 MultipleCorrectableErrors:1; + UINT32 ErrorDataEntryCount:10; + UINT32 Reserved:18; +} EFI_ACPI_4_0_ERROR_BLOCK_STATUS; + +/// +/// Boot Error Region Definition +/// +typedef struct { + EFI_ACPI_4_0_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_4_0_BOOT_ERROR_REGION_STRUCTURE; + +// +// Boot Error Severity types +// +#define EFI_ACPI_4_0_ERROR_SEVERITY_CORRECTABLE 0x00 +#define EFI_ACPI_4_0_ERROR_SEVERITY_FATAL 0x01 +#define EFI_ACPI_4_0_ERROR_SEVERITY_CORRECTED 0x02 +#define EFI_ACPI_4_0_ERROR_SEVERITY_NONE 0x03 + +/// +/// Generic Error Data Entry Definition +/// +typedef struct { + UINT8 SectionType[16]; + UINT32 ErrorSeverity; + UINT16 Revision; + UINT8 ValidationBits; + UINT8 Flags; + UINT32 ErrorDataLength; + UINT8 FruId[16]; + UINT8 FruText[20]; +} EFI_ACPI_4_0_GENERIC_ERROR_DATA_ENTRY_STRUCTURE; + +/// +/// Generic Error Data Entry Version (as defined in ACPI 4.0 spec.) 
+/// +#define EFI_ACPI_4_0_GENERIC_ERROR_DATA_ENTRY_REVISION 0x0201 + +/// +/// HEST - Hardware Error Source Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 ErrorSourceCount; +} EFI_ACPI_4_0_HARDWARE_ERROR_SOURCE_TABLE_HEADER; + +/// +/// HEST Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_HARDWARE_ERROR_SOURCE_TABLE_REVISION 0x01 + +// +// Error Source structure types. +// +#define EFI_ACPI_4_0_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION 0x00 +#define EFI_ACPI_4_0_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK 0x01 +#define EFI_ACPI_4_0_IA32_ARCHITECTURE_NMI_ERROR 0x02 +#define EFI_ACPI_4_0_PCI_EXPRESS_ROOT_PORT_AER 0x06 +#define EFI_ACPI_4_0_PCI_EXPRESS_DEVICE_AER 0x07 +#define EFI_ACPI_4_0_PCI_EXPRESS_BRIDGE_AER 0x08 +#define EFI_ACPI_4_0_GENERIC_HARDWARE_ERROR 0x09 + +// +// Error Source structure flags. +// +#define EFI_ACPI_4_0_ERROR_SOURCE_FLAG_FIRMWARE_FIRST (1 << 0) +#define EFI_ACPI_4_0_ERROR_SOURCE_FLAG_GLOBAL (1 << 1) + +/// +/// IA-32 Architecture Machine Check Exception Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT64 GlobalCapabilityInitData; + UINT64 GlobalControlInitData; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[7]; +} EFI_ACPI_4_0_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure Definition +/// +typedef struct { + UINT8 BankNumber; + UINT8 ClearStatusOnInitialization; + UINT8 StatusDataFormat; + UINT8 Reserved0; + UINT32 ControlRegisterMsrAddress; + UINT64 ControlInitData; + UINT32 StatusRegisterMsrAddress; + UINT32 AddressRegisterMsrAddress; + UINT32 MiscRegisterMsrAddress; +} EFI_ACPI_4_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_BANK_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure MCA data format +/// +#define 
EFI_ACPI_4_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_IA32 0x00 +#define EFI_ACPI_4_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_INTEL64 0x01 +#define EFI_ACPI_4_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_AMD64 0x02 + +// +// Hardware Error Notification types. All other values are reserved +// +#define EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_POLLED 0x00 +#define EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_EXTERNAL_INTERRUPT 0x01 +#define EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_LOCAL_INTERRUPT 0x02 +#define EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_SCI 0x03 +#define EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_NMI 0x04 + +/// +/// Hardware Error Notification Configuration Write Enable Structure Definition +/// +typedef struct { + UINT16 Type:1; + UINT16 PollInterval:1; + UINT16 SwitchToPollingThresholdValue:1; + UINT16 SwitchToPollingThresholdWindow:1; + UINT16 ErrorThresholdValue:1; + UINT16 ErrorThresholdWindow:1; + UINT16 Reserved:10; +} EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE; + +/// +/// Hardware Error Notification Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE ConfigurationWriteEnable; + UINT32 PollInterval; + UINT32 Vector; + UINT32 SwitchToPollingThresholdValue; + UINT32 SwitchToPollingThresholdWindow; + UINT32 ErrorThresholdValue; + UINT32 ErrorThresholdWindow; +} EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE; + +/// +/// IA-32 Architecture Corrected Machine Check Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[3]; +} EFI_ACPI_4_0_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK_STRUCTURE; + +/// +/// IA-32 Architecture NMI 
Error Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; +} EFI_ACPI_4_0_IA32_ARCHITECTURE_NMI_ERROR_STRUCTURE; + +/// +/// PCI Express Root Port AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 RootErrorCommand; +} EFI_ACPI_4_0_PCI_EXPRESS_ROOT_PORT_AER_STRUCTURE; + +/// +/// PCI Express Device AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; +} EFI_ACPI_4_0_PCI_EXPRESS_DEVICE_AER_STRUCTURE; + +/// +/// PCI Express Bridge AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 SecondaryUncorrectableErrorMask; + UINT32 SecondaryUncorrectableErrorSeverity; + UINT32 SecondaryAdvancedErrorCapabilitiesAndControl; +} 
EFI_ACPI_4_0_PCI_EXPRESS_BRIDGE_AER_STRUCTURE; + +/// +/// Generic Hardware Error Source Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT16 RelatedSourceId; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE ErrorStatusAddress; + EFI_ACPI_4_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT32 ErrorStatusBlockLength; +} EFI_ACPI_4_0_GENERIC_HARDWARE_ERROR_SOURCE_STRUCTURE; + +/// +/// Generic Error Status Definition +/// +typedef struct { + EFI_ACPI_4_0_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_4_0_GENERIC_ERROR_STATUS_STRUCTURE; + +/// +/// ERST - Error Record Serialization Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 SerializationHeaderSize; + UINT8 Reserved0[4]; + UINT32 InstructionEntryCount; +} EFI_ACPI_4_0_ERROR_RECORD_SERIALIZATION_TABLE_HEADER; + +/// +/// ERST Version (as defined in ACPI 4.0 spec.) 
+/// +#define EFI_ACPI_4_0_ERROR_RECORD_SERIALIZATION_TABLE_REVISION 0x01 + +/// +/// ERST Serialization Actions +/// +#define EFI_ACPI_4_0_ERST_BEGIN_WRITE_OPERATION 0x00 +#define EFI_ACPI_4_0_ERST_BEGIN_READ_OPERATION 0x01 +#define EFI_ACPI_4_0_ERST_BEGIN_CLEAR_OPERATION 0x02 +#define EFI_ACPI_4_0_ERST_END_OPERATION 0x03 +#define EFI_ACPI_4_0_ERST_SET_RECORD_OFFSET 0x04 +#define EFI_ACPI_4_0_ERST_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_4_0_ERST_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_4_0_ERST_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_4_0_ERST_GET_RECORD_IDENTIFIER 0x08 +#define EFI_ACPI_4_0_ERST_SET_RECORD_IDENTIFIER 0x09 +#define EFI_ACPI_4_0_ERST_GET_RECORD_COUNT 0x0A +#define EFI_ACPI_4_0_ERST_BEGIN_DUMMY_WRITE_OPERATION 0x0B +#define EFI_ACPI_4_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE 0x0D +#define EFI_ACPI_4_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE_LENGTH 0x0E +#define EFI_ACPI_4_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES 0x0F + +/// +/// ERST Action Command Status +/// +#define EFI_ACPI_4_0_EINJ_STATUS_SUCCESS 0x00 +#define EFI_ACPI_4_0_EINJ_STATUS_NOT_ENOUGH_SPACE 0x01 +#define EFI_ACPI_4_0_EINJ_STATUS_HARDWARE_NOT_AVAILABLE 0x02 +#define EFI_ACPI_4_0_EINJ_STATUS_FAILED 0x03 +#define EFI_ACPI_4_0_EINJ_STATUS_RECORD_STORE_EMPTY 0x04 +#define EFI_ACPI_4_0_EINJ_STATUS_RECORD_NOT_FOUND 0x05 + +/// +/// ERST Serialization Instructions +/// +#define EFI_ACPI_4_0_ERST_READ_REGISTER 0x00 +#define EFI_ACPI_4_0_ERST_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_4_0_ERST_WRITE_REGISTER 0x02 +#define EFI_ACPI_4_0_ERST_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_4_0_ERST_NOOP 0x04 +#define EFI_ACPI_4_0_ERST_LOAD_VAR1 0x05 +#define EFI_ACPI_4_0_ERST_LOAD_VAR2 0x06 +#define EFI_ACPI_4_0_ERST_STORE_VAR1 0x07 +#define EFI_ACPI_4_0_ERST_ADD 0x08 +#define EFI_ACPI_4_0_ERST_SUBTRACT 0x09 +#define EFI_ACPI_4_0_ERST_ADD_VALUE 0x0A +#define EFI_ACPI_4_0_ERST_SUBTRACT_VALUE 0x0B +#define EFI_ACPI_4_0_ERST_STALL 0x0C +#define EFI_ACPI_4_0_ERST_STALL_WHILE_TRUE 0x0D +#define 
EFI_ACPI_4_0_ERST_SKIP_NEXT_INSTRUCTION_IF_TRUE 0x0E +#define EFI_ACPI_4_0_ERST_GOTO 0x0F +#define EFI_ACPI_4_0_ERST_SET_SRC_ADDRESS_BASE 0x10 +#define EFI_ACPI_4_0_ERST_SET_DST_ADDRESS_BASE 0x11 +#define EFI_ACPI_4_0_ERST_MOVE_DATA 0x12 + +/// +/// ERST Instruction Flags +/// +#define EFI_ACPI_4_0_ERST_PRESERVE_REGISTER 0x01 + +/// +/// ERST Serialization Instruction Entry +/// +typedef struct { + UINT8 SerializationAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_4_0_ERST_SERIALIZATION_INSTRUCTION_ENTRY; + +/// +/// EINJ - Error Injection Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 InjectionHeaderSize; + UINT8 InjectionFlags; + UINT8 Reserved0[3]; + UINT32 InjectionEntryCount; +} EFI_ACPI_4_0_ERROR_INJECTION_TABLE_HEADER; + +/// +/// EINJ Version (as defined in ACPI 4.0 spec.) +/// +#define EFI_ACPI_4_0_ERROR_INJECTION_TABLE_REVISION 0x01 + +/// +/// EINJ Error Injection Actions +/// +#define EFI_ACPI_4_0_EINJ_BEGIN_INJECTION_OPERATION 0x00 +#define EFI_ACPI_4_0_EINJ_GET_TRIGGER_ERROR_ACTION_TABLE 0x01 +#define EFI_ACPI_4_0_EINJ_SET_ERROR_TYPE 0x02 +#define EFI_ACPI_4_0_EINJ_GET_ERROR_TYPE 0x03 +#define EFI_ACPI_4_0_EINJ_END_OPERATION 0x04 +#define EFI_ACPI_4_0_EINJ_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_4_0_EINJ_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_4_0_EINJ_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_4_0_EINJ_TRIGGER_ERROR 0xFF + +/// +/// EINJ Action Command Status +/// +#define EFI_ACPI_4_0_EINJ_STATUS_SUCCESS 0x00 +#define EFI_ACPI_4_0_EINJ_STATUS_UNKNOWN_FAILURE 0x01 +#define EFI_ACPI_4_0_EINJ_STATUS_INVALID_ACCESS 0x02 + +/// +/// EINJ Error Type Definition +/// +#define EFI_ACPI_4_0_EINJ_ERROR_PROCESSOR_CORRECTABLE (1 << 0) +#define EFI_ACPI_4_0_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_NONFATAL (1 << 1) +#define EFI_ACPI_4_0_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_FATAL (1 << 2) +#define 
EFI_ACPI_4_0_EINJ_ERROR_MEMORY_CORRECTABLE (1 << 3) +#define EFI_ACPI_4_0_EINJ_ERROR_MEMORY_UNCORRECTABLE_NONFATAL (1 << 4) +#define EFI_ACPI_4_0_EINJ_ERROR_MEMORY_UNCORRECTABLE_FATAL (1 << 5) +#define EFI_ACPI_4_0_EINJ_ERROR_PCI_EXPRESS_CORRECTABLE (1 << 6) +#define EFI_ACPI_4_0_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_NONFATAL (1 << 7) +#define EFI_ACPI_4_0_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_FATAL (1 << 8) +#define EFI_ACPI_4_0_EINJ_ERROR_PLATFORM_CORRECTABLE (1 << 9) +#define EFI_ACPI_4_0_EINJ_ERROR_PLATFORM_UNCORRECTABLE_NONFATAL (1 << 10) +#define EFI_ACPI_4_0_EINJ_ERROR_PLATFORM_UNCORRECTABLE_FATAL (1 << 11) + +/// +/// EINJ Injection Instructions +/// +#define EFI_ACPI_4_0_EINJ_READ_REGISTER 0x00 +#define EFI_ACPI_4_0_EINJ_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_4_0_EINJ_WRITE_REGISTER 0x02 +#define EFI_ACPI_4_0_EINJ_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_4_0_EINJ_NOOP 0x04 + +/// +/// EINJ Instruction Flags +/// +#define EFI_ACPI_4_0_EINJ_PRESERVE_REGISTER 0x01 + +/// +/// EINJ Injection Instruction Entry +/// +typedef struct { + UINT8 InjectionAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_4_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_4_0_EINJ_INJECTION_INSTRUCTION_ENTRY; + +/// +/// EINJ Trigger Action Table +/// +typedef struct { + UINT32 HeaderSize; + UINT32 Revision; + UINT32 TableSize; + UINT32 EntryCount; +} EFI_ACPI_4_0_EINJ_TRIGGER_ACTION_TABLE; + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer +/// +#define EFI_ACPI_4_0_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "APIC" Multiple APIC Description Table +/// +#define EFI_ACPI_4_0_MULTIPLE_APIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "BERT" Boot Error Record Table +/// +#define EFI_ACPI_4_0_BOOT_ERROR_RECORD_TABLE_SIGNATURE SIGNATURE_32('B', 'E', 'R', 'T') + +/// +/// "CPEP" Corrected 
Platform Error Polling Table +/// +#define EFI_ACPI_4_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_SIGNATURE SIGNATURE_32('C', 'P', 'E', 'P') + +/// +/// "DSDT" Differentiated System Description Table +/// +#define EFI_ACPI_4_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "ECDT" Embedded Controller Boot Resources Table +/// +#define EFI_ACPI_4_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_SIGNATURE SIGNATURE_32('E', 'C', 'D', 'T') + +/// +/// "EINJ" Error Injection Table +/// +#define EFI_ACPI_4_0_ERROR_INJECTION_TABLE_SIGNATURE SIGNATURE_32('E', 'I', 'N', 'J') + +/// +/// "ERST" Error Record Serialization Table +/// +#define EFI_ACPI_4_0_ERROR_RECORD_SERIALIZATION_TABLE_SIGNATURE SIGNATURE_32('E', 'R', 'S', 'T') + +/// +/// "FACP" Fixed ACPI Description Table +/// +#define EFI_ACPI_4_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "FACS" Firmware ACPI Control Structure +/// +#define EFI_ACPI_4_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "HEST" Hardware Error Source Table +/// +#define EFI_ACPI_4_0_HARDWARE_ERROR_SOURCE_TABLE_SIGNATURE SIGNATURE_32('H', 'E', 'S', 'T') + +/// +/// "MSCT" Maximum System Characteristics Table +/// +#define EFI_ACPI_4_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'C', 'T') + +/// +/// "PSDT" Persistent System Description Table +/// +#define EFI_ACPI_4_0_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RSDT" Root System Description Table +/// +#define EFI_ACPI_4_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table +/// +#define EFI_ACPI_4_0_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SLIT" System Locality Information Table +/// +#define EFI_ACPI_4_0_SYSTEM_LOCALITY_INFORMATION_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'T') + 
+/// +/// "SRAT" System Resource Affinity Table +/// +#define EFI_ACPI_4_0_SYSTEM_RESOURCE_AFFINITY_TABLE_SIGNATURE SIGNATURE_32('S', 'R', 'A', 'T') + +/// +/// "SSDT" Secondary System Description Table +/// +#define EFI_ACPI_4_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('S', 'S', 'D', 'T') + +/// +/// "XSDT" Extended System Description Table +/// +#define EFI_ACPI_4_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('X', 'S', 'D', 'T') + +/// +/// "BOOT" MS Simple Boot Spec +/// +#define EFI_ACPI_4_0_SIMPLE_BOOT_FLAG_TABLE_SIGNATURE SIGNATURE_32('B', 'O', 'O', 'T') + +/// +/// "DBGP" MS Debug Port Spec +/// +#define EFI_ACPI_4_0_DEBUG_PORT_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', 'P') + +/// +/// "DMAR" DMA Remapping Table +/// +#define EFI_ACPI_4_0_DMA_REMAPPING_TABLE_SIGNATURE SIGNATURE_32('D', 'M', 'A', 'R') + +/// +/// "ETDT" Event Timer Description Table +/// +#define EFI_ACPI_4_0_EVENT_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('E', 'T', 'D', 'T') + +/// +/// "HPET" IA-PC High Precision Event Timer Table +/// +#define EFI_ACPI_4_0_HIGH_PRECISION_EVENT_TIMER_TABLE_SIGNATURE SIGNATURE_32('H', 'P', 'E', 'T') + +/// +/// "iBFT" iSCSI Boot Firmware Table +/// +#define EFI_ACPI_4_0_ISCSI_BOOT_FIRMWARE_TABLE_SIGNATURE SIGNATURE_32('i', 'B', 'F', 'T') + +/// +/// "IVRS" I/O Virtualization Reporting Structure +/// +#define EFI_ACPI_4_0_IO_VIRTUALIZATION_REPORTING_STRUCTURE_SIGNATURE SIGNATURE_32('I', 'V', 'R', 'S') + +/// +/// "MCFG" PCI Express Memory Mapped Configuration Space Base Address Description Table +/// +#define EFI_ACPI_4_0_PCI_EXPRESS_MEMORY_MAPPED_CONFIGURATION_SPACE_BASE_ADDRESS_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'F', 'G') + +/// +/// "MCHI" Management Controller Host Interface Table +/// +#define EFI_ACPI_4_0_MANAGEMENT_CONTROLLER_HOST_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'H', 'I') + +/// +/// "SPCR" Serial Port Concole Redirection Table +/// +#define 
EFI_ACPI_4_0_SERIAL_PORT_CONSOLE_REDIRECTION_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'C', 'R') + +/// +/// "SPMI" Server Platform Management Interface Table +/// +#define EFI_ACPI_4_0_SERVER_PLATFORM_MANAGEMENT_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'M', 'I') + +/// +/// "TCPA" Trusted Computing Platform Alliance Capabilities Table +/// +#define EFI_ACPI_4_0_TRUSTED_COMPUTING_PLATFORM_ALLIANCE_CAPABILITIES_TABLE_SIGNATURE SIGNATURE_32('T', 'C', 'P', 'A') + +/// +/// "UEFI" UEFI ACPI Data Table +/// +#define EFI_ACPI_4_0_UEFI_ACPI_DATA_TABLE_SIGNATURE SIGNATURE_32('U', 'E', 'F', 'I') + +/// +/// "WAET" Windows ACPI Enlightenment Table +/// +#define EFI_ACPI_4_0_WINDOWS_ACPI_ENLIGHTENMENT_TABLE_SIGNATURE SIGNATURE_32('W', 'A', 'E', 'T') + +/// +/// "WDAT" Watchdog Action Table +/// +#define EFI_ACPI_4_0_WATCHDOG_ACTION_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'A', 'T') + +/// +/// "WDRT" Watchdog Resource Table +/// +#define EFI_ACPI_4_0_WATCHDOG_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'R', 'T') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi50.h b/src/include/ipxe/efi/IndustryStandard/Acpi50.h new file mode 100644 index 00000000..df9e7153 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi50.h @@ -0,0 +1,2121 @@ +/** @file + ACPI 5.0 definitions from the ACPI Specification Revision 5.0a November 13, 2013. + + Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+ Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_5_0_H_ +#define _ACPI_5_0_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Define for Desriptor +// +#define ACPI_SMALL_FIXED_DMA_DESCRIPTOR_NAME 0x0A +#define ACPI_LARGE_GPIO_CONNECTION_DESCRIPTOR_NAME 0x0C +#define ACPI_LARGE_GENERIC_SERIAL_BUS_CONNECTION_DESCRIPTOR_NAME 0x0E + +#define ACPI_FIXED_DMA_DESCRIPTOR 0x55 +#define ACPI_GPIO_CONNECTION_DESCRIPTOR 0x8C +#define ACPI_GENERIC_SERIAL_BUS_CONNECTION_DESCRIPTOR 0x8E + +#pragma pack(1) + +/// +/// Generic DMA Descriptor. +/// +typedef PACKED struct { + ACPI_SMALL_RESOURCE_HEADER Header; + UINT16 DmaRequestLine; + UINT16 DmaChannel; + UINT8 DmaTransferWidth; +} EFI_ACPI_FIXED_DMA_DESCRIPTOR; + +/// +/// GPIO Connection Descriptor +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 RevisionId; + UINT8 ConnectionType; + UINT16 GeneralFlags; + UINT16 InterruptFlags; + UINT8 PinConfiguration; + UINT16 OutputDriveStrength; + UINT16 DebounceTimeout; + UINT16 PinTableOffset; + UINT8 ResourceSourceIndex; + UINT16 ResourceSourceNameOffset; + UINT16 VendorDataOffset; + UINT16 VendorDataLength; +} EFI_ACPI_GPIO_CONNECTION_DESCRIPTOR; + +#define EFI_ACPI_GPIO_CONNECTION_TYPE_INTERRUPT 0x0 +#define EFI_ACPI_GPIO_CONNECTION_TYPE_IO 0x1 + +/// +/// Serial Bus Resource Descriptor (Generic) +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 RevisionId; + UINT8 ResourceSourceIndex; + UINT8 SerialBusType; + UINT8 GeneralFlags; + UINT16 TypeSpecificFlags; + UINT8 TypeSpecificRevisionId; + UINT16 TypeDataLength; +// Type specific data +} 
EFI_ACPI_SERIAL_BUS_RESOURCE_DESCRIPTOR; + +#define EFI_ACPI_SERIAL_BUS_RESOURCE_TYPE_I2C 0x1 +#define EFI_ACPI_SERIAL_BUS_RESOURCE_TYPE_SPI 0x2 +#define EFI_ACPI_SERIAL_BUS_RESOURCE_TYPE_UART 0x3 + +/// +/// Serial Bus Resource Descriptor (I2C) +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 RevisionId; + UINT8 ResourceSourceIndex; + UINT8 SerialBusType; + UINT8 GeneralFlags; + UINT16 TypeSpecificFlags; + UINT8 TypeSpecificRevisionId; + UINT16 TypeDataLength; + UINT32 ConnectionSpeed; + UINT16 SlaveAddress; +} EFI_ACPI_SERIAL_BUS_RESOURCE_I2C_DESCRIPTOR; + +/// +/// Serial Bus Resource Descriptor (SPI) +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 RevisionId; + UINT8 ResourceSourceIndex; + UINT8 SerialBusType; + UINT8 GeneralFlags; + UINT16 TypeSpecificFlags; + UINT8 TypeSpecificRevisionId; + UINT16 TypeDataLength; + UINT32 ConnectionSpeed; + UINT8 DataBitLength; + UINT8 Phase; + UINT8 Polarity; + UINT16 DeviceSelection; +} EFI_ACPI_SERIAL_BUS_RESOURCE_SPI_DESCRIPTOR; + +/// +/// Serial Bus Resource Descriptor (UART) +/// +typedef PACKED struct { + ACPI_LARGE_RESOURCE_HEADER Header; + UINT8 RevisionId; + UINT8 ResourceSourceIndex; + UINT8 SerialBusType; + UINT8 GeneralFlags; + UINT16 TypeSpecificFlags; + UINT8 TypeSpecificRevisionId; + UINT16 TypeDataLength; + UINT32 DefaultBaudRate; + UINT16 RxFIFO; + UINT16 TxFIFO; + UINT8 Parity; + UINT8 SerialLinesEnabled; +} EFI_ACPI_SERIAL_BUS_RESOURCE_UART_DESCRIPTOR; + +#pragma pack() + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// ACPI 5.0 Generic Address Space definition +/// +typedef struct { + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 AccessSize; + UINT64 Address; +} EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE; + +// +// Generic Address Space Address IDs +// +#define EFI_ACPI_5_0_SYSTEM_MEMORY 0 +#define EFI_ACPI_5_0_SYSTEM_IO 1 +#define EFI_ACPI_5_0_PCI_CONFIGURATION_SPACE 2 +#define 
EFI_ACPI_5_0_EMBEDDED_CONTROLLER 3 +#define EFI_ACPI_5_0_SMBUS 4 +#define EFI_ACPI_5_0_PLATFORM_COMMUNICATION_CHANNEL 0x0A +#define EFI_ACPI_5_0_FUNCTIONAL_FIXED_HARDWARE 0x7F + +// +// Generic Address Space Access Sizes +// +#define EFI_ACPI_5_0_UNDEFINED 0 +#define EFI_ACPI_5_0_BYTE 1 +#define EFI_ACPI_5_0_WORD 2 +#define EFI_ACPI_5_0_DWORD 3 +#define EFI_ACPI_5_0_QWORD 4 + +// +// ACPI 5.0 table structures +// + +/// +/// Root System Description Pointer Structure +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Revision; + UINT32 RsdtAddress; + UINT32 Length; + UINT64 XsdtAddress; + UINT8 ExtendedChecksum; + UINT8 Reserved[3]; +} EFI_ACPI_5_0_ROOT_SYSTEM_DESCRIPTION_POINTER; + +/// +/// RSD_PTR Revision (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_ROOT_SYSTEM_DESCRIPTION_POINTER_REVISION 0x02 ///< ACPISpec (Revision 5.0) says current value is 2 + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_5_0_COMMON_HEADER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +// +// Extended System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT64 table pointers. +// + +/// +/// XSDT Revision (as defined in ACPI 5.0 spec.) 
+/// +#define EFI_ACPI_5_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 Reserved0; + UINT8 PreferredPmProfile; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 PstateCnt; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmrLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 CstCnt; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT16 IaPcBootArch; + UINT8 Reserved1; + UINT32 Flags; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE ResetReg; + UINT8 ResetValue; + UINT8 Reserved2[3]; + UINT64 XFirmwareCtrl; + UINT64 XDsdt; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XPm1aEvtBlk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XPm1bEvtBlk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XPm1aCntBlk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XPm1bCntBlk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XPm2CntBlk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XPmTmrBlk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XGpe0Blk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE XGpe1Blk; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE SleepControlReg; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE SleepStatusReg; +} EFI_ACPI_5_0_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 5.0 spec.) 
+/// +#define EFI_ACPI_5_0_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x05 + +// +// Fixed ACPI Description Table Preferred Power Management Profile +// +#define EFI_ACPI_5_0_PM_PROFILE_UNSPECIFIED 0 +#define EFI_ACPI_5_0_PM_PROFILE_DESKTOP 1 +#define EFI_ACPI_5_0_PM_PROFILE_MOBILE 2 +#define EFI_ACPI_5_0_PM_PROFILE_WORKSTATION 3 +#define EFI_ACPI_5_0_PM_PROFILE_ENTERPRISE_SERVER 4 +#define EFI_ACPI_5_0_PM_PROFILE_SOHO_SERVER 5 +#define EFI_ACPI_5_0_PM_PROFILE_APPLIANCE_PC 6 +#define EFI_ACPI_5_0_PM_PROFILE_PERFORMANCE_SERVER 7 +#define EFI_ACPI_5_0_PM_PROFILE_TABLET 8 + +// +// Fixed ACPI Description Table Boot Architecture Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_5_0_LEGACY_DEVICES BIT0 +#define EFI_ACPI_5_0_8042 BIT1 +#define EFI_ACPI_5_0_VGA_NOT_PRESENT BIT2 +#define EFI_ACPI_5_0_MSI_NOT_SUPPORTED BIT3 +#define EFI_ACPI_5_0_PCIE_ASPM_CONTROLS BIT4 +#define EFI_ACPI_5_0_CMOS_RTC_NOT_PRESENT BIT5 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_5_0_WBINVD BIT0 +#define EFI_ACPI_5_0_WBINVD_FLUSH BIT1 +#define EFI_ACPI_5_0_PROC_C1 BIT2 +#define EFI_ACPI_5_0_P_LVL2_UP BIT3 +#define EFI_ACPI_5_0_PWR_BUTTON BIT4 +#define EFI_ACPI_5_0_SLP_BUTTON BIT5 +#define EFI_ACPI_5_0_FIX_RTC BIT6 +#define EFI_ACPI_5_0_RTC_S4 BIT7 +#define EFI_ACPI_5_0_TMR_VAL_EXT BIT8 +#define EFI_ACPI_5_0_DCK_CAP BIT9 +#define EFI_ACPI_5_0_RESET_REG_SUP BIT10 +#define EFI_ACPI_5_0_SEALED_CASE BIT11 +#define EFI_ACPI_5_0_HEADLESS BIT12 +#define EFI_ACPI_5_0_CPU_SW_SLP BIT13 +#define EFI_ACPI_5_0_PCI_EXP_WAK BIT14 +#define EFI_ACPI_5_0_USE_PLATFORM_CLOCK BIT15 +#define EFI_ACPI_5_0_S4_RTC_STS_VALID BIT16 +#define EFI_ACPI_5_0_REMOTE_POWER_ON_CAPABLE BIT17 +#define EFI_ACPI_5_0_FORCE_APIC_CLUSTER_MODEL BIT18 +#define EFI_ACPI_5_0_FORCE_APIC_PHYSICAL_DESTINATION_MODE BIT19 +#define EFI_ACPI_5_0_HW_REDUCED_ACPI BIT20 +#define EFI_ACPI_5_0_LOW_POWER_S0_IDLE_CAPABLE BIT21 + +/// +/// Firmware ACPI Control Structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; + UINT64 XFirmwareWakingVector; + UINT8 Version; + UINT8 Reserved0[3]; + UINT32 OspmFlags; + UINT8 Reserved1[24]; +} EFI_ACPI_5_0_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// FACS Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_VERSION 0x02 + +/// +/// Firmware Control Structure Feature Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_5_0_S4BIOS_F BIT0 +#define EFI_ACPI_5_0_64BIT_WAKE_SUPPORTED_F BIT1 + +/// +/// OSPM Enabled Firmware Control Structure Flags +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_5_0_OSPM_64BIT_WAKE_F BIT0 + +// +// Differentiated System Description Table, +// Secondary System Description Table +// and Persistent System Description Table, +// no definition needed as they are common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a definition block. +// +#define EFI_ACPI_5_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 +#define EFI_ACPI_5_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_5_0_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x03 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_5_0_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x0D and 0x7F are reserved and +// will be ignored by OSPM. 0x80 ~ 0xFF are reserved for OEM. 
+// +#define EFI_ACPI_5_0_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_5_0_IO_APIC 0x01 +#define EFI_ACPI_5_0_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_5_0_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_5_0_LOCAL_APIC_NMI 0x04 +#define EFI_ACPI_5_0_LOCAL_APIC_ADDRESS_OVERRIDE 0x05 +#define EFI_ACPI_5_0_IO_SAPIC 0x06 +#define EFI_ACPI_5_0_LOCAL_SAPIC 0x07 +#define EFI_ACPI_5_0_PLATFORM_INTERRUPT_SOURCES 0x08 +#define EFI_ACPI_5_0_PROCESSOR_LOCAL_X2APIC 0x09 +#define EFI_ACPI_5_0_LOCAL_X2APIC_NMI 0x0A +#define EFI_ACPI_5_0_GIC 0x0B +#define EFI_ACPI_5_0_GICD 0x0C + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_5_0_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_0_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 GlobalSystemInterruptBase; +} EFI_ACPI_5_0_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterrupt; + UINT16 Flags; +} EFI_ACPI_5_0_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; + UINT8 CpeiProcessorOverride; + UINT8 Reserved[31]; +} EFI_ACPI_5_0_PLATFORM_INTERRUPT_APIC_STRUCTURE; + +// +// MPS INTI flags. +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_5_0_POLARITY (3 << 0) +#define EFI_ACPI_5_0_TRIGGER_MODE (3 << 2) + +/// +/// Non-Maskable Interrupt Source Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterrupt; +} EFI_ACPI_5_0_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT16 Flags; + UINT8 LocalApicLint; +} EFI_ACPI_5_0_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Local APIC Address Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 LocalApicAddress; +} EFI_ACPI_5_0_LOCAL_APIC_ADDRESS_OVERRIDE_STRUCTURE; + +/// +/// IO SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 GlobalSystemInterruptBase; + UINT64 IoSapicAddress; +} EFI_ACPI_5_0_IO_SAPIC_STRUCTURE; + +/// +/// Local SAPIC Structure +/// This struct followed by a null-terminated ASCII string - ACPI Processor UID String +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 LocalSapicId; + UINT8 LocalSapicEid; + UINT8 Reserved[3]; + UINT32 Flags; + UINT32 ACPIProcessorUIDValue; +} EFI_ACPI_5_0_PROCESSOR_LOCAL_SAPIC_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; +} EFI_ACPI_5_0_PLATFORM_INTERRUPT_SOURCES_STRUCTURE; + +/// +/// Platform Interrupt Source Flags. +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_5_0_CPEI_PROCESSOR_OVERRIDE BIT0 + +/// +/// Processor Local x2APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[2]; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 AcpiProcessorUid; +} EFI_ACPI_5_0_PROCESSOR_LOCAL_X2APIC_STRUCTURE; + +/// +/// Local x2APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 AcpiProcessorUid; + UINT8 LocalX2ApicLint; + UINT8 Reserved[3]; +} EFI_ACPI_5_0_LOCAL_X2APIC_NMI_STRUCTURE; + +/// +/// GIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT32 GicId; + UINT32 AcpiProcessorUid; + UINT32 Flags; + UINT32 ParkingProtocolVersion; + UINT32 PerformanceInterruptGsiv; + UINT64 ParkedAddress; + UINT64 PhysicalBaseAddress; +} EFI_ACPI_5_0_GIC_STRUCTURE; + +/// +/// GIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_0_GIC_ENABLED BIT0 +#define EFI_ACPI_5_0_PERFORMANCE_INTERRUPT_MODEL BIT1 + +/// +/// GIC Distributor Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved1; + UINT32 GicId; + UINT64 PhysicalBaseAddress; + UINT32 SystemVectorBase; + UINT32 Reserved2; +} EFI_ACPI_5_0_GIC_DISTRIBUTOR_STRUCTURE; + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_5_0_SMART_BATTERY_DESCRIPTION_TABLE; + +/// +/// SBST Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_SMART_BATTERY_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Embedded Controller Boot Resources Table (ECDT) +/// The table is followed by a null terminated ASCII string that contains +/// a fully qualified reference to the name space object. 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE EcControl; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE EcData; + UINT32 Uid; + UINT8 GpeBit; +} EFI_ACPI_5_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE; + +/// +/// ECDT Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_REVISION 0x01 + +/// +/// System Resource Affinity Table (SRAT). The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved1; ///< Must be set to 1 + UINT64 Reserved2; +} EFI_ACPI_5_0_SYSTEM_RESOURCE_AFFINITY_TABLE_HEADER; + +/// +/// SRAT Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_SYSTEM_RESOURCE_AFFINITY_TABLE_REVISION 0x03 + +// +// SRAT structure types. +// All other values between 0x03 an 0xFF are reserved and +// will be ignored by OSPM. +// +#define EFI_ACPI_5_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY 0x00 +#define EFI_ACPI_5_0_MEMORY_AFFINITY 0x01 +#define EFI_ACPI_5_0_PROCESSOR_LOCAL_X2APIC_AFFINITY 0x02 + +/// +/// Processor Local APIC/SAPIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProximityDomain7To0; + UINT8 ApicId; + UINT32 Flags; + UINT8 LocalSapicEid; + UINT8 ProximityDomain31To8[3]; + UINT32 ClockDomain; +} EFI_ACPI_5_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY_STRUCTURE; + +/// +/// Local APIC/SAPIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_0_PROCESSOR_LOCAL_APIC_SAPIC_ENABLED (1 << 0) + +/// +/// Memory Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT16 Reserved1; + UINT32 AddressBaseLow; + UINT32 AddressBaseHigh; + UINT32 LengthLow; + UINT32 LengthHigh; + UINT32 Reserved2; + UINT32 Flags; + UINT64 Reserved3; +} EFI_ACPI_5_0_MEMORY_AFFINITY_STRUCTURE; + +// +// Memory Flags. All other bits are reserved and must be 0. 
+// +#define EFI_ACPI_5_0_MEMORY_ENABLED (1 << 0) +#define EFI_ACPI_5_0_MEMORY_HOT_PLUGGABLE (1 << 1) +#define EFI_ACPI_5_0_MEMORY_NONVOLATILE (1 << 2) + +/// +/// Processor Local x2APIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved1[2]; + UINT32 ProximityDomain; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 ClockDomain; + UINT8 Reserved2[4]; +} EFI_ACPI_5_0_PROCESSOR_LOCAL_X2APIC_AFFINITY_STRUCTURE; + +/// +/// System Locality Distance Information Table (SLIT). +/// The rest of the table is a matrix. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 NumberOfSystemLocalities; +} EFI_ACPI_5_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_HEADER; + +/// +/// SLIT Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_REVISION 0x01 + +/// +/// Corrected Platform Error Polling Table (CPEP) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 Reserved[8]; +} EFI_ACPI_5_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_HEADER; + +/// +/// CPEP Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_REVISION 0x01 + +// +// CPEP processor structure types. +// +#define EFI_ACPI_5_0_CPEP_PROCESSOR_APIC_SAPIC 0x00 + +/// +/// Corrected Platform Error Polling Processor Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT32 PollingInterval; +} EFI_ACPI_5_0_CPEP_PROCESSOR_APIC_SAPIC_STRUCTURE; + +/// +/// Maximum System Characteristics Table (MSCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 OffsetProxDomInfo; + UINT32 MaximumNumberOfProximityDomains; + UINT32 MaximumNumberOfClockDomains; + UINT64 MaximumPhysicalAddress; +} EFI_ACPI_5_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_HEADER; + +/// +/// MSCT Version (as defined in ACPI 5.0 spec.) 
+/// +#define EFI_ACPI_5_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_REVISION 0x01 + +/// +/// Maximum Proximity Domain Information Structure Definition +/// +typedef struct { + UINT8 Revision; + UINT8 Length; + UINT32 ProximityDomainRangeLow; + UINT32 ProximityDomainRangeHigh; + UINT32 MaximumProcessorCapacity; + UINT64 MaximumMemoryCapacity; +} EFI_ACPI_5_0_MAXIMUM_PROXIMITY_DOMAIN_INFORMATION_STRUCTURE; + +/// +/// ACPI RAS Feature Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 PlatformCommunicationChannelIdentifier[12]; +} EFI_ACPI_5_0_RAS_FEATURE_TABLE; + +/// +/// RASF Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_RAS_FEATURE_TABLE_REVISION 0x01 + +/// +/// ACPI RASF Platform Communication Channel Shared Memory Region definition. +/// +typedef struct { + UINT32 Signature; + UINT16 Command; + UINT16 Status; + UINT16 Version; + UINT8 RASCapabilities[16]; + UINT8 SetRASCapabilities[16]; + UINT16 NumberOfRASFParameterBlocks; + UINT32 SetRASCapabilitiesStatus; +} EFI_ACPI_5_0_RASF_PLATFORM_COMMUNICATION_CHANNEL_SHARED_MEMORY_REGION; + +/// +/// ACPI RASF PCC command code +/// +#define EFI_ACPI_5_0_RASF_PCC_COMMAND_CODE_EXECUTE_RASF_COMMAND 0x01 + +/// +/// ACPI RASF Platform RAS Capabilities +/// +#define EFI_ACPI_5_0_RASF_PLATFORM_RAS_CAPABILITY_HARDWARE_BASED_PATROL_SCRUB_SUPPOTED 0x01 +#define EFI_ACPI_5_0_RASF_PLATFORM_RAS_CAPABILITY_HARDWARE_BASED_PATROL_SCRUB_SUPPOTED_AND_EXPOSED_TO_SOFTWARE 0x02 + +/// +/// ACPI RASF Parameter Block structure for PATROL_SCRUB +/// +typedef struct { + UINT16 Type; + UINT16 Version; + UINT16 Length; + UINT16 PatrolScrubCommand; + UINT64 RequestedAddressRange[2]; + UINT64 ActualAddressRange[2]; + UINT16 Flags; + UINT8 RequestedSpeed; +} EFI_ACPI_5_0_RASF_PATROL_SCRUB_PLATFORM_BLOCK_STRUCTURE; + +/// +/// ACPI RASF Patrol Scrub command +/// +#define EFI_ACPI_5_0_RASF_PATROL_SCRUB_COMMAND_GET_PATROL_PARAMETERS 0x01 +#define 
EFI_ACPI_5_0_RASF_PATROL_SCRUB_COMMAND_START_PATROL_SCRUBBER 0x02 +#define EFI_ACPI_5_0_RASF_PATROL_SCRUB_COMMAND_STOP_PATROL_SCRUBBER 0x03 + +/// +/// Memory Power State Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 PlatformCommunicationChannelIdentifier; + UINT8 Reserved[3]; +// Memory Power Node Structure +// Memory Power State Characteristics +} EFI_ACPI_5_0_MEMORY_POWER_STATUS_TABLE; + +/// +/// MPST Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_MEMORY_POWER_STATE_TABLE_REVISION 0x01 + +/// +/// MPST Platform Communication Channel Shared Memory Region definition. +/// +typedef struct { + UINT32 Signature; + UINT16 Command; + UINT16 Status; + UINT32 MemoryPowerCommandRegister; + UINT32 MemoryPowerStatusRegister; + UINT32 PowerStateId; + UINT32 MemoryPowerNodeId; + UINT64 MemoryEnergyConsumed; + UINT64 ExpectedAveragePowerComsuned; +} EFI_ACPI_5_0_MPST_PLATFORM_COMMUNICATION_CHANNEL_SHARED_MEMORY_REGION; + +/// +/// ACPI MPST PCC command code +/// +#define EFI_ACPI_5_0_MPST_PCC_COMMAND_CODE_EXECUTE_MPST_COMMAND 0x03 + +/// +/// ACPI MPST Memory Power command +/// +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_COMMAND_GET_MEMORY_POWER_STATE 0x01 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_COMMAND_SET_MEMORY_POWER_STATE 0x02 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_COMMAND_GET_AVERAGE_POWER_CONSUMED 0x03 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_COMMAND_GET_MEMORY_ENERGY_CONSUMED 0x04 + +/// +/// MPST Memory Power Node Table +/// +typedef struct { + UINT8 PowerStateValue; + UINT8 PowerStateInformationIndex; +} EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE; + +typedef struct { + UINT8 Flag; + UINT8 Reserved; + UINT16 MemoryPowerNodeId; + UINT32 Length; + UINT64 AddressBase; + UINT64 AddressLength; + UINT32 NumberOfPowerStates; + UINT32 NumberOfPhysicalComponents; +//EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE MemoryPowerState[NumberOfPowerStates]; +//UINT16 PhysicalComponentIdentifier[NumberOfPhysicalComponents]; +} 
EFI_ACPI_5_0_MPST_MEMORY_POWER_STRUCTURE; + +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_STRUCTURE_FLAG_ENABLE 0x01 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_STRUCTURE_FLAG_POWER_MANAGED 0x02 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_STRUCTURE_FLAG_HOT_PLUGGABLE 0x04 + +typedef struct { + UINT16 MemoryPowerNodeCount; + UINT8 Reserved[2]; +} EFI_ACPI_5_0_MPST_MEMORY_POWER_NODE_TABLE; + +/// +/// MPST Memory Power State Characteristics Table +/// +typedef struct { + UINT8 PowerStateStructureID; + UINT8 Flag; + UINT16 Reserved; + UINT32 AveragePowerConsumedInMPS0; + UINT32 RelativePowerSavingToMPS0; + UINT64 ExitLatencyToMPS0; +} EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE; + +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_MEMORY_CONTENT_PRESERVED 0x01 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_AUTONOMOUS_MEMORY_POWER_STATE_ENTRY 0x02 +#define EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_AUTONOMOUS_MEMORY_POWER_STATE_EXIT 0x04 + +typedef struct { + UINT16 MemoryPowerStateCharacteristicsCount; + UINT8 Reserved[2]; +} EFI_ACPI_5_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_TABLE; + +/// +/// Memory Topology Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved; +} EFI_ACPI_5_0_MEMORY_TOPOLOGY_TABLE; + +/// +/// PMTT Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_MEMORY_TOPOLOGY_TABLE_REVISION 0x01 + +/// +/// Common Memory Aggregator Device Structure. 
+/// +typedef struct { + UINT8 Type; + UINT8 Reserved; + UINT16 Length; + UINT16 Flags; + UINT16 Reserved1; +} EFI_ACPI_5_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// Memory Aggregator Device Type +/// +#define EFI_ACPI_5_0_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_SOCKET 0x1 +#define EFI_ACPI_5_0_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_MEMORY_CONTROLLER 0x2 +#define EFI_ACPI_5_0_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_DIMM 0x3 + +/// +/// Socket Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_5_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT16 SocketIdentifier; + UINT16 Reserved; +//EFI_ACPI_5_0_PMMT_MEMORY_CONTROLLER_MEMORY_AGGREGATOR_DEVICE_STRUCTURE MemoryController[]; +} EFI_ACPI_5_0_PMMT_SOCKET_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// MemoryController Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_5_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT32 ReadLatency; + UINT32 WriteLatency; + UINT32 ReadBandwidth; + UINT32 WriteBandwidth; + UINT16 OptimalAccessUnit; + UINT16 OptimalAccessAlignment; + UINT16 Reserved; + UINT16 NumberOfProximityDomains; +//UINT32 ProximityDomain[NumberOfProximityDomains]; +//EFI_ACPI_5_0_PMMT_DIMM_MEMORY_AGGREGATOR_DEVICE_STRUCTURE PhysicalComponent[]; +} EFI_ACPI_5_0_PMMT_MEMORY_CONTROLLER_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// DIMM Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_5_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT16 PhysicalComponentIdentifier; + UINT16 Reserved; + UINT32 SizeOfDimm; + UINT32 SmbiosHandle; +} EFI_ACPI_5_0_PMMT_DIMM_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// Boot Graphics Resource Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + /// + /// 2-bytes (16 bit) version ID. This value must be 1. + /// + UINT16 Version; + /// + /// 1-byte status field indicating current status about the table. 
+ /// Bits[7:1] = Reserved (must be zero) + /// Bit [0] = Valid. A one indicates the boot image graphic is valid. + /// + UINT8 Status; + /// + /// 1-byte enumerated type field indicating format of the image. + /// 0 = Bitmap + /// 1 - 255 Reserved (for future use) + /// + UINT8 ImageType; + /// + /// 8-byte (64 bit) physical address pointing to the firmware's in-memory copy + /// of the image bitmap. + /// + UINT64 ImageAddress; + /// + /// A 4-byte (32-bit) unsigned long describing the display X-offset of the boot image. + /// (X, Y) display offset of the top left corner of the boot image. + /// The top left corner of the display is at offset (0, 0). + /// + UINT32 ImageOffsetX; + /// + /// A 4-byte (32-bit) unsigned long describing the display Y-offset of the boot image. + /// (X, Y) display offset of the top left corner of the boot image. + /// The top left corner of the display is at offset (0, 0). + /// + UINT32 ImageOffsetY; +} EFI_ACPI_5_0_BOOT_GRAPHICS_RESOURCE_TABLE; + +/// +/// BGRT Revision +/// +#define EFI_ACPI_5_0_BOOT_GRAPHICS_RESOURCE_TABLE_REVISION 1 + +/// +/// BGRT Version +/// +#define EFI_ACPI_5_0_BGRT_VERSION 0x01 + +/// +/// BGRT Status +/// +#define EFI_ACPI_5_0_BGRT_STATUS_NOT_DISPLAYED 0x00 +#define EFI_ACPI_5_0_BGRT_STATUS_DISPLAYED 0x01 +#define EFI_ACPI_5_0_BGRT_STATUS_INVALID EFI_ACPI_5_0_BGRT_STATUS_NOT_DISPLAYED +#define EFI_ACPI_5_0_BGRT_STATUS_VALID EFI_ACPI_5_0_BGRT_STATUS_DISPLAYED + +/// +/// BGRT Image Type +/// +#define EFI_ACPI_5_0_BGRT_IMAGE_TYPE_BMP 0x00 + +/// +/// FPDT Version (as defined in ACPI 5.0 spec.) 
+/// +#define EFI_ACPI_5_0_FIRMWARE_PERFORMANCE_DATA_TABLE_REVISION 0x01 + +/// +/// FPDT Performance Record Types +/// +#define EFI_ACPI_5_0_FPDT_RECORD_TYPE_FIRMWARE_BASIC_BOOT_POINTER 0x0000 +#define EFI_ACPI_5_0_FPDT_RECORD_TYPE_S3_PERFORMANCE_TABLE_POINTER 0x0001 + +/// +/// FPDT Performance Record Revision +/// +#define EFI_ACPI_5_0_FPDT_RECORD_REVISION_FIRMWARE_BASIC_BOOT_POINTER 0x01 +#define EFI_ACPI_5_0_FPDT_RECORD_REVISION_S3_PERFORMANCE_TABLE_POINTER 0x01 + +/// +/// FPDT Runtime Performance Record Types +/// +#define EFI_ACPI_5_0_FPDT_RUNTIME_RECORD_TYPE_S3_RESUME 0x0000 +#define EFI_ACPI_5_0_FPDT_RUNTIME_RECORD_TYPE_S3_SUSPEND 0x0001 +#define EFI_ACPI_5_0_FPDT_RUNTIME_RECORD_TYPE_FIRMWARE_BASIC_BOOT 0x0002 + +/// +/// FPDT Runtime Performance Record Revision +/// +#define EFI_ACPI_5_0_FPDT_RUNTIME_RECORD_REVISION_S3_RESUME 0x01 +#define EFI_ACPI_5_0_FPDT_RUNTIME_RECORD_REVISION_S3_SUSPEND 0x01 +#define EFI_ACPI_5_0_FPDT_RUNTIME_RECORD_REVISION_FIRMWARE_BASIC_BOOT 0x02 + +/// +/// FPDT Performance Record header +/// +typedef struct { + UINT16 Type; + UINT8 Length; + UINT8 Revision; +} EFI_ACPI_5_0_FPDT_PERFORMANCE_RECORD_HEADER; + +/// +/// FPDT Performance Table header +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_5_0_FPDT_PERFORMANCE_TABLE_HEADER; + +/// +/// FPDT Firmware Basic Boot Performance Pointer Record Structure +/// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + UINT32 Reserved; + /// + /// 64-bit processor-relative physical address of the Basic Boot Performance Table. + /// + UINT64 BootPerformanceTablePointer; +} EFI_ACPI_5_0_FPDT_BOOT_PERFORMANCE_TABLE_POINTER_RECORD; + +/// +/// FPDT S3 Performance Table Pointer Record Structure +/// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + UINT32 Reserved; + /// + /// 64-bit processor-relative physical address of the S3 Performance Table. 
+ /// + UINT64 S3PerformanceTablePointer; +} EFI_ACPI_5_0_FPDT_S3_PERFORMANCE_TABLE_POINTER_RECORD; + +/// +/// FPDT Firmware Basic Boot Performance Record Structure +/// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + UINT32 Reserved; + /// + /// Timer value logged at the beginning of firmware image execution. + /// This may not always be zero or near zero. + /// + UINT64 ResetEnd; + /// + /// Timer value logged just prior to loading the OS boot loader into memory. + /// For non-UEFI compatible boots, this field must be zero. + /// + UINT64 OsLoaderLoadImageStart; + /// + /// Timer value logged just prior to launching the previously loaded OS boot loader image. + /// For non-UEFI compatible boots, the timer value logged will be just prior + /// to the INT 19h handler invocation. + /// + UINT64 OsLoaderStartImageStart; + /// + /// Timer value logged at the point when the OS loader calls the + /// ExitBootServices function for UEFI compatible firmware. + /// For non-UEFI compatible boots, this field must be zero. + /// + UINT64 ExitBootServicesEntry; + /// + /// Timer value logged at the point just prior towhen the OS loader gaining + /// control back from calls the ExitBootServices function for UEFI compatible firmware. + /// For non-UEFI compatible boots, this field must be zero. + /// + UINT64 ExitBootServicesExit; +} EFI_ACPI_5_0_FPDT_FIRMWARE_BASIC_BOOT_RECORD; + +/// +/// FPDT Firmware Basic Boot Performance Table signature +/// +#define EFI_ACPI_5_0_FPDT_BOOT_PERFORMANCE_TABLE_SIGNATURE SIGNATURE_32('F', 'B', 'P', 'T') + +// +// FPDT Firmware Basic Boot Performance Table +// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_TABLE_HEADER Header; + // + // one or more Performance Records. 
+ // +} EFI_ACPI_5_0_FPDT_FIRMWARE_BASIC_BOOT_TABLE; + +/// +/// FPDT "S3PT" S3 Performance Table +/// +#define EFI_ACPI_5_0_FPDT_S3_PERFORMANCE_TABLE_SIGNATURE SIGNATURE_32('S', '3', 'P', 'T') + +// +// FPDT Firmware S3 Boot Performance Table +// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_TABLE_HEADER Header; + // + // one or more Performance Records. + // +} EFI_ACPI_5_0_FPDT_FIRMWARE_S3_BOOT_TABLE; + +/// +/// FPDT Basic S3 Resume Performance Record +/// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + /// + /// A count of the number of S3 resume cycles since the last full boot sequence. + /// + UINT32 ResumeCount; + /// + /// Timer recorded at the end of BIOS S3 resume, just prior to handoff to the + /// OS waking vector. Only the most recent resume cycle's time is retained. + /// + UINT64 FullResume; + /// + /// Average timer value of all resume cycles logged since the last full boot + /// sequence, including the most recent resume. Note that the entire log of + /// timer values does not need to be retained in order to calculate this average. + /// + UINT64 AverageResume; +} EFI_ACPI_5_0_FPDT_S3_RESUME_RECORD; + +/// +/// FPDT Basic S3 Suspend Performance Record +/// +typedef struct { + EFI_ACPI_5_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + /// + /// Timer value recorded at the OS write to SLP_TYP upon entry to S3. + /// Only the most recent suspend cycle's timer value is retained. + /// + UINT64 SuspendStart; + /// + /// Timer value recorded at the final firmware write to SLP_TYP (or other + /// mechanism) used to trigger hardware entry to S3. + /// Only the most recent suspend cycle's timer value is retained. + /// + UINT64 SuspendEnd; +} EFI_ACPI_5_0_FPDT_S3_SUSPEND_RECORD; + +/// +/// Firmware Performance Record Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; +} EFI_ACPI_5_0_FIRMWARE_PERFORMANCE_RECORD_TABLE; + +/// +/// Generic Timer Description Table definition. 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 PhysicalAddress; + UINT32 GlobalFlags; + UINT32 SecurePL1TimerGSIV; + UINT32 SecurePL1TimerFlags; + UINT32 NonSecurePL1TimerGSIV; + UINT32 NonSecurePL1TimerFlags; + UINT32 VirtualTimerGSIV; + UINT32 VirtualTimerFlags; + UINT32 NonSecurePL2TimerGSIV; + UINT32 NonSecurePL2TimerFlags; +} EFI_ACPI_5_0_GENERIC_TIMER_DESCRIPTION_TABLE; + +/// +/// GTDT Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_GENERIC_TIMER_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Global Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_0_GTDT_GLOBAL_FLAG_MEMORY_MAPPED_BLOCK_PRESENT BIT0 +#define EFI_ACPI_5_0_GTDT_GLOBAL_FLAG_INTERRUPT_MODE BIT1 + +/// +/// Timer Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_0_GTDT_TIMER_FLAG_TIMER_INTERRUPT_MODE BIT0 +#define EFI_ACPI_5_0_GTDT_TIMER_FLAG_TIMER_INTERRUPT_POLARITY BIT1 + +/// +/// Boot Error Record Table (BERT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 BootErrorRegionLength; + UINT64 BootErrorRegion; +} EFI_ACPI_5_0_BOOT_ERROR_RECORD_TABLE_HEADER; + +/// +/// BERT Version (as defined in ACPI 5.0 spec.) 
+/// +#define EFI_ACPI_5_0_BOOT_ERROR_RECORD_TABLE_REVISION 0x01 + +/// +/// Boot Error Region Block Status Definition +/// +typedef struct { + UINT32 UncorrectableErrorValid:1; + UINT32 CorrectableErrorValid:1; + UINT32 MultipleUncorrectableErrors:1; + UINT32 MultipleCorrectableErrors:1; + UINT32 ErrorDataEntryCount:10; + UINT32 Reserved:18; +} EFI_ACPI_5_0_ERROR_BLOCK_STATUS; + +/// +/// Boot Error Region Definition +/// +typedef struct { + EFI_ACPI_5_0_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_5_0_BOOT_ERROR_REGION_STRUCTURE; + +// +// Boot Error Severity types +// +#define EFI_ACPI_5_0_ERROR_SEVERITY_CORRECTABLE 0x00 +#define EFI_ACPI_5_0_ERROR_SEVERITY_FATAL 0x01 +#define EFI_ACPI_5_0_ERROR_SEVERITY_CORRECTED 0x02 +#define EFI_ACPI_5_0_ERROR_SEVERITY_NONE 0x03 + +/// +/// Generic Error Data Entry Definition +/// +typedef struct { + UINT8 SectionType[16]; + UINT32 ErrorSeverity; + UINT16 Revision; + UINT8 ValidationBits; + UINT8 Flags; + UINT32 ErrorDataLength; + UINT8 FruId[16]; + UINT8 FruText[20]; +} EFI_ACPI_5_0_GENERIC_ERROR_DATA_ENTRY_STRUCTURE; + +/// +/// Generic Error Data Entry Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_GENERIC_ERROR_DATA_ENTRY_REVISION 0x0201 + +/// +/// HEST - Hardware Error Source Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 ErrorSourceCount; +} EFI_ACPI_5_0_HARDWARE_ERROR_SOURCE_TABLE_HEADER; + +/// +/// HEST Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_HARDWARE_ERROR_SOURCE_TABLE_REVISION 0x01 + +// +// Error Source structure types. 
+// +#define EFI_ACPI_5_0_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION 0x00 +#define EFI_ACPI_5_0_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK 0x01 +#define EFI_ACPI_5_0_IA32_ARCHITECTURE_NMI_ERROR 0x02 +#define EFI_ACPI_5_0_PCI_EXPRESS_ROOT_PORT_AER 0x06 +#define EFI_ACPI_5_0_PCI_EXPRESS_DEVICE_AER 0x07 +#define EFI_ACPI_5_0_PCI_EXPRESS_BRIDGE_AER 0x08 +#define EFI_ACPI_5_0_GENERIC_HARDWARE_ERROR 0x09 + +// +// Error Source structure flags. +// +#define EFI_ACPI_5_0_ERROR_SOURCE_FLAG_FIRMWARE_FIRST (1 << 0) +#define EFI_ACPI_5_0_ERROR_SOURCE_FLAG_GLOBAL (1 << 1) + +/// +/// IA-32 Architecture Machine Check Exception Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT64 GlobalCapabilityInitData; + UINT64 GlobalControlInitData; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[7]; +} EFI_ACPI_5_0_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure Definition +/// +typedef struct { + UINT8 BankNumber; + UINT8 ClearStatusOnInitialization; + UINT8 StatusDataFormat; + UINT8 Reserved0; + UINT32 ControlRegisterMsrAddress; + UINT64 ControlInitData; + UINT32 StatusRegisterMsrAddress; + UINT32 AddressRegisterMsrAddress; + UINT32 MiscRegisterMsrAddress; +} EFI_ACPI_5_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_BANK_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure MCA data format +/// +#define EFI_ACPI_5_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_IA32 0x00 +#define EFI_ACPI_5_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_INTEL64 0x01 +#define EFI_ACPI_5_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_AMD64 0x02 + +// +// Hardware Error Notification types. 
All other values are reserved +// +#define EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_POLLED 0x00 +#define EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_EXTERNAL_INTERRUPT 0x01 +#define EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_LOCAL_INTERRUPT 0x02 +#define EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_SCI 0x03 +#define EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_NMI 0x04 + +/// +/// Hardware Error Notification Configuration Write Enable Structure Definition +/// +typedef struct { + UINT16 Type:1; + UINT16 PollInterval:1; + UINT16 SwitchToPollingThresholdValue:1; + UINT16 SwitchToPollingThresholdWindow:1; + UINT16 ErrorThresholdValue:1; + UINT16 ErrorThresholdWindow:1; + UINT16 Reserved:10; +} EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE; + +/// +/// Hardware Error Notification Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE ConfigurationWriteEnable; + UINT32 PollInterval; + UINT32 Vector; + UINT32 SwitchToPollingThresholdValue; + UINT32 SwitchToPollingThresholdWindow; + UINT32 ErrorThresholdValue; + UINT32 ErrorThresholdWindow; +} EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE; + +/// +/// IA-32 Architecture Corrected Machine Check Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[3]; +} EFI_ACPI_5_0_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK_STRUCTURE; + +/// +/// IA-32 Architecture NMI Error Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; +} EFI_ACPI_5_0_IA32_ARCHITECTURE_NMI_ERROR_STRUCTURE; + +/// +/// PCI Express Root 
Port AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 RootErrorCommand; +} EFI_ACPI_5_0_PCI_EXPRESS_ROOT_PORT_AER_STRUCTURE; + +/// +/// PCI Express Device AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; +} EFI_ACPI_5_0_PCI_EXPRESS_DEVICE_AER_STRUCTURE; + +/// +/// PCI Express Bridge AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 SecondaryUncorrectableErrorMask; + UINT32 SecondaryUncorrectableErrorSeverity; + UINT32 SecondaryAdvancedErrorCapabilitiesAndControl; +} EFI_ACPI_5_0_PCI_EXPRESS_BRIDGE_AER_STRUCTURE; + +/// +/// Generic Hardware Error Source Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT16 RelatedSourceId; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 
MaxRawDataLength; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE ErrorStatusAddress; + EFI_ACPI_5_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT32 ErrorStatusBlockLength; +} EFI_ACPI_5_0_GENERIC_HARDWARE_ERROR_SOURCE_STRUCTURE; + +/// +/// Generic Error Status Definition +/// +typedef struct { + EFI_ACPI_5_0_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_5_0_GENERIC_ERROR_STATUS_STRUCTURE; + +/// +/// ERST - Error Record Serialization Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 SerializationHeaderSize; + UINT8 Reserved0[4]; + UINT32 InstructionEntryCount; +} EFI_ACPI_5_0_ERROR_RECORD_SERIALIZATION_TABLE_HEADER; + +/// +/// ERST Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_ERROR_RECORD_SERIALIZATION_TABLE_REVISION 0x01 + +/// +/// ERST Serialization Actions +/// +#define EFI_ACPI_5_0_ERST_BEGIN_WRITE_OPERATION 0x00 +#define EFI_ACPI_5_0_ERST_BEGIN_READ_OPERATION 0x01 +#define EFI_ACPI_5_0_ERST_BEGIN_CLEAR_OPERATION 0x02 +#define EFI_ACPI_5_0_ERST_END_OPERATION 0x03 +#define EFI_ACPI_5_0_ERST_SET_RECORD_OFFSET 0x04 +#define EFI_ACPI_5_0_ERST_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_5_0_ERST_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_5_0_ERST_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_5_0_ERST_GET_RECORD_IDENTIFIER 0x08 +#define EFI_ACPI_5_0_ERST_SET_RECORD_IDENTIFIER 0x09 +#define EFI_ACPI_5_0_ERST_GET_RECORD_COUNT 0x0A +#define EFI_ACPI_5_0_ERST_BEGIN_DUMMY_WRITE_OPERATION 0x0B +#define EFI_ACPI_5_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE 0x0D +#define EFI_ACPI_5_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE_LENGTH 0x0E +#define EFI_ACPI_5_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES 0x0F + +/// +/// ERST Action Command Status +/// +#define EFI_ACPI_5_0_ERST_STATUS_SUCCESS 0x00 +#define EFI_ACPI_5_0_ERST_STATUS_NOT_ENOUGH_SPACE 0x01 +#define EFI_ACPI_5_0_ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x02 +#define 
EFI_ACPI_5_0_ERST_STATUS_FAILED 0x03 +#define EFI_ACPI_5_0_ERST_STATUS_RECORD_STORE_EMPTY 0x04 +#define EFI_ACPI_5_0_ERST_STATUS_RECORD_NOT_FOUND 0x05 + +/// +/// ERST Serialization Instructions +/// +#define EFI_ACPI_5_0_ERST_READ_REGISTER 0x00 +#define EFI_ACPI_5_0_ERST_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_5_0_ERST_WRITE_REGISTER 0x02 +#define EFI_ACPI_5_0_ERST_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_5_0_ERST_NOOP 0x04 +#define EFI_ACPI_5_0_ERST_LOAD_VAR1 0x05 +#define EFI_ACPI_5_0_ERST_LOAD_VAR2 0x06 +#define EFI_ACPI_5_0_ERST_STORE_VAR1 0x07 +#define EFI_ACPI_5_0_ERST_ADD 0x08 +#define EFI_ACPI_5_0_ERST_SUBTRACT 0x09 +#define EFI_ACPI_5_0_ERST_ADD_VALUE 0x0A +#define EFI_ACPI_5_0_ERST_SUBTRACT_VALUE 0x0B +#define EFI_ACPI_5_0_ERST_STALL 0x0C +#define EFI_ACPI_5_0_ERST_STALL_WHILE_TRUE 0x0D +#define EFI_ACPI_5_0_ERST_SKIP_NEXT_INSTRUCTION_IF_TRUE 0x0E +#define EFI_ACPI_5_0_ERST_GOTO 0x0F +#define EFI_ACPI_5_0_ERST_SET_SRC_ADDRESS_BASE 0x10 +#define EFI_ACPI_5_0_ERST_SET_DST_ADDRESS_BASE 0x11 +#define EFI_ACPI_5_0_ERST_MOVE_DATA 0x12 + +/// +/// ERST Instruction Flags +/// +#define EFI_ACPI_5_0_ERST_PRESERVE_REGISTER 0x01 + +/// +/// ERST Serialization Instruction Entry +/// +typedef struct { + UINT8 SerializationAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_5_0_ERST_SERIALIZATION_INSTRUCTION_ENTRY; + +/// +/// EINJ - Error Injection Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 InjectionHeaderSize; + UINT8 InjectionFlags; + UINT8 Reserved0[3]; + UINT32 InjectionEntryCount; +} EFI_ACPI_5_0_ERROR_INJECTION_TABLE_HEADER; + +/// +/// EINJ Version (as defined in ACPI 5.0 spec.) 
+/// +#define EFI_ACPI_5_0_ERROR_INJECTION_TABLE_REVISION 0x01 + +/// +/// EINJ Error Injection Actions +/// +#define EFI_ACPI_5_0_EINJ_BEGIN_INJECTION_OPERATION 0x00 +#define EFI_ACPI_5_0_EINJ_GET_TRIGGER_ERROR_ACTION_TABLE 0x01 +#define EFI_ACPI_5_0_EINJ_SET_ERROR_TYPE 0x02 +#define EFI_ACPI_5_0_EINJ_GET_ERROR_TYPE 0x03 +#define EFI_ACPI_5_0_EINJ_END_OPERATION 0x04 +#define EFI_ACPI_5_0_EINJ_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_5_0_EINJ_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_5_0_EINJ_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_5_0_EINJ_TRIGGER_ERROR 0xFF + +/// +/// EINJ Action Command Status +/// +#define EFI_ACPI_5_0_EINJ_STATUS_SUCCESS 0x00 +#define EFI_ACPI_5_0_EINJ_STATUS_UNKNOWN_FAILURE 0x01 +#define EFI_ACPI_5_0_EINJ_STATUS_INVALID_ACCESS 0x02 + +/// +/// EINJ Error Type Definition +/// +#define EFI_ACPI_5_0_EINJ_ERROR_PROCESSOR_CORRECTABLE (1 << 0) +#define EFI_ACPI_5_0_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_NONFATAL (1 << 1) +#define EFI_ACPI_5_0_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_FATAL (1 << 2) +#define EFI_ACPI_5_0_EINJ_ERROR_MEMORY_CORRECTABLE (1 << 3) +#define EFI_ACPI_5_0_EINJ_ERROR_MEMORY_UNCORRECTABLE_NONFATAL (1 << 4) +#define EFI_ACPI_5_0_EINJ_ERROR_MEMORY_UNCORRECTABLE_FATAL (1 << 5) +#define EFI_ACPI_5_0_EINJ_ERROR_PCI_EXPRESS_CORRECTABLE (1 << 6) +#define EFI_ACPI_5_0_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_NONFATAL (1 << 7) +#define EFI_ACPI_5_0_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_FATAL (1 << 8) +#define EFI_ACPI_5_0_EINJ_ERROR_PLATFORM_CORRECTABLE (1 << 9) +#define EFI_ACPI_5_0_EINJ_ERROR_PLATFORM_UNCORRECTABLE_NONFATAL (1 << 10) +#define EFI_ACPI_5_0_EINJ_ERROR_PLATFORM_UNCORRECTABLE_FATAL (1 << 11) + +/// +/// EINJ Injection Instructions +/// +#define EFI_ACPI_5_0_EINJ_READ_REGISTER 0x00 +#define EFI_ACPI_5_0_EINJ_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_5_0_EINJ_WRITE_REGISTER 0x02 +#define EFI_ACPI_5_0_EINJ_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_5_0_EINJ_NOOP 0x04 + +/// +/// EINJ Instruction Flags +/// +#define 
EFI_ACPI_5_0_EINJ_PRESERVE_REGISTER 0x01 + +/// +/// EINJ Injection Instruction Entry +/// +typedef struct { + UINT8 InjectionAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_5_0_EINJ_INJECTION_INSTRUCTION_ENTRY; + +/// +/// EINJ Trigger Action Table +/// +typedef struct { + UINT32 HeaderSize; + UINT32 Revision; + UINT32 TableSize; + UINT32 EntryCount; +} EFI_ACPI_5_0_EINJ_TRIGGER_ACTION_TABLE; + +/// +/// Platform Communications Channel Table (PCCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Flags; + UINT64 Reserved; +} EFI_ACPI_5_0_PLATFORM_COMMUNICATION_CHANNEL_TABLE_HEADER; + +/// +/// PCCT Version (as defined in ACPI 5.0 spec.) +/// +#define EFI_ACPI_5_0_PLATFORM_COMMUNICATION_CHANNEL_TABLE_REVISION 0x01 + +/// +/// PCCT Global Flags +/// +#define EFI_ACPI_5_0_PCCT_FLAGS_SCI_DOORBELL BIT0 + +// +// PCCT Subspace type +// +#define EFI_ACPI_5_0_PCCT_SUBSPACE_TYPE_GENERIC 0x00 + +/// +/// PCC Subspace Structure Header +/// +typedef struct { + UINT8 Type; + UINT8 Length; +} EFI_ACPI_5_0_PCCT_SUBSPACE_HEADER; + +/// +/// Generic Communications Subspace Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[6]; + UINT64 BaseAddress; + UINT64 AddressLength; + EFI_ACPI_5_0_GENERIC_ADDRESS_STRUCTURE DoorbellRegister; + UINT64 DoorbellPreserve; + UINT64 DoorbellWrite; + UINT32 NominalLatency; + UINT32 MaximumPeriodicAccessRate; + UINT16 MinimumRequestTurnaroundTime; +} EFI_ACPI_5_0_PCCT_SUBSPACE_GENERIC; + +/// +/// Generic Communications Channel Shared Memory Region +/// + +typedef struct { + UINT8 Command; + UINT8 Reserved:7; + UINT8 GenerateSci:1; +} EFI_ACPI_5_0_PCCT_GENERIC_SHARED_MEMORY_REGION_COMMAND; + +typedef struct { + UINT8 CommandComplete:1; + UINT8 SciDoorbell:1; + UINT8 Error:1; + UINT8 PlatformNotification:1; + UINT8 Reserved:4; + UINT8 Reserved1; +} 
EFI_ACPI_5_0_PCCT_GENERIC_SHARED_MEMORY_REGION_STATUS; + +typedef struct { + UINT32 Signature; + EFI_ACPI_5_0_PCCT_GENERIC_SHARED_MEMORY_REGION_COMMAND Command; + EFI_ACPI_5_0_PCCT_GENERIC_SHARED_MEMORY_REGION_STATUS Status; +} EFI_ACPI_5_0_PCCT_GENERIC_SHARED_MEMORY_REGION_HEADER; + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer +/// +#define EFI_ACPI_5_0_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "APIC" Multiple APIC Description Table +/// +#define EFI_ACPI_5_0_MULTIPLE_APIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "BERT" Boot Error Record Table +/// +#define EFI_ACPI_5_0_BOOT_ERROR_RECORD_TABLE_SIGNATURE SIGNATURE_32('B', 'E', 'R', 'T') + +/// +/// "BGRT" Boot Graphics Resource Table +/// +#define EFI_ACPI_5_0_BOOT_GRAPHICS_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('B', 'G', 'R', 'T') + +/// +/// "CPEP" Corrected Platform Error Polling Table +/// +#define EFI_ACPI_5_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_SIGNATURE SIGNATURE_32('C', 'P', 'E', 'P') + +/// +/// "DSDT" Differentiated System Description Table +/// +#define EFI_ACPI_5_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "ECDT" Embedded Controller Boot Resources Table +/// +#define EFI_ACPI_5_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_SIGNATURE SIGNATURE_32('E', 'C', 'D', 'T') + +/// +/// "EINJ" Error Injection Table +/// +#define EFI_ACPI_5_0_ERROR_INJECTION_TABLE_SIGNATURE SIGNATURE_32('E', 'I', 'N', 'J') + +/// +/// "ERST" Error Record Serialization Table +/// +#define EFI_ACPI_5_0_ERROR_RECORD_SERIALIZATION_TABLE_SIGNATURE SIGNATURE_32('E', 'R', 'S', 'T') + +/// +/// "FACP" Fixed ACPI Description Table +/// +#define EFI_ACPI_5_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "FACS" Firmware ACPI Control Structure +/// +#define EFI_ACPI_5_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE 
SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "FPDT" Firmware Performance Data Table +/// +#define EFI_ACPI_5_0_FIRMWARE_PERFORMANCE_DATA_TABLE_SIGNATURE SIGNATURE_32('F', 'P', 'D', 'T') + +/// +/// "GTDT" Generic Timer Description Table +/// +#define EFI_ACPI_5_0_GENERIC_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('G', 'T', 'D', 'T') + +/// +/// "HEST" Hardware Error Source Table +/// +#define EFI_ACPI_5_0_HARDWARE_ERROR_SOURCE_TABLE_SIGNATURE SIGNATURE_32('H', 'E', 'S', 'T') + +/// +/// "MPST" Memory Power State Table +/// +#define EFI_ACPI_5_0_MEMORY_POWER_STATE_TABLE_SIGNATURE SIGNATURE_32('M', 'P', 'S', 'T') + +/// +/// "MSCT" Maximum System Characteristics Table +/// +#define EFI_ACPI_5_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'C', 'T') + +/// +/// "PMTT" Platform Memory Topology Table +/// +#define EFI_ACPI_5_0_PLATFORM_MEMORY_TOPOLOGY_TABLE_SIGNATURE SIGNATURE_32('P', 'M', 'T', 'T') + +/// +/// "PSDT" Persistent System Description Table +/// +#define EFI_ACPI_5_0_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RASF" ACPI RAS Feature Table +/// +#define EFI_ACPI_5_0_ACPI_RAS_FEATURE_TABLE_SIGNATURE SIGNATURE_32('R', 'A', 'S', 'F') + +/// +/// "RSDT" Root System Description Table +/// +#define EFI_ACPI_5_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table +/// +#define EFI_ACPI_5_0_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SLIT" System Locality Information Table +/// +#define EFI_ACPI_5_0_SYSTEM_LOCALITY_INFORMATION_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'T') + +/// +/// "SRAT" System Resource Affinity Table +/// +#define EFI_ACPI_5_0_SYSTEM_RESOURCE_AFFINITY_TABLE_SIGNATURE SIGNATURE_32('S', 'R', 'A', 'T') + +/// +/// "SSDT" Secondary System Description Table +/// +#define EFI_ACPI_5_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('S', 'S', 
'D', 'T') + +/// +/// "XSDT" Extended System Description Table +/// +#define EFI_ACPI_5_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('X', 'S', 'D', 'T') + +/// +/// "BOOT" MS Simple Boot Spec +/// +#define EFI_ACPI_5_0_SIMPLE_BOOT_FLAG_TABLE_SIGNATURE SIGNATURE_32('B', 'O', 'O', 'T') + +/// +/// "CSRT" MS Core System Resource Table +/// +#define EFI_ACPI_5_0_CORE_SYSTEM_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('C', 'S', 'R', 'T') + +/// +/// "DBG2" MS Debug Port 2 Spec +/// +#define EFI_ACPI_5_0_DEBUG_PORT_2_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', '2') + +/// +/// "DBGP" MS Debug Port Spec +/// +#define EFI_ACPI_5_0_DEBUG_PORT_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', 'P') + +/// +/// "DMAR" DMA Remapping Table +/// +#define EFI_ACPI_5_0_DMA_REMAPPING_TABLE_SIGNATURE SIGNATURE_32('D', 'M', 'A', 'R') + +/// +/// "DRTM" Dynamic Root of Trust for Measurement Table +/// +#define EFI_ACPI_5_0_DYNAMIC_ROOT_OF_TRUST_FOR_MEASUREMENT_TABLE_SIGNATURE SIGNATURE_32('D', 'R', 'T', 'M') + +/// +/// "ETDT" Event Timer Description Table +/// +#define EFI_ACPI_5_0_EVENT_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('E', 'T', 'D', 'T') + +/// +/// "HPET" IA-PC High Precision Event Timer Table +/// +#define EFI_ACPI_5_0_HIGH_PRECISION_EVENT_TIMER_TABLE_SIGNATURE SIGNATURE_32('H', 'P', 'E', 'T') + +/// +/// "iBFT" iSCSI Boot Firmware Table +/// +#define EFI_ACPI_5_0_ISCSI_BOOT_FIRMWARE_TABLE_SIGNATURE SIGNATURE_32('i', 'B', 'F', 'T') + +/// +/// "IVRS" I/O Virtualization Reporting Structure +/// +#define EFI_ACPI_5_0_IO_VIRTUALIZATION_REPORTING_STRUCTURE_SIGNATURE SIGNATURE_32('I', 'V', 'R', 'S') + +/// +/// "MCFG" PCI Express Memory Mapped Configuration Space Base Address Description Table +/// +#define EFI_ACPI_5_0_PCI_EXPRESS_MEMORY_MAPPED_CONFIGURATION_SPACE_BASE_ADDRESS_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'F', 'G') + +/// +/// "MCHI" Management Controller Host Interface Table +/// +#define 
EFI_ACPI_5_0_MANAGEMENT_CONTROLLER_HOST_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'H', 'I') + +/// +/// "MSDM" MS Data Management Table +/// +#define EFI_ACPI_5_0_DATA_MANAGEMENT_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'D', 'M') + +/// +/// "SLIC" MS Software Licensing Table Specification +/// +#define EFI_ACPI_5_0_SOFTWARE_LICENSING_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'C') + +/// +/// "SPCR" Serial Port Concole Redirection Table +/// +#define EFI_ACPI_5_0_SERIAL_PORT_CONSOLE_REDIRECTION_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'C', 'R') + +/// +/// "SPMI" Server Platform Management Interface Table +/// +#define EFI_ACPI_5_0_SERVER_PLATFORM_MANAGEMENT_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'M', 'I') + +/// +/// "TCPA" Trusted Computing Platform Alliance Capabilities Table +/// +#define EFI_ACPI_5_0_TRUSTED_COMPUTING_PLATFORM_ALLIANCE_CAPABILITIES_TABLE_SIGNATURE SIGNATURE_32('T', 'C', 'P', 'A') + +/// +/// "TPM2" Trusted Computing Platform 1 Table +/// +#define EFI_ACPI_5_0_TRUSTED_COMPUTING_PLATFORM_2_TABLE_SIGNATURE SIGNATURE_32('T', 'P', 'M', '2') + +/// +/// "UEFI" UEFI ACPI Data Table +/// +#define EFI_ACPI_5_0_UEFI_ACPI_DATA_TABLE_SIGNATURE SIGNATURE_32('U', 'E', 'F', 'I') + +/// +/// "WAET" Windows ACPI Emulated Devices Table +/// +#define EFI_ACPI_5_0_WINDOWS_ACPI_EMULATED_DEVICES_TABLE_SIGNATURE SIGNATURE_32('W', 'A', 'E', 'T') +#define EFI_ACPI_5_0_WINDOWS_ACPI_ENLIGHTENMENT_TABLE_SIGNATURE EFI_ACPI_5_0_WINDOWS_ACPI_EMULATED_DEVICES_TABLE_SIGNATURE + +/// +/// "WDAT" Watchdog Action Table +/// +#define EFI_ACPI_5_0_WATCHDOG_ACTION_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'A', 'T') + +/// +/// "WDRT" Watchdog Resource Table +/// +#define EFI_ACPI_5_0_WATCHDOG_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'R', 'T') + +/// +/// "WPBT" MS Platform Binary Table +/// +#define EFI_ACPI_5_0_PLATFORM_BINARY_TABLE_SIGNATURE SIGNATURE_32('W', 'P', 'B', 'T') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi51.h 
b/src/include/ipxe/efi/IndustryStandard/Acpi51.h new file mode 100644 index 00000000..1ca114ca --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi51.h @@ -0,0 +1,2141 @@ +/** @file + ACPI 5.1 definitions from the ACPI Specification Revision 5.1 Errata B January, 2016. + + Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+ Copyright (c) 2014 - 2016, Intel Corporation. All rights reserved.
+ (C) Copyright 2015 Hewlett Packard Enterprise Development LP
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_5_1_H_ +#define _ACPI_5_1_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// ACPI 5.1 Generic Address Space definition +/// +typedef struct { + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 AccessSize; + UINT64 Address; +} EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE; + +// +// Generic Address Space Address IDs +// +#define EFI_ACPI_5_1_SYSTEM_MEMORY 0 +#define EFI_ACPI_5_1_SYSTEM_IO 1 +#define EFI_ACPI_5_1_PCI_CONFIGURATION_SPACE 2 +#define EFI_ACPI_5_1_EMBEDDED_CONTROLLER 3 +#define EFI_ACPI_5_1_SMBUS 4 +#define EFI_ACPI_5_1_PLATFORM_COMMUNICATION_CHANNEL 0x0A +#define EFI_ACPI_5_1_FUNCTIONAL_FIXED_HARDWARE 0x7F + +// +// Generic Address Space Access Sizes +// +#define EFI_ACPI_5_1_UNDEFINED 0 +#define EFI_ACPI_5_1_BYTE 1 +#define EFI_ACPI_5_1_WORD 2 +#define EFI_ACPI_5_1_DWORD 3 +#define EFI_ACPI_5_1_QWORD 4 + +// +// ACPI 5.1 table structures +// + +/// +/// Root System Description Pointer Structure +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Revision; + UINT32 RsdtAddress; + UINT32 Length; + UINT64 XsdtAddress; + UINT8 ExtendedChecksum; + UINT8 Reserved[3]; +} EFI_ACPI_5_1_ROOT_SYSTEM_DESCRIPTION_POINTER; + +/// +/// RSD_PTR Revision (as defined in ACPI 5.1 spec.) 
+/// +#define EFI_ACPI_5_1_ROOT_SYSTEM_DESCRIPTION_POINTER_REVISION 0x02 ///< ACPISpec (Revision 5.1) says current value is 2 + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_5_1_COMMON_HEADER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +// +// Extended System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT64 table pointers. +// + +/// +/// XSDT Revision (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_EXTENDED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 Reserved0; + UINT8 PreferredPmProfile; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 PstateCnt; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmrLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 CstCnt; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT16 IaPcBootArch; + UINT8 Reserved1; + UINT32 Flags; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE ResetReg; + UINT8 ResetValue; + UINT16 ArmBootArch; + UINT8 
MinorVersion; + UINT64 XFirmwareCtrl; + UINT64 XDsdt; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XPm1aEvtBlk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XPm1bEvtBlk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XPm1aCntBlk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XPm1bCntBlk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XPm2CntBlk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XPmTmrBlk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XGpe0Blk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE XGpe1Blk; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE SleepControlReg; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE SleepStatusReg; +} EFI_ACPI_5_1_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x05 +#define EFI_ACPI_5_1_FIXED_ACPI_DESCRIPTION_TABLE_MINOR_REVISION 0x01 + +// +// Fixed ACPI Description Table Preferred Power Management Profile +// +#define EFI_ACPI_5_1_PM_PROFILE_UNSPECIFIED 0 +#define EFI_ACPI_5_1_PM_PROFILE_DESKTOP 1 +#define EFI_ACPI_5_1_PM_PROFILE_MOBILE 2 +#define EFI_ACPI_5_1_PM_PROFILE_WORKSTATION 3 +#define EFI_ACPI_5_1_PM_PROFILE_ENTERPRISE_SERVER 4 +#define EFI_ACPI_5_1_PM_PROFILE_SOHO_SERVER 5 +#define EFI_ACPI_5_1_PM_PROFILE_APPLIANCE_PC 6 +#define EFI_ACPI_5_1_PM_PROFILE_PERFORMANCE_SERVER 7 +#define EFI_ACPI_5_1_PM_PROFILE_TABLET 8 + +// +// Fixed ACPI Description Table Boot Architecture Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_5_1_LEGACY_DEVICES BIT0 +#define EFI_ACPI_5_1_8042 BIT1 +#define EFI_ACPI_5_1_VGA_NOT_PRESENT BIT2 +#define EFI_ACPI_5_1_MSI_NOT_SUPPORTED BIT3 +#define EFI_ACPI_5_1_PCIE_ASPM_CONTROLS BIT4 +#define EFI_ACPI_5_1_CMOS_RTC_NOT_PRESENT BIT5 + +// +// Fixed ACPI Description Table Arm Boot Architecture Flags +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_5_1_ARM_PSCI_COMPLIANT BIT0 +#define EFI_ACPI_5_1_ARM_PSCI_USE_HVC BIT1 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_5_1_WBINVD BIT0 +#define EFI_ACPI_5_1_WBINVD_FLUSH BIT1 +#define EFI_ACPI_5_1_PROC_C1 BIT2 +#define EFI_ACPI_5_1_P_LVL2_UP BIT3 +#define EFI_ACPI_5_1_PWR_BUTTON BIT4 +#define EFI_ACPI_5_1_SLP_BUTTON BIT5 +#define EFI_ACPI_5_1_FIX_RTC BIT6 +#define EFI_ACPI_5_1_RTC_S4 BIT7 +#define EFI_ACPI_5_1_TMR_VAL_EXT BIT8 +#define EFI_ACPI_5_1_DCK_CAP BIT9 +#define EFI_ACPI_5_1_RESET_REG_SUP BIT10 +#define EFI_ACPI_5_1_SEALED_CASE BIT11 +#define EFI_ACPI_5_1_HEADLESS BIT12 +#define EFI_ACPI_5_1_CPU_SW_SLP BIT13 +#define EFI_ACPI_5_1_PCI_EXP_WAK BIT14 +#define EFI_ACPI_5_1_USE_PLATFORM_CLOCK BIT15 +#define EFI_ACPI_5_1_S4_RTC_STS_VALID BIT16 +#define EFI_ACPI_5_1_REMOTE_POWER_ON_CAPABLE BIT17 +#define EFI_ACPI_5_1_FORCE_APIC_CLUSTER_MODEL BIT18 +#define EFI_ACPI_5_1_FORCE_APIC_PHYSICAL_DESTINATION_MODE BIT19 +#define EFI_ACPI_5_1_HW_REDUCED_ACPI BIT20 +#define EFI_ACPI_5_1_LOW_POWER_S0_IDLE_CAPABLE BIT21 + +/// +/// Firmware ACPI Control Structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; + UINT64 XFirmwareWakingVector; + UINT8 Version; + UINT8 Reserved0[3]; + UINT32 OspmFlags; + UINT8 Reserved1[24]; +} EFI_ACPI_5_1_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// FACS Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_FIRMWARE_ACPI_CONTROL_STRUCTURE_VERSION 0x02 + +/// +/// Firmware Control Structure Feature Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_5_1_S4BIOS_F BIT0 +#define EFI_ACPI_5_1_64BIT_WAKE_SUPPORTED_F BIT1 + +/// +/// OSPM Enabled Firmware Control Structure Flags +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_5_1_OSPM_64BIT_WAKE_F BIT0 + +// +// Differentiated System Description Table, +// Secondary System Description Table +// and Persistent System Description Table, +// no definition needed as they are common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a definition block. +// +#define EFI_ACPI_5_1_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 +#define EFI_ACPI_5_1_SECONDARY_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_5_1_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x03 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_5_1_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x0D and 0x7F are reserved and +// will be ignored by OSPM. 0x80 ~ 0xFF are reserved for OEM. 
+// +#define EFI_ACPI_5_1_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_5_1_IO_APIC 0x01 +#define EFI_ACPI_5_1_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_5_1_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_5_1_LOCAL_APIC_NMI 0x04 +#define EFI_ACPI_5_1_LOCAL_APIC_ADDRESS_OVERRIDE 0x05 +#define EFI_ACPI_5_1_IO_SAPIC 0x06 +#define EFI_ACPI_5_1_LOCAL_SAPIC 0x07 +#define EFI_ACPI_5_1_PLATFORM_INTERRUPT_SOURCES 0x08 +#define EFI_ACPI_5_1_PROCESSOR_LOCAL_X2APIC 0x09 +#define EFI_ACPI_5_1_LOCAL_X2APIC_NMI 0x0A +#define EFI_ACPI_5_1_GIC 0x0B +#define EFI_ACPI_5_1_GICD 0x0C +#define EFI_ACPI_5_1_GIC_MSI_FRAME 0x0D +#define EFI_ACPI_5_1_GICR 0x0E + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_5_1_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_1_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 GlobalSystemInterruptBase; +} EFI_ACPI_5_1_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterrupt; + UINT16 Flags; +} EFI_ACPI_5_1_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; + UINT8 CpeiProcessorOverride; + UINT8 Reserved[31]; +} EFI_ACPI_5_1_PLATFORM_INTERRUPT_APIC_STRUCTURE; + +// +// MPS INTI flags. +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_5_1_POLARITY (3 << 0) +#define EFI_ACPI_5_1_TRIGGER_MODE (3 << 2) + +/// +/// Non-Maskable Interrupt Source Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterrupt; +} EFI_ACPI_5_1_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT16 Flags; + UINT8 LocalApicLint; +} EFI_ACPI_5_1_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Local APIC Address Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 LocalApicAddress; +} EFI_ACPI_5_1_LOCAL_APIC_ADDRESS_OVERRIDE_STRUCTURE; + +/// +/// IO SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 GlobalSystemInterruptBase; + UINT64 IoSapicAddress; +} EFI_ACPI_5_1_IO_SAPIC_STRUCTURE; + +/// +/// Local SAPIC Structure +/// This struct followed by a null-terminated ASCII string - ACPI Processor UID String +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 LocalSapicId; + UINT8 LocalSapicEid; + UINT8 Reserved[3]; + UINT32 Flags; + UINT32 ACPIProcessorUIDValue; +} EFI_ACPI_5_1_PROCESSOR_LOCAL_SAPIC_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; +} EFI_ACPI_5_1_PLATFORM_INTERRUPT_SOURCES_STRUCTURE; + +/// +/// Platform Interrupt Source Flags. +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_5_1_CPEI_PROCESSOR_OVERRIDE BIT0 + +/// +/// Processor Local x2APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[2]; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 AcpiProcessorUid; +} EFI_ACPI_5_1_PROCESSOR_LOCAL_X2APIC_STRUCTURE; + +/// +/// Local x2APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 AcpiProcessorUid; + UINT8 LocalX2ApicLint; + UINT8 Reserved[3]; +} EFI_ACPI_5_1_LOCAL_X2APIC_NMI_STRUCTURE; + +/// +/// GIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT32 CPUInterfaceNumber; + UINT32 AcpiProcessorUid; + UINT32 Flags; + UINT32 ParkingProtocolVersion; + UINT32 PerformanceInterruptGsiv; + UINT64 ParkedAddress; + UINT64 PhysicalBaseAddress; + UINT64 GICV; + UINT64 GICH; + UINT32 VGICMaintenanceInterrupt; + UINT64 GICRBaseAddress; + UINT64 MPIDR; +} EFI_ACPI_5_1_GIC_STRUCTURE; + +/// +/// GIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_1_GIC_ENABLED BIT0 +#define EFI_ACPI_5_1_PERFORMANCE_INTERRUPT_MODEL BIT1 +#define EFI_ACPI_5_1_VGIC_MAINTENANCE_INTERRUPT_MODE_FLAGS BIT2 + +/// +/// GIC Distributor Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved1; + UINT32 GicId; + UINT64 PhysicalBaseAddress; + UINT32 SystemVectorBase; + UINT8 GicVersion; + UINT8 Reserved2[3]; +} EFI_ACPI_5_1_GIC_DISTRIBUTOR_STRUCTURE; + +/// +/// GIC Version +/// +#define EFI_ACPI_5_1_GIC_V1 0x01 +#define EFI_ACPI_5_1_GIC_V2 0x02 +#define EFI_ACPI_5_1_GIC_V3 0x03 +#define EFI_ACPI_5_1_GIC_V4 0x04 + +/// +/// GIC MSI Frame Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved1; + UINT32 GicMsiFrameId; + UINT64 PhysicalBaseAddress; + UINT32 Flags; + UINT16 SPICount; + UINT16 SPIBase; +} EFI_ACPI_5_1_GIC_MSI_FRAME_STRUCTURE; + +/// +/// GIC MSI Frame Flags. All other bits are reserved and must be 0. 
+/// +#define EFI_ACPI_5_1_SPI_COUNT_BASE_SELECT BIT0 + +/// +/// GICR Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 DiscoveryRangeBaseAddress; + UINT32 DiscoveryRangeLength; +} EFI_ACPI_5_1_GICR_STRUCTURE; + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_5_1_SMART_BATTERY_DESCRIPTION_TABLE; + +/// +/// SBST Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_SMART_BATTERY_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Embedded Controller Boot Resources Table (ECDT) +/// The table is followed by a null terminated ASCII string that contains +/// a fully qualified reference to the name space object. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE EcControl; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE EcData; + UINT32 Uid; + UINT8 GpeBit; +} EFI_ACPI_5_1_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE; + +/// +/// ECDT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_REVISION 0x01 + +/// +/// System Resource Affinity Table (SRAT). The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved1; ///< Must be set to 1 + UINT64 Reserved2; +} EFI_ACPI_5_1_SYSTEM_RESOURCE_AFFINITY_TABLE_HEADER; + +/// +/// SRAT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_SYSTEM_RESOURCE_AFFINITY_TABLE_REVISION 0x03 + +// +// SRAT structure types. +// All other values between 0x04 an 0xFF are reserved and +// will be ignored by OSPM. 
+// +#define EFI_ACPI_5_1_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY 0x00 +#define EFI_ACPI_5_1_MEMORY_AFFINITY 0x01 +#define EFI_ACPI_5_1_PROCESSOR_LOCAL_X2APIC_AFFINITY 0x02 +#define EFI_ACPI_5_1_GICC_AFFINITY 0x03 + +/// +/// Processor Local APIC/SAPIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProximityDomain7To0; + UINT8 ApicId; + UINT32 Flags; + UINT8 LocalSapicEid; + UINT8 ProximityDomain31To8[3]; + UINT32 ClockDomain; +} EFI_ACPI_5_1_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY_STRUCTURE; + +/// +/// Local APIC/SAPIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_1_PROCESSOR_LOCAL_APIC_SAPIC_ENABLED (1 << 0) + +/// +/// Memory Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT16 Reserved1; + UINT32 AddressBaseLow; + UINT32 AddressBaseHigh; + UINT32 LengthLow; + UINT32 LengthHigh; + UINT32 Reserved2; + UINT32 Flags; + UINT64 Reserved3; +} EFI_ACPI_5_1_MEMORY_AFFINITY_STRUCTURE; + +// +// Memory Flags. All other bits are reserved and must be 0. +// +#define EFI_ACPI_5_1_MEMORY_ENABLED (1 << 0) +#define EFI_ACPI_5_1_MEMORY_HOT_PLUGGABLE (1 << 1) +#define EFI_ACPI_5_1_MEMORY_NONVOLATILE (1 << 2) + +/// +/// Processor Local x2APIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved1[2]; + UINT32 ProximityDomain; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 ClockDomain; + UINT8 Reserved2[4]; +} EFI_ACPI_5_1_PROCESSOR_LOCAL_X2APIC_AFFINITY_STRUCTURE; + +/// +/// GICC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT32 AcpiProcessorUid; + UINT32 Flags; + UINT32 ClockDomain; +} EFI_ACPI_5_1_GICC_AFFINITY_STRUCTURE; + +/// +/// GICC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_1_GICC_ENABLED (1 << 0) + +/// +/// System Locality Distance Information Table (SLIT). +/// The rest of the table is a matrix. 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 NumberOfSystemLocalities; +} EFI_ACPI_5_1_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_HEADER; + +/// +/// SLIT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_REVISION 0x01 + +/// +/// Corrected Platform Error Polling Table (CPEP) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 Reserved[8]; +} EFI_ACPI_5_1_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_HEADER; + +/// +/// CPEP Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_REVISION 0x01 + +// +// CPEP processor structure types. +// +#define EFI_ACPI_5_1_CPEP_PROCESSOR_APIC_SAPIC 0x00 + +/// +/// Corrected Platform Error Polling Processor Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT32 PollingInterval; +} EFI_ACPI_5_1_CPEP_PROCESSOR_APIC_SAPIC_STRUCTURE; + +/// +/// Maximum System Characteristics Table (MSCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 OffsetProxDomInfo; + UINT32 MaximumNumberOfProximityDomains; + UINT32 MaximumNumberOfClockDomains; + UINT64 MaximumPhysicalAddress; +} EFI_ACPI_5_1_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_HEADER; + +/// +/// MSCT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_REVISION 0x01 + +/// +/// Maximum Proximity Domain Information Structure Definition +/// +typedef struct { + UINT8 Revision; + UINT8 Length; + UINT32 ProximityDomainRangeLow; + UINT32 ProximityDomainRangeHigh; + UINT32 MaximumProcessorCapacity; + UINT64 MaximumMemoryCapacity; +} EFI_ACPI_5_1_MAXIMUM_PROXIMITY_DOMAIN_INFORMATION_STRUCTURE; + +/// +/// ACPI RAS Feature Table definition. 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 PlatformCommunicationChannelIdentifier[12]; +} EFI_ACPI_5_1_RAS_FEATURE_TABLE; + +/// +/// RASF Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_RAS_FEATURE_TABLE_REVISION 0x01 + +/// +/// ACPI RASF Platform Communication Channel Shared Memory Region definition. +/// +typedef struct { + UINT32 Signature; + UINT16 Command; + UINT16 Status; + UINT16 Version; + UINT8 RASCapabilities[16]; + UINT8 SetRASCapabilities[16]; + UINT16 NumberOfRASFParameterBlocks; + UINT32 SetRASCapabilitiesStatus; +} EFI_ACPI_5_1_RASF_PLATFORM_COMMUNICATION_CHANNEL_SHARED_MEMORY_REGION; + +/// +/// ACPI RASF PCC command code +/// +#define EFI_ACPI_5_1_RASF_PCC_COMMAND_CODE_EXECUTE_RASF_COMMAND 0x01 + +/// +/// ACPI RASF Platform RAS Capabilities +/// +#define EFI_ACPI_5_1_RASF_PLATFORM_RAS_CAPABILITY_HARDWARE_BASED_PATROL_SCRUB_SUPPOTED 0x01 +#define EFI_ACPI_5_1_RASF_PLATFORM_RAS_CAPABILITY_HARDWARE_BASED_PATROL_SCRUB_SUPPOTED_AND_EXPOSED_TO_SOFTWARE 0x02 + +/// +/// ACPI RASF Parameter Block structure for PATROL_SCRUB +/// +typedef struct { + UINT16 Type; + UINT16 Version; + UINT16 Length; + UINT16 PatrolScrubCommand; + UINT64 RequestedAddressRange[2]; + UINT64 ActualAddressRange[2]; + UINT16 Flags; + UINT8 RequestedSpeed; +} EFI_ACPI_5_1_RASF_PATROL_SCRUB_PLATFORM_BLOCK_STRUCTURE; + +/// +/// ACPI RASF Patrol Scrub command +/// +#define EFI_ACPI_5_1_RASF_PATROL_SCRUB_COMMAND_GET_PATROL_PARAMETERS 0x01 +#define EFI_ACPI_5_1_RASF_PATROL_SCRUB_COMMAND_START_PATROL_SCRUBBER 0x02 +#define EFI_ACPI_5_1_RASF_PATROL_SCRUB_COMMAND_STOP_PATROL_SCRUBBER 0x03 + +/// +/// Memory Power State Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 PlatformCommunicationChannelIdentifier; + UINT8 Reserved[3]; +// Memory Power Node Structure +// Memory Power State Characteristics +} EFI_ACPI_5_1_MEMORY_POWER_STATUS_TABLE; + +/// +/// MPST Version (as defined in ACPI 5.1 spec.) 
+/// +#define EFI_ACPI_5_1_MEMORY_POWER_STATE_TABLE_REVISION 0x01 + +/// +/// MPST Platform Communication Channel Shared Memory Region definition. +/// +typedef struct { + UINT32 Signature; + UINT16 Command; + UINT16 Status; + UINT32 MemoryPowerCommandRegister; + UINT32 MemoryPowerStatusRegister; + UINT32 PowerStateId; + UINT32 MemoryPowerNodeId; + UINT64 MemoryEnergyConsumed; + UINT64 ExpectedAveragePowerComsuned; +} EFI_ACPI_5_1_MPST_PLATFORM_COMMUNICATION_CHANNEL_SHARED_MEMORY_REGION; + +/// +/// ACPI MPST PCC command code +/// +#define EFI_ACPI_5_1_MPST_PCC_COMMAND_CODE_EXECUTE_MPST_COMMAND 0x03 + +/// +/// ACPI MPST Memory Power command +/// +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_COMMAND_GET_MEMORY_POWER_STATE 0x01 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_COMMAND_SET_MEMORY_POWER_STATE 0x02 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_COMMAND_GET_AVERAGE_POWER_CONSUMED 0x03 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_COMMAND_GET_MEMORY_ENERGY_CONSUMED 0x04 + +/// +/// MPST Memory Power Node Table +/// +typedef struct { + UINT8 PowerStateValue; + UINT8 PowerStateInformationIndex; +} EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE; + +typedef struct { + UINT8 Flag; + UINT8 Reserved; + UINT16 MemoryPowerNodeId; + UINT32 Length; + UINT64 AddressBase; + UINT64 AddressLength; + UINT32 NumberOfPowerStates; + UINT32 NumberOfPhysicalComponents; +//EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE MemoryPowerState[NumberOfPowerStates]; +//UINT16 PhysicalComponentIdentifier[NumberOfPhysicalComponents]; +} EFI_ACPI_5_1_MPST_MEMORY_POWER_STRUCTURE; + +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_STRUCTURE_FLAG_ENABLE 0x01 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_STRUCTURE_FLAG_POWER_MANAGED 0x02 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_STRUCTURE_FLAG_HOT_PLUGGABLE 0x04 + +typedef struct { + UINT16 MemoryPowerNodeCount; + UINT8 Reserved[2]; +} EFI_ACPI_5_1_MPST_MEMORY_POWER_NODE_TABLE; + +/// +/// MPST Memory Power State Characteristics Table +/// +typedef struct { + UINT8 PowerStateStructureID; + UINT8 Flag; + 
UINT16 Reserved; + UINT32 AveragePowerConsumedInMPS0; + UINT32 RelativePowerSavingToMPS0; + UINT64 ExitLatencyToMPS0; +} EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE; + +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_MEMORY_CONTENT_PRESERVED 0x01 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_AUTONOMOUS_MEMORY_POWER_STATE_ENTRY 0x02 +#define EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_AUTONOMOUS_MEMORY_POWER_STATE_EXIT 0x04 + +typedef struct { + UINT16 MemoryPowerStateCharacteristicsCount; + UINT8 Reserved[2]; +} EFI_ACPI_5_1_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_TABLE; + +/// +/// Memory Topology Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved; +} EFI_ACPI_5_1_MEMORY_TOPOLOGY_TABLE; + +/// +/// PMTT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_MEMORY_TOPOLOGY_TABLE_REVISION 0x01 + +/// +/// Common Memory Aggregator Device Structure. +/// +typedef struct { + UINT8 Type; + UINT8 Reserved; + UINT16 Length; + UINT16 Flags; + UINT16 Reserved1; +} EFI_ACPI_5_1_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// Memory Aggregator Device Type +/// +#define EFI_ACPI_5_1_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_SOCKET 0x1 +#define EFI_ACPI_5_1_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_MEMORY_CONTROLLER 0x2 +#define EFI_ACPI_5_1_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_DIMM 0x3 + +/// +/// Socket Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_5_1_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT16 SocketIdentifier; + UINT16 Reserved; +//EFI_ACPI_5_1_PMMT_MEMORY_CONTROLLER_MEMORY_AGGREGATOR_DEVICE_STRUCTURE MemoryController[]; +} EFI_ACPI_5_1_PMMT_SOCKET_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// MemoryController Memory Aggregator Device Structure. 
+/// +typedef struct { + EFI_ACPI_5_1_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT32 ReadLatency; + UINT32 WriteLatency; + UINT32 ReadBandwidth; + UINT32 WriteBandwidth; + UINT16 OptimalAccessUnit; + UINT16 OptimalAccessAlignment; + UINT16 Reserved; + UINT16 NumberOfProximityDomains; +//UINT32 ProximityDomain[NumberOfProximityDomains]; +//EFI_ACPI_5_1_PMMT_DIMM_MEMORY_AGGREGATOR_DEVICE_STRUCTURE PhysicalComponent[]; +} EFI_ACPI_5_1_PMMT_MEMORY_CONTROLLER_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// DIMM Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_5_1_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT16 PhysicalComponentIdentifier; + UINT16 Reserved; + UINT32 SizeOfDimm; + UINT32 SmbiosHandle; +} EFI_ACPI_5_1_PMMT_DIMM_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// Boot Graphics Resource Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + /// + /// 2-bytes (16 bit) version ID. This value must be 1. + /// + UINT16 Version; + /// + /// 1-byte status field indicating current status about the table. + /// Bits[7:1] = Reserved (must be zero) + /// Bit [0] = Valid. A one indicates the boot image graphic is valid. + /// + UINT8 Status; + /// + /// 1-byte enumerated type field indicating format of the image. + /// 0 = Bitmap + /// 1 - 255 Reserved (for future use) + /// + UINT8 ImageType; + /// + /// 8-byte (64 bit) physical address pointing to the firmware's in-memory copy + /// of the image bitmap. + /// + UINT64 ImageAddress; + /// + /// A 4-byte (32-bit) unsigned long describing the display X-offset of the boot image. + /// (X, Y) display offset of the top left corner of the boot image. + /// The top left corner of the display is at offset (0, 0). + /// + UINT32 ImageOffsetX; + /// + /// A 4-byte (32-bit) unsigned long describing the display Y-offset of the boot image. + /// (X, Y) display offset of the top left corner of the boot image. 
+ /// The top left corner of the display is at offset (0, 0). + /// + UINT32 ImageOffsetY; +} EFI_ACPI_5_1_BOOT_GRAPHICS_RESOURCE_TABLE; + +/// +/// BGRT Revision +/// +#define EFI_ACPI_5_1_BOOT_GRAPHICS_RESOURCE_TABLE_REVISION 1 + +/// +/// BGRT Version +/// +#define EFI_ACPI_5_1_BGRT_VERSION 0x01 + +/// +/// BGRT Status +/// +#define EFI_ACPI_5_1_BGRT_STATUS_NOT_DISPLAYED 0x00 +#define EFI_ACPI_5_1_BGRT_STATUS_DISPLAYED 0x01 + +/// +/// BGRT Image Type +/// +#define EFI_ACPI_5_1_BGRT_IMAGE_TYPE_BMP 0x00 + +/// +/// FPDT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_FIRMWARE_PERFORMANCE_DATA_TABLE_REVISION 0x01 + +/// +/// FPDT Performance Record Types +/// +#define EFI_ACPI_5_1_FPDT_RECORD_TYPE_FIRMWARE_BASIC_BOOT_POINTER 0x0000 +#define EFI_ACPI_5_1_FPDT_RECORD_TYPE_S3_PERFORMANCE_TABLE_POINTER 0x0001 + +/// +/// FPDT Performance Record Revision +/// +#define EFI_ACPI_5_1_FPDT_RECORD_REVISION_FIRMWARE_BASIC_BOOT_POINTER 0x01 +#define EFI_ACPI_5_1_FPDT_RECORD_REVISION_S3_PERFORMANCE_TABLE_POINTER 0x01 + +/// +/// FPDT Runtime Performance Record Types +/// +#define EFI_ACPI_5_1_FPDT_RUNTIME_RECORD_TYPE_S3_RESUME 0x0000 +#define EFI_ACPI_5_1_FPDT_RUNTIME_RECORD_TYPE_S3_SUSPEND 0x0001 +#define EFI_ACPI_5_1_FPDT_RUNTIME_RECORD_TYPE_FIRMWARE_BASIC_BOOT 0x0002 + +/// +/// FPDT Runtime Performance Record Revision +/// +#define EFI_ACPI_5_1_FPDT_RUNTIME_RECORD_REVISION_S3_RESUME 0x01 +#define EFI_ACPI_5_1_FPDT_RUNTIME_RECORD_REVISION_S3_SUSPEND 0x01 +#define EFI_ACPI_5_1_FPDT_RUNTIME_RECORD_REVISION_FIRMWARE_BASIC_BOOT 0x02 + +/// +/// FPDT Performance Record header +/// +typedef struct { + UINT16 Type; + UINT8 Length; + UINT8 Revision; +} EFI_ACPI_5_1_FPDT_PERFORMANCE_RECORD_HEADER; + +/// +/// FPDT Performance Table header +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_5_1_FPDT_PERFORMANCE_TABLE_HEADER; + +/// +/// FPDT Firmware Basic Boot Performance Pointer Record Structure +/// +typedef struct { + 
EFI_ACPI_5_1_FPDT_PERFORMANCE_RECORD_HEADER Header; + UINT32 Reserved; + /// + /// 64-bit processor-relative physical address of the Basic Boot Performance Table. + /// + UINT64 BootPerformanceTablePointer; +} EFI_ACPI_5_1_FPDT_BOOT_PERFORMANCE_TABLE_POINTER_RECORD; + +/// +/// FPDT S3 Performance Table Pointer Record Structure +/// +typedef struct { + EFI_ACPI_5_1_FPDT_PERFORMANCE_RECORD_HEADER Header; + UINT32 Reserved; + /// + /// 64-bit processor-relative physical address of the S3 Performance Table. + /// + UINT64 S3PerformanceTablePointer; +} EFI_ACPI_5_1_FPDT_S3_PERFORMANCE_TABLE_POINTER_RECORD; + +/// +/// FPDT Firmware Basic Boot Performance Record Structure +/// +typedef struct { + EFI_ACPI_5_1_FPDT_PERFORMANCE_RECORD_HEADER Header; + UINT32 Reserved; + /// + /// Timer value logged at the beginning of firmware image execution. + /// This may not always be zero or near zero. + /// + UINT64 ResetEnd; + /// + /// Timer value logged just prior to loading the OS boot loader into memory. + /// For non-UEFI compatible boots, this field must be zero. + /// + UINT64 OsLoaderLoadImageStart; + /// + /// Timer value logged just prior to launching the previously loaded OS boot loader image. + /// For non-UEFI compatible boots, the timer value logged will be just prior + /// to the INT 19h handler invocation. + /// + UINT64 OsLoaderStartImageStart; + /// + /// Timer value logged at the point when the OS loader calls the + /// ExitBootServices function for UEFI compatible firmware. + /// For non-UEFI compatible boots, this field must be zero. + /// + UINT64 ExitBootServicesEntry; + /// + /// Timer value logged at the point just prior towhen the OS loader gaining + /// control back from calls the ExitBootServices function for UEFI compatible firmware. + /// For non-UEFI compatible boots, this field must be zero. 
+ /// + UINT64 ExitBootServicesExit; +} EFI_ACPI_5_1_FPDT_FIRMWARE_BASIC_BOOT_RECORD; + +/// +/// FPDT Firmware Basic Boot Performance Table signature +/// +#define EFI_ACPI_5_1_FPDT_BOOT_PERFORMANCE_TABLE_SIGNATURE SIGNATURE_32('F', 'B', 'P', 'T') + +// +// FPDT Firmware Basic Boot Performance Table +// +typedef struct { + EFI_ACPI_5_1_FPDT_PERFORMANCE_TABLE_HEADER Header; + // + // one or more Performance Records. + // +} EFI_ACPI_5_1_FPDT_FIRMWARE_BASIC_BOOT_TABLE; + +/// +/// FPDT "S3PT" S3 Performance Table +/// +#define EFI_ACPI_5_1_FPDT_S3_PERFORMANCE_TABLE_SIGNATURE SIGNATURE_32('S', '3', 'P', 'T') + +// +// FPDT Firmware S3 Boot Performance Table +// +typedef struct { + EFI_ACPI_5_1_FPDT_PERFORMANCE_TABLE_HEADER Header; + // + // one or more Performance Records. + // +} EFI_ACPI_5_1_FPDT_FIRMWARE_S3_BOOT_TABLE; + +/// +/// FPDT Basic S3 Resume Performance Record +/// +typedef struct { + EFI_ACPI_5_1_FPDT_PERFORMANCE_RECORD_HEADER Header; + /// + /// A count of the number of S3 resume cycles since the last full boot sequence. + /// + UINT32 ResumeCount; + /// + /// Timer recorded at the end of BIOS S3 resume, just prior to handoff to the + /// OS waking vector. Only the most recent resume cycle's time is retained. + /// + UINT64 FullResume; + /// + /// Average timer value of all resume cycles logged since the last full boot + /// sequence, including the most recent resume. Note that the entire log of + /// timer values does not need to be retained in order to calculate this average. + /// + UINT64 AverageResume; +} EFI_ACPI_5_1_FPDT_S3_RESUME_RECORD; + +/// +/// FPDT Basic S3 Suspend Performance Record +/// +typedef struct { + EFI_ACPI_5_1_FPDT_PERFORMANCE_RECORD_HEADER Header; + /// + /// Timer value recorded at the OS write to SLP_TYP upon entry to S3. + /// Only the most recent suspend cycle's timer value is retained. 
+ /// + UINT64 SuspendStart; + /// + /// Timer value recorded at the final firmware write to SLP_TYP (or other + /// mechanism) used to trigger hardware entry to S3. + /// Only the most recent suspend cycle's timer value is retained. + /// + UINT64 SuspendEnd; +} EFI_ACPI_5_1_FPDT_S3_SUSPEND_RECORD; + +/// +/// Firmware Performance Record Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; +} EFI_ACPI_5_1_FIRMWARE_PERFORMANCE_RECORD_TABLE; + +/// +/// Generic Timer Description Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 CntControlBasePhysicalAddress; + UINT32 Reserved; + UINT32 SecurePL1TimerGSIV; + UINT32 SecurePL1TimerFlags; + UINT32 NonSecurePL1TimerGSIV; + UINT32 NonSecurePL1TimerFlags; + UINT32 VirtualTimerGSIV; + UINT32 VirtualTimerFlags; + UINT32 NonSecurePL2TimerGSIV; + UINT32 NonSecurePL2TimerFlags; + UINT64 CntReadBasePhysicalAddress; + UINT32 PlatformTimerCount; + UINT32 PlatformTimerOffset; +} EFI_ACPI_5_1_GENERIC_TIMER_DESCRIPTION_TABLE; + +/// +/// GTDT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_GENERIC_TIMER_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Timer Flags. All other bits are reserved and must be 0. 
+/// +#define EFI_ACPI_5_1_GTDT_TIMER_FLAG_TIMER_INTERRUPT_MODE BIT0 +#define EFI_ACPI_5_1_GTDT_TIMER_FLAG_TIMER_INTERRUPT_POLARITY BIT1 +#define EFI_ACPI_5_1_GTDT_TIMER_FLAG_ALWAYS_ON_CAPABILITY BIT2 + +/// +/// Platform Timer Type +/// +#define EFI_ACPI_5_1_GTDT_GT_BLOCK 0 +#define EFI_ACPI_5_1_GTDT_SBSA_GENERIC_WATCHDOG 1 + +/// +/// GT Block Structure +/// +typedef struct { + UINT8 Type; + UINT16 Length; + UINT8 Reserved; + UINT64 CntCtlBase; + UINT32 GTBlockTimerCount; + UINT32 GTBlockTimerOffset; +} EFI_ACPI_5_1_GTDT_GT_BLOCK_STRUCTURE; + +/// +/// GT Block Timer Structure +/// +typedef struct { + UINT8 GTFrameNumber; + UINT8 Reserved[3]; + UINT64 CntBaseX; + UINT64 CntEL0BaseX; + UINT32 GTxPhysicalTimerGSIV; + UINT32 GTxPhysicalTimerFlags; + UINT32 GTxVirtualTimerGSIV; + UINT32 GTxVirtualTimerFlags; + UINT32 GTxCommonFlags; +} EFI_ACPI_5_1_GTDT_GT_BLOCK_TIMER_STRUCTURE; + +/// +/// GT Block Physical Timers and Virtual Timers Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_1_GTDT_GT_BLOCK_TIMER_FLAG_TIMER_INTERRUPT_MODE BIT0 +#define EFI_ACPI_5_1_GTDT_GT_BLOCK_TIMER_FLAG_TIMER_INTERRUPT_POLARITY BIT1 + +/// +/// Common Flags Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_5_1_GTDT_GT_BLOCK_COMMON_FLAG_SECURE_TIMER BIT0 +#define EFI_ACPI_5_1_GTDT_GT_BLOCK_COMMON_FLAG_ALWAYS_ON_CAPABILITY BIT1 + +/// +/// SBSA Generic Watchdog Structure +/// +typedef struct { + UINT8 Type; + UINT16 Length; + UINT8 Reserved; + UINT64 RefreshFramePhysicalAddress; + UINT64 WatchdogControlFramePhysicalAddress; + UINT32 WatchdogTimerGSIV; + UINT32 WatchdogTimerFlags; +} EFI_ACPI_5_1_GTDT_SBSA_GENERIC_WATCHDOG_STRUCTURE; + +/// +/// SBSA Generic Watchdog Timer Flags. All other bits are reserved and must be 0. 
+/// +#define EFI_ACPI_5_1_GTDT_SBSA_GENERIC_WATCHDOG_FLAG_TIMER_INTERRUPT_MODE BIT0 +#define EFI_ACPI_5_1_GTDT_SBSA_GENERIC_WATCHDOG_FLAG_TIMER_INTERRUPT_POLARITY BIT1 +#define EFI_ACPI_5_1_GTDT_SBSA_GENERIC_WATCHDOG_FLAG_SECURE_TIMER BIT2 + +/// +/// Boot Error Record Table (BERT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 BootErrorRegionLength; + UINT64 BootErrorRegion; +} EFI_ACPI_5_1_BOOT_ERROR_RECORD_TABLE_HEADER; + +/// +/// BERT Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_BOOT_ERROR_RECORD_TABLE_REVISION 0x01 + +/// +/// Boot Error Region Block Status Definition +/// +typedef struct { + UINT32 UncorrectableErrorValid:1; + UINT32 CorrectableErrorValid:1; + UINT32 MultipleUncorrectableErrors:1; + UINT32 MultipleCorrectableErrors:1; + UINT32 ErrorDataEntryCount:10; + UINT32 Reserved:18; +} EFI_ACPI_5_1_ERROR_BLOCK_STATUS; + +/// +/// Boot Error Region Definition +/// +typedef struct { + EFI_ACPI_5_1_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_5_1_BOOT_ERROR_REGION_STRUCTURE; + +// +// Boot Error Severity types +// +#define EFI_ACPI_5_1_ERROR_SEVERITY_CORRECTABLE 0x00 +#define EFI_ACPI_5_1_ERROR_SEVERITY_FATAL 0x01 +#define EFI_ACPI_5_1_ERROR_SEVERITY_CORRECTED 0x02 +#define EFI_ACPI_5_1_ERROR_SEVERITY_NONE 0x03 + +/// +/// Generic Error Data Entry Definition +/// +typedef struct { + UINT8 SectionType[16]; + UINT32 ErrorSeverity; + UINT16 Revision; + UINT8 ValidationBits; + UINT8 Flags; + UINT32 ErrorDataLength; + UINT8 FruId[16]; + UINT8 FruText[20]; +} EFI_ACPI_5_1_GENERIC_ERROR_DATA_ENTRY_STRUCTURE; + +/// +/// Generic Error Data Entry Version (as defined in ACPI 5.1 spec.) 
+/// +#define EFI_ACPI_5_1_GENERIC_ERROR_DATA_ENTRY_REVISION 0x0201 + +/// +/// HEST - Hardware Error Source Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 ErrorSourceCount; +} EFI_ACPI_5_1_HARDWARE_ERROR_SOURCE_TABLE_HEADER; + +/// +/// HEST Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_HARDWARE_ERROR_SOURCE_TABLE_REVISION 0x01 + +// +// Error Source structure types. +// +#define EFI_ACPI_5_1_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION 0x00 +#define EFI_ACPI_5_1_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK 0x01 +#define EFI_ACPI_5_1_IA32_ARCHITECTURE_NMI_ERROR 0x02 +#define EFI_ACPI_5_1_PCI_EXPRESS_ROOT_PORT_AER 0x06 +#define EFI_ACPI_5_1_PCI_EXPRESS_DEVICE_AER 0x07 +#define EFI_ACPI_5_1_PCI_EXPRESS_BRIDGE_AER 0x08 +#define EFI_ACPI_5_1_GENERIC_HARDWARE_ERROR 0x09 + +// +// Error Source structure flags. +// +#define EFI_ACPI_5_1_ERROR_SOURCE_FLAG_FIRMWARE_FIRST (1 << 0) +#define EFI_ACPI_5_1_ERROR_SOURCE_FLAG_GLOBAL (1 << 1) + +/// +/// IA-32 Architecture Machine Check Exception Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT64 GlobalCapabilityInitData; + UINT64 GlobalControlInitData; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[7]; +} EFI_ACPI_5_1_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure Definition +/// +typedef struct { + UINT8 BankNumber; + UINT8 ClearStatusOnInitialization; + UINT8 StatusDataFormat; + UINT8 Reserved0; + UINT32 ControlRegisterMsrAddress; + UINT64 ControlInitData; + UINT32 StatusRegisterMsrAddress; + UINT32 AddressRegisterMsrAddress; + UINT32 MiscRegisterMsrAddress; +} EFI_ACPI_5_1_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_BANK_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure MCA data format +/// +#define 
EFI_ACPI_5_1_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_IA32 0x00 +#define EFI_ACPI_5_1_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_INTEL64 0x01 +#define EFI_ACPI_5_1_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_AMD64 0x02 + +// +// Hardware Error Notification types. All other values are reserved +// +#define EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_POLLED 0x00 +#define EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_EXTERNAL_INTERRUPT 0x01 +#define EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_LOCAL_INTERRUPT 0x02 +#define EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_SCI 0x03 +#define EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_NMI 0x04 + +/// +/// Hardware Error Notification Configuration Write Enable Structure Definition +/// +typedef struct { + UINT16 Type:1; + UINT16 PollInterval:1; + UINT16 SwitchToPollingThresholdValue:1; + UINT16 SwitchToPollingThresholdWindow:1; + UINT16 ErrorThresholdValue:1; + UINT16 ErrorThresholdWindow:1; + UINT16 Reserved:10; +} EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE; + +/// +/// Hardware Error Notification Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE ConfigurationWriteEnable; + UINT32 PollInterval; + UINT32 Vector; + UINT32 SwitchToPollingThresholdValue; + UINT32 SwitchToPollingThresholdWindow; + UINT32 ErrorThresholdValue; + UINT32 ErrorThresholdWindow; +} EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_STRUCTURE; + +/// +/// IA-32 Architecture Corrected Machine Check Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[3]; +} EFI_ACPI_5_1_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK_STRUCTURE; + +/// +/// IA-32 Architecture NMI 
Error Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; +} EFI_ACPI_5_1_IA32_ARCHITECTURE_NMI_ERROR_STRUCTURE; + +/// +/// PCI Express Root Port AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 RootErrorCommand; +} EFI_ACPI_5_1_PCI_EXPRESS_ROOT_PORT_AER_STRUCTURE; + +/// +/// PCI Express Device AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; +} EFI_ACPI_5_1_PCI_EXPRESS_DEVICE_AER_STRUCTURE; + +/// +/// PCI Express Bridge AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 SecondaryUncorrectableErrorMask; + UINT32 SecondaryUncorrectableErrorSeverity; + UINT32 SecondaryAdvancedErrorCapabilitiesAndControl; +} 
EFI_ACPI_5_1_PCI_EXPRESS_BRIDGE_AER_STRUCTURE; + +/// +/// Generic Hardware Error Source Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT16 RelatedSourceId; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE ErrorStatusAddress; + EFI_ACPI_5_1_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT32 ErrorStatusBlockLength; +} EFI_ACPI_5_1_GENERIC_HARDWARE_ERROR_SOURCE_STRUCTURE; + +/// +/// Generic Error Status Definition +/// +typedef struct { + EFI_ACPI_5_1_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_5_1_GENERIC_ERROR_STATUS_STRUCTURE; + +/// +/// ERST - Error Record Serialization Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 SerializationHeaderSize; + UINT8 Reserved0[4]; + UINT32 InstructionEntryCount; +} EFI_ACPI_5_1_ERROR_RECORD_SERIALIZATION_TABLE_HEADER; + +/// +/// ERST Version (as defined in ACPI 5.1 spec.) 
+/// +#define EFI_ACPI_5_1_ERROR_RECORD_SERIALIZATION_TABLE_REVISION 0x01 + +/// +/// ERST Serialization Actions +/// +#define EFI_ACPI_5_1_ERST_BEGIN_WRITE_OPERATION 0x00 +#define EFI_ACPI_5_1_ERST_BEGIN_READ_OPERATION 0x01 +#define EFI_ACPI_5_1_ERST_BEGIN_CLEAR_OPERATION 0x02 +#define EFI_ACPI_5_1_ERST_END_OPERATION 0x03 +#define EFI_ACPI_5_1_ERST_SET_RECORD_OFFSET 0x04 +#define EFI_ACPI_5_1_ERST_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_5_1_ERST_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_5_1_ERST_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_5_1_ERST_GET_RECORD_IDENTIFIER 0x08 +#define EFI_ACPI_5_1_ERST_SET_RECORD_IDENTIFIER 0x09 +#define EFI_ACPI_5_1_ERST_GET_RECORD_COUNT 0x0A +#define EFI_ACPI_5_1_ERST_BEGIN_DUMMY_WRITE_OPERATION 0x0B +#define EFI_ACPI_5_1_ERST_GET_ERROR_LOG_ADDRESS_RANGE 0x0D +#define EFI_ACPI_5_1_ERST_GET_ERROR_LOG_ADDRESS_RANGE_LENGTH 0x0E +#define EFI_ACPI_5_1_ERST_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES 0x0F + +/// +/// ERST Action Command Status +/// +#define EFI_ACPI_5_1_ERST_STATUS_SUCCESS 0x00 +#define EFI_ACPI_5_1_ERST_STATUS_NOT_ENOUGH_SPACE 0x01 +#define EFI_ACPI_5_1_ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x02 +#define EFI_ACPI_5_1_ERST_STATUS_FAILED 0x03 +#define EFI_ACPI_5_1_ERST_STATUS_RECORD_STORE_EMPTY 0x04 +#define EFI_ACPI_5_1_ERST_STATUS_RECORD_NOT_FOUND 0x05 + +/// +/// ERST Serialization Instructions +/// +#define EFI_ACPI_5_1_ERST_READ_REGISTER 0x00 +#define EFI_ACPI_5_1_ERST_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_5_1_ERST_WRITE_REGISTER 0x02 +#define EFI_ACPI_5_1_ERST_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_5_1_ERST_NOOP 0x04 +#define EFI_ACPI_5_1_ERST_LOAD_VAR1 0x05 +#define EFI_ACPI_5_1_ERST_LOAD_VAR2 0x06 +#define EFI_ACPI_5_1_ERST_STORE_VAR1 0x07 +#define EFI_ACPI_5_1_ERST_ADD 0x08 +#define EFI_ACPI_5_1_ERST_SUBTRACT 0x09 +#define EFI_ACPI_5_1_ERST_ADD_VALUE 0x0A +#define EFI_ACPI_5_1_ERST_SUBTRACT_VALUE 0x0B +#define EFI_ACPI_5_1_ERST_STALL 0x0C +#define EFI_ACPI_5_1_ERST_STALL_WHILE_TRUE 0x0D +#define 
EFI_ACPI_5_1_ERST_SKIP_NEXT_INSTRUCTION_IF_TRUE 0x0E +#define EFI_ACPI_5_1_ERST_GOTO 0x0F +#define EFI_ACPI_5_1_ERST_SET_SRC_ADDRESS_BASE 0x10 +#define EFI_ACPI_5_1_ERST_SET_DST_ADDRESS_BASE 0x11 +#define EFI_ACPI_5_1_ERST_MOVE_DATA 0x12 + +/// +/// ERST Instruction Flags +/// +#define EFI_ACPI_5_1_ERST_PRESERVE_REGISTER 0x01 + +/// +/// ERST Serialization Instruction Entry +/// +typedef struct { + UINT8 SerializationAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_5_1_ERST_SERIALIZATION_INSTRUCTION_ENTRY; + +/// +/// EINJ - Error Injection Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 InjectionHeaderSize; + UINT8 InjectionFlags; + UINT8 Reserved0[3]; + UINT32 InjectionEntryCount; +} EFI_ACPI_5_1_ERROR_INJECTION_TABLE_HEADER; + +/// +/// EINJ Version (as defined in ACPI 5.1 spec.) +/// +#define EFI_ACPI_5_1_ERROR_INJECTION_TABLE_REVISION 0x01 + +/// +/// EINJ Error Injection Actions +/// +#define EFI_ACPI_5_1_EINJ_BEGIN_INJECTION_OPERATION 0x00 +#define EFI_ACPI_5_1_EINJ_GET_TRIGGER_ERROR_ACTION_TABLE 0x01 +#define EFI_ACPI_5_1_EINJ_SET_ERROR_TYPE 0x02 +#define EFI_ACPI_5_1_EINJ_GET_ERROR_TYPE 0x03 +#define EFI_ACPI_5_1_EINJ_END_OPERATION 0x04 +#define EFI_ACPI_5_1_EINJ_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_5_1_EINJ_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_5_1_EINJ_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_5_1_EINJ_TRIGGER_ERROR 0xFF + +/// +/// EINJ Action Command Status +/// +#define EFI_ACPI_5_1_EINJ_STATUS_SUCCESS 0x00 +#define EFI_ACPI_5_1_EINJ_STATUS_UNKNOWN_FAILURE 0x01 +#define EFI_ACPI_5_1_EINJ_STATUS_INVALID_ACCESS 0x02 + +/// +/// EINJ Error Type Definition +/// +#define EFI_ACPI_5_1_EINJ_ERROR_PROCESSOR_CORRECTABLE (1 << 0) +#define EFI_ACPI_5_1_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_NONFATAL (1 << 1) +#define EFI_ACPI_5_1_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_FATAL (1 << 2) +#define 
EFI_ACPI_5_1_EINJ_ERROR_MEMORY_CORRECTABLE (1 << 3) +#define EFI_ACPI_5_1_EINJ_ERROR_MEMORY_UNCORRECTABLE_NONFATAL (1 << 4) +#define EFI_ACPI_5_1_EINJ_ERROR_MEMORY_UNCORRECTABLE_FATAL (1 << 5) +#define EFI_ACPI_5_1_EINJ_ERROR_PCI_EXPRESS_CORRECTABLE (1 << 6) +#define EFI_ACPI_5_1_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_NONFATAL (1 << 7) +#define EFI_ACPI_5_1_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_FATAL (1 << 8) +#define EFI_ACPI_5_1_EINJ_ERROR_PLATFORM_CORRECTABLE (1 << 9) +#define EFI_ACPI_5_1_EINJ_ERROR_PLATFORM_UNCORRECTABLE_NONFATAL (1 << 10) +#define EFI_ACPI_5_1_EINJ_ERROR_PLATFORM_UNCORRECTABLE_FATAL (1 << 11) + +/// +/// EINJ Injection Instructions +/// +#define EFI_ACPI_5_1_EINJ_READ_REGISTER 0x00 +#define EFI_ACPI_5_1_EINJ_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_5_1_EINJ_WRITE_REGISTER 0x02 +#define EFI_ACPI_5_1_EINJ_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_5_1_EINJ_NOOP 0x04 + +/// +/// EINJ Instruction Flags +/// +#define EFI_ACPI_5_1_EINJ_PRESERVE_REGISTER 0x01 + +/// +/// EINJ Injection Instruction Entry +/// +typedef struct { + UINT8 InjectionAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_5_1_EINJ_INJECTION_INSTRUCTION_ENTRY; + +/// +/// EINJ Trigger Action Table +/// +typedef struct { + UINT32 HeaderSize; + UINT32 Revision; + UINT32 TableSize; + UINT32 EntryCount; +} EFI_ACPI_5_1_EINJ_TRIGGER_ACTION_TABLE; + +/// +/// Platform Communications Channel Table (PCCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Flags; + UINT64 Reserved; +} EFI_ACPI_5_1_PLATFORM_COMMUNICATION_CHANNEL_TABLE_HEADER; + +/// +/// PCCT Version (as defined in ACPI 5.1 spec.) 
+/// +#define EFI_ACPI_5_1_PLATFORM_COMMUNICATION_CHANNEL_TABLE_REVISION 0x01 + +/// +/// PCCT Global Flags +/// +#define EFI_ACPI_5_1_PCCT_FLAGS_SCI_DOORBELL BIT0 + +// +// PCCT Subspace type +// +#define EFI_ACPI_5_1_PCCT_SUBSPACE_TYPE_GENERIC 0x00 + +/// +/// PCC Subspace Structure Header +/// +typedef struct { + UINT8 Type; + UINT8 Length; +} EFI_ACPI_5_1_PCCT_SUBSPACE_HEADER; + +/// +/// Generic Communications Subspace Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[6]; + UINT64 BaseAddress; + UINT64 AddressLength; + EFI_ACPI_5_1_GENERIC_ADDRESS_STRUCTURE DoorbellRegister; + UINT64 DoorbellPreserve; + UINT64 DoorbellWrite; + UINT32 NominalLatency; + UINT32 MaximumPeriodicAccessRate; + UINT16 MinimumRequestTurnaroundTime; +} EFI_ACPI_5_1_PCCT_SUBSPACE_GENERIC; + +/// +/// Generic Communications Channel Shared Memory Region +/// + +typedef struct { + UINT8 Command; + UINT8 Reserved:7; + UINT8 GenerateSci:1; +} EFI_ACPI_5_1_PCCT_GENERIC_SHARED_MEMORY_REGION_COMMAND; + +typedef struct { + UINT8 CommandComplete:1; + UINT8 SciDoorbell:1; + UINT8 Error:1; + UINT8 PlatformNotification:1; + UINT8 Reserved:4; + UINT8 Reserved1; +} EFI_ACPI_5_1_PCCT_GENERIC_SHARED_MEMORY_REGION_STATUS; + +typedef struct { + UINT32 Signature; + EFI_ACPI_5_1_PCCT_GENERIC_SHARED_MEMORY_REGION_COMMAND Command; + EFI_ACPI_5_1_PCCT_GENERIC_SHARED_MEMORY_REGION_STATUS Status; +} EFI_ACPI_5_1_PCCT_GENERIC_SHARED_MEMORY_REGION_HEADER; + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer +/// +#define EFI_ACPI_5_1_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "APIC" Multiple APIC Description Table +/// +#define EFI_ACPI_5_1_MULTIPLE_APIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "BERT" Boot Error Record Table +/// +#define EFI_ACPI_5_1_BOOT_ERROR_RECORD_TABLE_SIGNATURE SIGNATURE_32('B', 'E', 'R', 'T') + +/// +/// "BGRT" Boot Graphics 
Resource Table +/// +#define EFI_ACPI_5_1_BOOT_GRAPHICS_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('B', 'G', 'R', 'T') + +/// +/// "CPEP" Corrected Platform Error Polling Table +/// +#define EFI_ACPI_5_1_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_SIGNATURE SIGNATURE_32('C', 'P', 'E', 'P') + +/// +/// "DSDT" Differentiated System Description Table +/// +#define EFI_ACPI_5_1_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "ECDT" Embedded Controller Boot Resources Table +/// +#define EFI_ACPI_5_1_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_SIGNATURE SIGNATURE_32('E', 'C', 'D', 'T') + +/// +/// "EINJ" Error Injection Table +/// +#define EFI_ACPI_5_1_ERROR_INJECTION_TABLE_SIGNATURE SIGNATURE_32('E', 'I', 'N', 'J') + +/// +/// "ERST" Error Record Serialization Table +/// +#define EFI_ACPI_5_1_ERROR_RECORD_SERIALIZATION_TABLE_SIGNATURE SIGNATURE_32('E', 'R', 'S', 'T') + +/// +/// "FACP" Fixed ACPI Description Table +/// +#define EFI_ACPI_5_1_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "FACS" Firmware ACPI Control Structure +/// +#define EFI_ACPI_5_1_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "FPDT" Firmware Performance Data Table +/// +#define EFI_ACPI_5_1_FIRMWARE_PERFORMANCE_DATA_TABLE_SIGNATURE SIGNATURE_32('F', 'P', 'D', 'T') + +/// +/// "GTDT" Generic Timer Description Table +/// +#define EFI_ACPI_5_1_GENERIC_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('G', 'T', 'D', 'T') + +/// +/// "HEST" Hardware Error Source Table +/// +#define EFI_ACPI_5_1_HARDWARE_ERROR_SOURCE_TABLE_SIGNATURE SIGNATURE_32('H', 'E', 'S', 'T') + +/// +/// "MPST" Memory Power State Table +/// +#define EFI_ACPI_5_1_MEMORY_POWER_STATE_TABLE_SIGNATURE SIGNATURE_32('M', 'P', 'S', 'T') + +/// +/// "MSCT" Maximum System Characteristics Table +/// +#define EFI_ACPI_5_1_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'C', 'T') + +/// +/// "PMTT" Platform Memory 
Topology Table +/// +#define EFI_ACPI_5_1_PLATFORM_MEMORY_TOPOLOGY_TABLE_SIGNATURE SIGNATURE_32('P', 'M', 'T', 'T') + +/// +/// "PSDT" Persistent System Description Table +/// +#define EFI_ACPI_5_1_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RASF" ACPI RAS Feature Table +/// +#define EFI_ACPI_5_1_ACPI_RAS_FEATURE_TABLE_SIGNATURE SIGNATURE_32('R', 'A', 'S', 'F') + +/// +/// "RSDT" Root System Description Table +/// +#define EFI_ACPI_5_1_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table +/// +#define EFI_ACPI_5_1_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SLIT" System Locality Information Table +/// +#define EFI_ACPI_5_1_SYSTEM_LOCALITY_INFORMATION_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'T') + +/// +/// "SRAT" System Resource Affinity Table +/// +#define EFI_ACPI_5_1_SYSTEM_RESOURCE_AFFINITY_TABLE_SIGNATURE SIGNATURE_32('S', 'R', 'A', 'T') + +/// +/// "SSDT" Secondary System Description Table +/// +#define EFI_ACPI_5_1_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('S', 'S', 'D', 'T') + +/// +/// "XSDT" Extended System Description Table +/// +#define EFI_ACPI_5_1_EXTENDED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('X', 'S', 'D', 'T') + +/// +/// "BOOT" MS Simple Boot Spec +/// +#define EFI_ACPI_5_1_SIMPLE_BOOT_FLAG_TABLE_SIGNATURE SIGNATURE_32('B', 'O', 'O', 'T') + +/// +/// "CSRT" MS Core System Resource Table +/// +#define EFI_ACPI_5_1_CORE_SYSTEM_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('C', 'S', 'R', 'T') + +/// +/// "DBG2" MS Debug Port 2 Spec +/// +#define EFI_ACPI_5_1_DEBUG_PORT_2_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', '2') + +/// +/// "DBGP" MS Debug Port Spec +/// +#define EFI_ACPI_5_1_DEBUG_PORT_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', 'P') + +/// +/// "DMAR" DMA Remapping Table +/// +#define EFI_ACPI_5_1_DMA_REMAPPING_TABLE_SIGNATURE SIGNATURE_32('D', 'M', 'A', 
'R') + +/// +/// "DRTM" Dynamic Root of Trust for Measurement Table +/// +#define EFI_ACPI_5_1_DYNAMIC_ROOT_OF_TRUST_FOR_MEASUREMENT_TABLE_SIGNATURE SIGNATURE_32('D', 'R', 'T', 'M') + +/// +/// "ETDT" Event Timer Description Table +/// +#define EFI_ACPI_5_1_EVENT_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('E', 'T', 'D', 'T') + +/// +/// "HPET" IA-PC High Precision Event Timer Table +/// +#define EFI_ACPI_5_1_HIGH_PRECISION_EVENT_TIMER_TABLE_SIGNATURE SIGNATURE_32('H', 'P', 'E', 'T') + +/// +/// "iBFT" iSCSI Boot Firmware Table +/// +#define EFI_ACPI_5_1_ISCSI_BOOT_FIRMWARE_TABLE_SIGNATURE SIGNATURE_32('i', 'B', 'F', 'T') + +/// +/// "IVRS" I/O Virtualization Reporting Structure +/// +#define EFI_ACPI_5_1_IO_VIRTUALIZATION_REPORTING_STRUCTURE_SIGNATURE SIGNATURE_32('I', 'V', 'R', 'S') + +/// +/// "LPIT" Low Power Idle Table +/// +#define EFI_ACPI_5_1_IO_LOW_POWER_IDLE_TABLE_STRUCTURE_SIGNATURE SIGNATURE_32('L', 'P', 'I', 'T') + +/// +/// "MCFG" PCI Express Memory Mapped Configuration Space Base Address Description Table +/// +#define EFI_ACPI_5_1_PCI_EXPRESS_MEMORY_MAPPED_CONFIGURATION_SPACE_BASE_ADDRESS_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'F', 'G') + +/// +/// "MCHI" Management Controller Host Interface Table +/// +#define EFI_ACPI_5_1_MANAGEMENT_CONTROLLER_HOST_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'H', 'I') + +/// +/// "MSDM" MS Data Management Table +/// +#define EFI_ACPI_5_1_DATA_MANAGEMENT_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'D', 'M') + +/// +/// "SLIC" MS Software Licensing Table Specification +/// +#define EFI_ACPI_5_1_SOFTWARE_LICENSING_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'C') + +/// +/// "SPCR" Serial Port Concole Redirection Table +/// +#define EFI_ACPI_5_1_SERIAL_PORT_CONSOLE_REDIRECTION_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'C', 'R') + +/// +/// "SPMI" Server Platform Management Interface Table +/// +#define EFI_ACPI_5_1_SERVER_PLATFORM_MANAGEMENT_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'M', 'I') + +/// 
+/// "TCPA" Trusted Computing Platform Alliance Capabilities Table +/// +#define EFI_ACPI_5_1_TRUSTED_COMPUTING_PLATFORM_ALLIANCE_CAPABILITIES_TABLE_SIGNATURE SIGNATURE_32('T', 'C', 'P', 'A') + +/// +/// "TPM2" Trusted Computing Platform 1 Table +/// +#define EFI_ACPI_5_1_TRUSTED_COMPUTING_PLATFORM_2_TABLE_SIGNATURE SIGNATURE_32('T', 'P', 'M', '2') + +/// +/// "UEFI" UEFI ACPI Data Table +/// +#define EFI_ACPI_5_1_UEFI_ACPI_DATA_TABLE_SIGNATURE SIGNATURE_32('U', 'E', 'F', 'I') + +/// +/// "WAET" Windows ACPI Emulated Devices Table +/// +#define EFI_ACPI_5_1_WINDOWS_ACPI_EMULATED_DEVICES_TABLE_SIGNATURE SIGNATURE_32('W', 'A', 'E', 'T') + +/// +/// "WDAT" Watchdog Action Table +/// +#define EFI_ACPI_5_1_WATCHDOG_ACTION_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'A', 'T') + +/// +/// "WDRT" Watchdog Resource Table +/// +#define EFI_ACPI_5_1_WATCHDOG_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'R', 'T') + +/// +/// "WPBT" MS Platform Binary Table +/// +#define EFI_ACPI_5_1_PLATFORM_BINARY_TABLE_SIGNATURE SIGNATURE_32('W', 'P', 'B', 'T') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Acpi60.h b/src/include/ipxe/efi/IndustryStandard/Acpi60.h new file mode 100644 index 00000000..c600735f --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Acpi60.h @@ -0,0 +1,2348 @@ +/** @file + ACPI 6.0 definitions from the ACPI Specification Revision 6.0 Errata A January, 2016. + + Copyright (c) 2015 - 2017, Intel Corporation. All rights reserved.
+ (C) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + +#ifndef _ACPI_6_0_H_ +#define _ACPI_6_0_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Ensure proper structure formats +// +#pragma pack(1) + +/// +/// ACPI 6.0 Generic Address Space definition +/// +typedef struct { + UINT8 AddressSpaceId; + UINT8 RegisterBitWidth; + UINT8 RegisterBitOffset; + UINT8 AccessSize; + UINT64 Address; +} EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE; + +// +// Generic Address Space Address IDs +// +#define EFI_ACPI_6_0_SYSTEM_MEMORY 0 +#define EFI_ACPI_6_0_SYSTEM_IO 1 +#define EFI_ACPI_6_0_PCI_CONFIGURATION_SPACE 2 +#define EFI_ACPI_6_0_EMBEDDED_CONTROLLER 3 +#define EFI_ACPI_6_0_SMBUS 4 +#define EFI_ACPI_6_0_PLATFORM_COMMUNICATION_CHANNEL 0x0A +#define EFI_ACPI_6_0_FUNCTIONAL_FIXED_HARDWARE 0x7F + +// +// Generic Address Space Access Sizes +// +#define EFI_ACPI_6_0_UNDEFINED 0 +#define EFI_ACPI_6_0_BYTE 1 +#define EFI_ACPI_6_0_WORD 2 +#define EFI_ACPI_6_0_DWORD 3 +#define EFI_ACPI_6_0_QWORD 4 + +// +// ACPI 6.0 table structures +// + +/// +/// Root System Description Pointer Structure +/// +typedef struct { + UINT64 Signature; + UINT8 Checksum; + UINT8 OemId[6]; + UINT8 Revision; + UINT32 RsdtAddress; + UINT32 Length; + UINT64 XsdtAddress; + UINT8 ExtendedChecksum; + UINT8 Reserved[3]; +} EFI_ACPI_6_0_ROOT_SYSTEM_DESCRIPTION_POINTER; + +/// +/// RSD_PTR Revision (as defined in ACPI 6.0 spec.) 
+/// +#define EFI_ACPI_6_0_ROOT_SYSTEM_DESCRIPTION_POINTER_REVISION 0x02 ///< ACPISpec (Revision 6.0) says current value is 2 + +/// +/// Common table header, this prefaces all ACPI tables, including FACS, but +/// excluding the RSD PTR structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_6_0_COMMON_HEADER; + +// +// Root System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT32 table pointers. +// + +/// +/// RSDT Revision (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_ROOT_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +// +// Extended System Description Table +// No definition needed as it is a common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a variable number of UINT64 table pointers. +// + +/// +/// XSDT Revision (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Fixed ACPI Description Table Structure (FADT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 FirmwareCtrl; + UINT32 Dsdt; + UINT8 Reserved0; + UINT8 PreferredPmProfile; + UINT16 SciInt; + UINT32 SmiCmd; + UINT8 AcpiEnable; + UINT8 AcpiDisable; + UINT8 S4BiosReq; + UINT8 PstateCnt; + UINT32 Pm1aEvtBlk; + UINT32 Pm1bEvtBlk; + UINT32 Pm1aCntBlk; + UINT32 Pm1bCntBlk; + UINT32 Pm2CntBlk; + UINT32 PmTmrBlk; + UINT32 Gpe0Blk; + UINT32 Gpe1Blk; + UINT8 Pm1EvtLen; + UINT8 Pm1CntLen; + UINT8 Pm2CntLen; + UINT8 PmTmrLen; + UINT8 Gpe0BlkLen; + UINT8 Gpe1BlkLen; + UINT8 Gpe1Base; + UINT8 CstCnt; + UINT16 PLvl2Lat; + UINT16 PLvl3Lat; + UINT16 FlushSize; + UINT16 FlushStride; + UINT8 DutyOffset; + UINT8 DutyWidth; + UINT8 DayAlrm; + UINT8 MonAlrm; + UINT8 Century; + UINT16 IaPcBootArch; + UINT8 Reserved1; + UINT32 Flags; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE ResetReg; + UINT8 ResetValue; + UINT16 ArmBootArch; + UINT8 
MinorVersion; + UINT64 XFirmwareCtrl; + UINT64 XDsdt; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XPm1aEvtBlk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XPm1bEvtBlk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XPm1aCntBlk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XPm1bCntBlk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XPm2CntBlk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XPmTmrBlk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XGpe0Blk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE XGpe1Blk; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE SleepControlReg; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE SleepStatusReg; + UINT64 HypervisorVendorIdentity; +} EFI_ACPI_6_0_FIXED_ACPI_DESCRIPTION_TABLE; + +/// +/// FADT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_FIXED_ACPI_DESCRIPTION_TABLE_REVISION 0x06 +#define EFI_ACPI_6_0_FIXED_ACPI_DESCRIPTION_TABLE_MINOR_REVISION 0x00 + +// +// Fixed ACPI Description Table Preferred Power Management Profile +// +#define EFI_ACPI_6_0_PM_PROFILE_UNSPECIFIED 0 +#define EFI_ACPI_6_0_PM_PROFILE_DESKTOP 1 +#define EFI_ACPI_6_0_PM_PROFILE_MOBILE 2 +#define EFI_ACPI_6_0_PM_PROFILE_WORKSTATION 3 +#define EFI_ACPI_6_0_PM_PROFILE_ENTERPRISE_SERVER 4 +#define EFI_ACPI_6_0_PM_PROFILE_SOHO_SERVER 5 +#define EFI_ACPI_6_0_PM_PROFILE_APPLIANCE_PC 6 +#define EFI_ACPI_6_0_PM_PROFILE_PERFORMANCE_SERVER 7 +#define EFI_ACPI_6_0_PM_PROFILE_TABLET 8 + +// +// Fixed ACPI Description Table Boot Architecture Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_6_0_LEGACY_DEVICES BIT0 +#define EFI_ACPI_6_0_8042 BIT1 +#define EFI_ACPI_6_0_VGA_NOT_PRESENT BIT2 +#define EFI_ACPI_6_0_MSI_NOT_SUPPORTED BIT3 +#define EFI_ACPI_6_0_PCIE_ASPM_CONTROLS BIT4 +#define EFI_ACPI_6_0_CMOS_RTC_NOT_PRESENT BIT5 + +// +// Fixed ACPI Description Table Arm Boot Architecture Flags +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_6_0_ARM_PSCI_COMPLIANT BIT0 +#define EFI_ACPI_6_0_ARM_PSCI_USE_HVC BIT1 + +// +// Fixed ACPI Description Table Fixed Feature Flags +// All other bits are reserved and must be set to 0. +// +#define EFI_ACPI_6_0_WBINVD BIT0 +#define EFI_ACPI_6_0_WBINVD_FLUSH BIT1 +#define EFI_ACPI_6_0_PROC_C1 BIT2 +#define EFI_ACPI_6_0_P_LVL2_UP BIT3 +#define EFI_ACPI_6_0_PWR_BUTTON BIT4 +#define EFI_ACPI_6_0_SLP_BUTTON BIT5 +#define EFI_ACPI_6_0_FIX_RTC BIT6 +#define EFI_ACPI_6_0_RTC_S4 BIT7 +#define EFI_ACPI_6_0_TMR_VAL_EXT BIT8 +#define EFI_ACPI_6_0_DCK_CAP BIT9 +#define EFI_ACPI_6_0_RESET_REG_SUP BIT10 +#define EFI_ACPI_6_0_SEALED_CASE BIT11 +#define EFI_ACPI_6_0_HEADLESS BIT12 +#define EFI_ACPI_6_0_CPU_SW_SLP BIT13 +#define EFI_ACPI_6_0_PCI_EXP_WAK BIT14 +#define EFI_ACPI_6_0_USE_PLATFORM_CLOCK BIT15 +#define EFI_ACPI_6_0_S4_RTC_STS_VALID BIT16 +#define EFI_ACPI_6_0_REMOTE_POWER_ON_CAPABLE BIT17 +#define EFI_ACPI_6_0_FORCE_APIC_CLUSTER_MODEL BIT18 +#define EFI_ACPI_6_0_FORCE_APIC_PHYSICAL_DESTINATION_MODE BIT19 +#define EFI_ACPI_6_0_HW_REDUCED_ACPI BIT20 +#define EFI_ACPI_6_0_LOW_POWER_S0_IDLE_CAPABLE BIT21 + +/// +/// Firmware ACPI Control Structure +/// +typedef struct { + UINT32 Signature; + UINT32 Length; + UINT32 HardwareSignature; + UINT32 FirmwareWakingVector; + UINT32 GlobalLock; + UINT32 Flags; + UINT64 XFirmwareWakingVector; + UINT8 Version; + UINT8 Reserved0[3]; + UINT32 OspmFlags; + UINT8 Reserved1[24]; +} EFI_ACPI_6_0_FIRMWARE_ACPI_CONTROL_STRUCTURE; + +/// +/// FACS Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_VERSION 0x02 + +/// +/// Firmware Control Structure Feature Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_6_0_S4BIOS_F BIT0 +#define EFI_ACPI_6_0_64BIT_WAKE_SUPPORTED_F BIT1 + +/// +/// OSPM Enabled Firmware Control Structure Flags +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_6_0_OSPM_64BIT_WAKE_F BIT0 + +// +// Differentiated System Description Table, +// Secondary System Description Table +// and Persistent System Description Table, +// no definition needed as they are common description table header, the same with +// EFI_ACPI_DESCRIPTION_HEADER, followed by a definition block. +// +#define EFI_ACPI_6_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 +#define EFI_ACPI_6_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Multiple APIC Description Table header definition. The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 LocalApicAddress; + UINT32 Flags; +} EFI_ACPI_6_0_MULTIPLE_APIC_DESCRIPTION_TABLE_HEADER; + +/// +/// MADT Revision (as defined in ACPI 6.0 Errata A spec.) +/// +#define EFI_ACPI_6_0_MULTIPLE_APIC_DESCRIPTION_TABLE_REVISION 0x04 + +/// +/// Multiple APIC Flags +/// All other bits are reserved and must be set to 0. +/// +#define EFI_ACPI_6_0_PCAT_COMPAT BIT0 + +// +// Multiple APIC Description Table APIC structure types +// All other values between 0x0D and 0x7F are reserved and +// will be ignored by OSPM. 0x80 ~ 0xFF are reserved for OEM. 
+// +#define EFI_ACPI_6_0_PROCESSOR_LOCAL_APIC 0x00 +#define EFI_ACPI_6_0_IO_APIC 0x01 +#define EFI_ACPI_6_0_INTERRUPT_SOURCE_OVERRIDE 0x02 +#define EFI_ACPI_6_0_NON_MASKABLE_INTERRUPT_SOURCE 0x03 +#define EFI_ACPI_6_0_LOCAL_APIC_NMI 0x04 +#define EFI_ACPI_6_0_LOCAL_APIC_ADDRESS_OVERRIDE 0x05 +#define EFI_ACPI_6_0_IO_SAPIC 0x06 +#define EFI_ACPI_6_0_LOCAL_SAPIC 0x07 +#define EFI_ACPI_6_0_PLATFORM_INTERRUPT_SOURCES 0x08 +#define EFI_ACPI_6_0_PROCESSOR_LOCAL_X2APIC 0x09 +#define EFI_ACPI_6_0_LOCAL_X2APIC_NMI 0x0A +#define EFI_ACPI_6_0_GIC 0x0B +#define EFI_ACPI_6_0_GICD 0x0C +#define EFI_ACPI_6_0_GIC_MSI_FRAME 0x0D +#define EFI_ACPI_6_0_GICR 0x0E +#define EFI_ACPI_6_0_GIC_ITS 0x0F + +// +// APIC Structure Definitions +// + +/// +/// Processor Local APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorUid; + UINT8 ApicId; + UINT32 Flags; +} EFI_ACPI_6_0_PROCESSOR_LOCAL_APIC_STRUCTURE; + +/// +/// Local APIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_6_0_LOCAL_APIC_ENABLED BIT0 + +/// +/// IO APIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 IoApicAddress; + UINT32 GlobalSystemInterruptBase; +} EFI_ACPI_6_0_IO_APIC_STRUCTURE; + +/// +/// Interrupt Source Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Bus; + UINT8 Source; + UINT32 GlobalSystemInterrupt; + UINT16 Flags; +} EFI_ACPI_6_0_INTERRUPT_SOURCE_OVERRIDE_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; + UINT8 CpeiProcessorOverride; + UINT8 Reserved[31]; +} EFI_ACPI_6_0_PLATFORM_INTERRUPT_APIC_STRUCTURE; + +// +// MPS INTI flags. +// All other bits are reserved and must be set to 0. 
+// +#define EFI_ACPI_6_0_POLARITY (3 << 0) +#define EFI_ACPI_6_0_TRIGGER_MODE (3 << 2) + +/// +/// Non-Maskable Interrupt Source Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 GlobalSystemInterrupt; +} EFI_ACPI_6_0_NON_MASKABLE_INTERRUPT_SOURCE_STRUCTURE; + +/// +/// Local APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorUid; + UINT16 Flags; + UINT8 LocalApicLint; +} EFI_ACPI_6_0_LOCAL_APIC_NMI_STRUCTURE; + +/// +/// Local APIC Address Override Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 LocalApicAddress; +} EFI_ACPI_6_0_LOCAL_APIC_ADDRESS_OVERRIDE_STRUCTURE; + +/// +/// IO SAPIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 IoApicId; + UINT8 Reserved; + UINT32 GlobalSystemInterruptBase; + UINT64 IoSapicAddress; +} EFI_ACPI_6_0_IO_SAPIC_STRUCTURE; + +/// +/// Local SAPIC Structure +/// This struct followed by a null-terminated ASCII string - ACPI Processor UID String +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 AcpiProcessorId; + UINT8 LocalSapicId; + UINT8 LocalSapicEid; + UINT8 Reserved[3]; + UINT32 Flags; + UINT32 ACPIProcessorUIDValue; +} EFI_ACPI_6_0_PROCESSOR_LOCAL_SAPIC_STRUCTURE; + +/// +/// Platform Interrupt Sources Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT8 InterruptType; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT8 IoSapicVector; + UINT32 GlobalSystemInterrupt; + UINT32 PlatformInterruptSourceFlags; +} EFI_ACPI_6_0_PLATFORM_INTERRUPT_SOURCES_STRUCTURE; + +/// +/// Platform Interrupt Source Flags. +/// All other bits are reserved and must be set to 0. 
+/// +#define EFI_ACPI_6_0_CPEI_PROCESSOR_OVERRIDE BIT0 + +/// +/// Processor Local x2APIC Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[2]; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 AcpiProcessorUid; +} EFI_ACPI_6_0_PROCESSOR_LOCAL_X2APIC_STRUCTURE; + +/// +/// Local x2APIC NMI Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Flags; + UINT32 AcpiProcessorUid; + UINT8 LocalX2ApicLint; + UINT8 Reserved[3]; +} EFI_ACPI_6_0_LOCAL_X2APIC_NMI_STRUCTURE; + +/// +/// GIC Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT32 CPUInterfaceNumber; + UINT32 AcpiProcessorUid; + UINT32 Flags; + UINT32 ParkingProtocolVersion; + UINT32 PerformanceInterruptGsiv; + UINT64 ParkedAddress; + UINT64 PhysicalBaseAddress; + UINT64 GICV; + UINT64 GICH; + UINT32 VGICMaintenanceInterrupt; + UINT64 GICRBaseAddress; + UINT64 MPIDR; + UINT8 ProcessorPowerEfficiencyClass; + UINT8 Reserved2[3]; +} EFI_ACPI_6_0_GIC_STRUCTURE; + +/// +/// GIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_6_0_GIC_ENABLED BIT0 +#define EFI_ACPI_6_0_PERFORMANCE_INTERRUPT_MODEL BIT1 +#define EFI_ACPI_6_0_VGIC_MAINTENANCE_INTERRUPT_MODE_FLAGS BIT2 + +/// +/// GIC Distributor Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved1; + UINT32 GicId; + UINT64 PhysicalBaseAddress; + UINT32 SystemVectorBase; + UINT8 GicVersion; + UINT8 Reserved2[3]; +} EFI_ACPI_6_0_GIC_DISTRIBUTOR_STRUCTURE; + +/// +/// GIC Version +/// +#define EFI_ACPI_6_0_GIC_V1 0x01 +#define EFI_ACPI_6_0_GIC_V2 0x02 +#define EFI_ACPI_6_0_GIC_V3 0x03 +#define EFI_ACPI_6_0_GIC_V4 0x04 + +/// +/// GIC MSI Frame Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved1; + UINT32 GicMsiFrameId; + UINT64 PhysicalBaseAddress; + UINT32 Flags; + UINT16 SPICount; + UINT16 SPIBase; +} EFI_ACPI_6_0_GIC_MSI_FRAME_STRUCTURE; + +/// +/// GIC MSI Frame Flags. 
All other bits are reserved and must be 0. +/// +#define EFI_ACPI_6_0_SPI_COUNT_BASE_SELECT BIT0 + +/// +/// GICR Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT64 DiscoveryRangeBaseAddress; + UINT32 DiscoveryRangeLength; +} EFI_ACPI_6_0_GICR_STRUCTURE; + +/// +/// GIC Interrupt Translation Service Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT16 Reserved; + UINT32 GicItsId; + UINT64 PhysicalBaseAddress; + UINT32 Reserved2; +} EFI_ACPI_6_0_GIC_ITS_STRUCTURE; + +/// +/// Smart Battery Description Table (SBST) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 WarningEnergyLevel; + UINT32 LowEnergyLevel; + UINT32 CriticalEnergyLevel; +} EFI_ACPI_6_0_SMART_BATTERY_DESCRIPTION_TABLE; + +/// +/// SBST Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_SMART_BATTERY_DESCRIPTION_TABLE_REVISION 0x01 + +/// +/// Embedded Controller Boot Resources Table (ECDT) +/// The table is followed by a null terminated ASCII string that contains +/// a fully qualified reference to the name space object. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE EcControl; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE EcData; + UINT32 Uid; + UINT8 GpeBit; +} EFI_ACPI_6_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE; + +/// +/// ECDT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_REVISION 0x01 + +/// +/// System Resource Affinity Table (SRAT). The rest of the table +/// must be defined in a platform specific manner. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved1; ///< Must be set to 1 + UINT64 Reserved2; +} EFI_ACPI_6_0_SYSTEM_RESOURCE_AFFINITY_TABLE_HEADER; + +/// +/// SRAT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_SYSTEM_RESOURCE_AFFINITY_TABLE_REVISION 0x03 + +// +// SRAT structure types. 
+// All other values between 0x04 an 0xFF are reserved and +// will be ignored by OSPM. +// +#define EFI_ACPI_6_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY 0x00 +#define EFI_ACPI_6_0_MEMORY_AFFINITY 0x01 +#define EFI_ACPI_6_0_PROCESSOR_LOCAL_X2APIC_AFFINITY 0x02 +#define EFI_ACPI_6_0_GICC_AFFINITY 0x03 + +/// +/// Processor Local APIC/SAPIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProximityDomain7To0; + UINT8 ApicId; + UINT32 Flags; + UINT8 LocalSapicEid; + UINT8 ProximityDomain31To8[3]; + UINT32 ClockDomain; +} EFI_ACPI_6_0_PROCESSOR_LOCAL_APIC_SAPIC_AFFINITY_STRUCTURE; + +/// +/// Local APIC/SAPIC Flags. All other bits are reserved and must be 0. +/// +#define EFI_ACPI_6_0_PROCESSOR_LOCAL_APIC_SAPIC_ENABLED (1 << 0) + +/// +/// Memory Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT16 Reserved1; + UINT32 AddressBaseLow; + UINT32 AddressBaseHigh; + UINT32 LengthLow; + UINT32 LengthHigh; + UINT32 Reserved2; + UINT32 Flags; + UINT64 Reserved3; +} EFI_ACPI_6_0_MEMORY_AFFINITY_STRUCTURE; + +// +// Memory Flags. All other bits are reserved and must be 0. +// +#define EFI_ACPI_6_0_MEMORY_ENABLED (1 << 0) +#define EFI_ACPI_6_0_MEMORY_HOT_PLUGGABLE (1 << 1) +#define EFI_ACPI_6_0_MEMORY_NONVOLATILE (1 << 2) + +/// +/// Processor Local x2APIC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved1[2]; + UINT32 ProximityDomain; + UINT32 X2ApicId; + UINT32 Flags; + UINT32 ClockDomain; + UINT8 Reserved2[4]; +} EFI_ACPI_6_0_PROCESSOR_LOCAL_X2APIC_AFFINITY_STRUCTURE; + +/// +/// GICC Affinity Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT32 ProximityDomain; + UINT32 AcpiProcessorUid; + UINT32 Flags; + UINT32 ClockDomain; +} EFI_ACPI_6_0_GICC_AFFINITY_STRUCTURE; + +/// +/// GICC Flags. All other bits are reserved and must be 0. 
+/// +#define EFI_ACPI_6_0_GICC_ENABLED (1 << 0) + +/// +/// System Locality Distance Information Table (SLIT). +/// The rest of the table is a matrix. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 NumberOfSystemLocalities; +} EFI_ACPI_6_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_HEADER; + +/// +/// SLIT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_SYSTEM_LOCALITY_DISTANCE_INFORMATION_TABLE_REVISION 0x01 + +/// +/// Corrected Platform Error Polling Table (CPEP) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 Reserved[8]; +} EFI_ACPI_6_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_HEADER; + +/// +/// CPEP Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_REVISION 0x01 + +// +// CPEP processor structure types. +// +#define EFI_ACPI_6_0_CPEP_PROCESSOR_APIC_SAPIC 0x00 + +/// +/// Corrected Platform Error Polling Processor Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 ProcessorId; + UINT8 ProcessorEid; + UINT32 PollingInterval; +} EFI_ACPI_6_0_CPEP_PROCESSOR_APIC_SAPIC_STRUCTURE; + +/// +/// Maximum System Characteristics Table (MSCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 OffsetProxDomInfo; + UINT32 MaximumNumberOfProximityDomains; + UINT32 MaximumNumberOfClockDomains; + UINT64 MaximumPhysicalAddress; +} EFI_ACPI_6_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_HEADER; + +/// +/// MSCT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_REVISION 0x01 + +/// +/// Maximum Proximity Domain Information Structure Definition +/// +typedef struct { + UINT8 Revision; + UINT8 Length; + UINT32 ProximityDomainRangeLow; + UINT32 ProximityDomainRangeHigh; + UINT32 MaximumProcessorCapacity; + UINT64 MaximumMemoryCapacity; +} EFI_ACPI_6_0_MAXIMUM_PROXIMITY_DOMAIN_INFORMATION_STRUCTURE; + +/// +/// ACPI RAS Feature Table definition. 
+/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 PlatformCommunicationChannelIdentifier[12]; +} EFI_ACPI_6_0_RAS_FEATURE_TABLE; + +/// +/// RASF Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_RAS_FEATURE_TABLE_REVISION 0x01 + +/// +/// ACPI RASF Platform Communication Channel Shared Memory Region definition. +/// +typedef struct { + UINT32 Signature; + UINT16 Command; + UINT16 Status; + UINT16 Version; + UINT8 RASCapabilities[16]; + UINT8 SetRASCapabilities[16]; + UINT16 NumberOfRASFParameterBlocks; + UINT32 SetRASCapabilitiesStatus; +} EFI_ACPI_6_0_RASF_PLATFORM_COMMUNICATION_CHANNEL_SHARED_MEMORY_REGION; + +/// +/// ACPI RASF PCC command code +/// +#define EFI_ACPI_6_0_RASF_PCC_COMMAND_CODE_EXECUTE_RASF_COMMAND 0x01 + +/// +/// ACPI RASF Platform RAS Capabilities +/// +#define EFI_ACPI_6_0_RASF_PLATFORM_RAS_CAPABILITY_HARDWARE_BASED_PATROL_SCRUB_SUPPOTED 0x01 +#define EFI_ACPI_6_0_RASF_PLATFORM_RAS_CAPABILITY_HARDWARE_BASED_PATROL_SCRUB_SUPPOTED_AND_EXPOSED_TO_SOFTWARE 0x02 + +/// +/// ACPI RASF Parameter Block structure for PATROL_SCRUB +/// +typedef struct { + UINT16 Type; + UINT16 Version; + UINT16 Length; + UINT16 PatrolScrubCommand; + UINT64 RequestedAddressRange[2]; + UINT64 ActualAddressRange[2]; + UINT16 Flags; + UINT8 RequestedSpeed; +} EFI_ACPI_6_0_RASF_PATROL_SCRUB_PLATFORM_BLOCK_STRUCTURE; + +/// +/// ACPI RASF Patrol Scrub command +/// +#define EFI_ACPI_6_0_RASF_PATROL_SCRUB_COMMAND_GET_PATROL_PARAMETERS 0x01 +#define EFI_ACPI_6_0_RASF_PATROL_SCRUB_COMMAND_START_PATROL_SCRUBBER 0x02 +#define EFI_ACPI_6_0_RASF_PATROL_SCRUB_COMMAND_STOP_PATROL_SCRUBBER 0x03 + +/// +/// Memory Power State Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT8 PlatformCommunicationChannelIdentifier; + UINT8 Reserved[3]; +// Memory Power Node Structure +// Memory Power State Characteristics +} EFI_ACPI_6_0_MEMORY_POWER_STATUS_TABLE; + +/// +/// MPST Version (as defined in ACPI 6.0 spec.) 
+/// +#define EFI_ACPI_6_0_MEMORY_POWER_STATE_TABLE_REVISION 0x01 + +/// +/// MPST Platform Communication Channel Shared Memory Region definition. +/// +typedef struct { + UINT32 Signature; + UINT16 Command; + UINT16 Status; + UINT32 MemoryPowerCommandRegister; + UINT32 MemoryPowerStatusRegister; + UINT32 PowerStateId; + UINT32 MemoryPowerNodeId; + UINT64 MemoryEnergyConsumed; + UINT64 ExpectedAveragePowerComsuned; +} EFI_ACPI_6_0_MPST_PLATFORM_COMMUNICATION_CHANNEL_SHARED_MEMORY_REGION; + +/// +/// ACPI MPST PCC command code +/// +#define EFI_ACPI_6_0_MPST_PCC_COMMAND_CODE_EXECUTE_MPST_COMMAND 0x03 + +/// +/// ACPI MPST Memory Power command +/// +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_COMMAND_GET_MEMORY_POWER_STATE 0x01 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_COMMAND_SET_MEMORY_POWER_STATE 0x02 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_COMMAND_GET_AVERAGE_POWER_CONSUMED 0x03 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_COMMAND_GET_MEMORY_ENERGY_CONSUMED 0x04 + +/// +/// MPST Memory Power Node Table +/// +typedef struct { + UINT8 PowerStateValue; + UINT8 PowerStateInformationIndex; +} EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE; + +typedef struct { + UINT8 Flag; + UINT8 Reserved; + UINT16 MemoryPowerNodeId; + UINT32 Length; + UINT64 AddressBase; + UINT64 AddressLength; + UINT32 NumberOfPowerStates; + UINT32 NumberOfPhysicalComponents; +//EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE MemoryPowerState[NumberOfPowerStates]; +//UINT16 PhysicalComponentIdentifier[NumberOfPhysicalComponents]; +} EFI_ACPI_6_0_MPST_MEMORY_POWER_STRUCTURE; + +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_STRUCTURE_FLAG_ENABLE 0x01 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_STRUCTURE_FLAG_POWER_MANAGED 0x02 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_STRUCTURE_FLAG_HOT_PLUGGABLE 0x04 + +typedef struct { + UINT16 MemoryPowerNodeCount; + UINT8 Reserved[2]; +} EFI_ACPI_6_0_MPST_MEMORY_POWER_NODE_TABLE; + +/// +/// MPST Memory Power State Characteristics Table +/// +typedef struct { + UINT8 PowerStateStructureID; + UINT8 Flag; + 
UINT16 Reserved; + UINT32 AveragePowerConsumedInMPS0; + UINT32 RelativePowerSavingToMPS0; + UINT64 ExitLatencyToMPS0; +} EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE; + +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_MEMORY_CONTENT_PRESERVED 0x01 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_AUTONOMOUS_MEMORY_POWER_STATE_ENTRY 0x02 +#define EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_STRUCTURE_FLAG_AUTONOMOUS_MEMORY_POWER_STATE_EXIT 0x04 + +typedef struct { + UINT16 MemoryPowerStateCharacteristicsCount; + UINT8 Reserved[2]; +} EFI_ACPI_6_0_MPST_MEMORY_POWER_STATE_CHARACTERISTICS_TABLE; + +/// +/// Memory Topology Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved; +} EFI_ACPI_6_0_MEMORY_TOPOLOGY_TABLE; + +/// +/// PMTT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_MEMORY_TOPOLOGY_TABLE_REVISION 0x01 + +/// +/// Common Memory Aggregator Device Structure. +/// +typedef struct { + UINT8 Type; + UINT8 Reserved; + UINT16 Length; + UINT16 Flags; + UINT16 Reserved1; +} EFI_ACPI_6_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// Memory Aggregator Device Type +/// +#define EFI_ACPI_6_0_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_SOCKET 0x1 +#define EFI_ACPI_6_0_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_MEMORY_CONTROLLER 0x2 +#define EFI_ACPI_6_0_PMMT_MEMORY_AGGREGATOR_DEVICE_TYPE_DIMM 0x3 + +/// +/// Socket Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_6_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT16 SocketIdentifier; + UINT16 Reserved; +//EFI_ACPI_6_0_PMMT_MEMORY_CONTROLLER_MEMORY_AGGREGATOR_DEVICE_STRUCTURE MemoryController[]; +} EFI_ACPI_6_0_PMMT_SOCKET_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// MemoryController Memory Aggregator Device Structure. 
+/// +typedef struct { + EFI_ACPI_6_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT32 ReadLatency; + UINT32 WriteLatency; + UINT32 ReadBandwidth; + UINT32 WriteBandwidth; + UINT16 OptimalAccessUnit; + UINT16 OptimalAccessAlignment; + UINT16 Reserved; + UINT16 NumberOfProximityDomains; +//UINT32 ProximityDomain[NumberOfProximityDomains]; +//EFI_ACPI_6_0_PMMT_DIMM_MEMORY_AGGREGATOR_DEVICE_STRUCTURE PhysicalComponent[]; +} EFI_ACPI_6_0_PMMT_MEMORY_CONTROLLER_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// DIMM Memory Aggregator Device Structure. +/// +typedef struct { + EFI_ACPI_6_0_PMMT_COMMON_MEMORY_AGGREGATOR_DEVICE_STRUCTURE Header; + UINT16 PhysicalComponentIdentifier; + UINT16 Reserved; + UINT32 SizeOfDimm; + UINT32 SmbiosHandle; +} EFI_ACPI_6_0_PMMT_DIMM_MEMORY_AGGREGATOR_DEVICE_STRUCTURE; + +/// +/// Boot Graphics Resource Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + /// + /// 2-bytes (16 bit) version ID. This value must be 1. + /// + UINT16 Version; + /// + /// 1-byte status field indicating current status about the table. + /// Bits[7:1] = Reserved (must be zero) + /// Bit [0] = Valid. A one indicates the boot image graphic is valid. + /// + UINT8 Status; + /// + /// 1-byte enumerated type field indicating format of the image. + /// 0 = Bitmap + /// 1 - 255 Reserved (for future use) + /// + UINT8 ImageType; + /// + /// 8-byte (64 bit) physical address pointing to the firmware's in-memory copy + /// of the image bitmap. + /// + UINT64 ImageAddress; + /// + /// A 4-byte (32-bit) unsigned long describing the display X-offset of the boot image. + /// (X, Y) display offset of the top left corner of the boot image. + /// The top left corner of the display is at offset (0, 0). + /// + UINT32 ImageOffsetX; + /// + /// A 4-byte (32-bit) unsigned long describing the display Y-offset of the boot image. + /// (X, Y) display offset of the top left corner of the boot image. 
+ /// The top left corner of the display is at offset (0, 0). + /// + UINT32 ImageOffsetY; +} EFI_ACPI_6_0_BOOT_GRAPHICS_RESOURCE_TABLE; + +/// +/// BGRT Revision +/// +#define EFI_ACPI_6_0_BOOT_GRAPHICS_RESOURCE_TABLE_REVISION 1 + +/// +/// BGRT Version +/// +#define EFI_ACPI_6_0_BGRT_VERSION 0x01 + +/// +/// BGRT Status +/// +#define EFI_ACPI_6_0_BGRT_STATUS_NOT_DISPLAYED 0x00 +#define EFI_ACPI_6_0_BGRT_STATUS_DISPLAYED 0x01 + +/// +/// BGRT Image Type +/// +#define EFI_ACPI_6_0_BGRT_IMAGE_TYPE_BMP 0x00 + +/// +/// FPDT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_FIRMWARE_PERFORMANCE_DATA_TABLE_REVISION 0x01 + +/// +/// FPDT Performance Record Types +/// +#define EFI_ACPI_6_0_FPDT_RECORD_TYPE_FIRMWARE_BASIC_BOOT_POINTER 0x0000 +#define EFI_ACPI_6_0_FPDT_RECORD_TYPE_S3_PERFORMANCE_TABLE_POINTER 0x0001 + +/// +/// FPDT Performance Record Revision +/// +#define EFI_ACPI_6_0_FPDT_RECORD_REVISION_FIRMWARE_BASIC_BOOT_POINTER 0x01 +#define EFI_ACPI_6_0_FPDT_RECORD_REVISION_S3_PERFORMANCE_TABLE_POINTER 0x01 + +/// +/// FPDT Runtime Performance Record Types +/// +#define EFI_ACPI_6_0_FPDT_RUNTIME_RECORD_TYPE_S3_RESUME 0x0000 +#define EFI_ACPI_6_0_FPDT_RUNTIME_RECORD_TYPE_S3_SUSPEND 0x0001 +#define EFI_ACPI_6_0_FPDT_RUNTIME_RECORD_TYPE_FIRMWARE_BASIC_BOOT 0x0002 + +/// +/// FPDT Runtime Performance Record Revision +/// +#define EFI_ACPI_6_0_FPDT_RUNTIME_RECORD_REVISION_S3_RESUME 0x01 +#define EFI_ACPI_6_0_FPDT_RUNTIME_RECORD_REVISION_S3_SUSPEND 0x01 +#define EFI_ACPI_6_0_FPDT_RUNTIME_RECORD_REVISION_FIRMWARE_BASIC_BOOT 0x02 + +/// +/// FPDT Performance Record header +/// +typedef struct { + UINT16 Type; + UINT8 Length; + UINT8 Revision; +} EFI_ACPI_6_0_FPDT_PERFORMANCE_RECORD_HEADER; + +/// +/// FPDT Performance Table header +/// +typedef struct { + UINT32 Signature; + UINT32 Length; +} EFI_ACPI_6_0_FPDT_PERFORMANCE_TABLE_HEADER; + +/// +/// FPDT Firmware Basic Boot Performance Pointer Record Structure +/// +typedef struct { + 
EFI_ACPI_6_0_FPDT_PERFORMANCE_RECORD_HEADER Header;
+ UINT32 Reserved;
+ ///
+ /// 64-bit processor-relative physical address of the Basic Boot Performance Table.
+ ///
+ UINT64 BootPerformanceTablePointer;
+} EFI_ACPI_6_0_FPDT_BOOT_PERFORMANCE_TABLE_POINTER_RECORD;
+
+///
+/// FPDT S3 Performance Table Pointer Record Structure
+///
+typedef struct {
+ EFI_ACPI_6_0_FPDT_PERFORMANCE_RECORD_HEADER Header;
+ UINT32 Reserved;
+ ///
+ /// 64-bit processor-relative physical address of the S3 Performance Table.
+ ///
+ UINT64 S3PerformanceTablePointer;
+} EFI_ACPI_6_0_FPDT_S3_PERFORMANCE_TABLE_POINTER_RECORD;
+
+///
+/// FPDT Firmware Basic Boot Performance Record Structure
+///
+typedef struct {
+ EFI_ACPI_6_0_FPDT_PERFORMANCE_RECORD_HEADER Header;
+ UINT32 Reserved;
+ ///
+ /// Timer value logged at the beginning of firmware image execution.
+ /// This may not always be zero or near zero.
+ ///
+ UINT64 ResetEnd;
+ ///
+ /// Timer value logged just prior to loading the OS boot loader into memory.
+ /// For non-UEFI compatible boots, this field must be zero.
+ ///
+ UINT64 OsLoaderLoadImageStart;
+ ///
+ /// Timer value logged just prior to launching the previously loaded OS boot loader image.
+ /// For non-UEFI compatible boots, the timer value logged will be just prior
+ /// to the INT 19h handler invocation.
+ ///
+ UINT64 OsLoaderStartImageStart;
+ ///
+ /// Timer value logged at the point when the OS loader calls the
+ /// ExitBootServices function for UEFI compatible firmware.
+ /// For non-UEFI compatible boots, this field must be zero.
+ ///
+ UINT64 ExitBootServicesEntry;
+ ///
+ /// Timer value logged at the point just prior to when the OS loader regains
+ /// control after calling the ExitBootServices function, for UEFI compatible firmware.
+ /// For non-UEFI compatible boots, this field must be zero.
+ /// + UINT64 ExitBootServicesExit; +} EFI_ACPI_6_0_FPDT_FIRMWARE_BASIC_BOOT_RECORD; + +/// +/// FPDT Firmware Basic Boot Performance Table signature +/// +#define EFI_ACPI_6_0_FPDT_BOOT_PERFORMANCE_TABLE_SIGNATURE SIGNATURE_32('F', 'B', 'P', 'T') + +// +// FPDT Firmware Basic Boot Performance Table +// +typedef struct { + EFI_ACPI_6_0_FPDT_PERFORMANCE_TABLE_HEADER Header; + // + // one or more Performance Records. + // +} EFI_ACPI_6_0_FPDT_FIRMWARE_BASIC_BOOT_TABLE; + +/// +/// FPDT "S3PT" S3 Performance Table +/// +#define EFI_ACPI_6_0_FPDT_S3_PERFORMANCE_TABLE_SIGNATURE SIGNATURE_32('S', '3', 'P', 'T') + +// +// FPDT Firmware S3 Boot Performance Table +// +typedef struct { + EFI_ACPI_6_0_FPDT_PERFORMANCE_TABLE_HEADER Header; + // + // one or more Performance Records. + // +} EFI_ACPI_6_0_FPDT_FIRMWARE_S3_BOOT_TABLE; + +/// +/// FPDT Basic S3 Resume Performance Record +/// +typedef struct { + EFI_ACPI_6_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + /// + /// A count of the number of S3 resume cycles since the last full boot sequence. + /// + UINT32 ResumeCount; + /// + /// Timer recorded at the end of BIOS S3 resume, just prior to handoff to the + /// OS waking vector. Only the most recent resume cycle's time is retained. + /// + UINT64 FullResume; + /// + /// Average timer value of all resume cycles logged since the last full boot + /// sequence, including the most recent resume. Note that the entire log of + /// timer values does not need to be retained in order to calculate this average. + /// + UINT64 AverageResume; +} EFI_ACPI_6_0_FPDT_S3_RESUME_RECORD; + +/// +/// FPDT Basic S3 Suspend Performance Record +/// +typedef struct { + EFI_ACPI_6_0_FPDT_PERFORMANCE_RECORD_HEADER Header; + /// + /// Timer value recorded at the OS write to SLP_TYP upon entry to S3. + /// Only the most recent suspend cycle's timer value is retained. 
+ /// + UINT64 SuspendStart; + /// + /// Timer value recorded at the final firmware write to SLP_TYP (or other + /// mechanism) used to trigger hardware entry to S3. + /// Only the most recent suspend cycle's timer value is retained. + /// + UINT64 SuspendEnd; +} EFI_ACPI_6_0_FPDT_S3_SUSPEND_RECORD; + +/// +/// Firmware Performance Record Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; +} EFI_ACPI_6_0_FIRMWARE_PERFORMANCE_RECORD_TABLE; + +/// +/// Generic Timer Description Table definition. +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT64 CntControlBasePhysicalAddress; + UINT32 Reserved; + UINT32 SecurePL1TimerGSIV; + UINT32 SecurePL1TimerFlags; + UINT32 NonSecurePL1TimerGSIV; + UINT32 NonSecurePL1TimerFlags; + UINT32 VirtualTimerGSIV; + UINT32 VirtualTimerFlags; + UINT32 NonSecurePL2TimerGSIV; + UINT32 NonSecurePL2TimerFlags; + UINT64 CntReadBasePhysicalAddress; + UINT32 PlatformTimerCount; + UINT32 PlatformTimerOffset; +} EFI_ACPI_6_0_GENERIC_TIMER_DESCRIPTION_TABLE; + +/// +/// GTDT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_GENERIC_TIMER_DESCRIPTION_TABLE_REVISION 0x02 + +/// +/// Timer Flags. All other bits are reserved and must be 0. 
+///
+#define EFI_ACPI_6_0_GTDT_TIMER_FLAG_TIMER_INTERRUPT_MODE BIT0
+#define EFI_ACPI_6_0_GTDT_TIMER_FLAG_TIMER_INTERRUPT_POLARITY BIT1
+#define EFI_ACPI_6_0_GTDT_TIMER_FLAG_ALWAYS_ON_CAPABILITY BIT2
+
+///
+/// Platform Timer Type
+///
+#define EFI_ACPI_6_0_GTDT_GT_BLOCK 0
+#define EFI_ACPI_6_0_GTDT_SBSA_GENERIC_WATCHDOG 1
+
+///
+/// GT Block Structure
+///
+typedef struct {
+ UINT8 Type;
+ UINT16 Length;
+ UINT8 Reserved;
+ UINT64 CntCtlBase;
+ UINT32 GTBlockTimerCount;
+ UINT32 GTBlockTimerOffset;
+} EFI_ACPI_6_0_GTDT_GT_BLOCK_STRUCTURE;
+
+///
+/// GT Block Timer Structure
+///
+typedef struct {
+ UINT8 GTFrameNumber;
+ UINT8 Reserved[3];
+ UINT64 CntBaseX;
+ UINT64 CntEL0BaseX;
+ UINT32 GTxPhysicalTimerGSIV;
+ UINT32 GTxPhysicalTimerFlags;
+ UINT32 GTxVirtualTimerGSIV;
+ UINT32 GTxVirtualTimerFlags;
+ UINT32 GTxCommonFlags;
+} EFI_ACPI_6_0_GTDT_GT_BLOCK_TIMER_STRUCTURE;
+
+///
+/// GT Block Physical Timers and Virtual Timers Flags. All other bits are reserved and must be 0.
+///
+#define EFI_ACPI_6_0_GTDT_GT_BLOCK_TIMER_FLAG_TIMER_INTERRUPT_MODE BIT0
+#define EFI_ACPI_6_0_GTDT_GT_BLOCK_TIMER_FLAG_TIMER_INTERRUPT_POLARITY BIT1
+
+///
+/// Common Flags. All other bits are reserved and must be 0.
+///
+#define EFI_ACPI_6_0_GTDT_GT_BLOCK_COMMON_FLAG_SECURE_TIMER BIT0
+#define EFI_ACPI_6_0_GTDT_GT_BLOCK_COMMON_FLAG_ALWAYS_ON_CAPABILITY BIT1
+
+///
+/// SBSA Generic Watchdog Structure
+///
+typedef struct {
+ UINT8 Type;
+ UINT16 Length;
+ UINT8 Reserved;
+ UINT64 RefreshFramePhysicalAddress;
+ UINT64 WatchdogControlFramePhysicalAddress;
+ UINT32 WatchdogTimerGSIV;
+ UINT32 WatchdogTimerFlags;
+} EFI_ACPI_6_0_GTDT_SBSA_GENERIC_WATCHDOG_STRUCTURE;
+
+///
+/// SBSA Generic Watchdog Timer Flags. All other bits are reserved and must be 0.
+/// +#define EFI_ACPI_6_0_GTDT_SBSA_GENERIC_WATCHDOG_FLAG_TIMER_INTERRUPT_MODE BIT0 +#define EFI_ACPI_6_0_GTDT_SBSA_GENERIC_WATCHDOG_FLAG_TIMER_INTERRUPT_POLARITY BIT1 +#define EFI_ACPI_6_0_GTDT_SBSA_GENERIC_WATCHDOG_FLAG_SECURE_TIMER BIT2 + +// +// NVDIMM Firmware Interface Table definition. +// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Reserved; +} EFI_ACPI_6_0_NVDIMM_FIRMWARE_INTERFACE_TABLE; + +// +// NFIT Version (as defined in ACPI 6.0 spec.) +// +#define EFI_ACPI_6_0_NVDIMM_FIRMWARE_INTERFACE_TABLE_REVISION 0x1 + +// +// Definition for NFIT Table Structure Types +// +#define EFI_ACPI_6_0_NFIT_SYSTEM_PHYSICAL_ADDRESS_RANGE_STRUCTURE_TYPE 0 +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_TO_SYSTEM_ADDRESS_RANGE_MAP_STRUCTURE_TYPE 1 +#define EFI_ACPI_6_0_NFIT_INTERLEAVE_STRUCTURE_TYPE 2 +#define EFI_ACPI_6_0_NFIT_SMBIOS_MANAGEMENT_INFORMATION_STRUCTURE_TYPE 3 +#define EFI_ACPI_6_0_NFIT_NVDIMM_CONTROL_REGION_STRUCTURE_TYPE 4 +#define EFI_ACPI_6_0_NFIT_NVDIMM_BLOCK_DATA_WINDOW_REGION_STRUCTURE_TYPE 5 +#define EFI_ACPI_6_0_NFIT_FLUSH_HINT_ADDRESS_STRUCTURE_TYPE 6 + +// +// Definition for NFIT Structure Header +// +typedef struct { + UINT16 Type; + UINT16 Length; +} EFI_ACPI_6_0_NFIT_STRUCTURE_HEADER; + +// +// Definition for System Physical Address Range Structure +// +#define EFI_ACPI_6_0_NFIT_SYSTEM_PHYSICAL_ADDRESS_RANGE_FLAGS_CONTROL_REGION_FOR_MANAGEMENT BIT0 +#define EFI_ACPI_6_0_NFIT_SYSTEM_PHYSICAL_ADDRESS_RANGE_FLAGS_PROXIMITY_DOMAIN_VALID BIT1 +#define EFI_ACPI_6_0_NFIT_GUID_VOLATILE_MEMORY_REGION { 0x7305944F, 0xFDDA, 0x44E3, { 0xB1, 0x6C, 0x3F, 0x22, 0xD2, 0x52, 0xE5, 0xD0 }} +#define EFI_ACPI_6_0_NFIT_GUID_BYTE_ADDRESSABLE_PERSISTENT_MEMORY_REGION { 0x66F0D379, 0xB4F3, 0x4074, { 0xAC, 0x43, 0x0D, 0x33, 0x18, 0xB7, 0x8C, 0xDB }} +#define EFI_ACPI_6_0_NFIT_GUID_NVDIMM_CONTROL_REGION { 0x92F701F6, 0x13B4, 0x405D, { 0x91, 0x0B, 0x29, 0x93, 0x67, 0xE8, 0x23, 0x4C }} +#define EFI_ACPI_6_0_NFIT_GUID_NVDIMM_BLOCK_DATA_WINDOW_REGION { 
0x91AF0530, 0x5D86, 0x470E, { 0xA6, 0xB0, 0x0A, 0x2D, 0xB9, 0x40, 0x82, 0x49 }} +#define EFI_ACPI_6_0_NFIT_GUID_RAM_DISK_SUPPORTING_VIRTUAL_DISK_REGION_VOLATILE { 0x77AB535A, 0x45FC, 0x624B, { 0x55, 0x60, 0xF7, 0xB2, 0x81, 0xD1, 0xF9, 0x6E }} +#define EFI_ACPI_6_0_NFIT_GUID_RAM_DISK_SUPPORTING_VIRTUAL_CD_REGION_VOLATILE { 0x3D5ABD30, 0x4175, 0x87CE, { 0x6D, 0x64, 0xD2, 0xAD, 0xE5, 0x23, 0xC4, 0xBB }} +#define EFI_ACPI_6_0_NFIT_GUID_RAM_DISK_SUPPORTING_VIRTUAL_DISK_REGION_PERSISTENT { 0x5CEA02C9, 0x4D07, 0x69D3, { 0x26, 0x9F ,0x44, 0x96, 0xFB, 0xE0, 0x96, 0xF9 }} +#define EFI_ACPI_6_0_NFIT_GUID_RAM_DISK_SUPPORTING_VIRTUAL_CD_REGION_PERSISTENT { 0x08018188, 0x42CD, 0xBB48, { 0x10, 0x0F, 0x53, 0x87, 0xD5, 0x3D, 0xED, 0x3D }} +typedef struct { + UINT16 Type; + UINT16 Length; + UINT16 SPARangeStructureIndex; + UINT16 Flags; + UINT32 Reserved_8; + UINT32 ProximityDomain; + GUID AddressRangeTypeGUID; + UINT64 SystemPhysicalAddressRangeBase; + UINT64 SystemPhysicalAddressRangeLength; + UINT64 AddressRangeMemoryMappingAttribute; +} EFI_ACPI_6_0_NFIT_SYSTEM_PHYSICAL_ADDRESS_RANGE_STRUCTURE; + +// +// Definition for Memory Device to System Physical Address Range Mapping Structure +// +typedef struct { + UINT32 DIMMNumber:4; + UINT32 MemoryChannelNumber:4; + UINT32 MemoryControllerID:4; + UINT32 SocketID:4; + UINT32 NodeControllerID:12; + UINT32 Reserved_28:4; +} EFI_ACPI_6_0_NFIT_DEVICE_HANDLE; + +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_STATE_FLAGS_PREVIOUS_SAVE_FAIL BIT0 +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_STATE_FLAGS_LAST_RESTORE_FAIL BIT1 +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_STATE_FLAGS_PLATFORM_FLUSH_FAIL BIT2 +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_STATE_FLAGS_NOT_ARMED_PRIOR_TO_OSPM_HAND_OFF BIT3 +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_STATE_FLAGS_SMART_HEALTH_EVENTS_PRIOR_OSPM_HAND_OFF BIT4 +#define EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_STATE_FLAGS_FIRMWARE_ENABLED_TO_NOTIFY_OSPM_ON_SMART_HEALTH_EVENTS BIT5 +typedef struct { + UINT16 Type; + UINT16 Length; + 
EFI_ACPI_6_0_NFIT_DEVICE_HANDLE NFITDeviceHandle; + UINT16 MemoryDevicePhysicalID; + UINT16 MemoryDeviceRegionID; + UINT16 SPARangeStructureIndex ; + UINT16 NVDIMMControlRegionStructureIndex; + UINT64 MemoryDeviceRegionSize; + UINT64 RegionOffset; + UINT64 MemoryDevicePhysicalAddressRegionBase; + UINT16 InterleaveStructureIndex; + UINT16 InterleaveWays; + UINT16 MemoryDeviceStateFlags; + UINT16 Reserved_46; +} EFI_ACPI_6_0_NFIT_MEMORY_DEVICE_TO_SYSTEM_ADDRESS_RANGE_MAP_STRUCTURE; + +// +// Definition for Interleave Structure +// +typedef struct { + UINT16 Type; + UINT16 Length; + UINT16 InterleaveStructureIndex; + UINT16 Reserved_6; + UINT32 NumberOfLines; + UINT32 LineSize; +//UINT32 LineOffset[NumberOfLines]; +} EFI_ACPI_6_0_NFIT_INTERLEAVE_STRUCTURE; + +// +// Definition for SMBIOS Management Information Structure +// +typedef struct { + UINT16 Type; + UINT16 Length; + UINT32 Reserved_4; +//UINT8 Data[]; +} EFI_ACPI_6_0_NFIT_SMBIOS_MANAGEMENT_INFORMATION_STRUCTURE; + +// +// Definition for NVDIMM Control Region Structure +// +#define EFI_ACPI_6_0_NFIT_NVDIMM_CONTROL_REGION_FLAGS_BLOCK_DATA_WINDOWS_BUFFERED BIT0 +typedef struct { + UINT16 Type; + UINT16 Length; + UINT16 NVDIMMControlRegionStructureIndex; + UINT16 VendorID; + UINT16 DeviceID; + UINT16 RevisionID; + UINT16 SubsystemVendorID; + UINT16 SubsystemDeviceID; + UINT16 SubsystemRevisionID; + UINT8 Reserved_18[6]; + UINT32 SerialNumber; + UINT16 RegionFormatInterfaceCode; + UINT16 NumberOfBlockControlWindows; + UINT64 SizeOfBlockControlWindow; + UINT64 CommandRegisterOffsetInBlockControlWindow; + UINT64 SizeOfCommandRegisterInBlockControlWindows; + UINT64 StatusRegisterOffsetInBlockControlWindow; + UINT64 SizeOfStatusRegisterInBlockControlWindows; + UINT16 NVDIMMControlRegionFlag; + UINT8 Reserved_74[6]; +} EFI_ACPI_6_0_NFIT_NVDIMM_CONTROL_REGION_STRUCTURE; + +// +// Definition for NVDIMM Block Data Window Region Structure +// +typedef struct { + UINT16 Type; + UINT16 Length; + UINT16 
NVDIMMControlRegionStructureIndex; + UINT16 NumberOfBlockDataWindows; + UINT64 BlockDataWindowStartOffset; + UINT64 SizeOfBlockDataWindow; + UINT64 BlockAccessibleMemoryCapacity; + UINT64 BeginningAddressOfFirstBlockInBlockAccessibleMemory; +} EFI_ACPI_6_0_NFIT_NVDIMM_BLOCK_DATA_WINDOW_REGION_STRUCTURE; + +// +// Definition for Flush Hint Address Structure +// +typedef struct { + UINT16 Type; + UINT16 Length; + EFI_ACPI_6_0_NFIT_DEVICE_HANDLE NFITDeviceHandle; + UINT16 NumberOfFlushHintAddresses; + UINT8 Reserved_10[6]; +//UINT64 FlushHintAddress[NumberOfFlushHintAddresses]; +} EFI_ACPI_6_0_NFIT_FLUSH_HINT_ADDRESS_STRUCTURE; + +/// +/// Boot Error Record Table (BERT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 BootErrorRegionLength; + UINT64 BootErrorRegion; +} EFI_ACPI_6_0_BOOT_ERROR_RECORD_TABLE_HEADER; + +/// +/// BERT Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_BOOT_ERROR_RECORD_TABLE_REVISION 0x01 + +/// +/// Boot Error Region Block Status Definition +/// +typedef struct { + UINT32 UncorrectableErrorValid:1; + UINT32 CorrectableErrorValid:1; + UINT32 MultipleUncorrectableErrors:1; + UINT32 MultipleCorrectableErrors:1; + UINT32 ErrorDataEntryCount:10; + UINT32 Reserved:18; +} EFI_ACPI_6_0_ERROR_BLOCK_STATUS; + +/// +/// Boot Error Region Definition +/// +typedef struct { + EFI_ACPI_6_0_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_6_0_BOOT_ERROR_REGION_STRUCTURE; + +// +// Boot Error Severity types +// +#define EFI_ACPI_6_0_ERROR_SEVERITY_CORRECTABLE 0x00 +#define EFI_ACPI_6_0_ERROR_SEVERITY_FATAL 0x01 +#define EFI_ACPI_6_0_ERROR_SEVERITY_CORRECTED 0x02 +#define EFI_ACPI_6_0_ERROR_SEVERITY_NONE 0x03 + +/// +/// Generic Error Data Entry Definition +/// +typedef struct { + UINT8 SectionType[16]; + UINT32 ErrorSeverity; + UINT16 Revision; + UINT8 ValidationBits; + UINT8 Flags; + UINT32 ErrorDataLength; + UINT8 FruId[16]; + 
UINT8 FruText[20]; +} EFI_ACPI_6_0_GENERIC_ERROR_DATA_ENTRY_STRUCTURE; + +/// +/// Generic Error Data Entry Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_GENERIC_ERROR_DATA_ENTRY_REVISION 0x0201 + +/// +/// HEST - Hardware Error Source Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 ErrorSourceCount; +} EFI_ACPI_6_0_HARDWARE_ERROR_SOURCE_TABLE_HEADER; + +/// +/// HEST Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_HARDWARE_ERROR_SOURCE_TABLE_REVISION 0x01 + +// +// Error Source structure types. +// +#define EFI_ACPI_6_0_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION 0x00 +#define EFI_ACPI_6_0_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK 0x01 +#define EFI_ACPI_6_0_IA32_ARCHITECTURE_NMI_ERROR 0x02 +#define EFI_ACPI_6_0_PCI_EXPRESS_ROOT_PORT_AER 0x06 +#define EFI_ACPI_6_0_PCI_EXPRESS_DEVICE_AER 0x07 +#define EFI_ACPI_6_0_PCI_EXPRESS_BRIDGE_AER 0x08 +#define EFI_ACPI_6_0_GENERIC_HARDWARE_ERROR 0x09 + +// +// Error Source structure flags. 
+// +#define EFI_ACPI_6_0_ERROR_SOURCE_FLAG_FIRMWARE_FIRST (1 << 0) +#define EFI_ACPI_6_0_ERROR_SOURCE_FLAG_GLOBAL (1 << 1) + +/// +/// IA-32 Architecture Machine Check Exception Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT64 GlobalCapabilityInitData; + UINT64 GlobalControlInitData; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[7]; +} EFI_ACPI_6_0_IA32_ARCHITECTURE_MACHINE_CHECK_EXCEPTION_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure Definition +/// +typedef struct { + UINT8 BankNumber; + UINT8 ClearStatusOnInitialization; + UINT8 StatusDataFormat; + UINT8 Reserved0; + UINT32 ControlRegisterMsrAddress; + UINT64 ControlInitData; + UINT32 StatusRegisterMsrAddress; + UINT32 AddressRegisterMsrAddress; + UINT32 MiscRegisterMsrAddress; +} EFI_ACPI_6_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_BANK_STRUCTURE; + +/// +/// IA-32 Architecture Machine Check Bank Structure MCA data format +/// +#define EFI_ACPI_6_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_IA32 0x00 +#define EFI_ACPI_6_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_INTEL64 0x01 +#define EFI_ACPI_6_0_IA32_ARCHITECTURE_MACHINE_CHECK_ERROR_DATA_FORMAT_AMD64 0x02 + +// +// Hardware Error Notification types. 
All other values are reserved +// +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_POLLED 0x00 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_EXTERNAL_INTERRUPT 0x01 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_LOCAL_INTERRUPT 0x02 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_SCI 0x03 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_NMI 0x04 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_CMCI 0x05 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_MCE 0x06 +#define EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_GPIO_SIGNAL 0x07 + +/// +/// Hardware Error Notification Configuration Write Enable Structure Definition +/// +typedef struct { + UINT16 Type:1; + UINT16 PollInterval:1; + UINT16 SwitchToPollingThresholdValue:1; + UINT16 SwitchToPollingThresholdWindow:1; + UINT16 ErrorThresholdValue:1; + UINT16 ErrorThresholdWindow:1; + UINT16 Reserved:10; +} EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE; + +/// +/// Hardware Error Notification Structure Definition +/// +typedef struct { + UINT8 Type; + UINT8 Length; + EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_CONFIGURATION_WRITE_ENABLE_STRUCTURE ConfigurationWriteEnable; + UINT32 PollInterval; + UINT32 Vector; + UINT32 SwitchToPollingThresholdValue; + UINT32 SwitchToPollingThresholdWindow; + UINT32 ErrorThresholdValue; + UINT32 ErrorThresholdWindow; +} EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE; + +/// +/// IA-32 Architecture Corrected Machine Check Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT8 NumberOfHardwareBanks; + UINT8 Reserved1[3]; +} EFI_ACPI_6_0_IA32_ARCHITECTURE_CORRECTED_MACHINE_CHECK_STRUCTURE; + +/// +/// IA-32 Architecture NMI Error Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 
Reserved0[2]; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; +} EFI_ACPI_6_0_IA32_ARCHITECTURE_NMI_ERROR_STRUCTURE; + +/// +/// PCI Express Root Port AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 RootErrorCommand; +} EFI_ACPI_6_0_PCI_EXPRESS_ROOT_PORT_AER_STRUCTURE; + +/// +/// PCI Express Device AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; +} EFI_ACPI_6_0_PCI_EXPRESS_DEVICE_AER_STRUCTURE; + +/// +/// PCI Express Bridge AER Structure Definition +/// +typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT8 Reserved0[2]; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 Bus; + UINT16 Device; + UINT16 Function; + UINT16 DeviceControl; + UINT8 Reserved1[2]; + UINT32 UncorrectableErrorMask; + UINT32 UncorrectableErrorSeverity; + UINT32 CorrectableErrorMask; + UINT32 AdvancedErrorCapabilitiesAndControl; + UINT32 SecondaryUncorrectableErrorMask; + UINT32 SecondaryUncorrectableErrorSeverity; + UINT32 SecondaryAdvancedErrorCapabilitiesAndControl; +} EFI_ACPI_6_0_PCI_EXPRESS_BRIDGE_AER_STRUCTURE; + +/// +/// Generic Hardware Error Source Structure Definition +/// 
+typedef struct { + UINT16 Type; + UINT16 SourceId; + UINT16 RelatedSourceId; + UINT8 Flags; + UINT8 Enabled; + UINT32 NumberOfRecordsToPreAllocate; + UINT32 MaxSectionsPerRecord; + UINT32 MaxRawDataLength; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE ErrorStatusAddress; + EFI_ACPI_6_0_HARDWARE_ERROR_NOTIFICATION_STRUCTURE NotificationStructure; + UINT32 ErrorStatusBlockLength; +} EFI_ACPI_6_0_GENERIC_HARDWARE_ERROR_SOURCE_STRUCTURE; + +/// +/// Generic Error Status Definition +/// +typedef struct { + EFI_ACPI_6_0_ERROR_BLOCK_STATUS BlockStatus; + UINT32 RawDataOffset; + UINT32 RawDataLength; + UINT32 DataLength; + UINT32 ErrorSeverity; +} EFI_ACPI_6_0_GENERIC_ERROR_STATUS_STRUCTURE; + +/// +/// ERST - Error Record Serialization Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 SerializationHeaderSize; + UINT8 Reserved0[4]; + UINT32 InstructionEntryCount; +} EFI_ACPI_6_0_ERROR_RECORD_SERIALIZATION_TABLE_HEADER; + +/// +/// ERST Version (as defined in ACPI 6.0 spec.) 
+/// +#define EFI_ACPI_6_0_ERROR_RECORD_SERIALIZATION_TABLE_REVISION 0x01 + +/// +/// ERST Serialization Actions +/// +#define EFI_ACPI_6_0_ERST_BEGIN_WRITE_OPERATION 0x00 +#define EFI_ACPI_6_0_ERST_BEGIN_READ_OPERATION 0x01 +#define EFI_ACPI_6_0_ERST_BEGIN_CLEAR_OPERATION 0x02 +#define EFI_ACPI_6_0_ERST_END_OPERATION 0x03 +#define EFI_ACPI_6_0_ERST_SET_RECORD_OFFSET 0x04 +#define EFI_ACPI_6_0_ERST_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_6_0_ERST_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_6_0_ERST_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_6_0_ERST_GET_RECORD_IDENTIFIER 0x08 +#define EFI_ACPI_6_0_ERST_SET_RECORD_IDENTIFIER 0x09 +#define EFI_ACPI_6_0_ERST_GET_RECORD_COUNT 0x0A +#define EFI_ACPI_6_0_ERST_BEGIN_DUMMY_WRITE_OPERATION 0x0B +#define EFI_ACPI_6_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE 0x0D +#define EFI_ACPI_6_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE_LENGTH 0x0E +#define EFI_ACPI_6_0_ERST_GET_ERROR_LOG_ADDRESS_RANGE_ATTRIBUTES 0x0F + +/// +/// ERST Action Command Status +/// +#define EFI_ACPI_6_0_ERST_STATUS_SUCCESS 0x00 +#define EFI_ACPI_6_0_ERST_STATUS_NOT_ENOUGH_SPACE 0x01 +#define EFI_ACPI_6_0_ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x02 +#define EFI_ACPI_6_0_ERST_STATUS_FAILED 0x03 +#define EFI_ACPI_6_0_ERST_STATUS_RECORD_STORE_EMPTY 0x04 +#define EFI_ACPI_6_0_ERST_STATUS_RECORD_NOT_FOUND 0x05 + +/// +/// ERST Serialization Instructions +/// +#define EFI_ACPI_6_0_ERST_READ_REGISTER 0x00 +#define EFI_ACPI_6_0_ERST_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_6_0_ERST_WRITE_REGISTER 0x02 +#define EFI_ACPI_6_0_ERST_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_6_0_ERST_NOOP 0x04 +#define EFI_ACPI_6_0_ERST_LOAD_VAR1 0x05 +#define EFI_ACPI_6_0_ERST_LOAD_VAR2 0x06 +#define EFI_ACPI_6_0_ERST_STORE_VAR1 0x07 +#define EFI_ACPI_6_0_ERST_ADD 0x08 +#define EFI_ACPI_6_0_ERST_SUBTRACT 0x09 +#define EFI_ACPI_6_0_ERST_ADD_VALUE 0x0A +#define EFI_ACPI_6_0_ERST_SUBTRACT_VALUE 0x0B +#define EFI_ACPI_6_0_ERST_STALL 0x0C +#define EFI_ACPI_6_0_ERST_STALL_WHILE_TRUE 0x0D +#define 
EFI_ACPI_6_0_ERST_SKIP_NEXT_INSTRUCTION_IF_TRUE 0x0E +#define EFI_ACPI_6_0_ERST_GOTO 0x0F +#define EFI_ACPI_6_0_ERST_SET_SRC_ADDRESS_BASE 0x10 +#define EFI_ACPI_6_0_ERST_SET_DST_ADDRESS_BASE 0x11 +#define EFI_ACPI_6_0_ERST_MOVE_DATA 0x12 + +/// +/// ERST Instruction Flags +/// +#define EFI_ACPI_6_0_ERST_PRESERVE_REGISTER 0x01 + +/// +/// ERST Serialization Instruction Entry +/// +typedef struct { + UINT8 SerializationAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_6_0_ERST_SERIALIZATION_INSTRUCTION_ENTRY; + +/// +/// EINJ - Error Injection Table +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 InjectionHeaderSize; + UINT8 InjectionFlags; + UINT8 Reserved0[3]; + UINT32 InjectionEntryCount; +} EFI_ACPI_6_0_ERROR_INJECTION_TABLE_HEADER; + +/// +/// EINJ Version (as defined in ACPI 6.0 spec.) +/// +#define EFI_ACPI_6_0_ERROR_INJECTION_TABLE_REVISION 0x01 + +/// +/// EINJ Error Injection Actions +/// +#define EFI_ACPI_6_0_EINJ_BEGIN_INJECTION_OPERATION 0x00 +#define EFI_ACPI_6_0_EINJ_GET_TRIGGER_ERROR_ACTION_TABLE 0x01 +#define EFI_ACPI_6_0_EINJ_SET_ERROR_TYPE 0x02 +#define EFI_ACPI_6_0_EINJ_GET_ERROR_TYPE 0x03 +#define EFI_ACPI_6_0_EINJ_END_OPERATION 0x04 +#define EFI_ACPI_6_0_EINJ_EXECUTE_OPERATION 0x05 +#define EFI_ACPI_6_0_EINJ_CHECK_BUSY_STATUS 0x06 +#define EFI_ACPI_6_0_EINJ_GET_COMMAND_STATUS 0x07 +#define EFI_ACPI_6_0_EINJ_TRIGGER_ERROR 0xFF + +/// +/// EINJ Action Command Status +/// +#define EFI_ACPI_6_0_EINJ_STATUS_SUCCESS 0x00 +#define EFI_ACPI_6_0_EINJ_STATUS_UNKNOWN_FAILURE 0x01 +#define EFI_ACPI_6_0_EINJ_STATUS_INVALID_ACCESS 0x02 + +/// +/// EINJ Error Type Definition +/// +#define EFI_ACPI_6_0_EINJ_ERROR_PROCESSOR_CORRECTABLE (1 << 0) +#define EFI_ACPI_6_0_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_NONFATAL (1 << 1) +#define EFI_ACPI_6_0_EINJ_ERROR_PROCESSOR_UNCORRECTABLE_FATAL (1 << 2) +#define 
EFI_ACPI_6_0_EINJ_ERROR_MEMORY_CORRECTABLE (1 << 3) +#define EFI_ACPI_6_0_EINJ_ERROR_MEMORY_UNCORRECTABLE_NONFATAL (1 << 4) +#define EFI_ACPI_6_0_EINJ_ERROR_MEMORY_UNCORRECTABLE_FATAL (1 << 5) +#define EFI_ACPI_6_0_EINJ_ERROR_PCI_EXPRESS_CORRECTABLE (1 << 6) +#define EFI_ACPI_6_0_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_NONFATAL (1 << 7) +#define EFI_ACPI_6_0_EINJ_ERROR_PCI_EXPRESS_UNCORRECTABLE_FATAL (1 << 8) +#define EFI_ACPI_6_0_EINJ_ERROR_PLATFORM_CORRECTABLE (1 << 9) +#define EFI_ACPI_6_0_EINJ_ERROR_PLATFORM_UNCORRECTABLE_NONFATAL (1 << 10) +#define EFI_ACPI_6_0_EINJ_ERROR_PLATFORM_UNCORRECTABLE_FATAL (1 << 11) + +/// +/// EINJ Injection Instructions +/// +#define EFI_ACPI_6_0_EINJ_READ_REGISTER 0x00 +#define EFI_ACPI_6_0_EINJ_READ_REGISTER_VALUE 0x01 +#define EFI_ACPI_6_0_EINJ_WRITE_REGISTER 0x02 +#define EFI_ACPI_6_0_EINJ_WRITE_REGISTER_VALUE 0x03 +#define EFI_ACPI_6_0_EINJ_NOOP 0x04 + +/// +/// EINJ Instruction Flags +/// +#define EFI_ACPI_6_0_EINJ_PRESERVE_REGISTER 0x01 + +/// +/// EINJ Injection Instruction Entry +/// +typedef struct { + UINT8 InjectionAction; + UINT8 Instruction; + UINT8 Flags; + UINT8 Reserved0; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion; + UINT64 Value; + UINT64 Mask; +} EFI_ACPI_6_0_EINJ_INJECTION_INSTRUCTION_ENTRY; + +/// +/// EINJ Trigger Action Table +/// +typedef struct { + UINT32 HeaderSize; + UINT32 Revision; + UINT32 TableSize; + UINT32 EntryCount; +} EFI_ACPI_6_0_EINJ_TRIGGER_ACTION_TABLE; + +/// +/// Platform Communications Channel Table (PCCT) +/// +typedef struct { + EFI_ACPI_DESCRIPTION_HEADER Header; + UINT32 Flags; + UINT64 Reserved; +} EFI_ACPI_6_0_PLATFORM_COMMUNICATION_CHANNEL_TABLE_HEADER; + +/// +/// PCCT Version (as defined in ACPI 6.0 spec.) 
+/// +#define EFI_ACPI_6_0_PLATFORM_COMMUNICATION_CHANNEL_TABLE_REVISION 0x01 + +/// +/// PCCT Global Flags +/// +#define EFI_ACPI_6_0_PCCT_FLAGS_SCI_DOORBELL BIT0 + +// +// PCCT Subspace type +// +#define EFI_ACPI_6_0_PCCT_SUBSPACE_TYPE_GENERIC 0x00 + +/// +/// PCC Subspace Structure Header +/// +typedef struct { + UINT8 Type; + UINT8 Length; +} EFI_ACPI_6_0_PCCT_SUBSPACE_HEADER; + +/// +/// Generic Communications Subspace Structure +/// +typedef struct { + UINT8 Type; + UINT8 Length; + UINT8 Reserved[6]; + UINT64 BaseAddress; + UINT64 AddressLength; + EFI_ACPI_6_0_GENERIC_ADDRESS_STRUCTURE DoorbellRegister; + UINT64 DoorbellPreserve; + UINT64 DoorbellWrite; + UINT32 NominalLatency; + UINT32 MaximumPeriodicAccessRate; + UINT16 MinimumRequestTurnaroundTime; +} EFI_ACPI_6_0_PCCT_SUBSPACE_GENERIC; + +/// +/// Generic Communications Channel Shared Memory Region +/// + +typedef struct { + UINT8 Command; + UINT8 Reserved:7; + UINT8 GenerateSci:1; +} EFI_ACPI_6_0_PCCT_GENERIC_SHARED_MEMORY_REGION_COMMAND; + +typedef struct { + UINT8 CommandComplete:1; + UINT8 SciDoorbell:1; + UINT8 Error:1; + UINT8 PlatformNotification:1; + UINT8 Reserved:4; + UINT8 Reserved1; +} EFI_ACPI_6_0_PCCT_GENERIC_SHARED_MEMORY_REGION_STATUS; + +typedef struct { + UINT32 Signature; + EFI_ACPI_6_0_PCCT_GENERIC_SHARED_MEMORY_REGION_COMMAND Command; + EFI_ACPI_6_0_PCCT_GENERIC_SHARED_MEMORY_REGION_STATUS Status; +} EFI_ACPI_6_0_PCCT_GENERIC_SHARED_MEMORY_REGION_HEADER; + +// +// Known table signatures +// + +/// +/// "RSD PTR " Root System Description Pointer +/// +#define EFI_ACPI_6_0_ROOT_SYSTEM_DESCRIPTION_POINTER_SIGNATURE SIGNATURE_64('R', 'S', 'D', ' ', 'P', 'T', 'R', ' ') + +/// +/// "APIC" Multiple APIC Description Table +/// +#define EFI_ACPI_6_0_MULTIPLE_APIC_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('A', 'P', 'I', 'C') + +/// +/// "BERT" Boot Error Record Table +/// +#define EFI_ACPI_6_0_BOOT_ERROR_RECORD_TABLE_SIGNATURE SIGNATURE_32('B', 'E', 'R', 'T') + +/// +/// "BGRT" Boot Graphics 
Resource Table +/// +#define EFI_ACPI_6_0_BOOT_GRAPHICS_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('B', 'G', 'R', 'T') + +/// +/// "CPEP" Corrected Platform Error Polling Table +/// +#define EFI_ACPI_6_0_CORRECTED_PLATFORM_ERROR_POLLING_TABLE_SIGNATURE SIGNATURE_32('C', 'P', 'E', 'P') + +/// +/// "DSDT" Differentiated System Description Table +/// +#define EFI_ACPI_6_0_DIFFERENTIATED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('D', 'S', 'D', 'T') + +/// +/// "ECDT" Embedded Controller Boot Resources Table +/// +#define EFI_ACPI_6_0_EMBEDDED_CONTROLLER_BOOT_RESOURCES_TABLE_SIGNATURE SIGNATURE_32('E', 'C', 'D', 'T') + +/// +/// "EINJ" Error Injection Table +/// +#define EFI_ACPI_6_0_ERROR_INJECTION_TABLE_SIGNATURE SIGNATURE_32('E', 'I', 'N', 'J') + +/// +/// "ERST" Error Record Serialization Table +/// +#define EFI_ACPI_6_0_ERROR_RECORD_SERIALIZATION_TABLE_SIGNATURE SIGNATURE_32('E', 'R', 'S', 'T') + +/// +/// "FACP" Fixed ACPI Description Table +/// +#define EFI_ACPI_6_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'P') + +/// +/// "FACS" Firmware ACPI Control Structure +/// +#define EFI_ACPI_6_0_FIRMWARE_ACPI_CONTROL_STRUCTURE_SIGNATURE SIGNATURE_32('F', 'A', 'C', 'S') + +/// +/// "FPDT" Firmware Performance Data Table +/// +#define EFI_ACPI_6_0_FIRMWARE_PERFORMANCE_DATA_TABLE_SIGNATURE SIGNATURE_32('F', 'P', 'D', 'T') + +/// +/// "GTDT" Generic Timer Description Table +/// +#define EFI_ACPI_6_0_GENERIC_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('G', 'T', 'D', 'T') + +/// +/// "HEST" Hardware Error Source Table +/// +#define EFI_ACPI_6_0_HARDWARE_ERROR_SOURCE_TABLE_SIGNATURE SIGNATURE_32('H', 'E', 'S', 'T') + +/// +/// "MPST" Memory Power State Table +/// +#define EFI_ACPI_6_0_MEMORY_POWER_STATE_TABLE_SIGNATURE SIGNATURE_32('M', 'P', 'S', 'T') + +/// +/// "MSCT" Maximum System Characteristics Table +/// +#define EFI_ACPI_6_0_MAXIMUM_SYSTEM_CHARACTERISTICS_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'C', 'T') + +/// +/// "NFIT" NVDIMM Firmware 
Interface Table +/// +#define EFI_ACPI_6_0_NVDIMM_FIRMWARE_INTERFACE_TABLE_STRUCTURE_SIGNATURE SIGNATURE_32('N', 'F', 'I', 'T') + +/// +/// "PMTT" Platform Memory Topology Table +/// +#define EFI_ACPI_6_0_PLATFORM_MEMORY_TOPOLOGY_TABLE_SIGNATURE SIGNATURE_32('P', 'M', 'T', 'T') + +/// +/// "PSDT" Persistent System Description Table +/// +#define EFI_ACPI_6_0_PERSISTENT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('P', 'S', 'D', 'T') + +/// +/// "RASF" ACPI RAS Feature Table +/// +#define EFI_ACPI_6_0_ACPI_RAS_FEATURE_TABLE_SIGNATURE SIGNATURE_32('R', 'A', 'S', 'F') + +/// +/// "RSDT" Root System Description Table +/// +#define EFI_ACPI_6_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('R', 'S', 'D', 'T') + +/// +/// "SBST" Smart Battery Specification Table +/// +#define EFI_ACPI_6_0_SMART_BATTERY_SPECIFICATION_TABLE_SIGNATURE SIGNATURE_32('S', 'B', 'S', 'T') + +/// +/// "SLIT" System Locality Information Table +/// +#define EFI_ACPI_6_0_SYSTEM_LOCALITY_INFORMATION_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'T') + +/// +/// "SRAT" System Resource Affinity Table +/// +#define EFI_ACPI_6_0_SYSTEM_RESOURCE_AFFINITY_TABLE_SIGNATURE SIGNATURE_32('S', 'R', 'A', 'T') + +/// +/// "SSDT" Secondary System Description Table +/// +#define EFI_ACPI_6_0_SECONDARY_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('S', 'S', 'D', 'T') + +/// +/// "XSDT" Extended System Description Table +/// +#define EFI_ACPI_6_0_EXTENDED_SYSTEM_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('X', 'S', 'D', 'T') + +/// +/// "BOOT" MS Simple Boot Spec +/// +#define EFI_ACPI_6_0_SIMPLE_BOOT_FLAG_TABLE_SIGNATURE SIGNATURE_32('B', 'O', 'O', 'T') + +/// +/// "CSRT" MS Core System Resource Table +/// +#define EFI_ACPI_6_0_CORE_SYSTEM_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('C', 'S', 'R', 'T') + +/// +/// "DBG2" MS Debug Port 2 Spec +/// +#define EFI_ACPI_6_0_DEBUG_PORT_2_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', '2') + +/// +/// "DBGP" MS Debug Port Spec +/// +#define 
EFI_ACPI_6_0_DEBUG_PORT_TABLE_SIGNATURE SIGNATURE_32('D', 'B', 'G', 'P') + +/// +/// "DMAR" DMA Remapping Table +/// +#define EFI_ACPI_6_0_DMA_REMAPPING_TABLE_SIGNATURE SIGNATURE_32('D', 'M', 'A', 'R') + +/// +/// "DRTM" Dynamic Root of Trust for Measurement Table +/// +#define EFI_ACPI_6_0_DYNAMIC_ROOT_OF_TRUST_FOR_MEASUREMENT_TABLE_SIGNATURE SIGNATURE_32('D', 'R', 'T', 'M') + +/// +/// "ETDT" Event Timer Description Table +/// +#define EFI_ACPI_6_0_EVENT_TIMER_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('E', 'T', 'D', 'T') + +/// +/// "HPET" IA-PC High Precision Event Timer Table +/// +#define EFI_ACPI_6_0_HIGH_PRECISION_EVENT_TIMER_TABLE_SIGNATURE SIGNATURE_32('H', 'P', 'E', 'T') + +/// +/// "iBFT" iSCSI Boot Firmware Table +/// +#define EFI_ACPI_6_0_ISCSI_BOOT_FIRMWARE_TABLE_SIGNATURE SIGNATURE_32('i', 'B', 'F', 'T') + +/// +/// "IORT" I/O Remapping Table +/// +#define EFI_ACPI_6_0_IO_REMAPPING_TABLE_SIGNATURE SIGNATURE_32('I', 'O', 'R', 'T') + +/// +/// "IVRS" I/O Virtualization Reporting Structure +/// +#define EFI_ACPI_6_0_IO_VIRTUALIZATION_REPORTING_STRUCTURE_SIGNATURE SIGNATURE_32('I', 'V', 'R', 'S') + +/// +/// "LPIT" Low Power Idle Table +/// +#define EFI_ACPI_6_0_LOW_POWER_IDLE_TABLE_STRUCTURE_SIGNATURE SIGNATURE_32('L', 'P', 'I', 'T') + +/// +/// "MCFG" PCI Express Memory Mapped Configuration Space Base Address Description Table +/// +#define EFI_ACPI_6_0_PCI_EXPRESS_MEMORY_MAPPED_CONFIGURATION_SPACE_BASE_ADDRESS_DESCRIPTION_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'F', 'G') + +/// +/// "MCHI" Management Controller Host Interface Table +/// +#define EFI_ACPI_6_0_MANAGEMENT_CONTROLLER_HOST_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('M', 'C', 'H', 'I') + +/// +/// "MSDM" MS Data Management Table +/// +#define EFI_ACPI_6_0_DATA_MANAGEMENT_TABLE_SIGNATURE SIGNATURE_32('M', 'S', 'D', 'M') + +/// +/// "SLIC" MS Software Licensing Table Specification +/// +#define EFI_ACPI_6_0_SOFTWARE_LICENSING_TABLE_SIGNATURE SIGNATURE_32('S', 'L', 'I', 'C') + +/// +/// "SPCR" 
Serial Port Console Redirection Table +/// +#define EFI_ACPI_6_0_SERIAL_PORT_CONSOLE_REDIRECTION_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'C', 'R') + +/// +/// "SPMI" Server Platform Management Interface Table +/// +#define EFI_ACPI_6_0_SERVER_PLATFORM_MANAGEMENT_INTERFACE_TABLE_SIGNATURE SIGNATURE_32('S', 'P', 'M', 'I') + +/// +/// "STAO" _STA Override Table +/// +#define EFI_ACPI_6_0_STA_OVERRIDE_TABLE_SIGNATURE SIGNATURE_32('S', 'T', 'A', 'O') + +/// +/// "TCPA" Trusted Computing Platform Alliance Capabilities Table +/// +#define EFI_ACPI_6_0_TRUSTED_COMPUTING_PLATFORM_ALLIANCE_CAPABILITIES_TABLE_SIGNATURE SIGNATURE_32('T', 'C', 'P', 'A') + +/// +/// "TPM2" Trusted Computing Platform 2 Table +/// +#define EFI_ACPI_6_0_TRUSTED_COMPUTING_PLATFORM_2_TABLE_SIGNATURE SIGNATURE_32('T', 'P', 'M', '2') + +/// +/// "UEFI" UEFI ACPI Data Table +/// +#define EFI_ACPI_6_0_UEFI_ACPI_DATA_TABLE_SIGNATURE SIGNATURE_32('U', 'E', 'F', 'I') + +/// +/// "WAET" Windows ACPI Emulated Devices Table +/// +#define EFI_ACPI_6_0_WINDOWS_ACPI_EMULATED_DEVICES_TABLE_SIGNATURE SIGNATURE_32('W', 'A', 'E', 'T') + +/// +/// "WDAT" Watchdog Action Table +/// +#define EFI_ACPI_6_0_WATCHDOG_ACTION_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'A', 'T') + +/// +/// "WDRT" Watchdog Resource Table +/// +#define EFI_ACPI_6_0_WATCHDOG_RESOURCE_TABLE_SIGNATURE SIGNATURE_32('W', 'D', 'R', 'T') + +/// +/// "WPBT" MS Platform Binary Table +/// +#define EFI_ACPI_6_0_PLATFORM_BINARY_TABLE_SIGNATURE SIGNATURE_32('W', 'P', 'B', 'T') + +/// +/// "XENV" Xen Project Table +/// +#define EFI_ACPI_6_0_XEN_PROJECT_TABLE_SIGNATURE SIGNATURE_32('X', 'E', 'N', 'V') + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/AcpiAml.h b/src/include/ipxe/efi/IndustryStandard/AcpiAml.h new file mode 100644 index 00000000..a9186b40 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/AcpiAml.h @@ -0,0 +1,177 @@ +/** @file + This file contains AML code definition in the latest ACPI spec. 
+ + Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _ACPI_AML_H_ +#define _ACPI_AML_H_ + +FILE_LICENCE ( BSD3 ); + +// +// ACPI AML definition +// + +// +// Primary OpCode +// +#define AML_ZERO_OP 0x00 +#define AML_ONE_OP 0x01 +#define AML_ALIAS_OP 0x06 +#define AML_NAME_OP 0x08 +#define AML_BYTE_PREFIX 0x0a +#define AML_WORD_PREFIX 0x0b +#define AML_DWORD_PREFIX 0x0c +#define AML_STRING_PREFIX 0x0d +#define AML_QWORD_PREFIX 0x0e +#define AML_SCOPE_OP 0x10 +#define AML_BUFFER_OP 0x11 +#define AML_PACKAGE_OP 0x12 +#define AML_VAR_PACKAGE_OP 0x13 +#define AML_METHOD_OP 0x14 +#define AML_DUAL_NAME_PREFIX 0x2e +#define AML_MULTI_NAME_PREFIX 0x2f +#define AML_NAME_CHAR_A 0x41 +#define AML_NAME_CHAR_B 0x42 +#define AML_NAME_CHAR_C 0x43 +#define AML_NAME_CHAR_D 0x44 +#define AML_NAME_CHAR_E 0x45 +#define AML_NAME_CHAR_F 0x46 +#define AML_NAME_CHAR_G 0x47 +#define AML_NAME_CHAR_H 0x48 +#define AML_NAME_CHAR_I 0x49 +#define AML_NAME_CHAR_J 0x4a +#define AML_NAME_CHAR_K 0x4b +#define AML_NAME_CHAR_L 0x4c +#define AML_NAME_CHAR_M 0x4d +#define AML_NAME_CHAR_N 0x4e +#define AML_NAME_CHAR_O 0x4f +#define AML_NAME_CHAR_P 0x50 +#define AML_NAME_CHAR_Q 0x51 +#define AML_NAME_CHAR_R 0x52 +#define AML_NAME_CHAR_S 0x53 +#define AML_NAME_CHAR_T 0x54 +#define AML_NAME_CHAR_U 0x55 +#define AML_NAME_CHAR_V 0x56 +#define AML_NAME_CHAR_W 0x57 +#define AML_NAME_CHAR_X 0x58 +#define AML_NAME_CHAR_Y 0x59 +#define AML_NAME_CHAR_Z 0x5a +#define AML_ROOT_CHAR 0x5c +#define AML_PARENT_PREFIX_CHAR 0x5e +#define AML_NAME_CHAR__ 0x5f +#define AML_LOCAL0 0x60 +#define AML_LOCAL1 0x61 +#define AML_LOCAL2 0x62 +#define 
AML_LOCAL3 0x63 +#define AML_LOCAL4 0x64 +#define AML_LOCAL5 0x65 +#define AML_LOCAL6 0x66 +#define AML_LOCAL7 0x67 +#define AML_ARG0 0x68 +#define AML_ARG1 0x69 +#define AML_ARG2 0x6a +#define AML_ARG3 0x6b +#define AML_ARG4 0x6c +#define AML_ARG5 0x6d +#define AML_ARG6 0x6e +#define AML_STORE_OP 0x70 +#define AML_REF_OF_OP 0x71 +#define AML_ADD_OP 0x72 +#define AML_CONCAT_OP 0x73 +#define AML_SUBTRACT_OP 0x74 +#define AML_INCREMENT_OP 0x75 +#define AML_DECREMENT_OP 0x76 +#define AML_MULTIPLY_OP 0x77 +#define AML_DIVIDE_OP 0x78 +#define AML_SHIFT_LEFT_OP 0x79 +#define AML_SHIFT_RIGHT_OP 0x7a +#define AML_AND_OP 0x7b +#define AML_NAND_OP 0x7c +#define AML_OR_OP 0x7d +#define AML_NOR_OP 0x7e +#define AML_XOR_OP 0x7f +#define AML_NOT_OP 0x80 +#define AML_FIND_SET_LEFT_BIT_OP 0x81 +#define AML_FIND_SET_RIGHT_BIT_OP 0x82 +#define AML_DEREF_OF_OP 0x83 +#define AML_CONCAT_RES_OP 0x84 +#define AML_MOD_OP 0x85 +#define AML_NOTIFY_OP 0x86 +#define AML_SIZE_OF_OP 0x87 +#define AML_INDEX_OP 0x88 +#define AML_MATCH_OP 0x89 +#define AML_CREATE_DWORD_FIELD_OP 0x8a +#define AML_CREATE_WORD_FIELD_OP 0x8b +#define AML_CREATE_BYTE_FIELD_OP 0x8c +#define AML_CREATE_BIT_FIELD_OP 0x8d +#define AML_OBJECT_TYPE_OP 0x8e +#define AML_CREATE_QWORD_FIELD_OP 0x8f +#define AML_LAND_OP 0x90 +#define AML_LOR_OP 0x91 +#define AML_LNOT_OP 0x92 +#define AML_LEQUAL_OP 0x93 +#define AML_LGREATER_OP 0x94 +#define AML_LLESS_OP 0x95 +#define AML_TO_BUFFER_OP 0x96 +#define AML_TO_DEC_STRING_OP 0x97 +#define AML_TO_HEX_STRING_OP 0x98 +#define AML_TO_INTEGER_OP 0x99 +#define AML_TO_STRING_OP 0x9c +#define AML_COPY_OBJECT_OP 0x9d +#define AML_MID_OP 0x9e +#define AML_CONTINUE_OP 0x9f +#define AML_IF_OP 0xa0 +#define AML_ELSE_OP 0xa1 +#define AML_WHILE_OP 0xa2 +#define AML_NOOP_OP 0xa3 +#define AML_RETURN_OP 0xa4 +#define AML_BREAK_OP 0xa5 +#define AML_BREAK_POINT_OP 0xcc +#define AML_ONES_OP 0xff + +// +// Extended OpCode +// +#define AML_EXT_OP 0x5b + +#define AML_EXT_MUTEX_OP 0x01 +#define 
AML_EXT_EVENT_OP 0x02 +#define AML_EXT_COND_REF_OF_OP 0x12 +#define AML_EXT_CREATE_FIELD_OP 0x13 +#define AML_EXT_LOAD_TABLE_OP 0x1f +#define AML_EXT_LOAD_OP 0x20 +#define AML_EXT_STALL_OP 0x21 +#define AML_EXT_SLEEP_OP 0x22 +#define AML_EXT_ACQUIRE_OP 0x23 +#define AML_EXT_SIGNAL_OP 0x24 +#define AML_EXT_WAIT_OP 0x25 +#define AML_EXT_RESET_OP 0x26 +#define AML_EXT_RELEASE_OP 0x27 +#define AML_EXT_FROM_BCD_OP 0x28 +#define AML_EXT_TO_BCD_OP 0x29 +#define AML_EXT_UNLOAD_OP 0x2a +#define AML_EXT_REVISION_OP 0x30 +#define AML_EXT_DEBUG_OP 0x31 +#define AML_EXT_FATAL_OP 0x32 +#define AML_EXT_TIMER_OP 0x33 +#define AML_EXT_REGION_OP 0x80 +#define AML_EXT_FIELD_OP 0x81 +#define AML_EXT_DEVICE_OP 0x82 +#define AML_EXT_PROCESSOR_OP 0x83 +#define AML_EXT_POWER_RES_OP 0x84 +#define AML_EXT_THERMAL_ZONE_OP 0x85 +#define AML_EXT_INDEX_FIELD_OP 0x86 +#define AML_EXT_BANK_FIELD_OP 0x87 +#define AML_EXT_DATA_REGION_OP 0x88 + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Bluetooth.h b/src/include/ipxe/efi/IndustryStandard/Bluetooth.h new file mode 100644 index 00000000..f63ab890 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Bluetooth.h @@ -0,0 +1,49 @@ +/** @file + This file contains the Bluetooth definitions that are consumed by drivers. + These definitions are from Bluetooth Core Specification Version 4.0 June, 2010 + + Copyright (c) 2015, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _BLUETOOTH_H_ +#define _BLUETOOTH_H_ + +FILE_LICENCE ( BSD3 ); + +#pragma pack(1) + +/// +/// BLUETOOTH_ADDRESS +/// +typedef struct { + /// + /// 48bit Bluetooth device address. + /// + UINT8 Address[6]; +} BLUETOOTH_ADDRESS; + +/// +/// BLUETOOTH_CLASS_OF_DEVICE. See Bluetooth specification for detail. +/// +typedef struct { + UINT8 FormatType:2; + UINT8 MinorDeviceClass: 6; + UINT16 MajorDeviceClass: 5; + UINT16 MajorServiceClass:11; +} BLUETOOTH_CLASS_OF_DEVICE; + +#pragma pack() + +#define BLUETOOTH_HCI_COMMAND_LOCAL_READABLE_NAME_MAX_SIZE 248 + +#define BLUETOOTH_HCI_LINK_KEY_SIZE 16 + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Tpm12.h b/src/include/ipxe/efi/IndustryStandard/Tpm12.h new file mode 100644 index 00000000..509425cc --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Tpm12.h @@ -0,0 +1,2175 @@ +/** @file + TPM Specification data structures (TCG TPM Specification Version 1.2 Revision 103) + See http://trustedcomputinggroup.org for latest specification updates + + Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +**/ + + +#ifndef _TPM12_H_ +#define _TPM12_H_ + +FILE_LICENCE ( BSD3 ); + +/// +/// The start of TPM return codes +/// +#define TPM_BASE 0 + +// +// All structures MUST be packed on a byte boundary. +// + +#pragma pack (1) + +// +// Part 2, section 2.2.3: Helper redefinitions +// +/// +/// Indicates the conditions where it is required that authorization be presented +/// +typedef UINT8 TPM_AUTH_DATA_USAGE; +/// +/// The information as to what the payload is in an encrypted structure +/// +typedef UINT8 TPM_PAYLOAD_TYPE; +/// +/// The version info breakdown +/// +typedef UINT8 TPM_VERSION_BYTE; +/// +/// The state of the dictionary attack mitigation logic +/// +typedef UINT8 TPM_DA_STATE; +/// +/// The request or response authorization type +/// +typedef UINT16 TPM_TAG; +/// +/// The protocol in use +/// +typedef UINT16 TPM_PROTOCOL_ID; +/// +/// Indicates the start state +/// +typedef UINT16 TPM_STARTUP_TYPE; +/// +/// The definition of the encryption scheme +/// +typedef UINT16 TPM_ENC_SCHEME; +/// +/// The definition of the signature scheme +/// +typedef UINT16 TPM_SIG_SCHEME; +/// +/// The definition of the migration scheme +/// +typedef UINT16 TPM_MIGRATE_SCHEME; +/// +/// Sets the state of the physical presence mechanism +/// +typedef UINT16 TPM_PHYSICAL_PRESENCE; +/// +/// Indicates the types of entity that are supported by the TPM +/// +typedef UINT16 TPM_ENTITY_TYPE; +/// +/// Indicates the permitted usage of the key +/// +typedef UINT16 TPM_KEY_USAGE; +/// +/// The type of asymmetric encrypted structure in use by the endorsement key +/// +typedef 
UINT16 TPM_EK_TYPE; +/// +/// The tag for the structure +/// +typedef UINT16 TPM_STRUCTURE_TAG; +/// +/// The platform specific spec to which the information relates to +/// +typedef UINT16 TPM_PLATFORM_SPECIFIC; +/// +/// The command ordinal +/// +typedef UINT32 TPM_COMMAND_CODE; +/// +/// Identifies a TPM capability area +/// +typedef UINT32 TPM_CAPABILITY_AREA; +/// +/// Indicates information regarding a key +/// +typedef UINT32 TPM_KEY_FLAGS; +/// +/// Indicates the type of algorithm +/// +typedef UINT32 TPM_ALGORITHM_ID; +/// +/// The locality modifier +/// +typedef UINT32 TPM_MODIFIER_INDICATOR; +/// +/// The actual number of a counter +/// +typedef UINT32 TPM_ACTUAL_COUNT; +/// +/// Attributes that define what options are in use for a transport session +/// +typedef UINT32 TPM_TRANSPORT_ATTRIBUTES; +/// +/// Handle to an authorization session +/// +typedef UINT32 TPM_AUTHHANDLE; +/// +/// Index to a DIR register +/// +typedef UINT32 TPM_DIRINDEX; +/// +/// The area where a key is held assigned by the TPM +/// +typedef UINT32 TPM_KEY_HANDLE; +/// +/// Index to a PCR register +/// +typedef UINT32 TPM_PCRINDEX; +/// +/// The return code from a function +/// +typedef UINT32 TPM_RESULT; +/// +/// The types of resources that a TPM may have using internal resources +/// +typedef UINT32 TPM_RESOURCE_TYPE; +/// +/// Allows for controlling of the key when loaded and how to handle TPM_Startup issues +/// +typedef UINT32 TPM_KEY_CONTROL; +/// +/// The index into the NV storage area +/// +typedef UINT32 TPM_NV_INDEX; +/// +/// The family ID. Family IDs are automatically assigned a sequence number by the TPM. +/// A trusted process can set the FamilyID value in an individual row to NULL, which +/// invalidates that row. The family ID resets to NULL on each change of TPM Owner. +/// +typedef UINT32 TPM_FAMILY_ID; +/// +/// IA value used as a label for the most recent verification of this family. Set to zero when not in use. 
+/// +typedef UINT32 TPM_FAMILY_VERIFICATION; +/// +/// How the TPM handles var +/// +typedef UINT32 TPM_STARTUP_EFFECTS; +/// +/// The mode of a symmetric encryption +/// +typedef UINT32 TPM_SYM_MODE; +/// +/// The family flags +/// +typedef UINT32 TPM_FAMILY_FLAGS; +/// +/// The index value for the delegate NV table +/// +typedef UINT32 TPM_DELEGATE_INDEX; +/// +/// The restrictions placed on delegation of CMK commands +/// +typedef UINT32 TPM_CMK_DELEGATE; +/// +/// The ID value of a monotonic counter +/// +typedef UINT32 TPM_COUNT_ID; +/// +/// A command to execute +/// +typedef UINT32 TPM_REDIT_COMMAND; +/// +/// A transport session handle +/// +typedef UINT32 TPM_TRANSHANDLE; +/// +/// A generic handle could be key, transport etc +/// +typedef UINT32 TPM_HANDLE; +/// +/// What operation is happening +/// +typedef UINT32 TPM_FAMILY_OPERATION; + +// +// Part 2, section 2.2.4: Vendor specific +// The following defines allow for the quick specification of a +// vendor specific item. +// +#define TPM_Vendor_Specific32 ((UINT32) 0x00000400) +#define TPM_Vendor_Specific8 ((UINT8) 0x80) + +// +// Part 2, section 3.1: TPM_STRUCTURE_TAG +// +#define TPM_TAG_CONTEXTBLOB ((TPM_STRUCTURE_TAG) 0x0001) +#define TPM_TAG_CONTEXT_SENSITIVE ((TPM_STRUCTURE_TAG) 0x0002) +#define TPM_TAG_CONTEXTPOINTER ((TPM_STRUCTURE_TAG) 0x0003) +#define TPM_TAG_CONTEXTLIST ((TPM_STRUCTURE_TAG) 0x0004) +#define TPM_TAG_SIGNINFO ((TPM_STRUCTURE_TAG) 0x0005) +#define TPM_TAG_PCR_INFO_LONG ((TPM_STRUCTURE_TAG) 0x0006) +#define TPM_TAG_PERSISTENT_FLAGS ((TPM_STRUCTURE_TAG) 0x0007) +#define TPM_TAG_VOLATILE_FLAGS ((TPM_STRUCTURE_TAG) 0x0008) +#define TPM_TAG_PERSISTENT_DATA ((TPM_STRUCTURE_TAG) 0x0009) +#define TPM_TAG_VOLATILE_DATA ((TPM_STRUCTURE_TAG) 0x000A) +#define TPM_TAG_SV_DATA ((TPM_STRUCTURE_TAG) 0x000B) +#define TPM_TAG_EK_BLOB ((TPM_STRUCTURE_TAG) 0x000C) +#define TPM_TAG_EK_BLOB_AUTH ((TPM_STRUCTURE_TAG) 0x000D) +#define TPM_TAG_COUNTER_VALUE ((TPM_STRUCTURE_TAG) 0x000E) +#define 
TPM_TAG_TRANSPORT_INTERNAL ((TPM_STRUCTURE_TAG) 0x000F) +#define TPM_TAG_TRANSPORT_LOG_IN ((TPM_STRUCTURE_TAG) 0x0010) +#define TPM_TAG_TRANSPORT_LOG_OUT ((TPM_STRUCTURE_TAG) 0x0011) +#define TPM_TAG_AUDIT_EVENT_IN ((TPM_STRUCTURE_TAG) 0x0012) +#define TPM_TAG_AUDIT_EVENT_OUT ((TPM_STRUCTURE_TAG) 0x0013) +#define TPM_TAG_CURRENT_TICKS ((TPM_STRUCTURE_TAG) 0x0014) +#define TPM_TAG_KEY ((TPM_STRUCTURE_TAG) 0x0015) +#define TPM_TAG_STORED_DATA12 ((TPM_STRUCTURE_TAG) 0x0016) +#define TPM_TAG_NV_ATTRIBUTES ((TPM_STRUCTURE_TAG) 0x0017) +#define TPM_TAG_NV_DATA_PUBLIC ((TPM_STRUCTURE_TAG) 0x0018) +#define TPM_TAG_NV_DATA_SENSITIVE ((TPM_STRUCTURE_TAG) 0x0019) +#define TPM_TAG_DELEGATIONS ((TPM_STRUCTURE_TAG) 0x001A) +#define TPM_TAG_DELEGATE_PUBLIC ((TPM_STRUCTURE_TAG) 0x001B) +#define TPM_TAG_DELEGATE_TABLE_ROW ((TPM_STRUCTURE_TAG) 0x001C) +#define TPM_TAG_TRANSPORT_AUTH ((TPM_STRUCTURE_TAG) 0x001D) +#define TPM_TAG_TRANSPORT_PUBLIC ((TPM_STRUCTURE_TAG) 0x001E) +#define TPM_TAG_PERMANENT_FLAGS ((TPM_STRUCTURE_TAG) 0x001F) +#define TPM_TAG_STCLEAR_FLAGS ((TPM_STRUCTURE_TAG) 0x0020) +#define TPM_TAG_STANY_FLAGS ((TPM_STRUCTURE_TAG) 0x0021) +#define TPM_TAG_PERMANENT_DATA ((TPM_STRUCTURE_TAG) 0x0022) +#define TPM_TAG_STCLEAR_DATA ((TPM_STRUCTURE_TAG) 0x0023) +#define TPM_TAG_STANY_DATA ((TPM_STRUCTURE_TAG) 0x0024) +#define TPM_TAG_FAMILY_TABLE_ENTRY ((TPM_STRUCTURE_TAG) 0x0025) +#define TPM_TAG_DELEGATE_SENSITIVE ((TPM_STRUCTURE_TAG) 0x0026) +#define TPM_TAG_DELG_KEY_BLOB ((TPM_STRUCTURE_TAG) 0x0027) +#define TPM_TAG_KEY12 ((TPM_STRUCTURE_TAG) 0x0028) +#define TPM_TAG_CERTIFY_INFO2 ((TPM_STRUCTURE_TAG) 0x0029) +#define TPM_TAG_DELEGATE_OWNER_BLOB ((TPM_STRUCTURE_TAG) 0x002A) +#define TPM_TAG_EK_BLOB_ACTIVATE ((TPM_STRUCTURE_TAG) 0x002B) +#define TPM_TAG_DAA_BLOB ((TPM_STRUCTURE_TAG) 0x002C) +#define TPM_TAG_DAA_CONTEXT ((TPM_STRUCTURE_TAG) 0x002D) +#define TPM_TAG_DAA_ENFORCE ((TPM_STRUCTURE_TAG) 0x002E) +#define TPM_TAG_DAA_ISSUER ((TPM_STRUCTURE_TAG) 0x002F) +#define 
TPM_TAG_CAP_VERSION_INFO ((TPM_STRUCTURE_TAG) 0x0030) +#define TPM_TAG_DAA_SENSITIVE ((TPM_STRUCTURE_TAG) 0x0031) +#define TPM_TAG_DAA_TPM ((TPM_STRUCTURE_TAG) 0x0032) +#define TPM_TAG_CMK_MIGAUTH ((TPM_STRUCTURE_TAG) 0x0033) +#define TPM_TAG_CMK_SIGTICKET ((TPM_STRUCTURE_TAG) 0x0034) +#define TPM_TAG_CMK_MA_APPROVAL ((TPM_STRUCTURE_TAG) 0x0035) +#define TPM_TAG_QUOTE_INFO2 ((TPM_STRUCTURE_TAG) 0x0036) +#define TPM_TAG_DA_INFO ((TPM_STRUCTURE_TAG) 0x0037) +#define TPM_TAG_DA_LIMITED ((TPM_STRUCTURE_TAG) 0x0038) +#define TPM_TAG_DA_ACTION_TYPE ((TPM_STRUCTURE_TAG) 0x0039) + +// +// Part 2, section 4: TPM Types +// + +// +// Part 2, section 4.1: TPM_RESOURCE_TYPE +// +#define TPM_RT_KEY ((TPM_RESOURCE_TYPE) 0x00000001) ///< The handle is a key handle and is the result of a LoadKey type operation +#define TPM_RT_AUTH ((TPM_RESOURCE_TYPE) 0x00000002) ///< The handle is an authorization handle. Auth handles come from TPM_OIAP, TPM_OSAP and TPM_DSAP +#define TPM_RT_HASH ((TPM_RESOURCE_TYPE) 0x00000003) ///< Reserved for hashes +#define TPM_RT_TRANS ((TPM_RESOURCE_TYPE) 0x00000004) ///< The handle is for a transport session. Transport handles come from TPM_EstablishTransport +#define TPM_RT_CONTEXT ((TPM_RESOURCE_TYPE) 0x00000005) ///< Resource wrapped and held outside the TPM using the context save/restore commands +#define TPM_RT_COUNTER ((TPM_RESOURCE_TYPE) 0x00000006) ///< Reserved for counters +#define TPM_RT_DELEGATE ((TPM_RESOURCE_TYPE) 0x00000007) ///< The handle is for a delegate row. 
These are the internal rows held in NV storage by the TPM +#define TPM_RT_DAA_TPM ((TPM_RESOURCE_TYPE) 0x00000008) ///< The value is a DAA TPM specific blob +#define TPM_RT_DAA_V0 ((TPM_RESOURCE_TYPE) 0x00000009) ///< The value is a DAA V0 parameter +#define TPM_RT_DAA_V1 ((TPM_RESOURCE_TYPE) 0x0000000A) ///< The value is a DAA V1 parameter + +// +// Part 2, section 4.2: TPM_PAYLOAD_TYPE +// +#define TPM_PT_ASYM ((TPM_PAYLOAD_TYPE) 0x01) ///< The entity is an asymmetric key +#define TPM_PT_BIND ((TPM_PAYLOAD_TYPE) 0x02) ///< The entity is bound data +#define TPM_PT_MIGRATE ((TPM_PAYLOAD_TYPE) 0x03) ///< The entity is a migration blob +#define TPM_PT_MAINT ((TPM_PAYLOAD_TYPE) 0x04) ///< The entity is a maintenance blob +#define TPM_PT_SEAL ((TPM_PAYLOAD_TYPE) 0x05) ///< The entity is sealed data +#define TPM_PT_MIGRATE_RESTRICTED ((TPM_PAYLOAD_TYPE) 0x06) ///< The entity is a restricted-migration asymmetric key +#define TPM_PT_MIGRATE_EXTERNAL ((TPM_PAYLOAD_TYPE) 0x07) ///< The entity is a external migratable key +#define TPM_PT_CMK_MIGRATE ((TPM_PAYLOAD_TYPE) 0x08) ///< The entity is a CMK migratable blob +#define TPM_PT_VENDOR_SPECIFIC ((TPM_PAYLOAD_TYPE) 0x80) ///< 0x80 - 0xFF Vendor specific payloads + +// +// Part 2, section 4.3: TPM_ENTITY_TYPE +// +#define TPM_ET_KEYHANDLE ((UINT16) 0x0001) ///< The entity is a keyHandle or key +#define TPM_ET_OWNER ((UINT16) 0x0002) ///< The entity is the TPM Owner +#define TPM_ET_DATA ((UINT16) 0x0003) ///< The entity is some data +#define TPM_ET_SRK ((UINT16) 0x0004) ///< The entity is the SRK +#define TPM_ET_KEY ((UINT16) 0x0005) ///< The entity is a key or keyHandle +#define TPM_ET_REVOKE ((UINT16) 0x0006) ///< The entity is the RevokeTrust value +#define TPM_ET_DEL_OWNER_BLOB ((UINT16) 0x0007) ///< The entity is a delegate owner blob +#define TPM_ET_DEL_ROW ((UINT16) 0x0008) ///< The entity is a delegate row +#define TPM_ET_DEL_KEY_BLOB ((UINT16) 0x0009) ///< The entity is a delegate key blob +#define TPM_ET_COUNTER 
((UINT16) 0x000A) ///< The entity is a counter +#define TPM_ET_NV ((UINT16) 0x000B) ///< The entity is a NV index +#define TPM_ET_OPERATOR ((UINT16) 0x000C) ///< The entity is the operator +#define TPM_ET_RESERVED_HANDLE ((UINT16) 0x0040) ///< Reserved. This value avoids collisions with the handle MSB setting. +// +// TPM_ENTITY_TYPE MSB Values: The MSB is used to indicate the ADIP encryption scheme when applicable +// +#define TPM_ET_XOR ((UINT16) 0x0000) ///< ADIP encryption scheme: XOR +#define TPM_ET_AES128 ((UINT16) 0x0006) ///< ADIP encryption scheme: AES 128 bits + +// +// Part 2, section 4.4.1: Reserved Key Handles +// +#define TPM_KH_SRK ((TPM_KEY_HANDLE) 0x40000000) ///< The handle points to the SRK +#define TPM_KH_OWNER ((TPM_KEY_HANDLE) 0x40000001) ///< The handle points to the TPM Owner +#define TPM_KH_REVOKE ((TPM_KEY_HANDLE) 0x40000002) ///< The handle points to the RevokeTrust value +#define TPM_KH_TRANSPORT ((TPM_KEY_HANDLE) 0x40000003) ///< The handle points to the EstablishTransport static authorization +#define TPM_KH_OPERATOR ((TPM_KEY_HANDLE) 0x40000004) ///< The handle points to the Operator auth +#define TPM_KH_ADMIN ((TPM_KEY_HANDLE) 0x40000005) ///< The handle points to the delegation administration auth +#define TPM_KH_EK ((TPM_KEY_HANDLE) 0x40000006) ///< The handle points to the PUBEK, only usable with TPM_OwnerReadInternalPub + +// +// Part 2, section 4.5: TPM_STARTUP_TYPE +// +#define TPM_ST_CLEAR ((TPM_STARTUP_TYPE) 0x0001) ///< The TPM is starting up from a clean state +#define TPM_ST_STATE ((TPM_STARTUP_TYPE) 0x0002) ///< The TPM is starting up from a saved state +#define TPM_ST_DEACTIVATED ((TPM_STARTUP_TYPE) 0x0003) ///< The TPM is to startup and set the deactivated flag to TRUE + +// +// Part 2, section 4.6: TPM_STARTUP_EFFECTS +// The table makeup is still an open issue. +// + +// +// Part 2, section 4.7: TPM_PROTOCOL_ID +// +#define TPM_PID_OIAP ((TPM_PROTOCOL_ID) 0x0001) ///< The OIAP protocol. 
+#define TPM_PID_OSAP ((TPM_PROTOCOL_ID) 0x0002) ///< The OSAP protocol. +#define TPM_PID_ADIP ((TPM_PROTOCOL_ID) 0x0003) ///< The ADIP protocol. +#define TPM_PID_ADCP ((TPM_PROTOCOL_ID) 0x0004) ///< The ADCP protocol. +#define TPM_PID_OWNER ((TPM_PROTOCOL_ID) 0x0005) ///< The protocol for taking ownership of a TPM. +#define TPM_PID_DSAP ((TPM_PROTOCOL_ID) 0x0006) ///< The DSAP protocol +#define TPM_PID_TRANSPORT ((TPM_PROTOCOL_ID) 0x0007) ///< The transport protocol + +// +// Part 2, section 4.8: TPM_ALGORITHM_ID +// The TPM MUST support the algorithms TPM_ALG_RSA, TPM_ALG_SHA, TPM_ALG_HMAC, +// TPM_ALG_MGF1 +// +#define TPM_ALG_RSA ((TPM_ALGORITHM_ID) 0x00000001) ///< The RSA algorithm. +#define TPM_ALG_DES ((TPM_ALGORITHM_ID) 0x00000002) ///< The DES algorithm +#define TPM_ALG_3DES ((TPM_ALGORITHM_ID) 0x00000003) ///< The 3DES algorithm in EDE mode +#define TPM_ALG_SHA ((TPM_ALGORITHM_ID) 0x00000004) ///< The SHA1 algorithm +#define TPM_ALG_HMAC ((TPM_ALGORITHM_ID) 0x00000005) ///< The RFC 2104 HMAC algorithm +#define TPM_ALG_AES128 ((TPM_ALGORITHM_ID) 0x00000006) ///< The AES algorithm, key size 128 +#define TPM_ALG_MGF1 ((TPM_ALGORITHM_ID) 0x00000007) ///< The XOR algorithm using MGF1 to create a string the size of the encrypted block +#define TPM_ALG_AES192 ((TPM_ALGORITHM_ID) 0x00000008) ///< AES, key size 192 +#define TPM_ALG_AES256 ((TPM_ALGORITHM_ID) 0x00000009) ///< AES, key size 256 +#define TPM_ALG_XOR ((TPM_ALGORITHM_ID) 0x0000000A) ///< XOR using the rolling nonces + +// +// Part 2, section 4.9: TPM_PHYSICAL_PRESENCE +// +#define TPM_PHYSICAL_PRESENCE_HW_DISABLE ((TPM_PHYSICAL_PRESENCE) 0x0200) ///< Sets the physicalPresenceHWEnable to FALSE +#define TPM_PHYSICAL_PRESENCE_CMD_DISABLE ((TPM_PHYSICAL_PRESENCE) 0x0100) ///< Sets the physicalPresenceCMDEnable to FALSE +#define TPM_PHYSICAL_PRESENCE_LIFETIME_LOCK ((TPM_PHYSICAL_PRESENCE) 0x0080) ///< Sets the physicalPresenceLifetimeLock to TRUE +#define TPM_PHYSICAL_PRESENCE_HW_ENABLE 
((TPM_PHYSICAL_PRESENCE) 0x0040) ///< Sets the physicalPresenceHWEnable to TRUE +#define TPM_PHYSICAL_PRESENCE_CMD_ENABLE ((TPM_PHYSICAL_PRESENCE) 0x0020) ///< Sets the physicalPresenceCMDEnable to TRUE +#define TPM_PHYSICAL_PRESENCE_NOTPRESENT ((TPM_PHYSICAL_PRESENCE) 0x0010) ///< Sets PhysicalPresence = FALSE +#define TPM_PHYSICAL_PRESENCE_PRESENT ((TPM_PHYSICAL_PRESENCE) 0x0008) ///< Sets PhysicalPresence = TRUE +#define TPM_PHYSICAL_PRESENCE_LOCK ((TPM_PHYSICAL_PRESENCE) 0x0004) ///< Sets PhysicalPresenceLock = TRUE + +// +// Part 2, section 4.10: TPM_MIGRATE_SCHEME +// +#define TPM_MS_MIGRATE ((TPM_MIGRATE_SCHEME) 0x0001) ///< A public key that can be used with all TPM migration commands other than 'ReWrap' mode. +#define TPM_MS_REWRAP ((TPM_MIGRATE_SCHEME) 0x0002) ///< A public key that can be used for the ReWrap mode of TPM_CreateMigrationBlob. +#define TPM_MS_MAINT ((TPM_MIGRATE_SCHEME) 0x0003) ///< A public key that can be used for the Maintenance commands +#define TPM_MS_RESTRICT_MIGRATE ((TPM_MIGRATE_SCHEME) 0x0004) ///< The key is to be migrated to a Migration Authority. 
+#define TPM_MS_RESTRICT_APPROVE_DOUBLE ((TPM_MIGRATE_SCHEME) 0x0005) ///< The key is to be migrated to an entity approved by a Migration Authority using double wrapping + +// +// Part 2, section 4.11: TPM_EK_TYPE +// +#define TPM_EK_TYPE_ACTIVATE ((TPM_EK_TYPE) 0x0001) ///< The blob MUST be TPM_EK_BLOB_ACTIVATE +#define TPM_EK_TYPE_AUTH ((TPM_EK_TYPE) 0x0002) ///< The blob MUST be TPM_EK_BLOB_AUTH + +// +// Part 2, section 4.12: TPM_PLATFORM_SPECIFIC +// +#define TPM_PS_PC_11 ((TPM_PLATFORM_SPECIFIC) 0x0001) ///< PC Specific version 1.1 +#define TPM_PS_PC_12 ((TPM_PLATFORM_SPECIFIC) 0x0002) ///< PC Specific version 1.2 +#define TPM_PS_PDA_12 ((TPM_PLATFORM_SPECIFIC) 0x0003) ///< PDA Specific version 1.2 +#define TPM_PS_Server_12 ((TPM_PLATFORM_SPECIFIC) 0x0004) ///< Server Specific version 1.2 +#define TPM_PS_Mobile_12 ((TPM_PLATFORM_SPECIFIC) 0x0005) ///< Mobile Specific version 1.2 + +// +// Part 2, section 5: Basic Structures +// + +/// +/// Part 2, section 5.1: TPM_STRUCT_VER +/// +typedef struct tdTPM_STRUCT_VER { + UINT8 major; + UINT8 minor; + UINT8 revMajor; + UINT8 revMinor; +} TPM_STRUCT_VER; + +/// +/// Part 2, section 5.3: TPM_VERSION +/// +typedef struct tdTPM_VERSION { + TPM_VERSION_BYTE major; + TPM_VERSION_BYTE minor; + UINT8 revMajor; + UINT8 revMinor; +} TPM_VERSION; + + +#define TPM_SHA1_160_HASH_LEN 0x14 +#define TPM_SHA1BASED_NONCE_LEN TPM_SHA1_160_HASH_LEN + +/// +/// Part 2, section 5.4: TPM_DIGEST +/// +typedef struct tdTPM_DIGEST{ + UINT8 digest[TPM_SHA1_160_HASH_LEN]; +} TPM_DIGEST; + +/// +/// This SHALL be the digest of the chosen identityLabel and privacyCA for a new TPM identity +/// +typedef TPM_DIGEST TPM_CHOSENID_HASH; +/// +/// This SHALL be the hash of a list of PCR indexes and PCR values that a key or data is bound to +/// +typedef TPM_DIGEST TPM_COMPOSITE_HASH; +/// +/// This SHALL be the value of a DIR register +/// +typedef TPM_DIGEST TPM_DIRVALUE; + +typedef TPM_DIGEST TPM_HMAC; +/// +/// The value inside of the PCR +/// 
+typedef TPM_DIGEST TPM_PCRVALUE; +/// +/// This SHALL be the value of the current internal audit state +/// +typedef TPM_DIGEST TPM_AUDITDIGEST; + +/// +/// Part 2, section 5.5: TPM_NONCE +/// +typedef struct tdTPM_NONCE{ + UINT8 nonce[20]; +} TPM_NONCE; + +/// +/// This SHALL be a random value generated by a TPM immediately after the EK is installed +/// in that TPM, whenever an EK is installed in that TPM +/// +typedef TPM_NONCE TPM_DAA_TPM_SEED; +/// +/// This SHALL be a random value +/// +typedef TPM_NONCE TPM_DAA_CONTEXT_SEED; + +// +// Part 2, section 5.6: TPM_AUTHDATA +// +/// +/// The AuthData data is the information that is saved or passed to provide proof of ownership +/// 296 of an entity +/// +typedef UINT8 tdTPM_AUTHDATA[20]; + +typedef tdTPM_AUTHDATA TPM_AUTHDATA; +/// +/// A secret plaintext value used in the authorization process +/// +typedef TPM_AUTHDATA TPM_SECRET; +/// +/// A ciphertext (encrypted) version of AuthData data. The encryption mechanism depends on the context +/// +typedef TPM_AUTHDATA TPM_ENCAUTH; + +/// +/// Part 2, section 5.7: TPM_KEY_HANDLE_LIST +/// Size of handle is loaded * sizeof(TPM_KEY_HANDLE) +/// +typedef struct tdTPM_KEY_HANDLE_LIST { + UINT16 loaded; + TPM_KEY_HANDLE handle[1]; +} TPM_KEY_HANDLE_LIST; + +// +// Part 2, section 5.8: TPM_KEY_USAGE values +// +/// +/// TPM_KEY_SIGNING SHALL indicate a signing key. The [private] key SHALL be +/// used for signing operations, only. This means that it MUST be a leaf of the +/// Protected Storage key hierarchy. +/// +#define TPM_KEY_SIGNING ((UINT16) 0x0010) +/// +/// TPM_KEY_STORAGE SHALL indicate a storage key. The key SHALL be used to wrap +/// and unwrap other keys in the Protected Storage hierarchy +/// +#define TPM_KEY_STORAGE ((UINT16) 0x0011) +/// +/// TPM_KEY_IDENTITY SHALL indicate an identity key. The key SHALL be used for +/// operations that require a TPM identity, only. 
+/// +#define TPM_KEY_IDENTITY ((UINT16) 0x0012) +/// +/// TPM_KEY_AUTHCHANGE SHALL indicate an ephemeral key that is in use during +/// the ChangeAuthAsym process, only. +/// +#define TPM_KEY_AUTHCHANGE ((UINT16) 0x0013) +/// +/// TPM_KEY_BIND SHALL indicate a key that can be used for TPM_Bind and +/// TPM_Unbind operations only. +/// +#define TPM_KEY_BIND ((UINT16) 0x0014) +/// +/// TPM_KEY_LEGACY SHALL indicate a key that can perform signing and binding +/// operations. The key MAY be used for both signing and binding operations. +/// The TPM_KEY_LEGACY key type is to allow for use by applications where both +/// signing and encryption operations occur with the same key. The use of this +/// key type is not recommended TPM_KEY_MIGRATE 0x0016 This SHALL indicate a +/// key in use for TPM_MigrateKey +/// +#define TPM_KEY_LEGACY ((UINT16) 0x0015) +/// +/// TPM_KEY_MIGRAGE SHALL indicate a key in use for TPM_MigrateKey +/// +#define TPM_KEY_MIGRATE ((UINT16) 0x0016) + +// +// Part 2, section 5.8.1: Mandatory Key Usage Schemes +// + +#define TPM_ES_NONE ((TPM_ENC_SCHEME) 0x0001) +#define TPM_ES_RSAESPKCSv15 ((TPM_ENC_SCHEME) 0x0002) +#define TPM_ES_RSAESOAEP_SHA1_MGF1 ((TPM_ENC_SCHEME) 0x0003) +#define TPM_ES_SYM_CNT ((TPM_ENC_SCHEME) 0x0004) ///< rev94 defined +#define TPM_ES_SYM_CTR ((TPM_ENC_SCHEME) 0x0004) +#define TPM_ES_SYM_OFB ((TPM_ENC_SCHEME) 0x0005) + +#define TPM_SS_NONE ((TPM_SIG_SCHEME) 0x0001) +#define TPM_SS_RSASSAPKCS1v15_SHA1 ((TPM_SIG_SCHEME) 0x0002) +#define TPM_SS_RSASSAPKCS1v15_DER ((TPM_SIG_SCHEME) 0x0003) +#define TPM_SS_RSASSAPKCS1v15_INFO ((TPM_SIG_SCHEME) 0x0004) + +// +// Part 2, section 5.9: TPM_AUTH_DATA_USAGE values +// +#define TPM_AUTH_NEVER ((TPM_AUTH_DATA_USAGE) 0x00) +#define TPM_AUTH_ALWAYS ((TPM_AUTH_DATA_USAGE) 0x01) +#define TPM_AUTH_PRIV_USE_ONLY ((TPM_AUTH_DATA_USAGE) 0x03) + +/// +/// Part 2, section 5.10: TPM_KEY_FLAGS +/// +typedef enum tdTPM_KEY_FLAGS { + redirection = 0x00000001, + migratable = 0x00000002, + isVolatile = 
0x00000004, + pcrIgnoredOnRead = 0x00000008, + migrateAuthority = 0x00000010 +} TPM_KEY_FLAGS_BITS; + +/// +/// Part 2, section 5.11: TPM_CHANGEAUTH_VALIDATE +/// +typedef struct tdTPM_CHANGEAUTH_VALIDATE { + TPM_SECRET newAuthSecret; + TPM_NONCE n1; +} TPM_CHANGEAUTH_VALIDATE; + +/// +/// Part 2, section 5.12: TPM_MIGRATIONKEYAUTH +/// declared after section 10 to catch declaration of TPM_PUBKEY +/// +/// Part 2 section 10.1: TPM_KEY_PARMS +/// [size_is(parmSize)] BYTE* parms; +/// +typedef struct tdTPM_KEY_PARMS { + TPM_ALGORITHM_ID algorithmID; + TPM_ENC_SCHEME encScheme; + TPM_SIG_SCHEME sigScheme; + UINT32 parmSize; + UINT8 *parms; +} TPM_KEY_PARMS; + +/// +/// Part 2, section 10.4: TPM_STORE_PUBKEY +/// +typedef struct tdTPM_STORE_PUBKEY { + UINT32 keyLength; + UINT8 key[1]; +} TPM_STORE_PUBKEY; + +/// +/// Part 2, section 10.5: TPM_PUBKEY +/// +typedef struct tdTPM_PUBKEY{ + TPM_KEY_PARMS algorithmParms; + TPM_STORE_PUBKEY pubKey; +} TPM_PUBKEY; + +/// +/// Part 2, section 5.12: TPM_MIGRATIONKEYAUTH +/// +typedef struct tdTPM_MIGRATIONKEYAUTH{ + TPM_PUBKEY migrationKey; + TPM_MIGRATE_SCHEME migrationScheme; + TPM_DIGEST digest; +} TPM_MIGRATIONKEYAUTH; + +/// +/// Part 2, section 5.13: TPM_COUNTER_VALUE +/// +typedef struct tdTPM_COUNTER_VALUE{ + TPM_STRUCTURE_TAG tag; + UINT8 label[4]; + TPM_ACTUAL_COUNT counter; +} TPM_COUNTER_VALUE; + +/// +/// Part 2, section 5.14: TPM_SIGN_INFO +/// Size of data indicated by dataLen +/// +typedef struct tdTPM_SIGN_INFO { + TPM_STRUCTURE_TAG tag; + UINT8 fixed[4]; + TPM_NONCE replay; + UINT32 dataLen; + UINT8 *data; +} TPM_SIGN_INFO; + +/// +/// Part 2, section 5.15: TPM_MSA_COMPOSITE +/// Number of migAuthDigest indicated by MSAlist +/// +typedef struct tdTPM_MSA_COMPOSITE { + UINT32 MSAlist; + TPM_DIGEST migAuthDigest[1]; +} TPM_MSA_COMPOSITE; + +/// +/// Part 2, section 5.16: TPM_CMK_AUTH +/// +typedef struct tdTPM_CMK_AUTH{ + TPM_DIGEST migrationAuthorityDigest; + TPM_DIGEST destinationKeyDigest; + TPM_DIGEST 
sourceKeyDigest; +} TPM_CMK_AUTH; + +// +// Part 2, section 5.17: TPM_CMK_DELEGATE +// +#define TPM_CMK_DELEGATE_SIGNING ((TPM_CMK_DELEGATE) BIT31) +#define TPM_CMK_DELEGATE_STORAGE ((TPM_CMK_DELEGATE) BIT30) +#define TPM_CMK_DELEGATE_BIND ((TPM_CMK_DELEGATE) BIT29) +#define TPM_CMK_DELEGATE_LEGACY ((TPM_CMK_DELEGATE) BIT28) +#define TPM_CMK_DELEGATE_MIGRATE ((TPM_CMK_DELEGATE) BIT27) + +/// +/// Part 2, section 5.18: TPM_SELECT_SIZE +/// +typedef struct tdTPM_SELECT_SIZE { + UINT8 major; + UINT8 minor; + UINT16 reqSize; +} TPM_SELECT_SIZE; + +/// +/// Part 2, section 5,19: TPM_CMK_MIGAUTH +/// +typedef struct tdTPM_CMK_MIGAUTH{ + TPM_STRUCTURE_TAG tag; + TPM_DIGEST msaDigest; + TPM_DIGEST pubKeyDigest; +} TPM_CMK_MIGAUTH; + +/// +/// Part 2, section 5.20: TPM_CMK_SIGTICKET +/// +typedef struct tdTPM_CMK_SIGTICKET{ + TPM_STRUCTURE_TAG tag; + TPM_DIGEST verKeyDigest; + TPM_DIGEST signedData; +} TPM_CMK_SIGTICKET; + +/// +/// Part 2, section 5.21: TPM_CMK_MA_APPROVAL +/// +typedef struct tdTPM_CMK_MA_APPROVAL{ + TPM_STRUCTURE_TAG tag; + TPM_DIGEST migrationAuthorityDigest; +} TPM_CMK_MA_APPROVAL; + +// +// Part 2, section 6: Command Tags +// +#define TPM_TAG_RQU_COMMAND ((TPM_STRUCTURE_TAG) 0x00C1) +#define TPM_TAG_RQU_AUTH1_COMMAND ((TPM_STRUCTURE_TAG) 0x00C2) +#define TPM_TAG_RQU_AUTH2_COMMAND ((TPM_STRUCTURE_TAG) 0x00C3) +#define TPM_TAG_RSP_COMMAND ((TPM_STRUCTURE_TAG) 0x00C4) +#define TPM_TAG_RSP_AUTH1_COMMAND ((TPM_STRUCTURE_TAG) 0x00C5) +#define TPM_TAG_RSP_AUTH2_COMMAND ((TPM_STRUCTURE_TAG) 0x00C6) + +/// +/// Part 2, section 7.1: TPM_PERMANENT_FLAGS +/// +typedef struct tdTPM_PERMANENT_FLAGS{ + TPM_STRUCTURE_TAG tag; + BOOLEAN disable; + BOOLEAN ownership; + BOOLEAN deactivated; + BOOLEAN readPubek; + BOOLEAN disableOwnerClear; + BOOLEAN allowMaintenance; + BOOLEAN physicalPresenceLifetimeLock; + BOOLEAN physicalPresenceHWEnable; + BOOLEAN physicalPresenceCMDEnable; + BOOLEAN CEKPUsed; + BOOLEAN TPMpost; + BOOLEAN TPMpostLock; + BOOLEAN FIPS; + BOOLEAN 
operator; + BOOLEAN enableRevokeEK; + BOOLEAN nvLocked; + BOOLEAN readSRKPub; + BOOLEAN tpmEstablished; + BOOLEAN maintenanceDone; + BOOLEAN disableFullDALogicInfo; +} TPM_PERMANENT_FLAGS; + +// +// Part 2, section 7.1.1: Flag Restrictions (of TPM_PERMANENT_FLAGS) +// +#define TPM_PF_DISABLE ((TPM_CAPABILITY_AREA) 1) +#define TPM_PF_OWNERSHIP ((TPM_CAPABILITY_AREA) 2) +#define TPM_PF_DEACTIVATED ((TPM_CAPABILITY_AREA) 3) +#define TPM_PF_READPUBEK ((TPM_CAPABILITY_AREA) 4) +#define TPM_PF_DISABLEOWNERCLEAR ((TPM_CAPABILITY_AREA) 5) +#define TPM_PF_ALLOWMAINTENANCE ((TPM_CAPABILITY_AREA) 6) +#define TPM_PF_PHYSICALPRESENCELIFETIMELOCK ((TPM_CAPABILITY_AREA) 7) +#define TPM_PF_PHYSICALPRESENCEHWENABLE ((TPM_CAPABILITY_AREA) 8) +#define TPM_PF_PHYSICALPRESENCECMDENABLE ((TPM_CAPABILITY_AREA) 9) +#define TPM_PF_CEKPUSED ((TPM_CAPABILITY_AREA) 10) +#define TPM_PF_TPMPOST ((TPM_CAPABILITY_AREA) 11) +#define TPM_PF_TPMPOSTLOCK ((TPM_CAPABILITY_AREA) 12) +#define TPM_PF_FIPS ((TPM_CAPABILITY_AREA) 13) +#define TPM_PF_OPERATOR ((TPM_CAPABILITY_AREA) 14) +#define TPM_PF_ENABLEREVOKEEK ((TPM_CAPABILITY_AREA) 15) +#define TPM_PF_NV_LOCKED ((TPM_CAPABILITY_AREA) 16) +#define TPM_PF_READSRKPUB ((TPM_CAPABILITY_AREA) 17) +#define TPM_PF_TPMESTABLISHED ((TPM_CAPABILITY_AREA) 18) +#define TPM_PF_MAINTENANCEDONE ((TPM_CAPABILITY_AREA) 19) +#define TPM_PF_DISABLEFULLDALOGICINFO ((TPM_CAPABILITY_AREA) 20) + +/// +/// Part 2, section 7.2: TPM_STCLEAR_FLAGS +/// +typedef struct tdTPM_STCLEAR_FLAGS{ + TPM_STRUCTURE_TAG tag; + BOOLEAN deactivated; + BOOLEAN disableForceClear; + BOOLEAN physicalPresence; + BOOLEAN physicalPresenceLock; + BOOLEAN bGlobalLock; +} TPM_STCLEAR_FLAGS; + +// +// Part 2, section 7.2.1: Flag Restrictions (of TPM_STCLEAR_FLAGS) +// +#define TPM_SF_DEACTIVATED ((TPM_CAPABILITY_AREA) 1) +#define TPM_SF_DISABLEFORCECLEAR ((TPM_CAPABILITY_AREA) 2) +#define TPM_SF_PHYSICALPRESENCE ((TPM_CAPABILITY_AREA) 3) +#define TPM_SF_PHYSICALPRESENCELOCK ((TPM_CAPABILITY_AREA) 4) 
+#define TPM_SF_BGLOBALLOCK ((TPM_CAPABILITY_AREA) 5) + +/// +/// Part 2, section 7.3: TPM_STANY_FLAGS +/// +typedef struct tdTPM_STANY_FLAGS{ + TPM_STRUCTURE_TAG tag; + BOOLEAN postInitialise; + TPM_MODIFIER_INDICATOR localityModifier; + BOOLEAN transportExclusive; + BOOLEAN TOSPresent; +} TPM_STANY_FLAGS; + +// +// Part 2, section 7.3.1: Flag Restrictions (of TPM_STANY_FLAGS) +// +#define TPM_AF_POSTINITIALISE ((TPM_CAPABILITY_AREA) 1) +#define TPM_AF_LOCALITYMODIFIER ((TPM_CAPABILITY_AREA) 2) +#define TPM_AF_TRANSPORTEXCLUSIVE ((TPM_CAPABILITY_AREA) 3) +#define TPM_AF_TOSPRESENT ((TPM_CAPABILITY_AREA) 4) + +// +// All those structures defined in section 7.4, 7.5, 7.6 are not normative and +// thus no definitions here +// +// Part 2, section 7.4: TPM_PERMANENT_DATA +// +#define TPM_MIN_COUNTERS 4 ///< the minimum number of counters is 4 +#define TPM_DELEGATE_KEY TPM_KEY +#define TPM_NUM_PCR 16 +#define TPM_MAX_NV_WRITE_NOOWNER 64 + +// +// Part 2, section 7.4.1: PERMANENT_DATA Subcap for SetCapability +// +#define TPM_PD_REVMAJOR ((TPM_CAPABILITY_AREA) 1) +#define TPM_PD_REVMINOR ((TPM_CAPABILITY_AREA) 2) +#define TPM_PD_TPMPROOF ((TPM_CAPABILITY_AREA) 3) +#define TPM_PD_OWNERAUTH ((TPM_CAPABILITY_AREA) 4) +#define TPM_PD_OPERATORAUTH ((TPM_CAPABILITY_AREA) 5) +#define TPM_PD_MANUMAINTPUB ((TPM_CAPABILITY_AREA) 6) +#define TPM_PD_ENDORSEMENTKEY ((TPM_CAPABILITY_AREA) 7) +#define TPM_PD_SRK ((TPM_CAPABILITY_AREA) 8) +#define TPM_PD_DELEGATEKEY ((TPM_CAPABILITY_AREA) 9) +#define TPM_PD_CONTEXTKEY ((TPM_CAPABILITY_AREA) 10) +#define TPM_PD_AUDITMONOTONICCOUNTER ((TPM_CAPABILITY_AREA) 11) +#define TPM_PD_MONOTONICCOUNTER ((TPM_CAPABILITY_AREA) 12) +#define TPM_PD_PCRATTRIB ((TPM_CAPABILITY_AREA) 13) +#define TPM_PD_ORDINALAUDITSTATUS ((TPM_CAPABILITY_AREA) 14) +#define TPM_PD_AUTHDIR ((TPM_CAPABILITY_AREA) 15) +#define TPM_PD_RNGSTATE ((TPM_CAPABILITY_AREA) 16) +#define TPM_PD_FAMILYTABLE ((TPM_CAPABILITY_AREA) 17) +#define TPM_DELEGATETABLE ((TPM_CAPABILITY_AREA) 
18) +#define TPM_PD_EKRESET ((TPM_CAPABILITY_AREA) 19) +#define TPM_PD_MAXNVBUFSIZE ((TPM_CAPABILITY_AREA) 20) +#define TPM_PD_LASTFAMILYID ((TPM_CAPABILITY_AREA) 21) +#define TPM_PD_NOOWNERNVWRITE ((TPM_CAPABILITY_AREA) 22) +#define TPM_PD_RESTRICTDELEGATE ((TPM_CAPABILITY_AREA) 23) +#define TPM_PD_TPMDAASEED ((TPM_CAPABILITY_AREA) 24) +#define TPM_PD_DAAPROOF ((TPM_CAPABILITY_AREA) 25) + +/// +/// Part 2, section 7.5: TPM_STCLEAR_DATA +/// available inside TPM only +/// + typedef struct tdTPM_STCLEAR_DATA{ + TPM_STRUCTURE_TAG tag; + TPM_NONCE contextNonceKey; + TPM_COUNT_ID countID; + UINT32 ownerReference; + BOOLEAN disableResetLock; + TPM_PCRVALUE PCR[TPM_NUM_PCR]; + UINT32 deferredPhysicalPresence; + }TPM_STCLEAR_DATA; + +// +// Part 2, section 7.5.1: STCLEAR_DATA Subcap for SetCapability +// +#define TPM_SD_CONTEXTNONCEKEY ((TPM_CAPABILITY_AREA)0x00000001) +#define TPM_SD_COUNTID ((TPM_CAPABILITY_AREA)0x00000002) +#define TPM_SD_OWNERREFERENCE ((TPM_CAPABILITY_AREA)0x00000003) +#define TPM_SD_DISABLERESETLOCK ((TPM_CAPABILITY_AREA)0x00000004) +#define TPM_SD_PCR ((TPM_CAPABILITY_AREA)0x00000005) +#define TPM_SD_DEFERREDPHYSICALPRESENCE ((TPM_CAPABILITY_AREA)0x00000006) + +// +// Part 2, section 7.6.1: STANY_DATA Subcap for SetCapability +// +#define TPM_AD_CONTEXTNONCESESSION ((TPM_CAPABILITY_AREA) 1) +#define TPM_AD_AUDITDIGEST ((TPM_CAPABILITY_AREA) 2) +#define TPM_AD_CURRENTTICKS ((TPM_CAPABILITY_AREA) 3) +#define TPM_AD_CONTEXTCOUNT ((TPM_CAPABILITY_AREA) 4) +#define TPM_AD_CONTEXTLIST ((TPM_CAPABILITY_AREA) 5) +#define TPM_AD_SESSIONS ((TPM_CAPABILITY_AREA) 6) + +// +// Part 2, section 8: PCR Structures +// + +/// +/// Part 2, section 8.1: TPM_PCR_SELECTION +/// Size of pcrSelect[] indicated by sizeOfSelect +/// +typedef struct tdTPM_PCR_SELECTION { + UINT16 sizeOfSelect; + UINT8 pcrSelect[1]; +} TPM_PCR_SELECTION; + +/// +/// Part 2, section 8.2: TPM_PCR_COMPOSITE +/// Size of pcrValue[] indicated by valueSize +/// +typedef struct tdTPM_PCR_COMPOSITE { 
+ TPM_PCR_SELECTION select; + UINT32 valueSize; + TPM_PCRVALUE pcrValue[1]; +} TPM_PCR_COMPOSITE; + +/// +/// Part 2, section 8.3: TPM_PCR_INFO +/// +typedef struct tdTPM_PCR_INFO { + TPM_PCR_SELECTION pcrSelection; + TPM_COMPOSITE_HASH digestAtRelease; + TPM_COMPOSITE_HASH digestAtCreation; +} TPM_PCR_INFO; + +/// +/// Part 2, section 8.6: TPM_LOCALITY_SELECTION +/// +typedef UINT8 TPM_LOCALITY_SELECTION; + +#define TPM_LOC_FOUR ((UINT8) 0x10) +#define TPM_LOC_THREE ((UINT8) 0x08) +#define TPM_LOC_TWO ((UINT8) 0x04) +#define TPM_LOC_ONE ((UINT8) 0x02) +#define TPM_LOC_ZERO ((UINT8) 0x01) + +/// +/// Part 2, section 8.4: TPM_PCR_INFO_LONG +/// +typedef struct tdTPM_PCR_INFO_LONG { + TPM_STRUCTURE_TAG tag; + TPM_LOCALITY_SELECTION localityAtCreation; + TPM_LOCALITY_SELECTION localityAtRelease; + TPM_PCR_SELECTION creationPCRSelection; + TPM_PCR_SELECTION releasePCRSelection; + TPM_COMPOSITE_HASH digestAtCreation; + TPM_COMPOSITE_HASH digestAtRelease; +} TPM_PCR_INFO_LONG; + +/// +/// Part 2, section 8.5: TPM_PCR_INFO_SHORT +/// +typedef struct tdTPM_PCR_INFO_SHORT{ + TPM_PCR_SELECTION pcrSelection; + TPM_LOCALITY_SELECTION localityAtRelease; + TPM_COMPOSITE_HASH digestAtRelease; +} TPM_PCR_INFO_SHORT; + +/// +/// Part 2, section 8.8: TPM_PCR_ATTRIBUTES +/// +typedef struct tdTPM_PCR_ATTRIBUTES{ + BOOLEAN pcrReset; + TPM_LOCALITY_SELECTION pcrExtendLocal; + TPM_LOCALITY_SELECTION pcrResetLocal; +} TPM_PCR_ATTRIBUTES; + +// +// Part 2, section 9: Storage Structures +// + +/// +/// Part 2, section 9.1: TPM_STORED_DATA +/// [size_is(sealInfoSize)] BYTE* sealInfo; +/// [size_is(encDataSize)] BYTE* encData; +/// +typedef struct tdTPM_STORED_DATA { + TPM_STRUCT_VER ver; + UINT32 sealInfoSize; + UINT8 *sealInfo; + UINT32 encDataSize; + UINT8 *encData; +} TPM_STORED_DATA; + +/// +/// Part 2, section 9.2: TPM_STORED_DATA12 +/// [size_is(sealInfoSize)] BYTE* sealInfo; +/// [size_is(encDataSize)] BYTE* encData; +/// +typedef struct tdTPM_STORED_DATA12 { + TPM_STRUCTURE_TAG tag; 
+ TPM_ENTITY_TYPE et; + UINT32 sealInfoSize; + UINT8 *sealInfo; + UINT32 encDataSize; + UINT8 *encData; +} TPM_STORED_DATA12; + +/// +/// Part 2, section 9.3: TPM_SEALED_DATA +/// [size_is(dataSize)] BYTE* data; +/// +typedef struct tdTPM_SEALED_DATA { + TPM_PAYLOAD_TYPE payload; + TPM_SECRET authData; + TPM_NONCE tpmProof; + TPM_DIGEST storedDigest; + UINT32 dataSize; + UINT8 *data; +} TPM_SEALED_DATA; + +/// +/// Part 2, section 9.4: TPM_SYMMETRIC_KEY +/// [size_is(size)] BYTE* data; +/// +typedef struct tdTPM_SYMMETRIC_KEY { + TPM_ALGORITHM_ID algId; + TPM_ENC_SCHEME encScheme; + UINT16 dataSize; + UINT8 *data; +} TPM_SYMMETRIC_KEY; + +/// +/// Part 2, section 9.5: TPM_BOUND_DATA +/// +typedef struct tdTPM_BOUND_DATA { + TPM_STRUCT_VER ver; + TPM_PAYLOAD_TYPE payload; + UINT8 payloadData[1]; +} TPM_BOUND_DATA; + +// +// Part 2 section 10: TPM_KEY complex +// + +// +// Section 10.1, 10.4, and 10.5 have been defined previously +// + +/// +/// Part 2, section 10.2: TPM_KEY +/// [size_is(encDataSize)] BYTE* encData; +/// +typedef struct tdTPM_KEY{ + TPM_STRUCT_VER ver; + TPM_KEY_USAGE keyUsage; + TPM_KEY_FLAGS keyFlags; + TPM_AUTH_DATA_USAGE authDataUsage; + TPM_KEY_PARMS algorithmParms; + UINT32 PCRInfoSize; + UINT8 *PCRInfo; + TPM_STORE_PUBKEY pubKey; + UINT32 encDataSize; + UINT8 *encData; +} TPM_KEY; + +/// +/// Part 2, section 10.3: TPM_KEY12 +/// [size_is(encDataSize)] BYTE* encData; +/// +typedef struct tdTPM_KEY12{ + TPM_STRUCTURE_TAG tag; + UINT16 fill; + TPM_KEY_USAGE keyUsage; + TPM_KEY_FLAGS keyFlags; + TPM_AUTH_DATA_USAGE authDataUsage; + TPM_KEY_PARMS algorithmParms; + UINT32 PCRInfoSize; + UINT8 *PCRInfo; + TPM_STORE_PUBKEY pubKey; + UINT32 encDataSize; + UINT8 *encData; +} TPM_KEY12; + +/// +/// Part 2, section 10.7: TPM_STORE_PRIVKEY +/// [size_is(keyLength)] BYTE* key; +/// +typedef struct tdTPM_STORE_PRIVKEY { + UINT32 keyLength; + UINT8 *key; +} TPM_STORE_PRIVKEY; + +/// +/// Part 2, section 10.6: TPM_STORE_ASYMKEY +/// +typedef struct 
tdTPM_STORE_ASYMKEY { // pos len total + TPM_PAYLOAD_TYPE payload; // 0 1 1 + TPM_SECRET usageAuth; // 1 20 21 + TPM_SECRET migrationAuth; // 21 20 41 + TPM_DIGEST pubDataDigest; // 41 20 61 + TPM_STORE_PRIVKEY privKey; // 61 132-151 193-214 +} TPM_STORE_ASYMKEY; + +/// +/// Part 2, section 10.8: TPM_MIGRATE_ASYMKEY +/// [size_is(partPrivKeyLen)] BYTE* partPrivKey; +/// +typedef struct tdTPM_MIGRATE_ASYMKEY { // pos len total + TPM_PAYLOAD_TYPE payload; // 0 1 1 + TPM_SECRET usageAuth; // 1 20 21 + TPM_DIGEST pubDataDigest; // 21 20 41 + UINT32 partPrivKeyLen; // 41 4 45 + UINT8 *partPrivKey; // 45 112-127 157-172 +} TPM_MIGRATE_ASYMKEY; + +/// +/// Part 2, section 10.9: TPM_KEY_CONTROL +/// +#define TPM_KEY_CONTROL_OWNER_EVICT ((UINT32) 0x00000001) + +// +// Part 2, section 11: Signed Structures +// + +/// +/// Part 2, section 11.1: TPM_CERTIFY_INFO Structure +/// +typedef struct tdTPM_CERTIFY_INFO { + TPM_STRUCT_VER version; + TPM_KEY_USAGE keyUsage; + TPM_KEY_FLAGS keyFlags; + TPM_AUTH_DATA_USAGE authDataUsage; + TPM_KEY_PARMS algorithmParms; + TPM_DIGEST pubkeyDigest; + TPM_NONCE data; + BOOLEAN parentPCRStatus; + UINT32 PCRInfoSize; + UINT8 *PCRInfo; +} TPM_CERTIFY_INFO; + +/// +/// Part 2, section 11.2: TPM_CERTIFY_INFO2 Structure +/// +typedef struct tdTPM_CERTIFY_INFO2 { + TPM_STRUCTURE_TAG tag; + UINT8 fill; + TPM_PAYLOAD_TYPE payloadType; + TPM_KEY_USAGE keyUsage; + TPM_KEY_FLAGS keyFlags; + TPM_AUTH_DATA_USAGE authDataUsage; + TPM_KEY_PARMS algorithmParms; + TPM_DIGEST pubkeyDigest; + TPM_NONCE data; + BOOLEAN parentPCRStatus; + UINT32 PCRInfoSize; + UINT8 *PCRInfo; + UINT32 migrationAuthoritySize; + UINT8 *migrationAuthority; +} TPM_CERTIFY_INFO2; + +/// +/// Part 2, section 11.3 TPM_QUOTE_INFO Structure +/// +typedef struct tdTPM_QUOTE_INFO { + TPM_STRUCT_VER version; + UINT8 fixed[4]; + TPM_COMPOSITE_HASH digestValue; + TPM_NONCE externalData; +} TPM_QUOTE_INFO; + +/// +/// Part 2, section 11.4 TPM_QUOTE_INFO2 Structure +/// +typedef struct 
tdTPM_QUOTE_INFO2 { + TPM_STRUCTURE_TAG tag; + UINT8 fixed[4]; + TPM_NONCE externalData; + TPM_PCR_INFO_SHORT infoShort; +} TPM_QUOTE_INFO2; + +// +// Part 2, section 12: Identity Structures +// + +/// +/// Part 2, section 12.1 TPM_EK_BLOB +/// +typedef struct tdTPM_EK_BLOB { + TPM_STRUCTURE_TAG tag; + TPM_EK_TYPE ekType; + UINT32 blobSize; + UINT8 *blob; +} TPM_EK_BLOB; + +/// +/// Part 2, section 12.2 TPM_EK_BLOB_ACTIVATE +/// +typedef struct tdTPM_EK_BLOB_ACTIVATE { + TPM_STRUCTURE_TAG tag; + TPM_SYMMETRIC_KEY sessionKey; + TPM_DIGEST idDigest; + TPM_PCR_INFO_SHORT pcrInfo; +} TPM_EK_BLOB_ACTIVATE; + +/// +/// Part 2, section 12.3 TPM_EK_BLOB_AUTH +/// +typedef struct tdTPM_EK_BLOB_AUTH { + TPM_STRUCTURE_TAG tag; + TPM_SECRET authValue; +} TPM_EK_BLOB_AUTH; + + +/// +/// Part 2, section 12.5 TPM_IDENTITY_CONTENTS +/// +typedef struct tdTPM_IDENTITY_CONTENTS { + TPM_STRUCT_VER ver; + UINT32 ordinal; + TPM_CHOSENID_HASH labelPrivCADigest; + TPM_PUBKEY identityPubKey; +} TPM_IDENTITY_CONTENTS; + +/// +/// Part 2, section 12.6 TPM_IDENTITY_REQ +/// +typedef struct tdTPM_IDENTITY_REQ { + UINT32 asymSize; + UINT32 symSize; + TPM_KEY_PARMS asymAlgorithm; + TPM_KEY_PARMS symAlgorithm; + UINT8 *asymBlob; + UINT8 *symBlob; +} TPM_IDENTITY_REQ; + +/// +/// Part 2, section 12.7 TPM_IDENTITY_PROOF +/// +typedef struct tdTPM_IDENTITY_PROOF { + TPM_STRUCT_VER ver; + UINT32 labelSize; + UINT32 identityBindingSize; + UINT32 endorsementSize; + UINT32 platformSize; + UINT32 conformanceSize; + TPM_PUBKEY identityKey; + UINT8 *labelArea; + UINT8 *identityBinding; + UINT8 *endorsementCredential; + UINT8 *platformCredential; + UINT8 *conformanceCredential; +} TPM_IDENTITY_PROOF; + +/// +/// Part 2, section 12.8 TPM_ASYM_CA_CONTENTS +/// +typedef struct tdTPM_ASYM_CA_CONTENTS { + TPM_SYMMETRIC_KEY sessionKey; + TPM_DIGEST idDigest; +} TPM_ASYM_CA_CONTENTS; + +/// +/// Part 2, section 12.9 TPM_SYM_CA_ATTESTATION +/// +typedef struct tdTPM_SYM_CA_ATTESTATION { + UINT32 credSize; + 
TPM_KEY_PARMS algorithm; + UINT8 *credential; +} TPM_SYM_CA_ATTESTATION; + +/// +/// Part 2, section 15: Tick Structures +/// Placed here out of order because definitions are used in section 13. +/// +typedef struct tdTPM_CURRENT_TICKS { + TPM_STRUCTURE_TAG tag; + UINT64 currentTicks; + UINT16 tickRate; + TPM_NONCE tickNonce; +} TPM_CURRENT_TICKS; + +/// +/// Part 2, section 13: Transport structures +/// + +/// +/// Part 2, section 13.1: TPM _TRANSPORT_PUBLIC +/// +typedef struct tdTPM_TRANSPORT_PUBLIC { + TPM_STRUCTURE_TAG tag; + TPM_TRANSPORT_ATTRIBUTES transAttributes; + TPM_ALGORITHM_ID algId; + TPM_ENC_SCHEME encScheme; +} TPM_TRANSPORT_PUBLIC; + +// +// Part 2, section 13.1.1 TPM_TRANSPORT_ATTRIBUTES Definitions +// +#define TPM_TRANSPORT_ENCRYPT ((UINT32)BIT0) +#define TPM_TRANSPORT_LOG ((UINT32)BIT1) +#define TPM_TRANSPORT_EXCLUSIVE ((UINT32)BIT2) + +/// +/// Part 2, section 13.2 TPM_TRANSPORT_INTERNAL +/// +typedef struct tdTPM_TRANSPORT_INTERNAL { + TPM_STRUCTURE_TAG tag; + TPM_AUTHDATA authData; + TPM_TRANSPORT_PUBLIC transPublic; + TPM_TRANSHANDLE transHandle; + TPM_NONCE transNonceEven; + TPM_DIGEST transDigest; +} TPM_TRANSPORT_INTERNAL; + +/// +/// Part 2, section 13.3 TPM_TRANSPORT_LOG_IN structure +/// +typedef struct tdTPM_TRANSPORT_LOG_IN { + TPM_STRUCTURE_TAG tag; + TPM_DIGEST parameters; + TPM_DIGEST pubKeyHash; +} TPM_TRANSPORT_LOG_IN; + +/// +/// Part 2, section 13.4 TPM_TRANSPORT_LOG_OUT structure +/// +typedef struct tdTPM_TRANSPORT_LOG_OUT { + TPM_STRUCTURE_TAG tag; + TPM_CURRENT_TICKS currentTicks; + TPM_DIGEST parameters; + TPM_MODIFIER_INDICATOR locality; +} TPM_TRANSPORT_LOG_OUT; + +/// +/// Part 2, section 13.5 TPM_TRANSPORT_AUTH structure +/// +typedef struct tdTPM_TRANSPORT_AUTH { + TPM_STRUCTURE_TAG tag; + TPM_AUTHDATA authData; +} TPM_TRANSPORT_AUTH; + +// +// Part 2, section 14: Audit Structures +// + +/// +/// Part 2, section 14.1 TPM_AUDIT_EVENT_IN structure +/// +typedef struct tdTPM_AUDIT_EVENT_IN { + TPM_STRUCTURE_TAG tag; + 
TPM_DIGEST inputParms; + TPM_COUNTER_VALUE auditCount; +} TPM_AUDIT_EVENT_IN; + +/// +/// Part 2, section 14.2 TPM_AUDIT_EVENT_OUT structure +/// +typedef struct tdTPM_AUDIT_EVENT_OUT { + TPM_STRUCTURE_TAG tag; + TPM_COMMAND_CODE ordinal; + TPM_DIGEST outputParms; + TPM_COUNTER_VALUE auditCount; + TPM_RESULT returnCode; +} TPM_AUDIT_EVENT_OUT; + +// +// Part 2, section 16: Return Codes +// + +#define TPM_VENDOR_ERROR TPM_Vendor_Specific32 +#define TPM_NON_FATAL 0x00000800 + +#define TPM_SUCCESS ((TPM_RESULT) TPM_BASE) +#define TPM_AUTHFAIL ((TPM_RESULT) (TPM_BASE + 1)) +#define TPM_BADINDEX ((TPM_RESULT) (TPM_BASE + 2)) +#define TPM_BAD_PARAMETER ((TPM_RESULT) (TPM_BASE + 3)) +#define TPM_AUDITFAILURE ((TPM_RESULT) (TPM_BASE + 4)) +#define TPM_CLEAR_DISABLED ((TPM_RESULT) (TPM_BASE + 5)) +#define TPM_DEACTIVATED ((TPM_RESULT) (TPM_BASE + 6)) +#define TPM_DISABLED ((TPM_RESULT) (TPM_BASE + 7)) +#define TPM_DISABLED_CMD ((TPM_RESULT) (TPM_BASE + 8)) +#define TPM_FAIL ((TPM_RESULT) (TPM_BASE + 9)) +#define TPM_BAD_ORDINAL ((TPM_RESULT) (TPM_BASE + 10)) +#define TPM_INSTALL_DISABLED ((TPM_RESULT) (TPM_BASE + 11)) +#define TPM_INVALID_KEYHANDLE ((TPM_RESULT) (TPM_BASE + 12)) +#define TPM_KEYNOTFOUND ((TPM_RESULT) (TPM_BASE + 13)) +#define TPM_INAPPROPRIATE_ENC ((TPM_RESULT) (TPM_BASE + 14)) +#define TPM_MIGRATEFAIL ((TPM_RESULT) (TPM_BASE + 15)) +#define TPM_INVALID_PCR_INFO ((TPM_RESULT) (TPM_BASE + 16)) +#define TPM_NOSPACE ((TPM_RESULT) (TPM_BASE + 17)) +#define TPM_NOSRK ((TPM_RESULT) (TPM_BASE + 18)) +#define TPM_NOTSEALED_BLOB ((TPM_RESULT) (TPM_BASE + 19)) +#define TPM_OWNER_SET ((TPM_RESULT) (TPM_BASE + 20)) +#define TPM_RESOURCES ((TPM_RESULT) (TPM_BASE + 21)) +#define TPM_SHORTRANDOM ((TPM_RESULT) (TPM_BASE + 22)) +#define TPM_SIZE ((TPM_RESULT) (TPM_BASE + 23)) +#define TPM_WRONGPCRVAL ((TPM_RESULT) (TPM_BASE + 24)) +#define TPM_BAD_PARAM_SIZE ((TPM_RESULT) (TPM_BASE + 25)) +#define TPM_SHA_THREAD ((TPM_RESULT) (TPM_BASE + 26)) +#define TPM_SHA_ERROR 
((TPM_RESULT) (TPM_BASE + 27)) +#define TPM_FAILEDSELFTEST ((TPM_RESULT) (TPM_BASE + 28)) +#define TPM_AUTH2FAIL ((TPM_RESULT) (TPM_BASE + 29)) +#define TPM_BADTAG ((TPM_RESULT) (TPM_BASE + 30)) +#define TPM_IOERROR ((TPM_RESULT) (TPM_BASE + 31)) +#define TPM_ENCRYPT_ERROR ((TPM_RESULT) (TPM_BASE + 32)) +#define TPM_DECRYPT_ERROR ((TPM_RESULT) (TPM_BASE + 33)) +#define TPM_INVALID_AUTHHANDLE ((TPM_RESULT) (TPM_BASE + 34)) +#define TPM_NO_ENDORSEMENT ((TPM_RESULT) (TPM_BASE + 35)) +#define TPM_INVALID_KEYUSAGE ((TPM_RESULT) (TPM_BASE + 36)) +#define TPM_WRONG_ENTITYTYPE ((TPM_RESULT) (TPM_BASE + 37)) +#define TPM_INVALID_POSTINIT ((TPM_RESULT) (TPM_BASE + 38)) +#define TPM_INAPPROPRIATE_SIG ((TPM_RESULT) (TPM_BASE + 39)) +#define TPM_BAD_KEY_PROPERTY ((TPM_RESULT) (TPM_BASE + 40)) +#define TPM_BAD_MIGRATION ((TPM_RESULT) (TPM_BASE + 41)) +#define TPM_BAD_SCHEME ((TPM_RESULT) (TPM_BASE + 42)) +#define TPM_BAD_DATASIZE ((TPM_RESULT) (TPM_BASE + 43)) +#define TPM_BAD_MODE ((TPM_RESULT) (TPM_BASE + 44)) +#define TPM_BAD_PRESENCE ((TPM_RESULT) (TPM_BASE + 45)) +#define TPM_BAD_VERSION ((TPM_RESULT) (TPM_BASE + 46)) +#define TPM_NO_WRAP_TRANSPORT ((TPM_RESULT) (TPM_BASE + 47)) +#define TPM_AUDITFAIL_UNSUCCESSFUL ((TPM_RESULT) (TPM_BASE + 48)) +#define TPM_AUDITFAIL_SUCCESSFUL ((TPM_RESULT) (TPM_BASE + 49)) +#define TPM_NOTRESETABLE ((TPM_RESULT) (TPM_BASE + 50)) +#define TPM_NOTLOCAL ((TPM_RESULT) (TPM_BASE + 51)) +#define TPM_BAD_TYPE ((TPM_RESULT) (TPM_BASE + 52)) +#define TPM_INVALID_RESOURCE ((TPM_RESULT) (TPM_BASE + 53)) +#define TPM_NOTFIPS ((TPM_RESULT) (TPM_BASE + 54)) +#define TPM_INVALID_FAMILY ((TPM_RESULT) (TPM_BASE + 55)) +#define TPM_NO_NV_PERMISSION ((TPM_RESULT) (TPM_BASE + 56)) +#define TPM_REQUIRES_SIGN ((TPM_RESULT) (TPM_BASE + 57)) +#define TPM_KEY_NOTSUPPORTED ((TPM_RESULT) (TPM_BASE + 58)) +#define TPM_AUTH_CONFLICT ((TPM_RESULT) (TPM_BASE + 59)) +#define TPM_AREA_LOCKED ((TPM_RESULT) (TPM_BASE + 60)) +#define TPM_BAD_LOCALITY ((TPM_RESULT) (TPM_BASE 
+ 61)) +#define TPM_READ_ONLY ((TPM_RESULT) (TPM_BASE + 62)) +#define TPM_PER_NOWRITE ((TPM_RESULT) (TPM_BASE + 63)) +#define TPM_FAMILYCOUNT ((TPM_RESULT) (TPM_BASE + 64)) +#define TPM_WRITE_LOCKED ((TPM_RESULT) (TPM_BASE + 65)) +#define TPM_BAD_ATTRIBUTES ((TPM_RESULT) (TPM_BASE + 66)) +#define TPM_INVALID_STRUCTURE ((TPM_RESULT) (TPM_BASE + 67)) +#define TPM_KEY_OWNER_CONTROL ((TPM_RESULT) (TPM_BASE + 68)) +#define TPM_BAD_COUNTER ((TPM_RESULT) (TPM_BASE + 69)) +#define TPM_NOT_FULLWRITE ((TPM_RESULT) (TPM_BASE + 70)) +#define TPM_CONTEXT_GAP ((TPM_RESULT) (TPM_BASE + 71)) +#define TPM_MAXNVWRITES ((TPM_RESULT) (TPM_BASE + 72)) +#define TPM_NOOPERATOR ((TPM_RESULT) (TPM_BASE + 73)) +#define TPM_RESOURCEMISSING ((TPM_RESULT) (TPM_BASE + 74)) +#define TPM_DELEGATE_LOCK ((TPM_RESULT) (TPM_BASE + 75)) +#define TPM_DELEGATE_FAMILY ((TPM_RESULT) (TPM_BASE + 76)) +#define TPM_DELEGATE_ADMIN ((TPM_RESULT) (TPM_BASE + 77)) +#define TPM_TRANSPORT_NOTEXCLUSIVE ((TPM_RESULT) (TPM_BASE + 78)) +#define TPM_OWNER_CONTROL ((TPM_RESULT) (TPM_BASE + 79)) +#define TPM_DAA_RESOURCES ((TPM_RESULT) (TPM_BASE + 80)) +#define TPM_DAA_INPUT_DATA0 ((TPM_RESULT) (TPM_BASE + 81)) +#define TPM_DAA_INPUT_DATA1 ((TPM_RESULT) (TPM_BASE + 82)) +#define TPM_DAA_ISSUER_SETTINGS ((TPM_RESULT) (TPM_BASE + 83)) +#define TPM_DAA_TPM_SETTINGS ((TPM_RESULT) (TPM_BASE + 84)) +#define TPM_DAA_STAGE ((TPM_RESULT) (TPM_BASE + 85)) +#define TPM_DAA_ISSUER_VALIDITY ((TPM_RESULT) (TPM_BASE + 86)) +#define TPM_DAA_WRONG_W ((TPM_RESULT) (TPM_BASE + 87)) +#define TPM_BAD_HANDLE ((TPM_RESULT) (TPM_BASE + 88)) +#define TPM_BAD_DELEGATE ((TPM_RESULT) (TPM_BASE + 89)) +#define TPM_BADCONTEXT ((TPM_RESULT) (TPM_BASE + 90)) +#define TPM_TOOMANYCONTEXTS ((TPM_RESULT) (TPM_BASE + 91)) +#define TPM_MA_TICKET_SIGNATURE ((TPM_RESULT) (TPM_BASE + 92)) +#define TPM_MA_DESTINATION ((TPM_RESULT) (TPM_BASE + 93)) +#define TPM_MA_SOURCE ((TPM_RESULT) (TPM_BASE + 94)) +#define TPM_MA_AUTHORITY ((TPM_RESULT) (TPM_BASE + 95)) 
+#define TPM_PERMANENTEK ((TPM_RESULT) (TPM_BASE + 97)) +#define TPM_BAD_SIGNATURE ((TPM_RESULT) (TPM_BASE + 98)) +#define TPM_NOCONTEXTSPACE ((TPM_RESULT) (TPM_BASE + 99)) + +#define TPM_RETRY ((TPM_RESULT) (TPM_BASE + TPM_NON_FATAL)) +#define TPM_NEEDS_SELFTEST ((TPM_RESULT) (TPM_BASE + TPM_NON_FATAL + 1)) +#define TPM_DOING_SELFTEST ((TPM_RESULT) (TPM_BASE + TPM_NON_FATAL + 2)) +#define TPM_DEFEND_LOCK_RUNNING ((TPM_RESULT) (TPM_BASE + TPM_NON_FATAL + 3)) + +// +// Part 2, section 17: Ordinals +// +// Ordinals are 32 bit values. The upper byte contains values that serve as +// flag indicators, the next byte contains values indicating what committee +// designated the ordinal, and the final two bytes contain the Command +// Ordinal Index. +// 3 2 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |P|C|V| Reserved| Purview | Command Ordinal Index | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// Where: +// +// * P is Protected/Unprotected command. When 0 the command is a Protected +// command, when 1 the command is an Unprotected command. +// +// * C is Non-Connection/Connection related command. When 0 this command +// passes through to either the protected (TPM) or unprotected (TSS) +// components. +// +// * V is TPM/Vendor command. When 0 the command is TPM defined, when 1 the +// command is vendor defined. +// +// * All reserved area bits are set to 0. 
+// + +#define TPM_ORD_ActivateIdentity ((TPM_COMMAND_CODE) 0x0000007A) +#define TPM_ORD_AuthorizeMigrationKey ((TPM_COMMAND_CODE) 0x0000002B) +#define TPM_ORD_CertifyKey ((TPM_COMMAND_CODE) 0x00000032) +#define TPM_ORD_CertifyKey2 ((TPM_COMMAND_CODE) 0x00000033) +#define TPM_ORD_CertifySelfTest ((TPM_COMMAND_CODE) 0x00000052) +#define TPM_ORD_ChangeAuth ((TPM_COMMAND_CODE) 0x0000000C) +#define TPM_ORD_ChangeAuthAsymFinish ((TPM_COMMAND_CODE) 0x0000000F) +#define TPM_ORD_ChangeAuthAsymStart ((TPM_COMMAND_CODE) 0x0000000E) +#define TPM_ORD_ChangeAuthOwner ((TPM_COMMAND_CODE) 0x00000010) +#define TPM_ORD_CMK_ApproveMA ((TPM_COMMAND_CODE) 0x0000001D) +#define TPM_ORD_CMK_ConvertMigration ((TPM_COMMAND_CODE) 0x00000024) +#define TPM_ORD_CMK_CreateBlob ((TPM_COMMAND_CODE) 0x0000001B) +#define TPM_ORD_CMK_CreateKey ((TPM_COMMAND_CODE) 0x00000013) +#define TPM_ORD_CMK_CreateTicket ((TPM_COMMAND_CODE) 0x00000012) +#define TPM_ORD_CMK_SetRestrictions ((TPM_COMMAND_CODE) 0x0000001C) +#define TPM_ORD_ContinueSelfTest ((TPM_COMMAND_CODE) 0x00000053) +#define TPM_ORD_ConvertMigrationBlob ((TPM_COMMAND_CODE) 0x0000002A) +#define TPM_ORD_CreateCounter ((TPM_COMMAND_CODE) 0x000000DC) +#define TPM_ORD_CreateEndorsementKeyPair ((TPM_COMMAND_CODE) 0x00000078) +#define TPM_ORD_CreateMaintenanceArchive ((TPM_COMMAND_CODE) 0x0000002C) +#define TPM_ORD_CreateMigrationBlob ((TPM_COMMAND_CODE) 0x00000028) +#define TPM_ORD_CreateRevocableEK ((TPM_COMMAND_CODE) 0x0000007F) +#define TPM_ORD_CreateWrapKey ((TPM_COMMAND_CODE) 0x0000001F) +#define TPM_ORD_DAA_JOIN ((TPM_COMMAND_CODE) 0x00000029) +#define TPM_ORD_DAA_SIGN ((TPM_COMMAND_CODE) 0x00000031) +#define TPM_ORD_Delegate_CreateKeyDelegation ((TPM_COMMAND_CODE) 0x000000D4) +#define TPM_ORD_Delegate_CreateOwnerDelegation ((TPM_COMMAND_CODE) 0x000000D5) +#define TPM_ORD_Delegate_LoadOwnerDelegation ((TPM_COMMAND_CODE) 0x000000D8) +#define TPM_ORD_Delegate_Manage ((TPM_COMMAND_CODE) 0x000000D2) +#define TPM_ORD_Delegate_ReadTable 
((TPM_COMMAND_CODE) 0x000000DB) +#define TPM_ORD_Delegate_UpdateVerification ((TPM_COMMAND_CODE) 0x000000D1) +#define TPM_ORD_Delegate_VerifyDelegation ((TPM_COMMAND_CODE) 0x000000D6) +#define TPM_ORD_DirRead ((TPM_COMMAND_CODE) 0x0000001A) +#define TPM_ORD_DirWriteAuth ((TPM_COMMAND_CODE) 0x00000019) +#define TPM_ORD_DisableForceClear ((TPM_COMMAND_CODE) 0x0000005E) +#define TPM_ORD_DisableOwnerClear ((TPM_COMMAND_CODE) 0x0000005C) +#define TPM_ORD_DisablePubekRead ((TPM_COMMAND_CODE) 0x0000007E) +#define TPM_ORD_DSAP ((TPM_COMMAND_CODE) 0x00000011) +#define TPM_ORD_EstablishTransport ((TPM_COMMAND_CODE) 0x000000E6) +#define TPM_ORD_EvictKey ((TPM_COMMAND_CODE) 0x00000022) +#define TPM_ORD_ExecuteTransport ((TPM_COMMAND_CODE) 0x000000E7) +#define TPM_ORD_Extend ((TPM_COMMAND_CODE) 0x00000014) +#define TPM_ORD_FieldUpgrade ((TPM_COMMAND_CODE) 0x000000AA) +#define TPM_ORD_FlushSpecific ((TPM_COMMAND_CODE) 0x000000BA) +#define TPM_ORD_ForceClear ((TPM_COMMAND_CODE) 0x0000005D) +#define TPM_ORD_GetAuditDigest ((TPM_COMMAND_CODE) 0x00000085) +#define TPM_ORD_GetAuditDigestSigned ((TPM_COMMAND_CODE) 0x00000086) +#define TPM_ORD_GetAuditEvent ((TPM_COMMAND_CODE) 0x00000082) +#define TPM_ORD_GetAuditEventSigned ((TPM_COMMAND_CODE) 0x00000083) +#define TPM_ORD_GetCapability ((TPM_COMMAND_CODE) 0x00000065) +#define TPM_ORD_GetCapabilityOwner ((TPM_COMMAND_CODE) 0x00000066) +#define TPM_ORD_GetCapabilitySigned ((TPM_COMMAND_CODE) 0x00000064) +#define TPM_ORD_GetOrdinalAuditStatus ((TPM_COMMAND_CODE) 0x0000008C) +#define TPM_ORD_GetPubKey ((TPM_COMMAND_CODE) 0x00000021) +#define TPM_ORD_GetRandom ((TPM_COMMAND_CODE) 0x00000046) +#define TPM_ORD_GetTestResult ((TPM_COMMAND_CODE) 0x00000054) +#define TPM_ORD_GetTicks ((TPM_COMMAND_CODE) 0x000000F1) +#define TPM_ORD_IncrementCounter ((TPM_COMMAND_CODE) 0x000000DD) +#define TPM_ORD_Init ((TPM_COMMAND_CODE) 0x00000097) +#define TPM_ORD_KeyControlOwner ((TPM_COMMAND_CODE) 0x00000023) +#define TPM_ORD_KillMaintenanceFeature 
((TPM_COMMAND_CODE) 0x0000002E) +#define TPM_ORD_LoadAuthContext ((TPM_COMMAND_CODE) 0x000000B7) +#define TPM_ORD_LoadContext ((TPM_COMMAND_CODE) 0x000000B9) +#define TPM_ORD_LoadKey ((TPM_COMMAND_CODE) 0x00000020) +#define TPM_ORD_LoadKey2 ((TPM_COMMAND_CODE) 0x00000041) +#define TPM_ORD_LoadKeyContext ((TPM_COMMAND_CODE) 0x000000B5) +#define TPM_ORD_LoadMaintenanceArchive ((TPM_COMMAND_CODE) 0x0000002D) +#define TPM_ORD_LoadManuMaintPub ((TPM_COMMAND_CODE) 0x0000002F) +#define TPM_ORD_MakeIdentity ((TPM_COMMAND_CODE) 0x00000079) +#define TPM_ORD_MigrateKey ((TPM_COMMAND_CODE) 0x00000025) +#define TPM_ORD_NV_DefineSpace ((TPM_COMMAND_CODE) 0x000000CC) +#define TPM_ORD_NV_ReadValue ((TPM_COMMAND_CODE) 0x000000CF) +#define TPM_ORD_NV_ReadValueAuth ((TPM_COMMAND_CODE) 0x000000D0) +#define TPM_ORD_NV_WriteValue ((TPM_COMMAND_CODE) 0x000000CD) +#define TPM_ORD_NV_WriteValueAuth ((TPM_COMMAND_CODE) 0x000000CE) +#define TPM_ORD_OIAP ((TPM_COMMAND_CODE) 0x0000000A) +#define TPM_ORD_OSAP ((TPM_COMMAND_CODE) 0x0000000B) +#define TPM_ORD_OwnerClear ((TPM_COMMAND_CODE) 0x0000005B) +#define TPM_ORD_OwnerReadInternalPub ((TPM_COMMAND_CODE) 0x00000081) +#define TPM_ORD_OwnerReadPubek ((TPM_COMMAND_CODE) 0x0000007D) +#define TPM_ORD_OwnerSetDisable ((TPM_COMMAND_CODE) 0x0000006E) +#define TPM_ORD_PCR_Reset ((TPM_COMMAND_CODE) 0x000000C8) +#define TPM_ORD_PcrRead ((TPM_COMMAND_CODE) 0x00000015) +#define TPM_ORD_PhysicalDisable ((TPM_COMMAND_CODE) 0x00000070) +#define TPM_ORD_PhysicalEnable ((TPM_COMMAND_CODE) 0x0000006F) +#define TPM_ORD_PhysicalSetDeactivated ((TPM_COMMAND_CODE) 0x00000072) +#define TPM_ORD_Quote ((TPM_COMMAND_CODE) 0x00000016) +#define TPM_ORD_Quote2 ((TPM_COMMAND_CODE) 0x0000003E) +#define TPM_ORD_ReadCounter ((TPM_COMMAND_CODE) 0x000000DE) +#define TPM_ORD_ReadManuMaintPub ((TPM_COMMAND_CODE) 0x00000030) +#define TPM_ORD_ReadPubek ((TPM_COMMAND_CODE) 0x0000007C) +#define TPM_ORD_ReleaseCounter ((TPM_COMMAND_CODE) 0x000000DF) +#define 
TPM_ORD_ReleaseCounterOwner ((TPM_COMMAND_CODE) 0x000000E0) +#define TPM_ORD_ReleaseTransportSigned ((TPM_COMMAND_CODE) 0x000000E8) +#define TPM_ORD_Reset ((TPM_COMMAND_CODE) 0x0000005A) +#define TPM_ORD_ResetLockValue ((TPM_COMMAND_CODE) 0x00000040) +#define TPM_ORD_RevokeTrust ((TPM_COMMAND_CODE) 0x00000080) +#define TPM_ORD_SaveAuthContext ((TPM_COMMAND_CODE) 0x000000B6) +#define TPM_ORD_SaveContext ((TPM_COMMAND_CODE) 0x000000B8) +#define TPM_ORD_SaveKeyContext ((TPM_COMMAND_CODE) 0x000000B4) +#define TPM_ORD_SaveState ((TPM_COMMAND_CODE) 0x00000098) +#define TPM_ORD_Seal ((TPM_COMMAND_CODE) 0x00000017) +#define TPM_ORD_Sealx ((TPM_COMMAND_CODE) 0x0000003D) +#define TPM_ORD_SelfTestFull ((TPM_COMMAND_CODE) 0x00000050) +#define TPM_ORD_SetCapability ((TPM_COMMAND_CODE) 0x0000003F) +#define TPM_ORD_SetOperatorAuth ((TPM_COMMAND_CODE) 0x00000074) +#define TPM_ORD_SetOrdinalAuditStatus ((TPM_COMMAND_CODE) 0x0000008D) +#define TPM_ORD_SetOwnerInstall ((TPM_COMMAND_CODE) 0x00000071) +#define TPM_ORD_SetOwnerPointer ((TPM_COMMAND_CODE) 0x00000075) +#define TPM_ORD_SetRedirection ((TPM_COMMAND_CODE) 0x0000009A) +#define TPM_ORD_SetTempDeactivated ((TPM_COMMAND_CODE) 0x00000073) +#define TPM_ORD_SHA1Complete ((TPM_COMMAND_CODE) 0x000000A2) +#define TPM_ORD_SHA1CompleteExtend ((TPM_COMMAND_CODE) 0x000000A3) +#define TPM_ORD_SHA1Start ((TPM_COMMAND_CODE) 0x000000A0) +#define TPM_ORD_SHA1Update ((TPM_COMMAND_CODE) 0x000000A1) +#define TPM_ORD_Sign ((TPM_COMMAND_CODE) 0x0000003C) +#define TPM_ORD_Startup ((TPM_COMMAND_CODE) 0x00000099) +#define TPM_ORD_StirRandom ((TPM_COMMAND_CODE) 0x00000047) +#define TPM_ORD_TakeOwnership ((TPM_COMMAND_CODE) 0x0000000D) +#define TPM_ORD_Terminate_Handle ((TPM_COMMAND_CODE) 0x00000096) +#define TPM_ORD_TickStampBlob ((TPM_COMMAND_CODE) 0x000000F2) +#define TPM_ORD_UnBind ((TPM_COMMAND_CODE) 0x0000001E) +#define TPM_ORD_Unseal ((TPM_COMMAND_CODE) 0x00000018) +#define TSC_ORD_PhysicalPresence ((TPM_COMMAND_CODE) 0x4000000A) +#define 
TSC_ORD_ResetEstablishmentBit ((TPM_COMMAND_CODE) 0x4000000B) + +// +// Part 2, section 18: Context structures +// + +/// +/// Part 2, section 18.1: TPM_CONTEXT_BLOB +/// +typedef struct tdTPM_CONTEXT_BLOB { + TPM_STRUCTURE_TAG tag; + TPM_RESOURCE_TYPE resourceType; + TPM_HANDLE handle; + UINT8 label[16]; + UINT32 contextCount; + TPM_DIGEST integrityDigest; + UINT32 additionalSize; + UINT8 *additionalData; + UINT32 sensitiveSize; + UINT8 *sensitiveData; +} TPM_CONTEXT_BLOB; + +/// +/// Part 2, section 18.2 TPM_CONTEXT_SENSITIVE +/// +typedef struct tdTPM_CONTEXT_SENSITIVE { + TPM_STRUCTURE_TAG tag; + TPM_NONCE contextNonce; + UINT32 internalSize; + UINT8 *internalData; +} TPM_CONTEXT_SENSITIVE; + +// +// Part 2, section 19: NV Structures +// + +// +// Part 2, section 19.1.1: Required TPM_NV_INDEX values +// +#define TPM_NV_INDEX_LOCK ((UINT32)0xffffffff) +#define TPM_NV_INDEX0 ((UINT32)0x00000000) +#define TPM_NV_INDEX_DIR ((UINT32)0x10000001) +#define TPM_NV_INDEX_EKCert ((UINT32)0x0000f000) +#define TPM_NV_INDEX_TPM_CC ((UINT32)0x0000f001) +#define TPM_NV_INDEX_PlatformCert ((UINT32)0x0000f002) +#define TPM_NV_INDEX_Platform_CC ((UINT32)0x0000f003) +// +// Part 2, section 19.1.2: Reserved Index values +// +#define TPM_NV_INDEX_TSS_BASE ((UINT32)0x00011100) +#define TPM_NV_INDEX_PC_BASE ((UINT32)0x00011200) +#define TPM_NV_INDEX_SERVER_BASE ((UINT32)0x00011300) +#define TPM_NV_INDEX_MOBILE_BASE ((UINT32)0x00011400) +#define TPM_NV_INDEX_PERIPHERAL_BASE ((UINT32)0x00011500) +#define TPM_NV_INDEX_GROUP_RESV_BASE ((UINT32)0x00010000) + +/// +/// Part 2, section 19.2: TPM_NV_ATTRIBUTES +/// +typedef struct tdTPM_NV_ATTRIBUTES { + TPM_STRUCTURE_TAG tag; + UINT32 attributes; +} TPM_NV_ATTRIBUTES; + +#define TPM_NV_PER_READ_STCLEAR (BIT31) +#define TPM_NV_PER_AUTHREAD (BIT18) +#define TPM_NV_PER_OWNERREAD (BIT17) +#define TPM_NV_PER_PPREAD (BIT16) +#define TPM_NV_PER_GLOBALLOCK (BIT15) +#define TPM_NV_PER_WRITE_STCLEAR (BIT14) +#define TPM_NV_PER_WRITEDEFINE (BIT13) 
+#define TPM_NV_PER_WRITEALL (BIT12) +#define TPM_NV_PER_AUTHWRITE (BIT2) +#define TPM_NV_PER_OWNERWRITE (BIT1) +#define TPM_NV_PER_PPWRITE (BIT0) + +/// +/// Part 2, section 19.3: TPM_NV_DATA_PUBLIC +/// +typedef struct tdTPM_NV_DATA_PUBLIC { + TPM_STRUCTURE_TAG tag; + TPM_NV_INDEX nvIndex; + TPM_PCR_INFO_SHORT pcrInfoRead; + TPM_PCR_INFO_SHORT pcrInfoWrite; + TPM_NV_ATTRIBUTES permission; + BOOLEAN bReadSTClear; + BOOLEAN bWriteSTClear; + BOOLEAN bWriteDefine; + UINT32 dataSize; +} TPM_NV_DATA_PUBLIC; + +// +// Part 2, section 20: Delegate Structures +// + +#define TPM_DEL_OWNER_BITS ((UINT32)0x00000001) +#define TPM_DEL_KEY_BITS ((UINT32)0x00000002) +/// +/// Part 2, section 20.2: Delegate Definitions +/// +typedef struct tdTPM_DELEGATIONS { + TPM_STRUCTURE_TAG tag; + UINT32 delegateType; + UINT32 per1; + UINT32 per2; +} TPM_DELEGATIONS; + +// +// Part 2, section 20.2.1: Owner Permission Settings +// +#define TPM_DELEGATE_SetOrdinalAuditStatus (BIT30) +#define TPM_DELEGATE_DirWriteAuth (BIT29) +#define TPM_DELEGATE_CMK_ApproveMA (BIT28) +#define TPM_DELEGATE_NV_WriteValue (BIT27) +#define TPM_DELEGATE_CMK_CreateTicket (BIT26) +#define TPM_DELEGATE_NV_ReadValue (BIT25) +#define TPM_DELEGATE_Delegate_LoadOwnerDelegation (BIT24) +#define TPM_DELEGATE_DAA_Join (BIT23) +#define TPM_DELEGATE_AuthorizeMigrationKey (BIT22) +#define TPM_DELEGATE_CreateMaintenanceArchive (BIT21) +#define TPM_DELEGATE_LoadMaintenanceArchive (BIT20) +#define TPM_DELEGATE_KillMaintenanceFeature (BIT19) +#define TPM_DELEGATE_OwnerReadInteralPub (BIT18) +#define TPM_DELEGATE_ResetLockValue (BIT17) +#define TPM_DELEGATE_OwnerClear (BIT16) +#define TPM_DELEGATE_DisableOwnerClear (BIT15) +#define TPM_DELEGATE_NV_DefineSpace (BIT14) +#define TPM_DELEGATE_OwnerSetDisable (BIT13) +#define TPM_DELEGATE_SetCapability (BIT12) +#define TPM_DELEGATE_MakeIdentity (BIT11) +#define TPM_DELEGATE_ActivateIdentity (BIT10) +#define TPM_DELEGATE_OwnerReadPubek (BIT9) +#define TPM_DELEGATE_DisablePubekRead (BIT8) 
+#define TPM_DELEGATE_SetRedirection (BIT7) +#define TPM_DELEGATE_FieldUpgrade (BIT6) +#define TPM_DELEGATE_Delegate_UpdateVerification (BIT5) +#define TPM_DELEGATE_CreateCounter (BIT4) +#define TPM_DELEGATE_ReleaseCounterOwner (BIT3) +#define TPM_DELEGATE_DelegateManage (BIT2) +#define TPM_DELEGATE_Delegate_CreateOwnerDelegation (BIT1) +#define TPM_DELEGATE_DAA_Sign (BIT0) + +// +// Part 2, section 20.2.3: Key Permission settings +// +#define TPM_KEY_DELEGATE_CMK_ConvertMigration (BIT28) +#define TPM_KEY_DELEGATE_TickStampBlob (BIT27) +#define TPM_KEY_DELEGATE_ChangeAuthAsymStart (BIT26) +#define TPM_KEY_DELEGATE_ChangeAuthAsymFinish (BIT25) +#define TPM_KEY_DELEGATE_CMK_CreateKey (BIT24) +#define TPM_KEY_DELEGATE_MigrateKey (BIT23) +#define TPM_KEY_DELEGATE_LoadKey2 (BIT22) +#define TPM_KEY_DELEGATE_EstablishTransport (BIT21) +#define TPM_KEY_DELEGATE_ReleaseTransportSigned (BIT20) +#define TPM_KEY_DELEGATE_Quote2 (BIT19) +#define TPM_KEY_DELEGATE_Sealx (BIT18) +#define TPM_KEY_DELEGATE_MakeIdentity (BIT17) +#define TPM_KEY_DELEGATE_ActivateIdentity (BIT16) +#define TPM_KEY_DELEGATE_GetAuditDigestSigned (BIT15) +#define TPM_KEY_DELEGATE_Sign (BIT14) +#define TPM_KEY_DELEGATE_CertifyKey2 (BIT13) +#define TPM_KEY_DELEGATE_CertifyKey (BIT12) +#define TPM_KEY_DELEGATE_CreateWrapKey (BIT11) +#define TPM_KEY_DELEGATE_CMK_CreateBlob (BIT10) +#define TPM_KEY_DELEGATE_CreateMigrationBlob (BIT9) +#define TPM_KEY_DELEGATE_ConvertMigrationBlob (BIT8) +#define TPM_KEY_DELEGATE_CreateKeyDelegation (BIT7) +#define TPM_KEY_DELEGATE_ChangeAuth (BIT6) +#define TPM_KEY_DELEGATE_GetPubKey (BIT5) +#define TPM_KEY_DELEGATE_UnBind (BIT4) +#define TPM_KEY_DELEGATE_Quote (BIT3) +#define TPM_KEY_DELEGATE_Unseal (BIT2) +#define TPM_KEY_DELEGATE_Seal (BIT1) +#define TPM_KEY_DELEGATE_LoadKey (BIT0) + +// +// Part 2, section 20.3: TPM_FAMILY_FLAGS +// +#define TPM_DELEGATE_ADMIN_LOCK (BIT1) +#define TPM_FAMFLAG_ENABLE (BIT0) + +/// +/// Part 2, section 20.4: TPM_FAMILY_LABEL +/// +typedef 
struct tdTPM_FAMILY_LABEL { + UINT8 label; +} TPM_FAMILY_LABEL; + +/// +/// Part 2, section 20.5: TPM_FAMILY_TABLE_ENTRY +/// +typedef struct tdTPM_FAMILY_TABLE_ENTRY { + TPM_STRUCTURE_TAG tag; + TPM_FAMILY_LABEL label; + TPM_FAMILY_ID familyID; + TPM_FAMILY_VERIFICATION verificationCount; + TPM_FAMILY_FLAGS flags; +} TPM_FAMILY_TABLE_ENTRY; + +// +// Part 2, section 20.6: TPM_FAMILY_TABLE +// +#define TPM_NUM_FAMILY_TABLE_ENTRY_MIN 8 + +typedef struct tdTPM_FAMILY_TABLE{ + TPM_FAMILY_TABLE_ENTRY famTableRow[TPM_NUM_FAMILY_TABLE_ENTRY_MIN]; +} TPM_FAMILY_TABLE; + +/// +/// Part 2, section 20.7: TPM_DELEGATE_LABEL +/// +typedef struct tdTPM_DELEGATE_LABEL { + UINT8 label; +} TPM_DELEGATE_LABEL; + +/// +/// Part 2, section 20.8: TPM_DELEGATE_PUBLIC +/// +typedef struct tdTPM_DELEGATE_PUBLIC { + TPM_STRUCTURE_TAG tag; + TPM_DELEGATE_LABEL label; + TPM_PCR_INFO_SHORT pcrInfo; + TPM_DELEGATIONS permissions; + TPM_FAMILY_ID familyID; + TPM_FAMILY_VERIFICATION verificationCount; +} TPM_DELEGATE_PUBLIC; + +/// +/// Part 2, section 20.9: TPM_DELEGATE_TABLE_ROW +/// +typedef struct tdTPM_DELEGATE_TABLE_ROW { + TPM_STRUCTURE_TAG tag; + TPM_DELEGATE_PUBLIC pub; + TPM_SECRET authValue; +} TPM_DELEGATE_TABLE_ROW; + +// +// Part 2, section 20.10: TPM_DELEGATE_TABLE +// +#define TPM_NUM_DELEGATE_TABLE_ENTRY_MIN 2 + +typedef struct tdTPM_DELEGATE_TABLE{ + TPM_DELEGATE_TABLE_ROW delRow[TPM_NUM_DELEGATE_TABLE_ENTRY_MIN]; +} TPM_DELEGATE_TABLE; + +/// +/// Part 2, section 20.11: TPM_DELEGATE_SENSITIVE +/// +typedef struct tdTPM_DELEGATE_SENSITIVE { + TPM_STRUCTURE_TAG tag; + TPM_SECRET authValue; +} TPM_DELEGATE_SENSITIVE; + +/// +/// Part 2, section 20.12: TPM_DELEGATE_OWNER_BLOB +/// +typedef struct tdTPM_DELEGATE_OWNER_BLOB { + TPM_STRUCTURE_TAG tag; + TPM_DELEGATE_PUBLIC pub; + TPM_DIGEST integrityDigest; + UINT32 additionalSize; + UINT8 *additionalArea; + UINT32 sensitiveSize; + UINT8 *sensitiveArea; +} TPM_DELEGATE_OWNER_BLOB; + +/// +/// Part 2, section 20.13: 
TPM_DELEGATE_KEY_BLOB +/// +typedef struct tdTPM_DELEGATE_KEY_BLOB { + TPM_STRUCTURE_TAG tag; + TPM_DELEGATE_PUBLIC pub; + TPM_DIGEST integrityDigest; + TPM_DIGEST pubKeyDigest; + UINT32 additionalSize; + UINT8 *additionalArea; + UINT32 sensitiveSize; + UINT8 *sensitiveArea; +} TPM_DELEGATE_KEY_BLOB; + +// +// Part 2, section 20.14: TPM_FAMILY_OPERATION Values +// +#define TPM_FAMILY_CREATE ((UINT32)0x00000001) +#define TPM_FAMILY_ENABLE ((UINT32)0x00000002) +#define TPM_FAMILY_ADMIN ((UINT32)0x00000003) +#define TPM_FAMILY_INVALIDATE ((UINT32)0x00000004) + +// +// Part 2, section 21.1: TPM_CAPABILITY_AREA for GetCapability +// +#define TPM_CAP_ORD ((TPM_CAPABILITY_AREA) 0x00000001) +#define TPM_CAP_ALG ((TPM_CAPABILITY_AREA) 0x00000002) +#define TPM_CAP_PID ((TPM_CAPABILITY_AREA) 0x00000003) +#define TPM_CAP_FLAG ((TPM_CAPABILITY_AREA) 0x00000004) +#define TPM_CAP_PROPERTY ((TPM_CAPABILITY_AREA) 0x00000005) +#define TPM_CAP_VERSION ((TPM_CAPABILITY_AREA) 0x00000006) +#define TPM_CAP_KEY_HANDLE ((TPM_CAPABILITY_AREA) 0x00000007) +#define TPM_CAP_CHECK_LOADED ((TPM_CAPABILITY_AREA) 0x00000008) +#define TPM_CAP_SYM_MODE ((TPM_CAPABILITY_AREA) 0x00000009) +#define TPM_CAP_KEY_STATUS ((TPM_CAPABILITY_AREA) 0x0000000C) +#define TPM_CAP_NV_LIST ((TPM_CAPABILITY_AREA) 0x0000000D) +#define TPM_CAP_MFR ((TPM_CAPABILITY_AREA) 0x00000010) +#define TPM_CAP_NV_INDEX ((TPM_CAPABILITY_AREA) 0x00000011) +#define TPM_CAP_TRANS_ALG ((TPM_CAPABILITY_AREA) 0x00000012) +#define TPM_CAP_HANDLE ((TPM_CAPABILITY_AREA) 0x00000014) +#define TPM_CAP_TRANS_ES ((TPM_CAPABILITY_AREA) 0x00000015) +#define TPM_CAP_AUTH_ENCRYPT ((TPM_CAPABILITY_AREA) 0x00000017) +#define TPM_CAP_SELECT_SIZE ((TPM_CAPABILITY_AREA) 0x00000018) +#define TPM_CAP_VERSION_VAL ((TPM_CAPABILITY_AREA) 0x0000001A) + +#define TPM_CAP_FLAG_PERMANENT ((TPM_CAPABILITY_AREA) 0x00000108) +#define TPM_CAP_FLAG_VOLATILE ((TPM_CAPABILITY_AREA) 0x00000109) + +// +// Part 2, section 21.2: CAP_PROPERTY Subcap values for GetCapability 
+// +#define TPM_CAP_PROP_PCR ((TPM_CAPABILITY_AREA) 0x00000101) +#define TPM_CAP_PROP_DIR ((TPM_CAPABILITY_AREA) 0x00000102) +#define TPM_CAP_PROP_MANUFACTURER ((TPM_CAPABILITY_AREA) 0x00000103) +#define TPM_CAP_PROP_KEYS ((TPM_CAPABILITY_AREA) 0x00000104) +#define TPM_CAP_PROP_MIN_COUNTER ((TPM_CAPABILITY_AREA) 0x00000107) +#define TPM_CAP_PROP_AUTHSESS ((TPM_CAPABILITY_AREA) 0x0000010A) +#define TPM_CAP_PROP_TRANSESS ((TPM_CAPABILITY_AREA) 0x0000010B) +#define TPM_CAP_PROP_COUNTERS ((TPM_CAPABILITY_AREA) 0x0000010C) +#define TPM_CAP_PROP_MAX_AUTHSESS ((TPM_CAPABILITY_AREA) 0x0000010D) +#define TPM_CAP_PROP_MAX_TRANSESS ((TPM_CAPABILITY_AREA) 0x0000010E) +#define TPM_CAP_PROP_MAX_COUNTERS ((TPM_CAPABILITY_AREA) 0x0000010F) +#define TPM_CAP_PROP_MAX_KEYS ((TPM_CAPABILITY_AREA) 0x00000110) +#define TPM_CAP_PROP_OWNER ((TPM_CAPABILITY_AREA) 0x00000111) +#define TPM_CAP_PROP_CONTEXT ((TPM_CAPABILITY_AREA) 0x00000112) +#define TPM_CAP_PROP_MAX_CONTEXT ((TPM_CAPABILITY_AREA) 0x00000113) +#define TPM_CAP_PROP_FAMILYROWS ((TPM_CAPABILITY_AREA) 0x00000114) +#define TPM_CAP_PROP_TIS_TIMEOUT ((TPM_CAPABILITY_AREA) 0x00000115) +#define TPM_CAP_PROP_STARTUP_EFFECT ((TPM_CAPABILITY_AREA) 0x00000116) +#define TPM_CAP_PROP_DELEGATE_ROW ((TPM_CAPABILITY_AREA) 0x00000117) +#define TPM_CAP_PROP_DAA_MAX ((TPM_CAPABILITY_AREA) 0x00000119) +#define CAP_PROP_SESSION_DAA ((TPM_CAPABILITY_AREA) 0x0000011A) +#define TPM_CAP_PROP_CONTEXT_DIST ((TPM_CAPABILITY_AREA) 0x0000011B) +#define TPM_CAP_PROP_DAA_INTERRUPT ((TPM_CAPABILITY_AREA) 0x0000011C) +#define TPM_CAP_PROP_SESSIONS ((TPM_CAPABILITY_AREA) 0x0000011D) +#define TPM_CAP_PROP_MAX_SESSIONS ((TPM_CAPABILITY_AREA) 0x0000011E) +#define TPM_CAP_PROP_CMK_RESTRICTION ((TPM_CAPABILITY_AREA) 0x0000011F) +#define TPM_CAP_PROP_DURATION ((TPM_CAPABILITY_AREA) 0x00000120) +#define TPM_CAP_PROP_ACTIVE_COUNTER ((TPM_CAPABILITY_AREA) 0x00000122) +#define TPM_CAP_PROP_MAX_NV_AVAILABLE ((TPM_CAPABILITY_AREA) 0x00000123) +#define 
TPM_CAP_PROP_INPUT_BUFFER ((TPM_CAPABILITY_AREA) 0x00000124) + +// +// Part 2, section 21.4: TPM_CAPABILITY_AREA for SetCapability +// +#define TPM_SET_PERM_FLAGS ((TPM_CAPABILITY_AREA) 0x00000001) +#define TPM_SET_PERM_DATA ((TPM_CAPABILITY_AREA) 0x00000002) +#define TPM_SET_STCLEAR_FLAGS ((TPM_CAPABILITY_AREA) 0x00000003) +#define TPM_SET_STCLEAR_DATA ((TPM_CAPABILITY_AREA) 0x00000004) +#define TPM_SET_STANY_FLAGS ((TPM_CAPABILITY_AREA) 0x00000005) +#define TPM_SET_STANY_DATA ((TPM_CAPABILITY_AREA) 0x00000006) + +/// +/// Part 2, section 21.6: TPM_CAP_VERSION_INFO +/// [size_is(vendorSpecificSize)] BYTE* vendorSpecific; +/// +typedef struct tdTPM_CAP_VERSION_INFO { + TPM_STRUCTURE_TAG tag; + TPM_VERSION version; + UINT16 specLevel; + UINT8 errataRev; + UINT8 tpmVendorID[4]; + UINT16 vendorSpecificSize; + UINT8 *vendorSpecific; +} TPM_CAP_VERSION_INFO; + +/// +/// Part 2, section 21.10: TPM_DA_ACTION_TYPE +/// +typedef struct tdTPM_DA_ACTION_TYPE { + TPM_STRUCTURE_TAG tag; + UINT32 actions; +} TPM_DA_ACTION_TYPE; + +#define TPM_DA_ACTION_FAILURE_MODE (((UINT32)1)<<3) +#define TPM_DA_ACTION_DEACTIVATE (((UINT32)1)<<2) +#define TPM_DA_ACTION_DISABLE (((UINT32)1)<<1) +#define TPM_DA_ACTION_TIMEOUT (((UINT32)1)<<0) + +/// +/// Part 2, section 21.7: TPM_DA_INFO +/// +typedef struct tdTPM_DA_INFO { + TPM_STRUCTURE_TAG tag; + TPM_DA_STATE state; + UINT16 currentCount; + UINT16 thresholdCount; + TPM_DA_ACTION_TYPE actionAtThreshold; + UINT32 actionDependValue; + UINT32 vendorDataSize; + UINT8 *vendorData; +} TPM_DA_INFO; + +/// +/// Part 2, section 21.8: TPM_DA_INFO_LIMITED +/// +typedef struct tdTPM_DA_INFO_LIMITED { + TPM_STRUCTURE_TAG tag; + TPM_DA_STATE state; + TPM_DA_ACTION_TYPE actionAtThreshold; + UINT32 vendorDataSize; + UINT8 *vendorData; +} TPM_DA_INFO_LIMITED; + +// +// Part 2, section 21.9: CAP_PROPERTY Subcap values for GetCapability +// +#define TPM_DA_STATE_INACTIVE ((UINT8)0x00) +#define TPM_DA_STATE_ACTIVE ((UINT8)0x01) + +// +// Part 2, section 22: DAA 
Structures +// + +// +// Part 2, section 22.1: Size definitions +// +#define TPM_DAA_SIZE_r0 (43) +#define TPM_DAA_SIZE_r1 (43) +#define TPM_DAA_SIZE_r2 (128) +#define TPM_DAA_SIZE_r3 (168) +#define TPM_DAA_SIZE_r4 (219) +#define TPM_DAA_SIZE_NT (20) +#define TPM_DAA_SIZE_v0 (128) +#define TPM_DAA_SIZE_v1 (192) +#define TPM_DAA_SIZE_NE (256) +#define TPM_DAA_SIZE_w (256) +#define TPM_DAA_SIZE_issuerModulus (256) +// +// Part 2, section 22.2: Constant definitions +// +#define TPM_DAA_power0 (104) +#define TPM_DAA_power1 (1024) + +/// +/// Part 2, section 22.3: TPM_DAA_ISSUER +/// +typedef struct tdTPM_DAA_ISSUER { + TPM_STRUCTURE_TAG tag; + TPM_DIGEST DAA_digest_R0; + TPM_DIGEST DAA_digest_R1; + TPM_DIGEST DAA_digest_S0; + TPM_DIGEST DAA_digest_S1; + TPM_DIGEST DAA_digest_n; + TPM_DIGEST DAA_digest_gamma; + UINT8 DAA_generic_q[26]; +} TPM_DAA_ISSUER; + +/// +/// Part 2, section 22.4: TPM_DAA_TPM +/// +typedef struct tdTPM_DAA_TPM { + TPM_STRUCTURE_TAG tag; + TPM_DIGEST DAA_digestIssuer; + TPM_DIGEST DAA_digest_v0; + TPM_DIGEST DAA_digest_v1; + TPM_DIGEST DAA_rekey; + UINT32 DAA_count; +} TPM_DAA_TPM; + +/// +/// Part 2, section 22.5: TPM_DAA_CONTEXT +/// +typedef struct tdTPM_DAA_CONTEXT { + TPM_STRUCTURE_TAG tag; + TPM_DIGEST DAA_digestContext; + TPM_DIGEST DAA_digest; + TPM_DAA_CONTEXT_SEED DAA_contextSeed; + UINT8 DAA_scratch[256]; + UINT8 DAA_stage; +} TPM_DAA_CONTEXT; + +/// +/// Part 2, section 22.6: TPM_DAA_JOINDATA +/// +typedef struct tdTPM_DAA_JOINDATA { + UINT8 DAA_join_u0[128]; + UINT8 DAA_join_u1[138]; + TPM_DIGEST DAA_digest_n0; +} TPM_DAA_JOINDATA; + +/// +/// Part 2, section 22.8: TPM_DAA_BLOB +/// +typedef struct tdTPM_DAA_BLOB { + TPM_STRUCTURE_TAG tag; + TPM_RESOURCE_TYPE resourceType; + UINT8 label[16]; + TPM_DIGEST blobIntegrity; + UINT32 additionalSize; + UINT8 *additionalData; + UINT32 sensitiveSize; + UINT8 *sensitiveData; +} TPM_DAA_BLOB; + +/// +/// Part 2, section 22.9: TPM_DAA_SENSITIVE +/// +typedef struct tdTPM_DAA_SENSITIVE { + 
TPM_STRUCTURE_TAG tag; + UINT32 internalSize; + UINT8 *internalData; +} TPM_DAA_SENSITIVE; + + +// +// Part 2, section 23: Redirection +// + +/// +/// Part 2 section 23.1: TPM_REDIR_COMMAND +/// This section defines exactly one value but does not +/// give it a name. The definition of TPM_SetRedirection in Part3 +/// refers to exactly one name but does not give its value. We join +/// them here. +/// +#define TPM_REDIR_GPIO (0x00000001) + +/// +/// TPM Command Headers defined in Part 3 +/// +typedef struct tdTPM_RQU_COMMAND_HDR { + TPM_STRUCTURE_TAG tag; + UINT32 paramSize; + TPM_COMMAND_CODE ordinal; +} TPM_RQU_COMMAND_HDR; + +/// +/// TPM Response Headers defined in Part 3 +/// +typedef struct tdTPM_RSP_COMMAND_HDR { + TPM_STRUCTURE_TAG tag; + UINT32 paramSize; + TPM_RESULT returnCode; +} TPM_RSP_COMMAND_HDR; + +#pragma pack () + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/Tpm20.h b/src/include/ipxe/efi/IndustryStandard/Tpm20.h new file mode 100644 index 00000000..656bf21e --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Tpm20.h @@ -0,0 +1,1822 @@ +/** @file + TPM2.0 Specification data structures + (Trusted Platform Module Library Specification, Family "2.0", Level 00, Revision 00.96, + @http://www.trustedcomputinggroup.org/resources/tpm_library_specification) + + Check http://trustedcomputinggroup.org for latest specification updates. + +Copyright (c) 2013 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + + +#ifndef _TPM20_H_ +#define _TPM20_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +#pragma pack (1) + +// Annex A Algorithm Constants + +// Table 205 - Defines for SHA1 Hash Values +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +// Table 206 - Defines for SHA256 Hash Values +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +// Table 207 - Defines for SHA384 Hash Values +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 + +// Table 208 - Defines for SHA512 Hash Values +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 + +// Table 209 - Defines for SM3_256 Hash Values +#define SM3_256_DIGEST_SIZE 32 +#define SM3_256_BLOCK_SIZE 64 + +// Table 210 - Defines for Architectural Limits Values +#define MAX_SESSION_NUMBER 3 + +// Annex B Implementation Definitions + +// Table 211 - Defines for Logic Values +#define YES 1 +#define NO 0 +#define SET 1 +#define CLEAR 0 + +// Table 215 - Defines for RSA Algorithm Constants +#define MAX_RSA_KEY_BITS 2048 +#define MAX_RSA_KEY_BYTES ((MAX_RSA_KEY_BITS + 7) / 8) + +// Table 216 - Defines for ECC Algorithm Constants +#define MAX_ECC_KEY_BITS 256 +#define MAX_ECC_KEY_BYTES ((MAX_ECC_KEY_BITS + 7) / 8) + +// Table 217 - Defines for AES Algorithm Constants +#define MAX_AES_KEY_BITS 128 +#define MAX_AES_BLOCK_SIZE_BYTES 16 +#define MAX_AES_KEY_BYTES ((MAX_AES_KEY_BITS + 7) / 8) + +// Table 218 - Defines for SM4 Algorithm Constants +#define MAX_SM4_KEY_BITS 128 +#define MAX_SM4_BLOCK_SIZE_BYTES 16 +#define MAX_SM4_KEY_BYTES ((MAX_SM4_KEY_BITS + 7) / 8) + +// Table 219 - Defines for 
Symmetric Algorithm Constants +#define MAX_SYM_KEY_BITS MAX_AES_KEY_BITS +#define MAX_SYM_KEY_BYTES MAX_AES_KEY_BYTES +#define MAX_SYM_BLOCK_SIZE MAX_AES_BLOCK_SIZE_BYTES + +// Table 220 - Defines for Implementation Values +typedef UINT16 BSIZE; +#define BUFFER_ALIGNMENT 4 +#define IMPLEMENTATION_PCR 24 +#define PLATFORM_PCR 24 +#define DRTM_PCR 17 +#define NUM_LOCALITIES 5 +#define MAX_HANDLE_NUM 3 +#define MAX_ACTIVE_SESSIONS 64 +typedef UINT16 CONTEXT_SLOT; +typedef UINT64 CONTEXT_COUNTER; +#define MAX_LOADED_SESSIONS 3 +#define MAX_SESSION_NUM 3 +#define MAX_LOADED_OBJECTS 3 +#define MIN_EVICT_OBJECTS 2 +#define PCR_SELECT_MIN ((PLATFORM_PCR + 7) / 8) +#define PCR_SELECT_MAX ((IMPLEMENTATION_PCR + 7) / 8) +#define NUM_POLICY_PCR_GROUP 1 +#define NUM_AUTHVALUE_PCR_GROUP 1 +#define MAX_CONTEXT_SIZE 4000 +#define MAX_DIGEST_BUFFER 1024 +#define MAX_NV_INDEX_SIZE 1024 +#define MAX_CAP_BUFFER 1024 +#define NV_MEMORY_SIZE 16384 +#define NUM_STATIC_PCR 16 +#define MAX_ALG_LIST_SIZE 64 +#define TIMER_PRESCALE 100000 +#define PRIMARY_SEED_SIZE 32 +#define CONTEXT_ENCRYPT_ALG TPM_ALG_AES +#define CONTEXT_ENCRYPT_KEY_BITS MAX_SYM_KEY_BITS +#define CONTEXT_ENCRYPT_KEY_BYTES ((CONTEXT_ENCRYPT_KEY_BITS + 7) / 8) +#define CONTEXT_INTEGRITY_HASH_ALG TPM_ALG_SHA256 +#define CONTEXT_INTEGRITY_HASH_SIZE SHA256_DIGEST_SIZE +#define PROOF_SIZE CONTEXT_INTEGRITY_HASH_SIZE +#define NV_CLOCK_UPDATE_INTERVAL 12 +#define NUM_POLICY_PCR 1 +#define MAX_COMMAND_SIZE 4096 +#define MAX_RESPONSE_SIZE 4096 +#define ORDERLY_BITS 8 +#define MAX_ORDERLY_COUNT ((1 << ORDERLY_BITS) - 1) +#define ALG_ID_FIRST TPM_ALG_FIRST +#define ALG_ID_LAST TPM_ALG_LAST +#define MAX_SYM_DATA 128 +#define MAX_RNG_ENTROPY_SIZE 64 +#define RAM_INDEX_SPACE 512 +#define RSA_DEFAULT_PUBLIC_EXPONENT 0x00010001 +#define CRT_FORMAT_RSA YES +#define PRIVATE_VENDOR_SPECIFIC_BYTES ((MAX_RSA_KEY_BYTES / 2) * ( 3 + CRT_FORMAT_RSA * 2)) + +// Capability related MAX_ value +#define MAX_CAP_DATA (MAX_CAP_BUFFER - sizeof(TPM_CAP) 
- sizeof(UINT32)) +#define MAX_CAP_ALGS (MAX_CAP_DATA / sizeof(TPMS_ALG_PROPERTY)) +#define MAX_CAP_HANDLES (MAX_CAP_DATA / sizeof(TPM_HANDLE)) +#define MAX_CAP_CC (MAX_CAP_DATA / sizeof(TPM_CC)) +#define MAX_TPM_PROPERTIES (MAX_CAP_DATA / sizeof(TPMS_TAGGED_PROPERTY)) +#define MAX_PCR_PROPERTIES (MAX_CAP_DATA / sizeof(TPMS_TAGGED_PCR_SELECT)) +#define MAX_ECC_CURVES (MAX_CAP_DATA / sizeof(TPM_ECC_CURVE)) + +// +// Always set 5 here, because we want to support all hash algo in BIOS. +// +#define HASH_COUNT 5 + +// 5 Base Types + +// Table 3 - Definition of Base Types +typedef UINT8 BYTE; + +// Table 4 - Definition of Types for Documentation Clarity +// +// NOTE: Comment because it has same name as TPM1.2 (value is same, so not runtime issue) +// +//typedef UINT32 TPM_ALGORITHM_ID; +//typedef UINT32 TPM_MODIFIER_INDICATOR; +typedef UINT32 TPM_AUTHORIZATION_SIZE; +typedef UINT32 TPM_PARAMETER_SIZE; +typedef UINT16 TPM_KEY_SIZE; +typedef UINT16 TPM_KEY_BITS; + +// 6 Constants + +// Table 6 - TPM_GENERATED Constants +typedef UINT32 TPM_GENERATED; +#define TPM_GENERATED_VALUE (TPM_GENERATED)(0xff544347) + +// Table 7 - TPM_ALG_ID Constants +typedef UINT16 TPM_ALG_ID; +// +// NOTE: Comment some algo which has same name as TPM1.2 (value is same, so not runtime issue) +// +#define TPM_ALG_ERROR (TPM_ALG_ID)(0x0000) +#define TPM_ALG_FIRST (TPM_ALG_ID)(0x0001) +//#define TPM_ALG_RSA (TPM_ALG_ID)(0x0001) +//#define TPM_ALG_SHA (TPM_ALG_ID)(0x0004) +#define TPM_ALG_SHA1 (TPM_ALG_ID)(0x0004) +//#define TPM_ALG_HMAC (TPM_ALG_ID)(0x0005) +#define TPM_ALG_AES (TPM_ALG_ID)(0x0006) +//#define TPM_ALG_MGF1 (TPM_ALG_ID)(0x0007) +#define TPM_ALG_KEYEDHASH (TPM_ALG_ID)(0x0008) +//#define TPM_ALG_XOR (TPM_ALG_ID)(0x000A) +#define TPM_ALG_SHA256 (TPM_ALG_ID)(0x000B) +#define TPM_ALG_SHA384 (TPM_ALG_ID)(0x000C) +#define TPM_ALG_SHA512 (TPM_ALG_ID)(0x000D) +#define TPM_ALG_NULL (TPM_ALG_ID)(0x0010) +#define TPM_ALG_SM3_256 (TPM_ALG_ID)(0x0012) +#define TPM_ALG_SM4 (TPM_ALG_ID)(0x0013) 
+#define TPM_ALG_RSASSA (TPM_ALG_ID)(0x0014) +#define TPM_ALG_RSAES (TPM_ALG_ID)(0x0015) +#define TPM_ALG_RSAPSS (TPM_ALG_ID)(0x0016) +#define TPM_ALG_OAEP (TPM_ALG_ID)(0x0017) +#define TPM_ALG_ECDSA (TPM_ALG_ID)(0x0018) +#define TPM_ALG_ECDH (TPM_ALG_ID)(0x0019) +#define TPM_ALG_ECDAA (TPM_ALG_ID)(0x001A) +#define TPM_ALG_SM2 (TPM_ALG_ID)(0x001B) +#define TPM_ALG_ECSCHNORR (TPM_ALG_ID)(0x001C) +#define TPM_ALG_ECMQV (TPM_ALG_ID)(0x001D) +#define TPM_ALG_KDF1_SP800_56a (TPM_ALG_ID)(0x0020) +#define TPM_ALG_KDF2 (TPM_ALG_ID)(0x0021) +#define TPM_ALG_KDF1_SP800_108 (TPM_ALG_ID)(0x0022) +#define TPM_ALG_ECC (TPM_ALG_ID)(0x0023) +#define TPM_ALG_SYMCIPHER (TPM_ALG_ID)(0x0025) +#define TPM_ALG_CTR (TPM_ALG_ID)(0x0040) +#define TPM_ALG_OFB (TPM_ALG_ID)(0x0041) +#define TPM_ALG_CBC (TPM_ALG_ID)(0x0042) +#define TPM_ALG_CFB (TPM_ALG_ID)(0x0043) +#define TPM_ALG_ECB (TPM_ALG_ID)(0x0044) +#define TPM_ALG_LAST (TPM_ALG_ID)(0x0044) + +// Table 8 - TPM_ECC_CURVE Constants +typedef UINT16 TPM_ECC_CURVE; +#define TPM_ECC_NONE (TPM_ECC_CURVE)(0x0000) +#define TPM_ECC_NIST_P192 (TPM_ECC_CURVE)(0x0001) +#define TPM_ECC_NIST_P224 (TPM_ECC_CURVE)(0x0002) +#define TPM_ECC_NIST_P256 (TPM_ECC_CURVE)(0x0003) +#define TPM_ECC_NIST_P384 (TPM_ECC_CURVE)(0x0004) +#define TPM_ECC_NIST_P521 (TPM_ECC_CURVE)(0x0005) +#define TPM_ECC_BN_P256 (TPM_ECC_CURVE)(0x0010) +#define TPM_ECC_BN_P638 (TPM_ECC_CURVE)(0x0011) +#define TPM_ECC_SM2_P256 (TPM_ECC_CURVE)(0x0020) + +// Table 11 - TPM_CC Constants (Numeric Order) +typedef UINT32 TPM_CC; +#define TPM_CC_FIRST (TPM_CC)(0x0000011F) +#define TPM_CC_PP_FIRST (TPM_CC)(0x0000011F) +#define TPM_CC_NV_UndefineSpaceSpecial (TPM_CC)(0x0000011F) +#define TPM_CC_EvictControl (TPM_CC)(0x00000120) +#define TPM_CC_HierarchyControl (TPM_CC)(0x00000121) +#define TPM_CC_NV_UndefineSpace (TPM_CC)(0x00000122) +#define TPM_CC_ChangeEPS (TPM_CC)(0x00000124) +#define TPM_CC_ChangePPS (TPM_CC)(0x00000125) +#define TPM_CC_Clear (TPM_CC)(0x00000126) +#define 
TPM_CC_ClearControl (TPM_CC)(0x00000127) +#define TPM_CC_ClockSet (TPM_CC)(0x00000128) +#define TPM_CC_HierarchyChangeAuth (TPM_CC)(0x00000129) +#define TPM_CC_NV_DefineSpace (TPM_CC)(0x0000012A) +#define TPM_CC_PCR_Allocate (TPM_CC)(0x0000012B) +#define TPM_CC_PCR_SetAuthPolicy (TPM_CC)(0x0000012C) +#define TPM_CC_PP_Commands (TPM_CC)(0x0000012D) +#define TPM_CC_SetPrimaryPolicy (TPM_CC)(0x0000012E) +#define TPM_CC_FieldUpgradeStart (TPM_CC)(0x0000012F) +#define TPM_CC_ClockRateAdjust (TPM_CC)(0x00000130) +#define TPM_CC_CreatePrimary (TPM_CC)(0x00000131) +#define TPM_CC_NV_GlobalWriteLock (TPM_CC)(0x00000132) +#define TPM_CC_PP_LAST (TPM_CC)(0x00000132) +#define TPM_CC_GetCommandAuditDigest (TPM_CC)(0x00000133) +#define TPM_CC_NV_Increment (TPM_CC)(0x00000134) +#define TPM_CC_NV_SetBits (TPM_CC)(0x00000135) +#define TPM_CC_NV_Extend (TPM_CC)(0x00000136) +#define TPM_CC_NV_Write (TPM_CC)(0x00000137) +#define TPM_CC_NV_WriteLock (TPM_CC)(0x00000138) +#define TPM_CC_DictionaryAttackLockReset (TPM_CC)(0x00000139) +#define TPM_CC_DictionaryAttackParameters (TPM_CC)(0x0000013A) +#define TPM_CC_NV_ChangeAuth (TPM_CC)(0x0000013B) +#define TPM_CC_PCR_Event (TPM_CC)(0x0000013C) +#define TPM_CC_PCR_Reset (TPM_CC)(0x0000013D) +#define TPM_CC_SequenceComplete (TPM_CC)(0x0000013E) +#define TPM_CC_SetAlgorithmSet (TPM_CC)(0x0000013F) +#define TPM_CC_SetCommandCodeAuditStatus (TPM_CC)(0x00000140) +#define TPM_CC_FieldUpgradeData (TPM_CC)(0x00000141) +#define TPM_CC_IncrementalSelfTest (TPM_CC)(0x00000142) +#define TPM_CC_SelfTest (TPM_CC)(0x00000143) +#define TPM_CC_Startup (TPM_CC)(0x00000144) +#define TPM_CC_Shutdown (TPM_CC)(0x00000145) +#define TPM_CC_StirRandom (TPM_CC)(0x00000146) +#define TPM_CC_ActivateCredential (TPM_CC)(0x00000147) +#define TPM_CC_Certify (TPM_CC)(0x00000148) +#define TPM_CC_PolicyNV (TPM_CC)(0x00000149) +#define TPM_CC_CertifyCreation (TPM_CC)(0x0000014A) +#define TPM_CC_Duplicate (TPM_CC)(0x0000014B) +#define TPM_CC_GetTime (TPM_CC)(0x0000014C) 
+#define TPM_CC_GetSessionAuditDigest (TPM_CC)(0x0000014D) +#define TPM_CC_NV_Read (TPM_CC)(0x0000014E) +#define TPM_CC_NV_ReadLock (TPM_CC)(0x0000014F) +#define TPM_CC_ObjectChangeAuth (TPM_CC)(0x00000150) +#define TPM_CC_PolicySecret (TPM_CC)(0x00000151) +#define TPM_CC_Rewrap (TPM_CC)(0x00000152) +#define TPM_CC_Create (TPM_CC)(0x00000153) +#define TPM_CC_ECDH_ZGen (TPM_CC)(0x00000154) +#define TPM_CC_HMAC (TPM_CC)(0x00000155) +#define TPM_CC_Import (TPM_CC)(0x00000156) +#define TPM_CC_Load (TPM_CC)(0x00000157) +#define TPM_CC_Quote (TPM_CC)(0x00000158) +#define TPM_CC_RSA_Decrypt (TPM_CC)(0x00000159) +#define TPM_CC_HMAC_Start (TPM_CC)(0x0000015B) +#define TPM_CC_SequenceUpdate (TPM_CC)(0x0000015C) +#define TPM_CC_Sign (TPM_CC)(0x0000015D) +#define TPM_CC_Unseal (TPM_CC)(0x0000015E) +#define TPM_CC_PolicySigned (TPM_CC)(0x00000160) +#define TPM_CC_ContextLoad (TPM_CC)(0x00000161) +#define TPM_CC_ContextSave (TPM_CC)(0x00000162) +#define TPM_CC_ECDH_KeyGen (TPM_CC)(0x00000163) +#define TPM_CC_EncryptDecrypt (TPM_CC)(0x00000164) +#define TPM_CC_FlushContext (TPM_CC)(0x00000165) +#define TPM_CC_LoadExternal (TPM_CC)(0x00000167) +#define TPM_CC_MakeCredential (TPM_CC)(0x00000168) +#define TPM_CC_NV_ReadPublic (TPM_CC)(0x00000169) +#define TPM_CC_PolicyAuthorize (TPM_CC)(0x0000016A) +#define TPM_CC_PolicyAuthValue (TPM_CC)(0x0000016B) +#define TPM_CC_PolicyCommandCode (TPM_CC)(0x0000016C) +#define TPM_CC_PolicyCounterTimer (TPM_CC)(0x0000016D) +#define TPM_CC_PolicyCpHash (TPM_CC)(0x0000016E) +#define TPM_CC_PolicyLocality (TPM_CC)(0x0000016F) +#define TPM_CC_PolicyNameHash (TPM_CC)(0x00000170) +#define TPM_CC_PolicyOR (TPM_CC)(0x00000171) +#define TPM_CC_PolicyTicket (TPM_CC)(0x00000172) +#define TPM_CC_ReadPublic (TPM_CC)(0x00000173) +#define TPM_CC_RSA_Encrypt (TPM_CC)(0x00000174) +#define TPM_CC_StartAuthSession (TPM_CC)(0x00000176) +#define TPM_CC_VerifySignature (TPM_CC)(0x00000177) +#define TPM_CC_ECC_Parameters (TPM_CC)(0x00000178) +#define 
TPM_CC_FirmwareRead (TPM_CC)(0x00000179) +#define TPM_CC_GetCapability (TPM_CC)(0x0000017A) +#define TPM_CC_GetRandom (TPM_CC)(0x0000017B) +#define TPM_CC_GetTestResult (TPM_CC)(0x0000017C) +#define TPM_CC_Hash (TPM_CC)(0x0000017D) +#define TPM_CC_PCR_Read (TPM_CC)(0x0000017E) +#define TPM_CC_PolicyPCR (TPM_CC)(0x0000017F) +#define TPM_CC_PolicyRestart (TPM_CC)(0x00000180) +#define TPM_CC_ReadClock (TPM_CC)(0x00000181) +#define TPM_CC_PCR_Extend (TPM_CC)(0x00000182) +#define TPM_CC_PCR_SetAuthValue (TPM_CC)(0x00000183) +#define TPM_CC_NV_Certify (TPM_CC)(0x00000184) +#define TPM_CC_EventSequenceComplete (TPM_CC)(0x00000185) +#define TPM_CC_HashSequenceStart (TPM_CC)(0x00000186) +#define TPM_CC_PolicyPhysicalPresence (TPM_CC)(0x00000187) +#define TPM_CC_PolicyDuplicationSelect (TPM_CC)(0x00000188) +#define TPM_CC_PolicyGetDigest (TPM_CC)(0x00000189) +#define TPM_CC_TestParms (TPM_CC)(0x0000018A) +#define TPM_CC_Commit (TPM_CC)(0x0000018B) +#define TPM_CC_PolicyPassword (TPM_CC)(0x0000018C) +#define TPM_CC_ZGen_2Phase (TPM_CC)(0x0000018D) +#define TPM_CC_EC_Ephemeral (TPM_CC)(0x0000018E) +#define TPM_CC_LAST (TPM_CC)(0x0000018E) + +// Table 15 - TPM_RC Constants (Actions) +typedef UINT32 TPM_RC; +#define TPM_RC_SUCCESS (TPM_RC)(0x000) +#define TPM_RC_BAD_TAG (TPM_RC)(0x030) +#define RC_VER1 (TPM_RC)(0x100) +#define TPM_RC_INITIALIZE (TPM_RC)(RC_VER1 + 0x000) +#define TPM_RC_FAILURE (TPM_RC)(RC_VER1 + 0x001) +#define TPM_RC_SEQUENCE (TPM_RC)(RC_VER1 + 0x003) +#define TPM_RC_PRIVATE (TPM_RC)(RC_VER1 + 0x00B) +#define TPM_RC_HMAC (TPM_RC)(RC_VER1 + 0x019) +#define TPM_RC_DISABLED (TPM_RC)(RC_VER1 + 0x020) +#define TPM_RC_EXCLUSIVE (TPM_RC)(RC_VER1 + 0x021) +#define TPM_RC_AUTH_TYPE (TPM_RC)(RC_VER1 + 0x024) +#define TPM_RC_AUTH_MISSING (TPM_RC)(RC_VER1 + 0x025) +#define TPM_RC_POLICY (TPM_RC)(RC_VER1 + 0x026) +#define TPM_RC_PCR (TPM_RC)(RC_VER1 + 0x027) +#define TPM_RC_PCR_CHANGED (TPM_RC)(RC_VER1 + 0x028) +#define TPM_RC_UPGRADE (TPM_RC)(RC_VER1 + 0x02D) +#define 
TPM_RC_TOO_MANY_CONTEXTS (TPM_RC)(RC_VER1 + 0x02E) +#define TPM_RC_AUTH_UNAVAILABLE (TPM_RC)(RC_VER1 + 0x02F) +#define TPM_RC_REBOOT (TPM_RC)(RC_VER1 + 0x030) +#define TPM_RC_UNBALANCED (TPM_RC)(RC_VER1 + 0x031) +#define TPM_RC_COMMAND_SIZE (TPM_RC)(RC_VER1 + 0x042) +#define TPM_RC_COMMAND_CODE (TPM_RC)(RC_VER1 + 0x043) +#define TPM_RC_AUTHSIZE (TPM_RC)(RC_VER1 + 0x044) +#define TPM_RC_AUTH_CONTEXT (TPM_RC)(RC_VER1 + 0x045) +#define TPM_RC_NV_RANGE (TPM_RC)(RC_VER1 + 0x046) +#define TPM_RC_NV_SIZE (TPM_RC)(RC_VER1 + 0x047) +#define TPM_RC_NV_LOCKED (TPM_RC)(RC_VER1 + 0x048) +#define TPM_RC_NV_AUTHORIZATION (TPM_RC)(RC_VER1 + 0x049) +#define TPM_RC_NV_UNINITIALIZED (TPM_RC)(RC_VER1 + 0x04A) +#define TPM_RC_NV_SPACE (TPM_RC)(RC_VER1 + 0x04B) +#define TPM_RC_NV_DEFINED (TPM_RC)(RC_VER1 + 0x04C) +#define TPM_RC_BAD_CONTEXT (TPM_RC)(RC_VER1 + 0x050) +#define TPM_RC_CPHASH (TPM_RC)(RC_VER1 + 0x051) +#define TPM_RC_PARENT (TPM_RC)(RC_VER1 + 0x052) +#define TPM_RC_NEEDS_TEST (TPM_RC)(RC_VER1 + 0x053) +#define TPM_RC_NO_RESULT (TPM_RC)(RC_VER1 + 0x054) +#define TPM_RC_SENSITIVE (TPM_RC)(RC_VER1 + 0x055) +#define RC_MAX_FM0 (TPM_RC)(RC_VER1 + 0x07F) +#define RC_FMT1 (TPM_RC)(0x080) +#define TPM_RC_ASYMMETRIC (TPM_RC)(RC_FMT1 + 0x001) +#define TPM_RC_ATTRIBUTES (TPM_RC)(RC_FMT1 + 0x002) +#define TPM_RC_HASH (TPM_RC)(RC_FMT1 + 0x003) +#define TPM_RC_VALUE (TPM_RC)(RC_FMT1 + 0x004) +#define TPM_RC_HIERARCHY (TPM_RC)(RC_FMT1 + 0x005) +#define TPM_RC_KEY_SIZE (TPM_RC)(RC_FMT1 + 0x007) +#define TPM_RC_MGF (TPM_RC)(RC_FMT1 + 0x008) +#define TPM_RC_MODE (TPM_RC)(RC_FMT1 + 0x009) +#define TPM_RC_TYPE (TPM_RC)(RC_FMT1 + 0x00A) +#define TPM_RC_HANDLE (TPM_RC)(RC_FMT1 + 0x00B) +#define TPM_RC_KDF (TPM_RC)(RC_FMT1 + 0x00C) +#define TPM_RC_RANGE (TPM_RC)(RC_FMT1 + 0x00D) +#define TPM_RC_AUTH_FAIL (TPM_RC)(RC_FMT1 + 0x00E) +#define TPM_RC_NONCE (TPM_RC)(RC_FMT1 + 0x00F) +#define TPM_RC_PP (TPM_RC)(RC_FMT1 + 0x010) +#define TPM_RC_SCHEME (TPM_RC)(RC_FMT1 + 0x012) +#define TPM_RC_SIZE 
(TPM_RC)(RC_FMT1 + 0x015) +#define TPM_RC_SYMMETRIC (TPM_RC)(RC_FMT1 + 0x016) +#define TPM_RC_TAG (TPM_RC)(RC_FMT1 + 0x017) +#define TPM_RC_SELECTOR (TPM_RC)(RC_FMT1 + 0x018) +#define TPM_RC_INSUFFICIENT (TPM_RC)(RC_FMT1 + 0x01A) +#define TPM_RC_SIGNATURE (TPM_RC)(RC_FMT1 + 0x01B) +#define TPM_RC_KEY (TPM_RC)(RC_FMT1 + 0x01C) +#define TPM_RC_POLICY_FAIL (TPM_RC)(RC_FMT1 + 0x01D) +#define TPM_RC_INTEGRITY (TPM_RC)(RC_FMT1 + 0x01F) +#define TPM_RC_TICKET (TPM_RC)(RC_FMT1 + 0x020) +#define TPM_RC_RESERVED_BITS (TPM_RC)(RC_FMT1 + 0x021) +#define TPM_RC_BAD_AUTH (TPM_RC)(RC_FMT1 + 0x022) +#define TPM_RC_EXPIRED (TPM_RC)(RC_FMT1 + 0x023) +#define TPM_RC_POLICY_CC (TPM_RC)(RC_FMT1 + 0x024 ) +#define TPM_RC_BINDING (TPM_RC)(RC_FMT1 + 0x025) +#define TPM_RC_CURVE (TPM_RC)(RC_FMT1 + 0x026) +#define TPM_RC_ECC_POINT (TPM_RC)(RC_FMT1 + 0x027) +#define RC_WARN (TPM_RC)(0x900) +#define TPM_RC_CONTEXT_GAP (TPM_RC)(RC_WARN + 0x001) +#define TPM_RC_OBJECT_MEMORY (TPM_RC)(RC_WARN + 0x002) +#define TPM_RC_SESSION_MEMORY (TPM_RC)(RC_WARN + 0x003) +#define TPM_RC_MEMORY (TPM_RC)(RC_WARN + 0x004) +#define TPM_RC_SESSION_HANDLES (TPM_RC)(RC_WARN + 0x005) +#define TPM_RC_OBJECT_HANDLES (TPM_RC)(RC_WARN + 0x006) +#define TPM_RC_LOCALITY (TPM_RC)(RC_WARN + 0x007) +#define TPM_RC_YIELDED (TPM_RC)(RC_WARN + 0x008) +#define TPM_RC_CANCELED (TPM_RC)(RC_WARN + 0x009) +#define TPM_RC_TESTING (TPM_RC)(RC_WARN + 0x00A) +#define TPM_RC_REFERENCE_H0 (TPM_RC)(RC_WARN + 0x010) +#define TPM_RC_REFERENCE_H1 (TPM_RC)(RC_WARN + 0x011) +#define TPM_RC_REFERENCE_H2 (TPM_RC)(RC_WARN + 0x012) +#define TPM_RC_REFERENCE_H3 (TPM_RC)(RC_WARN + 0x013) +#define TPM_RC_REFERENCE_H4 (TPM_RC)(RC_WARN + 0x014) +#define TPM_RC_REFERENCE_H5 (TPM_RC)(RC_WARN + 0x015) +#define TPM_RC_REFERENCE_H6 (TPM_RC)(RC_WARN + 0x016) +#define TPM_RC_REFERENCE_S0 (TPM_RC)(RC_WARN + 0x018) +#define TPM_RC_REFERENCE_S1 (TPM_RC)(RC_WARN + 0x019) +#define TPM_RC_REFERENCE_S2 (TPM_RC)(RC_WARN + 0x01A) +#define TPM_RC_REFERENCE_S3 
(TPM_RC)(RC_WARN + 0x01B) +#define TPM_RC_REFERENCE_S4 (TPM_RC)(RC_WARN + 0x01C) +#define TPM_RC_REFERENCE_S5 (TPM_RC)(RC_WARN + 0x01D) +#define TPM_RC_REFERENCE_S6 (TPM_RC)(RC_WARN + 0x01E) +#define TPM_RC_NV_RATE (TPM_RC)(RC_WARN + 0x020) +#define TPM_RC_LOCKOUT (TPM_RC)(RC_WARN + 0x021) +#define TPM_RC_RETRY (TPM_RC)(RC_WARN + 0x022) +#define TPM_RC_NV_UNAVAILABLE (TPM_RC)(RC_WARN + 0x023) +#define TPM_RC_NOT_USED (TPM_RC)(RC_WARN + 0x7F) +#define TPM_RC_H (TPM_RC)(0x000) +#define TPM_RC_P (TPM_RC)(0x040) +#define TPM_RC_S (TPM_RC)(0x800) +#define TPM_RC_1 (TPM_RC)(0x100) +#define TPM_RC_2 (TPM_RC)(0x200) +#define TPM_RC_3 (TPM_RC)(0x300) +#define TPM_RC_4 (TPM_RC)(0x400) +#define TPM_RC_5 (TPM_RC)(0x500) +#define TPM_RC_6 (TPM_RC)(0x600) +#define TPM_RC_7 (TPM_RC)(0x700) +#define TPM_RC_8 (TPM_RC)(0x800) +#define TPM_RC_9 (TPM_RC)(0x900) +#define TPM_RC_A (TPM_RC)(0xA00) +#define TPM_RC_B (TPM_RC)(0xB00) +#define TPM_RC_C (TPM_RC)(0xC00) +#define TPM_RC_D (TPM_RC)(0xD00) +#define TPM_RC_E (TPM_RC)(0xE00) +#define TPM_RC_F (TPM_RC)(0xF00) +#define TPM_RC_N_MASK (TPM_RC)(0xF00) + +// Table 16 - TPM_CLOCK_ADJUST Constants +typedef INT8 TPM_CLOCK_ADJUST; +#define TPM_CLOCK_COARSE_SLOWER (TPM_CLOCK_ADJUST)(-3) +#define TPM_CLOCK_MEDIUM_SLOWER (TPM_CLOCK_ADJUST)(-2) +#define TPM_CLOCK_FINE_SLOWER (TPM_CLOCK_ADJUST)(-1) +#define TPM_CLOCK_NO_CHANGE (TPM_CLOCK_ADJUST)(0) +#define TPM_CLOCK_FINE_FASTER (TPM_CLOCK_ADJUST)(1) +#define TPM_CLOCK_MEDIUM_FASTER (TPM_CLOCK_ADJUST)(2) +#define TPM_CLOCK_COARSE_FASTER (TPM_CLOCK_ADJUST)(3) + +// Table 17 - TPM_EO Constants +typedef UINT16 TPM_EO; +#define TPM_EO_EQ (TPM_EO)(0x0000) +#define TPM_EO_NEQ (TPM_EO)(0x0001) +#define TPM_EO_SIGNED_GT (TPM_EO)(0x0002) +#define TPM_EO_UNSIGNED_GT (TPM_EO)(0x0003) +#define TPM_EO_SIGNED_LT (TPM_EO)(0x0004) +#define TPM_EO_UNSIGNED_LT (TPM_EO)(0x0005) +#define TPM_EO_SIGNED_GE (TPM_EO)(0x0006) +#define TPM_EO_UNSIGNED_GE (TPM_EO)(0x0007) +#define TPM_EO_SIGNED_LE (TPM_EO)(0x0008) +#define 
TPM_EO_UNSIGNED_LE (TPM_EO)(0x0009) +#define TPM_EO_BITSET (TPM_EO)(0x000A) +#define TPM_EO_BITCLEAR (TPM_EO)(0x000B) + +// Table 18 - TPM_ST Constants +typedef UINT16 TPM_ST; +#define TPM_ST_RSP_COMMAND (TPM_ST)(0x00C4) +#define TPM_ST_NULL (TPM_ST)(0X8000) +#define TPM_ST_NO_SESSIONS (TPM_ST)(0x8001) +#define TPM_ST_SESSIONS (TPM_ST)(0x8002) +#define TPM_ST_ATTEST_NV (TPM_ST)(0x8014) +#define TPM_ST_ATTEST_COMMAND_AUDIT (TPM_ST)(0x8015) +#define TPM_ST_ATTEST_SESSION_AUDIT (TPM_ST)(0x8016) +#define TPM_ST_ATTEST_CERTIFY (TPM_ST)(0x8017) +#define TPM_ST_ATTEST_QUOTE (TPM_ST)(0x8018) +#define TPM_ST_ATTEST_TIME (TPM_ST)(0x8019) +#define TPM_ST_ATTEST_CREATION (TPM_ST)(0x801A) +#define TPM_ST_CREATION (TPM_ST)(0x8021) +#define TPM_ST_VERIFIED (TPM_ST)(0x8022) +#define TPM_ST_AUTH_SECRET (TPM_ST)(0x8023) +#define TPM_ST_HASHCHECK (TPM_ST)(0x8024) +#define TPM_ST_AUTH_SIGNED (TPM_ST)(0x8025) +#define TPM_ST_FU_MANIFEST (TPM_ST)(0x8029) + +// Table 19 - TPM_SU Constants +typedef UINT16 TPM_SU; +#define TPM_SU_CLEAR (TPM_SU)(0x0000) +#define TPM_SU_STATE (TPM_SU)(0x0001) + +// Table 20 - TPM_SE Constants +typedef UINT8 TPM_SE; +#define TPM_SE_HMAC (TPM_SE)(0x00) +#define TPM_SE_POLICY (TPM_SE)(0x01) +#define TPM_SE_TRIAL (TPM_SE)(0x03) + +// Table 21 - TPM_CAP Constants +typedef UINT32 TPM_CAP; +#define TPM_CAP_FIRST (TPM_CAP)(0x00000000) +#define TPM_CAP_ALGS (TPM_CAP)(0x00000000) +#define TPM_CAP_HANDLES (TPM_CAP)(0x00000001) +#define TPM_CAP_COMMANDS (TPM_CAP)(0x00000002) +#define TPM_CAP_PP_COMMANDS (TPM_CAP)(0x00000003) +#define TPM_CAP_AUDIT_COMMANDS (TPM_CAP)(0x00000004) +#define TPM_CAP_PCRS (TPM_CAP)(0x00000005) +#define TPM_CAP_TPM_PROPERTIES (TPM_CAP)(0x00000006) +#define TPM_CAP_PCR_PROPERTIES (TPM_CAP)(0x00000007) +#define TPM_CAP_ECC_CURVES (TPM_CAP)(0x00000008) +#define TPM_CAP_LAST (TPM_CAP)(0x00000008) +#define TPM_CAP_VENDOR_PROPERTY (TPM_CAP)(0x00000100) + +// Table 22 - TPM_PT Constants +typedef UINT32 TPM_PT; +#define TPM_PT_NONE 
(TPM_PT)(0x00000000) +#define PT_GROUP (TPM_PT)(0x00000100) +#define PT_FIXED (TPM_PT)(PT_GROUP * 1) +#define TPM_PT_FAMILY_INDICATOR (TPM_PT)(PT_FIXED + 0) +#define TPM_PT_LEVEL (TPM_PT)(PT_FIXED + 1) +#define TPM_PT_REVISION (TPM_PT)(PT_FIXED + 2) +#define TPM_PT_DAY_OF_YEAR (TPM_PT)(PT_FIXED + 3) +#define TPM_PT_YEAR (TPM_PT)(PT_FIXED + 4) +#define TPM_PT_MANUFACTURER (TPM_PT)(PT_FIXED + 5) +#define TPM_PT_VENDOR_STRING_1 (TPM_PT)(PT_FIXED + 6) +#define TPM_PT_VENDOR_STRING_2 (TPM_PT)(PT_FIXED + 7) +#define TPM_PT_VENDOR_STRING_3 (TPM_PT)(PT_FIXED + 8) +#define TPM_PT_VENDOR_STRING_4 (TPM_PT)(PT_FIXED + 9) +#define TPM_PT_VENDOR_TPM_TYPE (TPM_PT)(PT_FIXED + 10) +#define TPM_PT_FIRMWARE_VERSION_1 (TPM_PT)(PT_FIXED + 11) +#define TPM_PT_FIRMWARE_VERSION_2 (TPM_PT)(PT_FIXED + 12) +#define TPM_PT_INPUT_BUFFER (TPM_PT)(PT_FIXED + 13) +#define TPM_PT_HR_TRANSIENT_MIN (TPM_PT)(PT_FIXED + 14) +#define TPM_PT_HR_PERSISTENT_MIN (TPM_PT)(PT_FIXED + 15) +#define TPM_PT_HR_LOADED_MIN (TPM_PT)(PT_FIXED + 16) +#define TPM_PT_ACTIVE_SESSIONS_MAX (TPM_PT)(PT_FIXED + 17) +#define TPM_PT_PCR_COUNT (TPM_PT)(PT_FIXED + 18) +#define TPM_PT_PCR_SELECT_MIN (TPM_PT)(PT_FIXED + 19) +#define TPM_PT_CONTEXT_GAP_MAX (TPM_PT)(PT_FIXED + 20) +#define TPM_PT_NV_COUNTERS_MAX (TPM_PT)(PT_FIXED + 22) +#define TPM_PT_NV_INDEX_MAX (TPM_PT)(PT_FIXED + 23) +#define TPM_PT_MEMORY (TPM_PT)(PT_FIXED + 24) +#define TPM_PT_CLOCK_UPDATE (TPM_PT)(PT_FIXED + 25) +#define TPM_PT_CONTEXT_HASH (TPM_PT)(PT_FIXED + 26) +#define TPM_PT_CONTEXT_SYM (TPM_PT)(PT_FIXED + 27) +#define TPM_PT_CONTEXT_SYM_SIZE (TPM_PT)(PT_FIXED + 28) +#define TPM_PT_ORDERLY_COUNT (TPM_PT)(PT_FIXED + 29) +#define TPM_PT_MAX_COMMAND_SIZE (TPM_PT)(PT_FIXED + 30) +#define TPM_PT_MAX_RESPONSE_SIZE (TPM_PT)(PT_FIXED + 31) +#define TPM_PT_MAX_DIGEST (TPM_PT)(PT_FIXED + 32) +#define TPM_PT_MAX_OBJECT_CONTEXT (TPM_PT)(PT_FIXED + 33) +#define TPM_PT_MAX_SESSION_CONTEXT (TPM_PT)(PT_FIXED + 34) +#define TPM_PT_PS_FAMILY_INDICATOR (TPM_PT)(PT_FIXED + 
35) +#define TPM_PT_PS_LEVEL (TPM_PT)(PT_FIXED + 36) +#define TPM_PT_PS_REVISION (TPM_PT)(PT_FIXED + 37) +#define TPM_PT_PS_DAY_OF_YEAR (TPM_PT)(PT_FIXED + 38) +#define TPM_PT_PS_YEAR (TPM_PT)(PT_FIXED + 39) +#define TPM_PT_SPLIT_MAX (TPM_PT)(PT_FIXED + 40) +#define TPM_PT_TOTAL_COMMANDS (TPM_PT)(PT_FIXED + 41) +#define TPM_PT_LIBRARY_COMMANDS (TPM_PT)(PT_FIXED + 42) +#define TPM_PT_VENDOR_COMMANDS (TPM_PT)(PT_FIXED + 43) +#define PT_VAR (TPM_PT)(PT_GROUP * 2) +#define TPM_PT_PERMANENT (TPM_PT)(PT_VAR + 0) +#define TPM_PT_STARTUP_CLEAR (TPM_PT)(PT_VAR + 1) +#define TPM_PT_HR_NV_INDEX (TPM_PT)(PT_VAR + 2) +#define TPM_PT_HR_LOADED (TPM_PT)(PT_VAR + 3) +#define TPM_PT_HR_LOADED_AVAIL (TPM_PT)(PT_VAR + 4) +#define TPM_PT_HR_ACTIVE (TPM_PT)(PT_VAR + 5) +#define TPM_PT_HR_ACTIVE_AVAIL (TPM_PT)(PT_VAR + 6) +#define TPM_PT_HR_TRANSIENT_AVAIL (TPM_PT)(PT_VAR + 7) +#define TPM_PT_HR_PERSISTENT (TPM_PT)(PT_VAR + 8) +#define TPM_PT_HR_PERSISTENT_AVAIL (TPM_PT)(PT_VAR + 9) +#define TPM_PT_NV_COUNTERS (TPM_PT)(PT_VAR + 10) +#define TPM_PT_NV_COUNTERS_AVAIL (TPM_PT)(PT_VAR + 11) +#define TPM_PT_ALGORITHM_SET (TPM_PT)(PT_VAR + 12) +#define TPM_PT_LOADED_CURVES (TPM_PT)(PT_VAR + 13) +#define TPM_PT_LOCKOUT_COUNTER (TPM_PT)(PT_VAR + 14) +#define TPM_PT_MAX_AUTH_FAIL (TPM_PT)(PT_VAR + 15) +#define TPM_PT_LOCKOUT_INTERVAL (TPM_PT)(PT_VAR + 16) +#define TPM_PT_LOCKOUT_RECOVERY (TPM_PT)(PT_VAR + 17) +#define TPM_PT_NV_WRITE_RECOVERY (TPM_PT)(PT_VAR + 18) +#define TPM_PT_AUDIT_COUNTER_0 (TPM_PT)(PT_VAR + 19) +#define TPM_PT_AUDIT_COUNTER_1 (TPM_PT)(PT_VAR + 20) + +// Table 23 - TPM_PT_PCR Constants +typedef UINT32 TPM_PT_PCR; +#define TPM_PT_PCR_FIRST (TPM_PT_PCR)(0x00000000) +#define TPM_PT_PCR_SAVE (TPM_PT_PCR)(0x00000000) +#define TPM_PT_PCR_EXTEND_L0 (TPM_PT_PCR)(0x00000001) +#define TPM_PT_PCR_RESET_L0 (TPM_PT_PCR)(0x00000002) +#define TPM_PT_PCR_EXTEND_L1 (TPM_PT_PCR)(0x00000003) +#define TPM_PT_PCR_RESET_L1 (TPM_PT_PCR)(0x00000004) +#define TPM_PT_PCR_EXTEND_L2 
(TPM_PT_PCR)(0x00000005) +#define TPM_PT_PCR_RESET_L2 (TPM_PT_PCR)(0x00000006) +#define TPM_PT_PCR_EXTEND_L3 (TPM_PT_PCR)(0x00000007) +#define TPM_PT_PCR_RESET_L3 (TPM_PT_PCR)(0x00000008) +#define TPM_PT_PCR_EXTEND_L4 (TPM_PT_PCR)(0x00000009) +#define TPM_PT_PCR_RESET_L4 (TPM_PT_PCR)(0x0000000A) +#define TPM_PT_PCR_NO_INCREMENT (TPM_PT_PCR)(0x00000011) +#define TPM_PT_PCR_DRTM_RESET (TPM_PT_PCR)(0x00000012) +#define TPM_PT_PCR_POLICY (TPM_PT_PCR)(0x00000013) +#define TPM_PT_PCR_AUTH (TPM_PT_PCR)(0x00000014) +#define TPM_PT_PCR_LAST (TPM_PT_PCR)(0x00000014) + +// Table 24 - TPM_PS Constants +typedef UINT32 TPM_PS; +#define TPM_PS_MAIN (TPM_PS)(0x00000000) +#define TPM_PS_PC (TPM_PS)(0x00000001) +#define TPM_PS_PDA (TPM_PS)(0x00000002) +#define TPM_PS_CELL_PHONE (TPM_PS)(0x00000003) +#define TPM_PS_SERVER (TPM_PS)(0x00000004) +#define TPM_PS_PERIPHERAL (TPM_PS)(0x00000005) +#define TPM_PS_TSS (TPM_PS)(0x00000006) +#define TPM_PS_STORAGE (TPM_PS)(0x00000007) +#define TPM_PS_AUTHENTICATION (TPM_PS)(0x00000008) +#define TPM_PS_EMBEDDED (TPM_PS)(0x00000009) +#define TPM_PS_HARDCOPY (TPM_PS)(0x0000000A) +#define TPM_PS_INFRASTRUCTURE (TPM_PS)(0x0000000B) +#define TPM_PS_VIRTUALIZATION (TPM_PS)(0x0000000C) +#define TPM_PS_TNC (TPM_PS)(0x0000000D) +#define TPM_PS_MULTI_TENANT (TPM_PS)(0x0000000E) +#define TPM_PS_TC (TPM_PS)(0x0000000F) + +// 7 Handles + +// Table 25 - Handles Types +// +// NOTE: Comment because it has same name as TPM1.2 (value is same, so not runtime issue) +// +//typedef UINT32 TPM_HANDLE; + +// Table 26 - TPM_HT Constants +typedef UINT8 TPM_HT; +#define TPM_HT_PCR (TPM_HT)(0x00) +#define TPM_HT_NV_INDEX (TPM_HT)(0x01) +#define TPM_HT_HMAC_SESSION (TPM_HT)(0x02) +#define TPM_HT_LOADED_SESSION (TPM_HT)(0x02) +#define TPM_HT_POLICY_SESSION (TPM_HT)(0x03) +#define TPM_HT_ACTIVE_SESSION (TPM_HT)(0x03) +#define TPM_HT_PERMANENT (TPM_HT)(0x40) +#define TPM_HT_TRANSIENT (TPM_HT)(0x80) +#define TPM_HT_PERSISTENT (TPM_HT)(0x81) + +// Table 27 - TPM_RH Constants 
+typedef UINT32 TPM_RH; +#define TPM_RH_FIRST (TPM_RH)(0x40000000) +#define TPM_RH_SRK (TPM_RH)(0x40000000) +#define TPM_RH_OWNER (TPM_RH)(0x40000001) +#define TPM_RH_REVOKE (TPM_RH)(0x40000002) +#define TPM_RH_TRANSPORT (TPM_RH)(0x40000003) +#define TPM_RH_OPERATOR (TPM_RH)(0x40000004) +#define TPM_RH_ADMIN (TPM_RH)(0x40000005) +#define TPM_RH_EK (TPM_RH)(0x40000006) +#define TPM_RH_NULL (TPM_RH)(0x40000007) +#define TPM_RH_UNASSIGNED (TPM_RH)(0x40000008) +#define TPM_RS_PW (TPM_RH)(0x40000009) +#define TPM_RH_LOCKOUT (TPM_RH)(0x4000000A) +#define TPM_RH_ENDORSEMENT (TPM_RH)(0x4000000B) +#define TPM_RH_PLATFORM (TPM_RH)(0x4000000C) +#define TPM_RH_PLATFORM_NV (TPM_RH)(0x4000000D) +#define TPM_RH_AUTH_00 (TPM_RH)(0x40000010) +#define TPM_RH_AUTH_FF (TPM_RH)(0x4000010F) +#define TPM_RH_LAST (TPM_RH)(0x4000010F) + +// Table 28 - TPM_HC Constants +typedef TPM_HANDLE TPM_HC; +#define HR_HANDLE_MASK (TPM_HC)(0x00FFFFFF) +#define HR_RANGE_MASK (TPM_HC)(0xFF000000) +#define HR_SHIFT (TPM_HC)(24) +#define HR_PCR (TPM_HC)((TPM_HC)TPM_HT_PCR << HR_SHIFT) +#define HR_HMAC_SESSION (TPM_HC)((TPM_HC)TPM_HT_HMAC_SESSION << HR_SHIFT) +#define HR_POLICY_SESSION (TPM_HC)((TPM_HC)TPM_HT_POLICY_SESSION << HR_SHIFT) +#define HR_TRANSIENT (TPM_HC)((TPM_HC)TPM_HT_TRANSIENT << HR_SHIFT) +#define HR_PERSISTENT (TPM_HC)((TPM_HC)TPM_HT_PERSISTENT << HR_SHIFT) +#define HR_NV_INDEX (TPM_HC)((TPM_HC)TPM_HT_NV_INDEX << HR_SHIFT) +#define HR_PERMANENT (TPM_HC)((TPM_HC)TPM_HT_PERMANENT << HR_SHIFT) +#define PCR_FIRST (TPM_HC)(HR_PCR + 0) +#define PCR_LAST (TPM_HC)(PCR_FIRST + IMPLEMENTATION_PCR - 1) +#define HMAC_SESSION_FIRST (TPM_HC)(HR_HMAC_SESSION + 0) +#define HMAC_SESSION_LAST (TPM_HC)(HMAC_SESSION_FIRST + MAX_ACTIVE_SESSIONS - 1) +#define LOADED_SESSION_FIRST (TPM_HC)(HMAC_SESSION_FIRST) +#define LOADED_SESSION_LAST (TPM_HC)(HMAC_SESSION_LAST) +#define POLICY_SESSION_FIRST (TPM_HC)(HR_POLICY_SESSION + 0) +#define POLICY_SESSION_LAST (TPM_HC)(POLICY_SESSION_FIRST + MAX_ACTIVE_SESSIONS - 1) 
+#define TRANSIENT_FIRST (TPM_HC)(HR_TRANSIENT + 0) +#define ACTIVE_SESSION_FIRST (TPM_HC)(POLICY_SESSION_FIRST) +#define ACTIVE_SESSION_LAST (TPM_HC)(POLICY_SESSION_LAST) +#define TRANSIENT_LAST (TPM_HC)(TRANSIENT_FIRST+MAX_LOADED_OBJECTS - 1) +#define PERSISTENT_FIRST (TPM_HC)(HR_PERSISTENT + 0) +#define PERSISTENT_LAST (TPM_HC)(PERSISTENT_FIRST + 0x00FFFFFF) +#define PLATFORM_PERSISTENT (TPM_HC)(PERSISTENT_FIRST + 0x00800000) +#define NV_INDEX_FIRST (TPM_HC)(HR_NV_INDEX + 0) +#define NV_INDEX_LAST (TPM_HC)(NV_INDEX_FIRST + 0x00FFFFFF) +#define PERMANENT_FIRST (TPM_HC)(TPM_RH_FIRST) +#define PERMANENT_LAST (TPM_HC)(TPM_RH_LAST) + +// 8 Attribute Structures + +// Table 29 - TPMA_ALGORITHM Bits +typedef struct { + UINT32 asymmetric : 1; + UINT32 symmetric : 1; + UINT32 hash : 1; + UINT32 object : 1; + UINT32 reserved4_7 : 4; + UINT32 signing : 1; + UINT32 encrypting : 1; + UINT32 method : 1; + UINT32 reserved11_31 : 21; +} TPMA_ALGORITHM; + +// Table 30 - TPMA_OBJECT Bits +typedef struct { + UINT32 reserved1 : 1; + UINT32 fixedTPM : 1; + UINT32 stClear : 1; + UINT32 reserved4 : 1; + UINT32 fixedParent : 1; + UINT32 sensitiveDataOrigin : 1; + UINT32 userWithAuth : 1; + UINT32 adminWithPolicy : 1; + UINT32 reserved8_9 : 2; + UINT32 noDA : 1; + UINT32 encryptedDuplication : 1; + UINT32 reserved12_15 : 4; + UINT32 restricted : 1; + UINT32 decrypt : 1; + UINT32 sign : 1; + UINT32 reserved19_31 : 13; +} TPMA_OBJECT; + +// Table 31 - TPMA_SESSION Bits +typedef struct { + UINT8 continueSession : 1; + UINT8 auditExclusive : 1; + UINT8 auditReset : 1; + UINT8 reserved3_4 : 2; + UINT8 decrypt : 1; + UINT8 encrypt : 1; + UINT8 audit : 1; +} TPMA_SESSION; + +// Table 32 - TPMA_LOCALITY Bits +// +// NOTE: Use low case here to resolve conflict +// +typedef struct { + UINT8 locZero : 1; + UINT8 locOne : 1; + UINT8 locTwo : 1; + UINT8 locThree : 1; + UINT8 locFour : 1; + UINT8 Extended : 3; +} TPMA_LOCALITY; + +// Table 33 - TPMA_PERMANENT Bits +typedef struct { + UINT32 
ownerAuthSet : 1; + UINT32 endorsementAuthSet : 1; + UINT32 lockoutAuthSet : 1; + UINT32 reserved3_7 : 5; + UINT32 disableClear : 1; + UINT32 inLockout : 1; + UINT32 tpmGeneratedEPS : 1; + UINT32 reserved11_31 : 21; +} TPMA_PERMANENT; + +// Table 34 - TPMA_STARTUP_CLEAR Bits +typedef struct { + UINT32 phEnable : 1; + UINT32 shEnable : 1; + UINT32 ehEnable : 1; + UINT32 reserved3_30 : 28; + UINT32 orderly : 1; +} TPMA_STARTUP_CLEAR; + +// Table 35 - TPMA_MEMORY Bits +typedef struct { + UINT32 sharedRAM : 1; + UINT32 sharedNV : 1; + UINT32 objectCopiedToRam : 1; + UINT32 reserved3_31 : 29; +} TPMA_MEMORY; + +// Table 36 - TPMA_CC Bits +typedef struct { + UINT32 commandIndex : 16; + UINT32 reserved16_21 : 6; + UINT32 nv : 1; + UINT32 extensive : 1; + UINT32 flushed : 1; + UINT32 cHandles : 3; + UINT32 rHandle : 1; + UINT32 V : 1; + UINT32 Res : 2; +} TPMA_CC; + +// 9 Interface Types + +// Table 37 - TPMI_YES_NO Type +typedef BYTE TPMI_YES_NO; + +// Table 38 - TPMI_DH_OBJECT Type +typedef TPM_HANDLE TPMI_DH_OBJECT; + +// Table 39 - TPMI_DH_PERSISTENT Type +typedef TPM_HANDLE TPMI_DH_PERSISTENT; + +// Table 40 - TPMI_DH_ENTITY Type +typedef TPM_HANDLE TPMI_DH_ENTITY; + +// Table 41 - TPMI_DH_PCR Type +typedef TPM_HANDLE TPMI_DH_PCR; + +// Table 42 - TPMI_SH_AUTH_SESSION Type +typedef TPM_HANDLE TPMI_SH_AUTH_SESSION; + +// Table 43 - TPMI_SH_HMAC Type +typedef TPM_HANDLE TPMI_SH_HMAC; + +// Table 44 - TPMI_SH_POLICY Type +typedef TPM_HANDLE TPMI_SH_POLICY; + +// Table 45 - TPMI_DH_CONTEXT Type +typedef TPM_HANDLE TPMI_DH_CONTEXT; + +// Table 46 - TPMI_RH_HIERARCHY Type +typedef TPM_HANDLE TPMI_RH_HIERARCHY; + +// Table 47 - TPMI_RH_HIERARCHY_AUTH Type +typedef TPM_HANDLE TPMI_RH_HIERARCHY_AUTH; + +// Table 48 - TPMI_RH_PLATFORM Type +typedef TPM_HANDLE TPMI_RH_PLATFORM; + +// Table 49 - TPMI_RH_OWNER Type +typedef TPM_HANDLE TPMI_RH_OWNER; + +// Table 50 - TPMI_RH_ENDORSEMENT Type +typedef TPM_HANDLE TPMI_RH_ENDORSEMENT; + +// Table 51 - TPMI_RH_PROVISION Type +typedef 
TPM_HANDLE TPMI_RH_PROVISION; + +// Table 52 - TPMI_RH_CLEAR Type +typedef TPM_HANDLE TPMI_RH_CLEAR; + +// Table 53 - TPMI_RH_NV_AUTH Type +typedef TPM_HANDLE TPMI_RH_NV_AUTH; + +// Table 54 - TPMI_RH_LOCKOUT Type +typedef TPM_HANDLE TPMI_RH_LOCKOUT; + +// Table 55 - TPMI_RH_NV_INDEX Type +typedef TPM_HANDLE TPMI_RH_NV_INDEX; + +// Table 56 - TPMI_ALG_HASH Type +typedef TPM_ALG_ID TPMI_ALG_HASH; + +// Table 57 - TPMI_ALG_ASYM Type +typedef TPM_ALG_ID TPMI_ALG_ASYM; + +// Table 58 - TPMI_ALG_SYM Type +typedef TPM_ALG_ID TPMI_ALG_SYM; + +// Table 59 - TPMI_ALG_SYM_OBJECT Type +typedef TPM_ALG_ID TPMI_ALG_SYM_OBJECT; + +// Table 60 - TPMI_ALG_SYM_MODE Type +typedef TPM_ALG_ID TPMI_ALG_SYM_MODE; + +// Table 61 - TPMI_ALG_KDF Type +typedef TPM_ALG_ID TPMI_ALG_KDF; + +// Table 62 - TPMI_ALG_SIG_SCHEME Type +typedef TPM_ALG_ID TPMI_ALG_SIG_SCHEME; + +// Table 63 - TPMI_ECC_KEY_EXCHANGE Type +typedef TPM_ALG_ID TPMI_ECC_KEY_EXCHANGE; + +// Table 64 - TPMI_ST_COMMAND_TAG Type +typedef TPM_ST TPMI_ST_COMMAND_TAG; + +// 10 Structure Definitions + +// Table 65 - TPMS_ALGORITHM_DESCRIPTION Structure +typedef struct { + TPM_ALG_ID alg; + TPMA_ALGORITHM attributes; +} TPMS_ALGORITHM_DESCRIPTION; + +// Table 66 - TPMU_HA Union +typedef union { + BYTE sha1[SHA1_DIGEST_SIZE]; + BYTE sha256[SHA256_DIGEST_SIZE]; + BYTE sm3_256[SM3_256_DIGEST_SIZE]; + BYTE sha384[SHA384_DIGEST_SIZE]; + BYTE sha512[SHA512_DIGEST_SIZE]; +} TPMU_HA; + +// Table 67 - TPMT_HA Structure +typedef struct { + TPMI_ALG_HASH hashAlg; + TPMU_HA digest; +} TPMT_HA; + +// Table 68 - TPM2B_DIGEST Structure +typedef struct { + UINT16 size; + BYTE buffer[sizeof(TPMU_HA)]; +} TPM2B_DIGEST; + +// Table 69 - TPM2B_DATA Structure +typedef struct { + UINT16 size; + BYTE buffer[sizeof(TPMT_HA)]; +} TPM2B_DATA; + +// Table 70 - TPM2B_NONCE Types +typedef TPM2B_DIGEST TPM2B_NONCE; + +// Table 71 - TPM2B_AUTH Types +typedef TPM2B_DIGEST TPM2B_AUTH; + +// Table 72 - TPM2B_OPERAND Types +typedef TPM2B_DIGEST TPM2B_OPERAND; + +// 
Table 73 - TPM2B_EVENT Structure +typedef struct { + UINT16 size; + BYTE buffer[1024]; +} TPM2B_EVENT; + +// Table 74 - TPM2B_MAX_BUFFER Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_DIGEST_BUFFER]; +} TPM2B_MAX_BUFFER; + +// Table 75 - TPM2B_MAX_NV_BUFFER Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_NV_INDEX_SIZE]; +} TPM2B_MAX_NV_BUFFER; + +// Table 76 - TPM2B_TIMEOUT Structure +typedef struct { + UINT16 size; + BYTE buffer[sizeof(UINT64)]; +} TPM2B_TIMEOUT; + +// Table 77 -- TPM2B_IV Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_SYM_BLOCK_SIZE]; +} TPM2B_IV; + +// Table 78 - TPMU_NAME Union +typedef union { + TPMT_HA digest; + TPM_HANDLE handle; +} TPMU_NAME; + +// Table 79 - TPM2B_NAME Structure +typedef struct { + UINT16 size; + BYTE name[sizeof(TPMU_NAME)]; +} TPM2B_NAME; + +// Table 80 - TPMS_PCR_SELECT Structure +typedef struct { + UINT8 sizeofSelect; + BYTE pcrSelect[PCR_SELECT_MAX]; +} TPMS_PCR_SELECT; + +// Table 81 - TPMS_PCR_SELECTION Structure +typedef struct { + TPMI_ALG_HASH hash; + UINT8 sizeofSelect; + BYTE pcrSelect[PCR_SELECT_MAX]; +} TPMS_PCR_SELECTION; + +// Table 84 - TPMT_TK_CREATION Structure +typedef struct { + TPM_ST tag; + TPMI_RH_HIERARCHY hierarchy; + TPM2B_DIGEST digest; +} TPMT_TK_CREATION; + +// Table 85 - TPMT_TK_VERIFIED Structure +typedef struct { + TPM_ST tag; + TPMI_RH_HIERARCHY hierarchy; + TPM2B_DIGEST digest; +} TPMT_TK_VERIFIED; + +// Table 86 - TPMT_TK_AUTH Structure +typedef struct { + TPM_ST tag; + TPMI_RH_HIERARCHY hierarchy; + TPM2B_DIGEST digest; +} TPMT_TK_AUTH; + +// Table 87 - TPMT_TK_HASHCHECK Structure +typedef struct { + TPM_ST tag; + TPMI_RH_HIERARCHY hierarchy; + TPM2B_DIGEST digest; +} TPMT_TK_HASHCHECK; + +// Table 88 - TPMS_ALG_PROPERTY Structure +typedef struct { + TPM_ALG_ID alg; + TPMA_ALGORITHM algProperties; +} TPMS_ALG_PROPERTY; + +// Table 89 - TPMS_TAGGED_PROPERTY Structure +typedef struct { + TPM_PT property; + UINT32 value; +} TPMS_TAGGED_PROPERTY; + 
+// Table 90 - TPMS_TAGGED_PCR_SELECT Structure +typedef struct { + TPM_PT tag; + UINT8 sizeofSelect; + BYTE pcrSelect[PCR_SELECT_MAX]; +} TPMS_TAGGED_PCR_SELECT; + +// Table 91 - TPML_CC Structure +typedef struct { + UINT32 count; + TPM_CC commandCodes[MAX_CAP_CC]; +} TPML_CC; + +// Table 92 - TPML_CCA Structure +typedef struct { + UINT32 count; + TPMA_CC commandAttributes[MAX_CAP_CC]; +} TPML_CCA; + +// Table 93 - TPML_ALG Structure +typedef struct { + UINT32 count; + TPM_ALG_ID algorithms[MAX_ALG_LIST_SIZE]; +} TPML_ALG; + +// Table 94 - TPML_HANDLE Structure +typedef struct { + UINT32 count; + TPM_HANDLE handle[MAX_CAP_HANDLES]; +} TPML_HANDLE; + +// Table 95 - TPML_DIGEST Structure +typedef struct { + UINT32 count; + TPM2B_DIGEST digests[8]; +} TPML_DIGEST; + +// Table 96 -- TPML_DIGEST_VALUES Structure +typedef struct { + UINT32 count; + TPMT_HA digests[HASH_COUNT]; +} TPML_DIGEST_VALUES; + +// Table 97 - TPM2B_DIGEST_VALUES Structure +typedef struct { + UINT16 size; + BYTE buffer[sizeof(TPML_DIGEST_VALUES)]; +} TPM2B_DIGEST_VALUES; + +// Table 98 - TPML_PCR_SELECTION Structure +typedef struct { + UINT32 count; + TPMS_PCR_SELECTION pcrSelections[HASH_COUNT]; +} TPML_PCR_SELECTION; + +// Table 99 - TPML_ALG_PROPERTY Structure +typedef struct { + UINT32 count; + TPMS_ALG_PROPERTY algProperties[MAX_CAP_ALGS]; +} TPML_ALG_PROPERTY; + +// Table 100 - TPML_TAGGED_TPM_PROPERTY Structure +typedef struct { + UINT32 count; + TPMS_TAGGED_PROPERTY tpmProperty[MAX_TPM_PROPERTIES]; +} TPML_TAGGED_TPM_PROPERTY; + +// Table 101 - TPML_TAGGED_PCR_PROPERTY Structure +typedef struct { + UINT32 count; + TPMS_TAGGED_PCR_SELECT pcrProperty[MAX_PCR_PROPERTIES]; +} TPML_TAGGED_PCR_PROPERTY; + +// Table 102 - TPML_ECC_CURVE Structure +typedef struct { + UINT32 count; + TPM_ECC_CURVE eccCurves[MAX_ECC_CURVES]; +} TPML_ECC_CURVE; + +// Table 103 - TPMU_CAPABILITIES Union +typedef union { + TPML_ALG_PROPERTY algorithms; + TPML_HANDLE handles; + TPML_CCA command; + TPML_CC ppCommands; + 
TPML_CC auditCommands; + TPML_PCR_SELECTION assignedPCR; + TPML_TAGGED_TPM_PROPERTY tpmProperties; + TPML_TAGGED_PCR_PROPERTY pcrProperties; + TPML_ECC_CURVE eccCurves; +} TPMU_CAPABILITIES; + +// Table 104 - TPMS_CAPABILITY_DATA Structure +typedef struct { + TPM_CAP capability; + TPMU_CAPABILITIES data; +} TPMS_CAPABILITY_DATA; + +// Table 105 - TPMS_CLOCK_INFO Structure +typedef struct { + UINT64 clock; + UINT32 resetCount; + UINT32 restartCount; + TPMI_YES_NO safe; +} TPMS_CLOCK_INFO; + +// Table 106 - TPMS_TIME_INFO Structure +typedef struct { + UINT64 time; + TPMS_CLOCK_INFO clockInfo; +} TPMS_TIME_INFO; + +// Table 107 - TPMS_TIME_ATTEST_INFO Structure +typedef struct { + TPMS_TIME_INFO time; + UINT64 firmwareVersion; +} TPMS_TIME_ATTEST_INFO; + +// Table 108 - TPMS_CERTIFY_INFO Structure +typedef struct { + TPM2B_NAME name; + TPM2B_NAME qualifiedName; +} TPMS_CERTIFY_INFO; + +// Table 109 - TPMS_QUOTE_INFO Structure +typedef struct { + TPML_PCR_SELECTION pcrSelect; + TPM2B_DIGEST pcrDigest; +} TPMS_QUOTE_INFO; + +// Table 110 - TPMS_COMMAND_AUDIT_INFO Structure +typedef struct { + UINT64 auditCounter; + TPM_ALG_ID digestAlg; + TPM2B_DIGEST auditDigest; + TPM2B_DIGEST commandDigest; +} TPMS_COMMAND_AUDIT_INFO; + +// Table 111 - TPMS_SESSION_AUDIT_INFO Structure +typedef struct { + TPMI_YES_NO exclusiveSession; + TPM2B_DIGEST sessionDigest; +} TPMS_SESSION_AUDIT_INFO; + +// Table 112 - TPMS_CREATION_INFO Structure +typedef struct { + TPM2B_NAME objectName; + TPM2B_DIGEST creationHash; +} TPMS_CREATION_INFO; + +// Table 113 - TPMS_NV_CERTIFY_INFO Structure +typedef struct { + TPM2B_NAME indexName; + UINT16 offset; + TPM2B_MAX_NV_BUFFER nvContents; +} TPMS_NV_CERTIFY_INFO; + +// Table 114 - TPMI_ST_ATTEST Type +typedef TPM_ST TPMI_ST_ATTEST; + +// Table 115 - TPMU_ATTEST Union +typedef union { + TPMS_CERTIFY_INFO certify; + TPMS_CREATION_INFO creation; + TPMS_QUOTE_INFO quote; + TPMS_COMMAND_AUDIT_INFO commandAudit; + TPMS_SESSION_AUDIT_INFO sessionAudit; + 
TPMS_TIME_ATTEST_INFO time; + TPMS_NV_CERTIFY_INFO nv; +} TPMU_ATTEST; + +// Table 116 - TPMS_ATTEST Structure +typedef struct { + TPM_GENERATED magic; + TPMI_ST_ATTEST type; + TPM2B_NAME qualifiedSigner; + TPM2B_DATA extraData; + TPMS_CLOCK_INFO clockInfo; + UINT64 firmwareVersion; + TPMU_ATTEST attested; +} TPMS_ATTEST; + +// Table 117 - TPM2B_ATTEST Structure +typedef struct { + UINT16 size; + BYTE attestationData[sizeof(TPMS_ATTEST)]; +} TPM2B_ATTEST; + +// Table 118 - TPMS_AUTH_COMMAND Structure +typedef struct { + TPMI_SH_AUTH_SESSION sessionHandle; + TPM2B_NONCE nonce; + TPMA_SESSION sessionAttributes; + TPM2B_AUTH hmac; +} TPMS_AUTH_COMMAND; + +// Table 119 - TPMS_AUTH_RESPONSE Structure +typedef struct { + TPM2B_NONCE nonce; + TPMA_SESSION sessionAttributes; + TPM2B_AUTH hmac; +} TPMS_AUTH_RESPONSE; + +// 11 Algorithm Parameters and Structures + +// Table 120 - TPMI_AES_KEY_BITS Type +typedef TPM_KEY_BITS TPMI_AES_KEY_BITS; + +// Table 121 - TPMI_SM4_KEY_BITS Type +typedef TPM_KEY_BITS TPMI_SM4_KEY_BITS; + +// Table 122 - TPMU_SYM_KEY_BITS Union +typedef union { + TPMI_AES_KEY_BITS aes; + TPMI_SM4_KEY_BITS SM4; + TPM_KEY_BITS sym; + TPMI_ALG_HASH xor; +} TPMU_SYM_KEY_BITS; + +// Table 123 - TPMU_SYM_MODE Union +typedef union { + TPMI_ALG_SYM_MODE aes; + TPMI_ALG_SYM_MODE SM4; + TPMI_ALG_SYM_MODE sym; +} TPMU_SYM_MODE; + +// Table 125 - TPMT_SYM_DEF Structure +typedef struct { + TPMI_ALG_SYM algorithm; + TPMU_SYM_KEY_BITS keyBits; + TPMU_SYM_MODE mode; +} TPMT_SYM_DEF; + +// Table 126 - TPMT_SYM_DEF_OBJECT Structure +typedef struct { + TPMI_ALG_SYM_OBJECT algorithm; + TPMU_SYM_KEY_BITS keyBits; + TPMU_SYM_MODE mode; +} TPMT_SYM_DEF_OBJECT; + +// Table 127 - TPM2B_SYM_KEY Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_SYM_KEY_BYTES]; +} TPM2B_SYM_KEY; + +// Table 128 - TPMS_SYMCIPHER_PARMS Structure +typedef struct { + TPMT_SYM_DEF_OBJECT sym; +} TPMS_SYMCIPHER_PARMS; + +// Table 129 - TPM2B_SENSITIVE_DATA Structure +typedef struct { + UINT16 
size; + BYTE buffer[MAX_SYM_DATA]; +} TPM2B_SENSITIVE_DATA; + +// Table 130 - TPMS_SENSITIVE_CREATE Structure +typedef struct { + TPM2B_AUTH userAuth; + TPM2B_SENSITIVE_DATA data; +} TPMS_SENSITIVE_CREATE; + +// Table 131 - TPM2B_SENSITIVE_CREATE Structure +typedef struct { + UINT16 size; + TPMS_SENSITIVE_CREATE sensitive; +} TPM2B_SENSITIVE_CREATE; + +// Table 132 - TPMS_SCHEME_SIGHASH Structure +typedef struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_SIGHASH; + +// Table 133 - TPMI_ALG_KEYEDHASH_SCHEME Type +typedef TPM_ALG_ID TPMI_ALG_KEYEDHASH_SCHEME; + +// Table 134 - HMAC_SIG_SCHEME Types +typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_HMAC; + +// Table 135 - TPMS_SCHEME_XOR Structure +typedef struct { + TPMI_ALG_HASH hashAlg; + TPMI_ALG_KDF kdf; +} TPMS_SCHEME_XOR; + +// Table 136 - TPMU_SCHEME_KEYEDHASH Union +typedef union { + TPMS_SCHEME_HMAC hmac; + TPMS_SCHEME_XOR xor; +} TPMU_SCHEME_KEYEDHASH; + +// Table 137 - TPMT_KEYEDHASH_SCHEME Structure +typedef struct { + TPMI_ALG_KEYEDHASH_SCHEME scheme; + TPMU_SCHEME_KEYEDHASH details; +} TPMT_KEYEDHASH_SCHEME; + +// Table 138 - RSA_SIG_SCHEMES Types +typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_RSASSA; +typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_RSAPSS; + +// Table 139 - ECC_SIG_SCHEMES Types +typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_ECDSA; +typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_SM2; +typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_ECSCHNORR; + +// Table 140 - TPMS_SCHEME_ECDAA Structure +typedef struct { + TPMI_ALG_HASH hashAlg; + UINT16 count; +} TPMS_SCHEME_ECDAA; + +// Table 141 - TPMU_SIG_SCHEME Union +typedef union { + TPMS_SCHEME_RSASSA rsassa; + TPMS_SCHEME_RSAPSS rsapss; + TPMS_SCHEME_ECDSA ecdsa; + TPMS_SCHEME_ECDAA ecdaa; + TPMS_SCHEME_ECSCHNORR ecSchnorr; + TPMS_SCHEME_HMAC hmac; + TPMS_SCHEME_SIGHASH any; +} TPMU_SIG_SCHEME; + +// Table 142 - TPMT_SIG_SCHEME Structure +typedef struct { + TPMI_ALG_SIG_SCHEME scheme; + TPMU_SIG_SCHEME details; +} TPMT_SIG_SCHEME; + +// Table 143 - TPMS_SCHEME_OAEP Structure +typedef 
struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_OAEP; + +// Table 144 - TPMS_SCHEME_ECDH Structure +typedef struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_ECDH; + +// Table 145 - TPMS_SCHEME_MGF1 Structure +typedef struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_MGF1; + +// Table 146 - TPMS_SCHEME_KDF1_SP800_56a Structure +typedef struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_KDF1_SP800_56a; + +// Table 147 - TPMS_SCHEME_KDF2 Structure +typedef struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_KDF2; + +// Table 148 - TPMS_SCHEME_KDF1_SP800_108 Structure +typedef struct { + TPMI_ALG_HASH hashAlg; +} TPMS_SCHEME_KDF1_SP800_108; + +// Table 149 - TPMU_KDF_SCHEME Union +typedef union { + TPMS_SCHEME_MGF1 mgf1; + TPMS_SCHEME_KDF1_SP800_56a kdf1_SP800_56a; + TPMS_SCHEME_KDF2 kdf2; + TPMS_SCHEME_KDF1_SP800_108 kdf1_sp800_108; +} TPMU_KDF_SCHEME; + +// Table 150 - TPMT_KDF_SCHEME Structure +typedef struct { + TPMI_ALG_KDF scheme; + TPMU_KDF_SCHEME details; +} TPMT_KDF_SCHEME; + +// Table 151 - TPMI_ALG_ASYM_SCHEME Type +typedef TPM_ALG_ID TPMI_ALG_ASYM_SCHEME; + +// Table 152 - TPMU_ASYM_SCHEME Union +typedef union { + TPMS_SCHEME_RSASSA rsassa; + TPMS_SCHEME_RSAPSS rsapss; + TPMS_SCHEME_OAEP oaep; + TPMS_SCHEME_ECDSA ecdsa; + TPMS_SCHEME_ECDAA ecdaa; + TPMS_SCHEME_ECSCHNORR ecSchnorr; + TPMS_SCHEME_SIGHASH anySig; +} TPMU_ASYM_SCHEME; + +// Table 153 - TPMT_ASYM_SCHEME Structure +typedef struct { + TPMI_ALG_ASYM_SCHEME scheme; + TPMU_ASYM_SCHEME details; +} TPMT_ASYM_SCHEME; + +// Table 154 - TPMI_ALG_RSA_SCHEME Type +typedef TPM_ALG_ID TPMI_ALG_RSA_SCHEME; + +// Table 155 - TPMT_RSA_SCHEME Structure +typedef struct { + TPMI_ALG_RSA_SCHEME scheme; + TPMU_ASYM_SCHEME details; +} TPMT_RSA_SCHEME; + +// Table 156 - TPMI_ALG_RSA_DECRYPT Type +typedef TPM_ALG_ID TPMI_ALG_RSA_DECRYPT; + +// Table 157 - TPMT_RSA_DECRYPT Structure +typedef struct { + TPMI_ALG_RSA_DECRYPT scheme; + TPMU_ASYM_SCHEME details; +} TPMT_RSA_DECRYPT; + +// Table 158 - TPM2B_PUBLIC_KEY_RSA 
Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_RSA_KEY_BYTES]; +} TPM2B_PUBLIC_KEY_RSA; + +// Table 159 - TPMI_RSA_KEY_BITS Type +typedef TPM_KEY_BITS TPMI_RSA_KEY_BITS; + +// Table 160 - TPM2B_PRIVATE_KEY_RSA Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_RSA_KEY_BYTES/2]; +} TPM2B_PRIVATE_KEY_RSA; + +// Table 161 - TPM2B_ECC_PARAMETER Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_ECC_KEY_BYTES]; +} TPM2B_ECC_PARAMETER; + +// Table 162 - TPMS_ECC_POINT Structure +typedef struct { + TPM2B_ECC_PARAMETER x; + TPM2B_ECC_PARAMETER y; +} TPMS_ECC_POINT; + +// Table 163 -- TPM2B_ECC_POINT Structure +typedef struct { + UINT16 size; + TPMS_ECC_POINT point; +} TPM2B_ECC_POINT; + +// Table 164 - TPMI_ALG_ECC_SCHEME Type +typedef TPM_ALG_ID TPMI_ALG_ECC_SCHEME; + +// Table 165 - TPMI_ECC_CURVE Type +typedef TPM_ECC_CURVE TPMI_ECC_CURVE; + +// Table 166 - TPMT_ECC_SCHEME Structure +typedef struct { + TPMI_ALG_ECC_SCHEME scheme; + TPMU_SIG_SCHEME details; +} TPMT_ECC_SCHEME; + +// Table 167 - TPMS_ALGORITHM_DETAIL_ECC Structure +typedef struct { + TPM_ECC_CURVE curveID; + UINT16 keySize; + TPMT_KDF_SCHEME kdf; + TPMT_ECC_SCHEME sign; + TPM2B_ECC_PARAMETER p; + TPM2B_ECC_PARAMETER a; + TPM2B_ECC_PARAMETER b; + TPM2B_ECC_PARAMETER gX; + TPM2B_ECC_PARAMETER gY; + TPM2B_ECC_PARAMETER n; + TPM2B_ECC_PARAMETER h; +} TPMS_ALGORITHM_DETAIL_ECC; + +// Table 168 - TPMS_SIGNATURE_RSASSA Structure +typedef struct { + TPMI_ALG_HASH hash; + TPM2B_PUBLIC_KEY_RSA sig; +} TPMS_SIGNATURE_RSASSA; + +// Table 169 - TPMS_SIGNATURE_RSAPSS Structure +typedef struct { + TPMI_ALG_HASH hash; + TPM2B_PUBLIC_KEY_RSA sig; +} TPMS_SIGNATURE_RSAPSS; + +// Table 170 - TPMS_SIGNATURE_ECDSA Structure +typedef struct { + TPMI_ALG_HASH hash; + TPM2B_ECC_PARAMETER signatureR; + TPM2B_ECC_PARAMETER signatureS; +} TPMS_SIGNATURE_ECDSA; + +// Table 171 - TPMU_SIGNATURE Union +typedef union { + TPMS_SIGNATURE_RSASSA rsassa; + TPMS_SIGNATURE_RSAPSS rsapss; + 
TPMS_SIGNATURE_ECDSA ecdsa; + TPMS_SIGNATURE_ECDSA sm2; + TPMS_SIGNATURE_ECDSA ecdaa; + TPMS_SIGNATURE_ECDSA ecschnorr; + TPMT_HA hmac; + TPMS_SCHEME_SIGHASH any; +} TPMU_SIGNATURE; + +// Table 172 - TPMT_SIGNATURE Structure +typedef struct { + TPMI_ALG_SIG_SCHEME sigAlg; + TPMU_SIGNATURE signature; +} TPMT_SIGNATURE; + +// Table 173 - TPMU_ENCRYPTED_SECRET Union +typedef union { + BYTE ecc[sizeof(TPMS_ECC_POINT)]; + BYTE rsa[MAX_RSA_KEY_BYTES]; + BYTE symmetric[sizeof(TPM2B_DIGEST)]; + BYTE keyedHash[sizeof(TPM2B_DIGEST)]; +} TPMU_ENCRYPTED_SECRET; + +// Table 174 - TPM2B_ENCRYPTED_SECRET Structure +typedef struct { + UINT16 size; + BYTE secret[sizeof(TPMU_ENCRYPTED_SECRET)]; +} TPM2B_ENCRYPTED_SECRET; + +// 12 Key/Object Complex + +// Table 175 - TPMI_ALG_PUBLIC Type +typedef TPM_ALG_ID TPMI_ALG_PUBLIC; + +// Table 176 - TPMU_PUBLIC_ID Union +typedef union { + TPM2B_DIGEST keyedHash; + TPM2B_DIGEST sym; + TPM2B_PUBLIC_KEY_RSA rsa; + TPMS_ECC_POINT ecc; +} TPMU_PUBLIC_ID; + +// Table 177 - TPMS_KEYEDHASH_PARMS Structure +typedef struct { + TPMT_KEYEDHASH_SCHEME scheme; +} TPMS_KEYEDHASH_PARMS; + +// Table 178 - TPMS_ASYM_PARMS Structure +typedef struct { + TPMT_SYM_DEF_OBJECT symmetric; + TPMT_ASYM_SCHEME scheme; +} TPMS_ASYM_PARMS; + +// Table 179 - TPMS_RSA_PARMS Structure +typedef struct { + TPMT_SYM_DEF_OBJECT symmetric; + TPMT_RSA_SCHEME scheme; + TPMI_RSA_KEY_BITS keyBits; + UINT32 exponent; +} TPMS_RSA_PARMS; + +// Table 180 - TPMS_ECC_PARMS Structure +typedef struct { + TPMT_SYM_DEF_OBJECT symmetric; + TPMT_ECC_SCHEME scheme; + TPMI_ECC_CURVE curveID; + TPMT_KDF_SCHEME kdf; +} TPMS_ECC_PARMS; + +// Table 181 - TPMU_PUBLIC_PARMS Union +typedef union { + TPMS_KEYEDHASH_PARMS keyedHashDetail; + TPMT_SYM_DEF_OBJECT symDetail; + TPMS_RSA_PARMS rsaDetail; + TPMS_ECC_PARMS eccDetail; + TPMS_ASYM_PARMS asymDetail; +} TPMU_PUBLIC_PARMS; + +// Table 182 - TPMT_PUBLIC_PARMS Structure +typedef struct { + TPMI_ALG_PUBLIC type; + TPMU_PUBLIC_PARMS parameters; +} 
TPMT_PUBLIC_PARMS; + +// Table 183 - TPMT_PUBLIC Structure +typedef struct { + TPMI_ALG_PUBLIC type; + TPMI_ALG_HASH nameAlg; + TPMA_OBJECT objectAttributes; + TPM2B_DIGEST authPolicy; + TPMU_PUBLIC_PARMS parameters; + TPMU_PUBLIC_ID unique; +} TPMT_PUBLIC; + +// Table 184 - TPM2B_PUBLIC Structure +typedef struct { + UINT16 size; + TPMT_PUBLIC publicArea; +} TPM2B_PUBLIC; + +// Table 185 - TPM2B_PRIVATE_VENDOR_SPECIFIC Structure +typedef struct { + UINT16 size; + BYTE buffer[PRIVATE_VENDOR_SPECIFIC_BYTES]; +} TPM2B_PRIVATE_VENDOR_SPECIFIC; + +// Table 186 - TPMU_SENSITIVE_COMPOSITE Union +typedef union { + TPM2B_PRIVATE_KEY_RSA rsa; + TPM2B_ECC_PARAMETER ecc; + TPM2B_SENSITIVE_DATA bits; + TPM2B_SYM_KEY sym; + TPM2B_PRIVATE_VENDOR_SPECIFIC any; +} TPMU_SENSITIVE_COMPOSITE; + +// Table 187 - TPMT_SENSITIVE Structure +typedef struct { + TPMI_ALG_PUBLIC sensitiveType; + TPM2B_AUTH authValue; + TPM2B_DIGEST seedValue; + TPMU_SENSITIVE_COMPOSITE sensitive; +} TPMT_SENSITIVE; + +// Table 188 - TPM2B_SENSITIVE Structure +typedef struct { + UINT16 size; + TPMT_SENSITIVE sensitiveArea; +} TPM2B_SENSITIVE; + +// Table 189 - _PRIVATE Structure +typedef struct { + TPM2B_DIGEST integrityOuter; + TPM2B_DIGEST integrityInner; + TPMT_SENSITIVE sensitive; +} _PRIVATE; + +// Table 190 - TPM2B_PRIVATE Structure +typedef struct { + UINT16 size; + BYTE buffer[sizeof(_PRIVATE)]; +} TPM2B_PRIVATE; + +// Table 191 - _ID_OBJECT Structure +typedef struct { + TPM2B_DIGEST integrityHMAC; + TPM2B_DIGEST encIdentity; +} _ID_OBJECT; + +// Table 192 - TPM2B_ID_OBJECT Structure +typedef struct { + UINT16 size; + BYTE credential[sizeof(_ID_OBJECT)]; +} TPM2B_ID_OBJECT; + +// 13 NV Storage Structures + +// Table 193 - TPM_NV_INDEX Bits +// +// NOTE: Comment here to resolve conflict +// +//typedef struct { +// UINT32 index : 22; +// UINT32 space : 2; +// UINT32 RH_NV : 8; +//} TPM_NV_INDEX; + +// Table 195 - TPMA_NV Bits +typedef struct { + UINT32 TPMA_NV_PPWRITE : 1; + UINT32 TPMA_NV_OWNERWRITE : 1; 
+ UINT32 TPMA_NV_AUTHWRITE : 1; + UINT32 TPMA_NV_POLICYWRITE : 1; + UINT32 TPMA_NV_COUNTER : 1; + UINT32 TPMA_NV_BITS : 1; + UINT32 TPMA_NV_EXTEND : 1; + UINT32 reserved7_9 : 3; + UINT32 TPMA_NV_POLICY_DELETE : 1; + UINT32 TPMA_NV_WRITELOCKED : 1; + UINT32 TPMA_NV_WRITEALL : 1; + UINT32 TPMA_NV_WRITEDEFINE : 1; + UINT32 TPMA_NV_WRITE_STCLEAR : 1; + UINT32 TPMA_NV_GLOBALLOCK : 1; + UINT32 TPMA_NV_PPREAD : 1; + UINT32 TPMA_NV_OWNERREAD : 1; + UINT32 TPMA_NV_AUTHREAD : 1; + UINT32 TPMA_NV_POLICYREAD : 1; + UINT32 reserved20_24 : 5; + UINT32 TPMA_NV_NO_DA : 1; + UINT32 TPMA_NV_ORDERLY : 1; + UINT32 TPMA_NV_CLEAR_STCLEAR : 1; + UINT32 TPMA_NV_READLOCKED : 1; + UINT32 TPMA_NV_WRITTEN : 1; + UINT32 TPMA_NV_PLATFORMCREATE : 1; + UINT32 TPMA_NV_READ_STCLEAR : 1; +} TPMA_NV; + +// Table 196 - TPMS_NV_PUBLIC Structure +typedef struct { + TPMI_RH_NV_INDEX nvIndex; + TPMI_ALG_HASH nameAlg; + TPMA_NV attributes; + TPM2B_DIGEST authPolicy; + UINT16 dataSize; +} TPMS_NV_PUBLIC; + +// Table 197 - TPM2B_NV_PUBLIC Structure +typedef struct { + UINT16 size; + TPMS_NV_PUBLIC nvPublic; +} TPM2B_NV_PUBLIC; + +// 14 Context Data + +// Table 198 - TPM2B_CONTEXT_SENSITIVE Structure +typedef struct { + UINT16 size; + BYTE buffer[MAX_CONTEXT_SIZE]; +} TPM2B_CONTEXT_SENSITIVE; + +// Table 199 - TPMS_CONTEXT_DATA Structure +typedef struct { + TPM2B_DIGEST integrity; + TPM2B_CONTEXT_SENSITIVE encrypted; +} TPMS_CONTEXT_DATA; + +// Table 200 - TPM2B_CONTEXT_DATA Structure +typedef struct { + UINT16 size; + BYTE buffer[sizeof(TPMS_CONTEXT_DATA)]; +} TPM2B_CONTEXT_DATA; + +// Table 201 - TPMS_CONTEXT Structure +typedef struct { + UINT64 sequence; + TPMI_DH_CONTEXT savedHandle; + TPMI_RH_HIERARCHY hierarchy; + TPM2B_CONTEXT_DATA contextBlob; +} TPMS_CONTEXT; + +// 15 Creation Data + +// Table 203 - TPMS_CREATION_DATA Structure +typedef struct { + TPML_PCR_SELECTION pcrSelect; + TPM2B_DIGEST pcrDigest; + TPMA_LOCALITY locality; + TPM_ALG_ID parentNameAlg; + TPM2B_NAME parentName; + TPM2B_NAME 
parentQualifiedName; + TPM2B_DATA outsideInfo; +} TPMS_CREATION_DATA; + +// Table 204 - TPM2B_CREATION_DATA Structure +typedef struct { + UINT16 size; + TPMS_CREATION_DATA creationData; +} TPM2B_CREATION_DATA; + + +// +// Command Header +// +typedef struct { + TPM_ST tag; + UINT32 paramSize; + TPM_CC commandCode; +} TPM2_COMMAND_HEADER; + +typedef struct { + TPM_ST tag; + UINT32 paramSize; + TPM_RC responseCode; +} TPM2_RESPONSE_HEADER; + +#pragma pack () + +// +// TCG Algorithm Registry +// +#define HASH_ALG_SHA1 0x00000001 +#define HASH_ALG_SHA256 0x00000002 +#define HASH_ALG_SHA384 0x00000004 +#define HASH_ALG_SHA512 0x00000008 +#define HASH_ALG_SM3_256 0x00000010 + +#endif diff --git a/src/include/ipxe/efi/IndustryStandard/UefiTcgPlatform.h b/src/include/ipxe/efi/IndustryStandard/UefiTcgPlatform.h new file mode 100644 index 00000000..3394c7cb --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/UefiTcgPlatform.h @@ -0,0 +1,335 @@ +/** @file + TCG EFI Platform Definition in TCG_EFI_Platform_1_20_Final + + Copyright (c) 2006 - 2017, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __UEFI_TCG_PLATFORM_H__ +#define __UEFI_TCG_PLATFORM_H__ + +FILE_LICENCE ( BSD3 ); + +#include +#include +#include + +// +// Standard event types +// +#define EV_POST_CODE ((TCG_EVENTTYPE) 0x00000001) +#define EV_NO_ACTION ((TCG_EVENTTYPE) 0x00000003) +#define EV_SEPARATOR ((TCG_EVENTTYPE) 0x00000004) +#define EV_S_CRTM_CONTENTS ((TCG_EVENTTYPE) 0x00000007) +#define EV_S_CRTM_VERSION ((TCG_EVENTTYPE) 0x00000008) +#define EV_CPU_MICROCODE ((TCG_EVENTTYPE) 0x00000009) +#define EV_TABLE_OF_DEVICES ((TCG_EVENTTYPE) 0x0000000B) + +// +// EFI specific event types +// +#define EV_EFI_EVENT_BASE ((TCG_EVENTTYPE) 0x80000000) +#define EV_EFI_VARIABLE_DRIVER_CONFIG (EV_EFI_EVENT_BASE + 1) +#define EV_EFI_VARIABLE_BOOT (EV_EFI_EVENT_BASE + 2) +#define EV_EFI_BOOT_SERVICES_APPLICATION (EV_EFI_EVENT_BASE + 3) +#define EV_EFI_BOOT_SERVICES_DRIVER (EV_EFI_EVENT_BASE + 4) +#define EV_EFI_RUNTIME_SERVICES_DRIVER (EV_EFI_EVENT_BASE + 5) +#define EV_EFI_GPT_EVENT (EV_EFI_EVENT_BASE + 6) +#define EV_EFI_ACTION (EV_EFI_EVENT_BASE + 7) +#define EV_EFI_PLATFORM_FIRMWARE_BLOB (EV_EFI_EVENT_BASE + 8) +#define EV_EFI_HANDOFF_TABLES (EV_EFI_EVENT_BASE + 9) +#define EV_EFI_VARIABLE_AUTHORITY (EV_EFI_EVENT_BASE + 0xE0) + +#define EFI_CALLING_EFI_APPLICATION \ + "Calling EFI Application from Boot Option" +#define EFI_RETURNING_FROM_EFI_APPLICATOIN \ + "Returning from EFI Application from Boot Option" +#define EFI_EXIT_BOOT_SERVICES_INVOCATION \ + "Exit Boot Services Invocation" +#define EFI_EXIT_BOOT_SERVICES_FAILED \ + "Exit Boot Services Returned with Failure" 
+#define EFI_EXIT_BOOT_SERVICES_SUCCEEDED \ + "Exit Boot Services Returned with Success" + + +#define EV_POSTCODE_INFO_POST_CODE "POST CODE" +#define POST_CODE_STR_LEN (sizeof(EV_POSTCODE_INFO_POST_CODE) - 1) + +#define EV_POSTCODE_INFO_SMM_CODE "SMM CODE" +#define SMM_CODE_STR_LEN (sizeof(EV_POSTCODE_INFO_SMM_CODE) - 1) + +#define EV_POSTCODE_INFO_ACPI_DATA "ACPI DATA" +#define ACPI_DATA_LEN (sizeof(EV_POSTCODE_INFO_ACPI_DATA) - 1) + +#define EV_POSTCODE_INFO_BIS_CODE "BIS CODE" +#define BIS_CODE_LEN (sizeof(EV_POSTCODE_INFO_BIS_CODE) - 1) + +#define EV_POSTCODE_INFO_UEFI_PI "UEFI PI" +#define UEFI_PI_LEN (sizeof(EV_POSTCODE_INFO_UEFI_PI) - 1) + +#define EV_POSTCODE_INFO_OPROM "Embedded Option ROM" +#define OPROM_LEN (sizeof(EV_POSTCODE_INFO_OPROM) - 1) + +#define FIRMWARE_DEBUGGER_EVENT_STRING "UEFI Debug Mode" +#define FIRMWARE_DEBUGGER_EVENT_STRING_LEN (sizeof(FIRMWARE_DEBUGGER_EVENT_STRING) - 1) + +// +// Set structure alignment to 1-byte +// +#pragma pack (1) + +typedef UINT32 TCG_EVENTTYPE; +typedef TPM_PCRINDEX TCG_PCRINDEX; +typedef TPM_DIGEST TCG_DIGEST; +/// +/// Event Log Entry Structure Definition +/// +typedef struct tdTCG_PCR_EVENT { + TCG_PCRINDEX PCRIndex; ///< PCRIndex event extended to + TCG_EVENTTYPE EventType; ///< TCG EFI event type + TCG_DIGEST Digest; ///< Value extended into PCRIndex + UINT32 EventSize; ///< Size of the event data + UINT8 Event[1]; ///< The event data +} TCG_PCR_EVENT; + +#define TSS_EVENT_DATA_MAX_SIZE 256 + +/// +/// TCG_PCR_EVENT_HDR +/// +typedef struct tdTCG_PCR_EVENT_HDR { + TCG_PCRINDEX PCRIndex; + TCG_EVENTTYPE EventType; + TCG_DIGEST Digest; + UINT32 EventSize; +} TCG_PCR_EVENT_HDR; + +/// +/// EFI_PLATFORM_FIRMWARE_BLOB +/// +/// BlobLength should be of type UINTN but we use UINT64 here +/// because PEI is 32-bit while DXE is 64-bit on x64 platforms +/// +typedef struct tdEFI_PLATFORM_FIRMWARE_BLOB { + EFI_PHYSICAL_ADDRESS BlobBase; + UINT64 BlobLength; +} EFI_PLATFORM_FIRMWARE_BLOB; + +/// +/// 
EFI_IMAGE_LOAD_EVENT +/// +/// This structure is used in EV_EFI_BOOT_SERVICES_APPLICATION, +/// EV_EFI_BOOT_SERVICES_DRIVER and EV_EFI_RUNTIME_SERVICES_DRIVER +/// +typedef struct tdEFI_IMAGE_LOAD_EVENT { + EFI_PHYSICAL_ADDRESS ImageLocationInMemory; + UINTN ImageLengthInMemory; + UINTN ImageLinkTimeAddress; + UINTN LengthOfDevicePath; + EFI_DEVICE_PATH_PROTOCOL DevicePath[1]; +} EFI_IMAGE_LOAD_EVENT; + +/// +/// EFI_HANDOFF_TABLE_POINTERS +/// +/// This structure is used in EV_EFI_HANDOFF_TABLES event to facilitate +/// the measurement of given configuration tables. +/// +typedef struct tdEFI_HANDOFF_TABLE_POINTERS { + UINTN NumberOfTables; + EFI_CONFIGURATION_TABLE TableEntry[1]; +} EFI_HANDOFF_TABLE_POINTERS; + +/// +/// EFI_VARIABLE_DATA +/// +/// This structure serves as the header for measuring variables. The name of the +/// variable (in Unicode format) should immediately follow, then the variable +/// data. +/// This is defined in TCG EFI Platform Spec for TPM1.1 or 1.2 V1.22 +/// +typedef struct tdEFI_VARIABLE_DATA { + EFI_GUID VariableName; + UINTN UnicodeNameLength; + UINTN VariableDataLength; + CHAR16 UnicodeName[1]; + INT8 VariableData[1]; ///< Driver or platform-specific data +} EFI_VARIABLE_DATA; + +/// +/// UEFI_VARIABLE_DATA +/// +/// This structure serves as the header for measuring variables. The name of the +/// variable (in Unicode format) should immediately follow, then the variable +/// data. 
+/// This is defined in TCG PC Client Firmware Profile Spec 00.21 +/// +typedef struct tdUEFI_VARIABLE_DATA { + EFI_GUID VariableName; + UINT64 UnicodeNameLength; + UINT64 VariableDataLength; + CHAR16 UnicodeName[1]; + INT8 VariableData[1]; ///< Driver or platform-specific data +} UEFI_VARIABLE_DATA; + +// +// For TrEE1.0 compatibility +// +typedef struct { + EFI_GUID VariableName; + UINT64 UnicodeNameLength; // The TCG Definition used UINTN + UINT64 VariableDataLength; // The TCG Definition used UINTN + CHAR16 UnicodeName[1]; + INT8 VariableData[1]; +} EFI_VARIABLE_DATA_TREE; + +typedef struct tdEFI_GPT_DATA { + EFI_PARTITION_TABLE_HEADER EfiPartitionHeader; + UINTN NumberOfPartitions; + EFI_PARTITION_ENTRY Partitions[1]; +} EFI_GPT_DATA; + +// +// Crypto Agile Log Entry Format +// +typedef struct tdTCG_PCR_EVENT2 { + TCG_PCRINDEX PCRIndex; + TCG_EVENTTYPE EventType; + TPML_DIGEST_VALUES Digest; + UINT32 EventSize; + UINT8 Event[1]; +} TCG_PCR_EVENT2; + +// +// TCG PCR Event2 Header +// Follow TCG EFI Protocol Spec 5.2 Crypto Agile Log Entry Format +// +typedef struct tdTCG_PCR_EVENT2_HDR{ + TCG_PCRINDEX PCRIndex; + TCG_EVENTTYPE EventType; + TPML_DIGEST_VALUES Digests; + UINT32 EventSize; +} TCG_PCR_EVENT2_HDR; + +// +// Log Header Entry Data +// +typedef struct { + // + // TCG defined hashing algorithm ID. + // + UINT16 algorithmId; + // + // The size of the digest for the respective hashing algorithm. 
+ // + UINT16 digestSize; +} TCG_EfiSpecIdEventAlgorithmSize; + +#define TCG_EfiSpecIDEventStruct_SIGNATURE_02 "Spec ID Event02" +#define TCG_EfiSpecIDEventStruct_SIGNATURE_03 "Spec ID Event03" + +#define TCG_EfiSpecIDEventStruct_SPEC_VERSION_MAJOR_TPM12 1 +#define TCG_EfiSpecIDEventStruct_SPEC_VERSION_MINOR_TPM12 2 +#define TCG_EfiSpecIDEventStruct_SPEC_ERRATA_TPM12 2 + +#define TCG_EfiSpecIDEventStruct_SPEC_VERSION_MAJOR_TPM2 2 +#define TCG_EfiSpecIDEventStruct_SPEC_VERSION_MINOR_TPM2 0 +#define TCG_EfiSpecIDEventStruct_SPEC_ERRATA_TPM2 0 + +typedef struct { + UINT8 signature[16]; + // + // The value for the Platform Class. + // The enumeration is defined in the TCG ACPI Specification Client Common Header. + // + UINT32 platformClass; + // + // The TCG EFI Platform Specification minor version number this BIOS supports. + // Any BIOS supporting version (1.22) MUST set this value to 02h. + // Any BIOS supporting version (2.0) SHALL set this value to 0x00. + // + UINT8 specVersionMinor; + // + // The TCG EFI Platform Specification major version number this BIOS supports. + // Any BIOS supporting version (1.22) MUST set this value to 01h. + // Any BIOS supporting version (2.0) SHALL set this value to 0x02. + // + UINT8 specVersionMajor; + // + // The TCG EFI Platform Specification errata for this specification this BIOS supports. + // Any BIOS supporting version and errata (1.22) MUST set this value to 02h. + // Any BIOS supporting version and errata (2.0) SHALL set this value to 0x00. + // + UINT8 specErrata; + // + // Specifies the size of the UINTN fields used in various data structures used in this specification. + // 0x01 indicates UINT32 and 0x02 indicates UINT64. + // + UINT8 uintnSize; + // + // This field is added in "Spec ID Event03". + // The number of hashing algorithms used in this event log (except the first event). + // All events in this event log use all hashing algorithms defined here. 
+ // +//UINT32 numberOfAlgorithms; + // + // This field is added in "Spec ID Event03". + // An array of size numberOfAlgorithms of value pairs. + // +//TCG_EfiSpecIdEventAlgorithmSize digestSize[numberOfAlgorithms]; + // + // Size in bytes of the VendorInfo field. + // Maximum value SHALL be FFh bytes. + // +//UINT8 vendorInfoSize; + // + // Provided for use by the BIOS implementer. + // The value might be used, for example, to provide more detailed information about the specific BIOS such as BIOS revision numbers, etc. + // The values within this field are not standardized and are implementer-specific. + // Platform-specific or -unique information SHALL NOT be provided in this field. + // +//UINT8 vendorInfo[vendorInfoSize]; +} TCG_EfiSpecIDEventStruct; + + + +#define TCG_EfiStartupLocalityEvent_SIGNATURE "StartupLocality" + + +// +// PC Client PTP spec Table 8 Relationship between Locality and Locality Attribute +// +#define LOCALITY_0_INDICATOR 0x01 +#define LOCALITY_1_INDICATOR 0x02 +#define LOCALITY_2_INDICATOR 0x03 +#define LOCALITY_3_INDICATOR 0x04 +#define LOCALITY_4_INDICATOR 0x05 + + +// +// Startup Locality Event +// +typedef struct tdTCG_EfiStartupLocalityEvent{ + UINT8 Signature[16]; + // + // The Locality Indicator which sent the TPM2_Startup command + // + UINT8 StartupLocality; +} TCG_EfiStartupLocalityEvent; + + +// +// Restore original structure alignment +// +#pragma pack () + +#endif + + diff --git a/src/include/ipxe/efi/IndustryStandard/Usb.h b/src/include/ipxe/efi/IndustryStandard/Usb.h new file mode 100644 index 00000000..7eb1a8d9 --- /dev/null +++ b/src/include/ipxe/efi/IndustryStandard/Usb.h @@ -0,0 +1,388 @@ +/** @file + Support for USB 2.0 standard. + + Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __USB_H__ +#define __USB_H__ + +FILE_LICENCE ( BSD3 ); + +// +// Subset of Class and Subclass definitions from USB Specs +// + +// +// Usb mass storage class code +// +#define USB_MASS_STORE_CLASS 0x08 + +// +// Usb mass storage subclass code, specify the command set used. +// +#define USB_MASS_STORE_RBC 0x01 ///< Reduced Block Commands +#define USB_MASS_STORE_8020I 0x02 ///< SFF-8020i, typically a CD/DVD device +#define USB_MASS_STORE_QIC 0x03 ///< Typically a tape device +#define USB_MASS_STORE_UFI 0x04 ///< Typically a floppy disk driver device +#define USB_MASS_STORE_8070I 0x05 ///< SFF-8070i, typically a floppy disk driver device. 
+#define USB_MASS_STORE_SCSI 0x06 ///< SCSI transparent command set + +// +// Usb mass storage protocol code, specify the transport protocol +// +#define USB_MASS_STORE_CBI0 0x00 ///< CBI protocol with command completion interrupt +#define USB_MASS_STORE_CBI1 0x01 ///< CBI protocol without command completion interrupt +#define USB_MASS_STORE_BOT 0x50 ///< Bulk-Only Transport + +// +// Standard device request and request type +// USB 2.0 spec, Section 9.4 +// +#define USB_DEV_GET_STATUS 0x00 +#define USB_DEV_GET_STATUS_REQ_TYPE_D 0x80 // Receiver : Device +#define USB_DEV_GET_STATUS_REQ_TYPE_I 0x81 // Receiver : Interface +#define USB_DEV_GET_STATUS_REQ_TYPE_E 0x82 // Receiver : Endpoint + +#define USB_DEV_CLEAR_FEATURE 0x01 +#define USB_DEV_CLEAR_FEATURE_REQ_TYPE_D 0x00 // Receiver : Device +#define USB_DEV_CLEAR_FEATURE_REQ_TYPE_I 0x01 // Receiver : Interface +#define USB_DEV_CLEAR_FEATURE_REQ_TYPE_E 0x02 // Receiver : Endpoint + +#define USB_DEV_SET_FEATURE 0x03 +#define USB_DEV_SET_FEATURE_REQ_TYPE_D 0x00 // Receiver : Device +#define USB_DEV_SET_FEATURE_REQ_TYPE_I 0x01 // Receiver : Interface +#define USB_DEV_SET_FEATURE_REQ_TYPE_E 0x02 // Receiver : Endpoint + +#define USB_DEV_SET_ADDRESS 0x05 +#define USB_DEV_SET_ADDRESS_REQ_TYPE 0x00 + +#define USB_DEV_GET_DESCRIPTOR 0x06 +#define USB_DEV_GET_DESCRIPTOR_REQ_TYPE 0x80 + +#define USB_DEV_SET_DESCRIPTOR 0x07 +#define USB_DEV_SET_DESCRIPTOR_REQ_TYPE 0x00 + +#define USB_DEV_GET_CONFIGURATION 0x08 +#define USB_DEV_GET_CONFIGURATION_REQ_TYPE 0x80 + +#define USB_DEV_SET_CONFIGURATION 0x09 +#define USB_DEV_SET_CONFIGURATION_REQ_TYPE 0x00 + +#define USB_DEV_GET_INTERFACE 0x0A +#define USB_DEV_GET_INTERFACE_REQ_TYPE 0x81 + +#define USB_DEV_SET_INTERFACE 0x0B +#define USB_DEV_SET_INTERFACE_REQ_TYPE 0x01 + +#define USB_DEV_SYNCH_FRAME 0x0C +#define USB_DEV_SYNCH_FRAME_REQ_TYPE 0x82 + + +// +// USB standard descriptors and reqeust +// +#pragma pack(1) + +/// +/// Format of Setup Data for USB Device Requests +/// USB 2.0 
spec, Section 9.3 +/// +typedef struct { + UINT8 RequestType; + UINT8 Request; + UINT16 Value; + UINT16 Index; + UINT16 Length; +} USB_DEVICE_REQUEST; + +/// +/// Standard Device Descriptor +/// USB 2.0 spec, Section 9.6.1 +/// +typedef struct { + UINT8 Length; + UINT8 DescriptorType; + UINT16 BcdUSB; + UINT8 DeviceClass; + UINT8 DeviceSubClass; + UINT8 DeviceProtocol; + UINT8 MaxPacketSize0; + UINT16 IdVendor; + UINT16 IdProduct; + UINT16 BcdDevice; + UINT8 StrManufacturer; + UINT8 StrProduct; + UINT8 StrSerialNumber; + UINT8 NumConfigurations; +} USB_DEVICE_DESCRIPTOR; + +/// +/// Standard Configuration Descriptor +/// USB 2.0 spec, Section 9.6.3 +/// +typedef struct { + UINT8 Length; + UINT8 DescriptorType; + UINT16 TotalLength; + UINT8 NumInterfaces; + UINT8 ConfigurationValue; + UINT8 Configuration; + UINT8 Attributes; + UINT8 MaxPower; +} USB_CONFIG_DESCRIPTOR; + +/// +/// Standard Interface Descriptor +/// USB 2.0 spec, Section 9.6.5 +/// +typedef struct { + UINT8 Length; + UINT8 DescriptorType; + UINT8 InterfaceNumber; + UINT8 AlternateSetting; + UINT8 NumEndpoints; + UINT8 InterfaceClass; + UINT8 InterfaceSubClass; + UINT8 InterfaceProtocol; + UINT8 Interface; +} USB_INTERFACE_DESCRIPTOR; + +/// +/// Standard Endpoint Descriptor +/// USB 2.0 spec, Section 9.6.6 +/// +typedef struct { + UINT8 Length; + UINT8 DescriptorType; + UINT8 EndpointAddress; + UINT8 Attributes; + UINT16 MaxPacketSize; + UINT8 Interval; +} USB_ENDPOINT_DESCRIPTOR; + +/// +/// UNICODE String Descriptor +/// USB 2.0 spec, Section 9.6.7 +/// +typedef struct { + UINT8 Length; + UINT8 DescriptorType; + CHAR16 String[1]; +} EFI_USB_STRING_DESCRIPTOR; + +#pragma pack() + + +typedef enum { + // + // USB request type + // + USB_REQ_TYPE_STANDARD = (0x00 << 5), + USB_REQ_TYPE_CLASS = (0x01 << 5), + USB_REQ_TYPE_VENDOR = (0x02 << 5), + + // + // Standard control transfer request type, or the value + // to fill in EFI_USB_DEVICE_REQUEST.Request + // + USB_REQ_GET_STATUS = 0x00, + 
USB_REQ_CLEAR_FEATURE = 0x01, + USB_REQ_SET_FEATURE = 0x03, + USB_REQ_SET_ADDRESS = 0x05, + USB_REQ_GET_DESCRIPTOR = 0x06, + USB_REQ_SET_DESCRIPTOR = 0x07, + USB_REQ_GET_CONFIG = 0x08, + USB_REQ_SET_CONFIG = 0x09, + USB_REQ_GET_INTERFACE = 0x0A, + USB_REQ_SET_INTERFACE = 0x0B, + USB_REQ_SYNCH_FRAME = 0x0C, + + // + // Usb control transfer target + // + USB_TARGET_DEVICE = 0, + USB_TARGET_INTERFACE = 0x01, + USB_TARGET_ENDPOINT = 0x02, + USB_TARGET_OTHER = 0x03, + + // + // USB Descriptor types + // + USB_DESC_TYPE_DEVICE = 0x01, + USB_DESC_TYPE_CONFIG = 0x02, + USB_DESC_TYPE_STRING = 0x03, + USB_DESC_TYPE_INTERFACE = 0x04, + USB_DESC_TYPE_ENDPOINT = 0x05, + USB_DESC_TYPE_HID = 0x21, + USB_DESC_TYPE_REPORT = 0x22, + + // + // Features to be cleared by CLEAR_FEATURE requests + // + USB_FEATURE_ENDPOINT_HALT = 0, + + // + // USB endpoint types: 00: control, 01: isochronous, 10: bulk, 11: interrupt + // + USB_ENDPOINT_CONTROL = 0x00, + USB_ENDPOINT_ISO = 0x01, + USB_ENDPOINT_BULK = 0x02, + USB_ENDPOINT_INTERRUPT = 0x03, + + USB_ENDPOINT_TYPE_MASK = 0x03, + USB_ENDPOINT_DIR_IN = 0x80, + + // + //Use 200 ms to increase the error handling response time + // + EFI_USB_INTERRUPT_DELAY = 2000000 +} USB_TYPES_DEFINITION; + + +// +// HID constants definition, see Device Class Definition +// for Human Interface Devices (HID) rev1.11 +// + +// +// HID standard GET_DESCRIPTOR request. +// +#define USB_HID_GET_DESCRIPTOR_REQ_TYPE 0x81 + +// +// HID specific requests. 
+// +#define USB_HID_CLASS_GET_REQ_TYPE 0xa1 +#define USB_HID_CLASS_SET_REQ_TYPE 0x21 + +// +// HID report item format +// +#define HID_ITEM_FORMAT_SHORT 0 +#define HID_ITEM_FORMAT_LONG 1 + +// +// Special tag indicating long items +// +#define HID_ITEM_TAG_LONG 15 + +// +// HID report descriptor item type (prefix bit 2,3) +// +#define HID_ITEM_TYPE_MAIN 0 +#define HID_ITEM_TYPE_GLOBAL 1 +#define HID_ITEM_TYPE_LOCAL 2 +#define HID_ITEM_TYPE_RESERVED 3 + +// +// HID report descriptor main item tags +// +#define HID_MAIN_ITEM_TAG_INPUT 8 +#define HID_MAIN_ITEM_TAG_OUTPUT 9 +#define HID_MAIN_ITEM_TAG_FEATURE 11 +#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10 +#define HID_MAIN_ITEM_TAG_END_COLLECTION 12 + +// +// HID report descriptor main item contents +// +#define HID_MAIN_ITEM_CONSTANT 0x001 +#define HID_MAIN_ITEM_VARIABLE 0x002 +#define HID_MAIN_ITEM_RELATIVE 0x004 +#define HID_MAIN_ITEM_WRAP 0x008 +#define HID_MAIN_ITEM_NONLINEAR 0x010 +#define HID_MAIN_ITEM_NO_PREFERRED 0x020 +#define HID_MAIN_ITEM_NULL_STATE 0x040 +#define HID_MAIN_ITEM_VOLATILE 0x080 +#define HID_MAIN_ITEM_BUFFERED_BYTE 0x100 + +// +// HID report descriptor collection item types +// +#define HID_COLLECTION_PHYSICAL 0 +#define HID_COLLECTION_APPLICATION 1 +#define HID_COLLECTION_LOGICAL 2 + +// +// HID report descriptor global item tags +// +#define HID_GLOBAL_ITEM_TAG_USAGE_PAGE 0 +#define HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM 1 +#define HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM 2 +#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM 3 +#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM 4 +#define HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT 5 +#define HID_GLOBAL_ITEM_TAG_UNIT 6 +#define HID_GLOBAL_ITEM_TAG_REPORT_SIZE 7 +#define HID_GLOBAL_ITEM_TAG_REPORT_ID 8 +#define HID_GLOBAL_ITEM_TAG_REPORT_COUNT 9 +#define HID_GLOBAL_ITEM_TAG_PUSH 10 +#define HID_GLOBAL_ITEM_TAG_POP 11 + +// +// HID report descriptor local item tags +// +#define HID_LOCAL_ITEM_TAG_USAGE 0 +#define HID_LOCAL_ITEM_TAG_USAGE_MINIMUM 1 +#define 
HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM 2 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_INDEX 3 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MINIMUM 4 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MAXIMUM 5 +#define HID_LOCAL_ITEM_TAG_STRING_INDEX 7 +#define HID_LOCAL_ITEM_TAG_STRING_MINIMUM 8 +#define HID_LOCAL_ITEM_TAG_STRING_MAXIMUM 9 +#define HID_LOCAL_ITEM_TAG_DELIMITER 10 + +// +// HID report types +// +#define HID_INPUT_REPORT 1 +#define HID_OUTPUT_REPORT 2 +#define HID_FEATURE_REPORT 3 + +// +// HID class protocol request +// +#define EFI_USB_GET_REPORT_REQUEST 0x01 +#define EFI_USB_GET_IDLE_REQUEST 0x02 +#define EFI_USB_GET_PROTOCOL_REQUEST 0x03 +#define EFI_USB_SET_REPORT_REQUEST 0x09 +#define EFI_USB_SET_IDLE_REQUEST 0x0a +#define EFI_USB_SET_PROTOCOL_REQUEST 0x0b + +#pragma pack(1) +/// +/// Descriptor header for Report/Physical Descriptors +/// HID 1.1, section 6.2.1 +/// +typedef struct hid_class_descriptor { + UINT8 DescriptorType; + UINT16 DescriptorLength; +} EFI_USB_HID_CLASS_DESCRIPTOR; + +/// +/// The HID descriptor identifies the length and type +/// of subordinate descriptors for a device. +/// HID 1.1, section 6.2.1 +/// +typedef struct hid_descriptor { + UINT8 Length; + UINT8 DescriptorType; + UINT16 BcdHID; + UINT8 CountryCode; + UINT8 NumDescriptors; + EFI_USB_HID_CLASS_DESCRIPTOR HidClassDesc[1]; +} EFI_USB_HID_DESCRIPTOR; + +#pragma pack() + +#endif diff --git a/src/include/ipxe/efi/Protocol/AbsolutePointer.h b/src/include/ipxe/efi/Protocol/AbsolutePointer.h new file mode 100644 index 00000000..b20ca057 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/AbsolutePointer.h @@ -0,0 +1,207 @@ +/** @file + The file provides services that allow information about an + absolute pointer device to be retrieved. + + Copyright (c) 2006 - 2012, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __ABSOLUTE_POINTER_H__ +#define __ABSOLUTE_POINTER_H__ + +FILE_LICENCE ( BSD3 ); + + +#define EFI_ABSOLUTE_POINTER_PROTOCOL_GUID \ + { 0x8D59D32B, 0xC655, 0x4AE9, { 0x9B, 0x15, 0xF2, 0x59, 0x04, 0x99, 0x2A, 0x43 } } + + +typedef struct _EFI_ABSOLUTE_POINTER_PROTOCOL EFI_ABSOLUTE_POINTER_PROTOCOL; + + +//******************************************************* +// EFI_ABSOLUTE_POINTER_MODE +//******************************************************* + + +/** + The following data values in the EFI_ABSOLUTE_POINTER_MODE + interface are read-only and are changed by using the appropriate + interface functions. +**/ +typedef struct { + UINT64 AbsoluteMinX; ///< The Absolute Minimum of the device on the x-axis + UINT64 AbsoluteMinY; ///< The Absolute Minimum of the device on the y axis. + UINT64 AbsoluteMinZ; ///< The Absolute Minimum of the device on the z-axis + UINT64 AbsoluteMaxX; ///< The Absolute Maximum of the device on the x-axis. If 0, and the + ///< AbsoluteMinX is 0, then the pointer device does not support a xaxis + UINT64 AbsoluteMaxY; ///< The Absolute Maximum of the device on the y -axis. If 0, and the + ///< AbsoluteMinX is 0, then the pointer device does not support a yaxis. + UINT64 AbsoluteMaxZ; ///< The Absolute Maximum of the device on the z-axis. If 0 , and the + ///< AbsoluteMinX is 0, then the pointer device does not support a zaxis + UINT32 Attributes; ///< The following bits are set as needed (or'd together) to indicate the + ///< capabilities of the device supported. 
The remaining bits are undefined + ///< and should be 0 +} EFI_ABSOLUTE_POINTER_MODE; + +/// +/// If set, indicates this device supports an alternate button input. +/// +#define EFI_ABSP_SupportsAltActive 0x00000001 + +/// +/// If set, indicates this device returns pressure data in parameter CurrentZ. +/// +#define EFI_ABSP_SupportsPressureAsZ 0x00000002 + + +/** + This function resets the pointer device hardware. As part of + initialization process, the firmware/device will make a quick + but reasonable attempt to verify that the device is + functioning. If the ExtendedVerification flag is TRUE the + firmware may take an extended amount of time to verify the + device is operating on reset. Otherwise the reset operation is + to occur as quickly as possible. The hardware verification + process is not defined by this specification and is left up to + the platform firmware or driver to implement. + + @param This A pointer to the EFI_ABSOLUTE_POINTER_PROTOCOL + instance. + + @param ExtendedVerification Indicates that the driver may + perform a more exhaustive + verification operation of the + device during reset. + + @retval EFI_SUCCESS The device was reset. + + @retval EFI_DEVICE_ERROR The device is not functioning + correctly and could not be reset. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ABSOLUTE_POINTER_RESET)( + IN EFI_ABSOLUTE_POINTER_PROTOCOL *This, + IN BOOLEAN ExtendedVerification +); + +/// +/// This bit is set if the touch sensor is active. +/// +#define EFI_ABSP_TouchActive 0x00000001 + +/// +/// This bit is set if the alt sensor, such as pen-side button, is active +/// +#define EFI_ABS_AltActive 0x00000002 + + +/** + Definition of EFI_ABSOLUTE_POINTER_STATE. +**/ +typedef struct { + /// + /// The unsigned position of the activation on the x axis. If the AboluteMinX + /// and the AboluteMaxX fields of the EFI_ABSOLUTE_POINTER_MODE structure are + /// both 0, then this pointer device does not support an x-axis, and this field + /// must be ignored. 
+ /// + UINT64 CurrentX; + + /// + /// The unsigned position of the activation on the y axis. If the AboluteMinY + /// and the AboluteMaxY fields of the EFI_ABSOLUTE_POINTER_MODE structure are + /// both 0, then this pointer device does not support an y-axis, and this field + /// must be ignored. + /// + UINT64 CurrentY; + + /// + /// The unsigned position of the activation on the z axis, or the pressure + /// measurement. If the AboluteMinZ and the AboluteMaxZ fields of the + /// EFI_ABSOLUTE_POINTER_MODE structure are both 0, then this pointer device + /// does not support an z-axis, and this field must be ignored. + /// + UINT64 CurrentZ; + + /// + /// Bits are set to 1 in this structure item to indicate that device buttons are + /// active. + /// + UINT32 ActiveButtons; +} EFI_ABSOLUTE_POINTER_STATE; + +/** + The GetState() function retrieves the current state of a pointer + device. This includes information on the active state associated + with the pointer device and the current position of the axes + associated with the pointer device. If the state of the pointer + device has not changed since the last call to GetState(), then + EFI_NOT_READY is returned. If the state of the pointer device + has changed since the last call to GetState(), then the state + information is placed in State, and EFI_SUCCESS is returned. If + a device error occurs while attempting to retrieve the state + information, then EFI_DEVICE_ERROR is returned. + + + @param This A pointer to the EFI_ABSOLUTE_POINTER_PROTOCOL + instance. + + @param State A pointer to the state information on the + pointer device. + + @retval EFI_SUCCESS The state of the pointer device was + returned in State. + + @retval EFI_NOT_READY The state of the pointer device has not + changed since the last call to GetState(). + + @retval EFI_DEVICE_ERROR A device error occurred while + attempting to retrieve the pointer + device's current state. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ABSOLUTE_POINTER_GET_STATE)( + IN EFI_ABSOLUTE_POINTER_PROTOCOL *This, + IN OUT EFI_ABSOLUTE_POINTER_STATE *State +); + + +/// +/// The EFI_ABSOLUTE_POINTER_PROTOCOL provides a set of services +/// for a pointer device that can be used as an input device from an +/// application written to this specification. The services include +/// the ability to: reset the pointer device, retrieve the state of +/// the pointer device, and retrieve the capabilities of the pointer +/// device. The service also provides certain data items describing the device. +/// +struct _EFI_ABSOLUTE_POINTER_PROTOCOL { + EFI_ABSOLUTE_POINTER_RESET Reset; + EFI_ABSOLUTE_POINTER_GET_STATE GetState; + /// + /// Event to use with WaitForEvent() to wait for input from the pointer device. + /// + EFI_EVENT WaitForInput; + /// + /// Pointer to EFI_ABSOLUTE_POINTER_MODE data. + /// + EFI_ABSOLUTE_POINTER_MODE *Mode; +}; + + +extern EFI_GUID gEfiAbsolutePointerProtocolGuid; + + +#endif + diff --git a/src/include/ipxe/efi/Protocol/AcpiTable.h b/src/include/ipxe/efi/Protocol/AcpiTable.h new file mode 100644 index 00000000..798b13dc --- /dev/null +++ b/src/include/ipxe/efi/Protocol/AcpiTable.h @@ -0,0 +1,129 @@ +/** @file + The file provides the protocol to install or remove an ACPI + table from a platform. + + Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __ACPI_TABLE_H___ +#define __ACPI_TABLE_H___ + +FILE_LICENCE ( BSD3 ); + +#define EFI_ACPI_TABLE_PROTOCOL_GUID \ + { 0xffe06bdd, 0x6107, 0x46a6, { 0x7b, 0xb2, 0x5a, 0x9c, 0x7e, 0xc5, 0x27, 0x5c }} + + +typedef struct _EFI_ACPI_TABLE_PROTOCOL EFI_ACPI_TABLE_PROTOCOL; + +/** + + The InstallAcpiTable() function allows a caller to install an + ACPI table. When successful, the table will be linked by the + RSDT/XSDT. AcpiTableBuffer specifies the table to be installed. + InstallAcpiTable() will make a copy of the table and insert the + copy into the RSDT/XSDT. InstallAcpiTable() must insert the new + table at the end of the RSDT/XSDT. To prevent namespace + collision, ACPI tables may be created using UEFI ACPI table + format. If this protocol is used to install a table with a + signature already present in the system, the new table will not + replace the existing table. It is a platform implementation + decision to add a new table with a signature matching an + existing table or disallow duplicate table signatures and + return EFI_ACCESS_DENIED. On successful output, TableKey is + initialized with a unique key. Its value may be used in a + subsequent call to UninstallAcpiTable to remove an ACPI table. + If an EFI application is running at the time of this call, the + relevant EFI_CONFIGURATION_TABLE pointer to the RSDT is no + longer considered valid. + + + @param This A pointer to a EFI_ACPI_TABLE_PROTOCOL. + + @param AcpiTableBuffer A pointer to a buffer containing the + ACPI table to be installed. 
+ + @param AcpiTableBufferSize Specifies the size, in bytes, of + the AcpiTableBuffer buffer. + + + @param TableKey Returns a key to refer to the ACPI table. + + @retval EFI_SUCCESS The table was successfully inserted + + @retval EFI_INVALID_PARAMETER Either AcpiTableBuffer is NULL, + TableKey is NULL, or + AcpiTableBufferSize and the size + field embedded in the ACPI table + pointed to by AcpiTableBuffer + are not in sync. + + @retval EFI_OUT_OF_RESOURCES Insufficient resources exist to + complete the request. + @retval EFI_ACCESS_DENIED The table signature matches a table already + present in the system and platform policy + does not allow duplicate tables of this type. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ACPI_TABLE_INSTALL_ACPI_TABLE)( + IN EFI_ACPI_TABLE_PROTOCOL *This, + IN VOID *AcpiTableBuffer, + IN UINTN AcpiTableBufferSize, + OUT UINTN *TableKey +); + + +/** + + The UninstallAcpiTable() function allows a caller to remove an + ACPI table. The routine will remove its reference from the + RSDT/XSDT. A table is referenced by the TableKey parameter + returned from a prior call to InstallAcpiTable(). If an EFI + application is running at the time of this call, the relevant + EFI_CONFIGURATION_TABLE pointer to the RSDT is no longer + considered valid. + + @param This A pointer to a EFI_ACPI_TABLE_PROTOCOL. + + @param TableKey Specifies the table to uninstall. The key was + returned from InstallAcpiTable(). + + @retval EFI_SUCCESS The table was successfully inserted + + @retval EFI_NOT_FOUND TableKey does not refer to a valid key + for a table entry. + + @retval EFI_OUT_OF_RESOURCES Insufficient resources exist to + complete the request. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ACPI_TABLE_UNINSTALL_ACPI_TABLE)( + IN EFI_ACPI_TABLE_PROTOCOL *This, + IN UINTN TableKey +); + +/// +/// The EFI_ACPI_TABLE_PROTOCOL provides the ability for a component +/// to install and uninstall ACPI tables from a platform. 
+/// +struct _EFI_ACPI_TABLE_PROTOCOL { + EFI_ACPI_TABLE_INSTALL_ACPI_TABLE InstallAcpiTable; + EFI_ACPI_TABLE_UNINSTALL_ACPI_TABLE UninstallAcpiTable; +}; + +extern EFI_GUID gEfiAcpiTableProtocolGuid; + +#endif + diff --git a/src/include/ipxe/efi/Protocol/AppleNetBoot.h b/src/include/ipxe/efi/Protocol/AppleNetBoot.h new file mode 100644 index 00000000..5946524f --- /dev/null +++ b/src/include/ipxe/efi/Protocol/AppleNetBoot.h @@ -0,0 +1,46 @@ +#ifndef _IPXE_EFI_APPLE_NET_BOOT_PROTOCOL_H +#define _IPXE_EFI_APPLE_NET_BOOT_PROTOCOL_H + +/** @file + * + * Apple Net Boot Protocol + * + */ + +FILE_LICENCE ( BSD3 ); + +#define EFI_APPLE_NET_BOOT_PROTOCOL_GUID \ + { 0x78ee99fb, 0x6a5e, 0x4186, \ + { 0x97, 0xde, 0xcd, 0x0a, 0xba, 0x34, 0x5a, 0x74 } } + +typedef struct _EFI_APPLE_NET_BOOT_PROTOCOL EFI_APPLE_NET_BOOT_PROTOCOL; + +/** + Get a DHCP packet obtained by the firmware during NetBoot. + + @param This A pointer to the APPLE_NET_BOOT_PROTOCOL instance. + @param BufferSize A pointer to the size of the buffer in bytes. + @param DataBuffer The memory buffer to copy the packet to. If it is + NULL, then the size of the packet is returned + in BufferSize. + @retval EFI_SUCCESS The packet was copied. + @retval EFI_BUFFER_TOO_SMALL The BufferSize is too small to read the + current packet. BufferSize has been + updated with the size needed to + complete the request. 
+**/ +typedef +EFI_STATUS +(EFIAPI *GET_DHCP_RESPONSE) ( + IN EFI_APPLE_NET_BOOT_PROTOCOL *This, + IN OUT UINTN *BufferSize, + OUT VOID *DataBuffer + ); + +struct _EFI_APPLE_NET_BOOT_PROTOCOL +{ + GET_DHCP_RESPONSE GetDhcpResponse; + GET_DHCP_RESPONSE GetBsdpResponse; +}; + +#endif /*_IPXE_EFI_APPLE_NET_BOOT_PROTOCOL_H */ diff --git a/src/include/ipxe/efi/Protocol/Arp.h b/src/include/ipxe/efi/Protocol/Arp.h new file mode 100644 index 00000000..80921f9a --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Arp.h @@ -0,0 +1,387 @@ +/** @file + EFI ARP Protocol Definition + + The EFI ARP Service Binding Protocol is used to locate EFI + ARP Protocol drivers to create and destroy child of the + driver to communicate with other host using ARP protocol. + The EFI ARP Protocol provides services to map IP network + address to hardware address used by a data link protocol. + +Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol was introduced in UEFI Specification 2.0. + +**/ + +#ifndef __EFI_ARP_PROTOCOL_H__ +#define __EFI_ARP_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_ARP_SERVICE_BINDING_PROTOCOL_GUID \ + { \ + 0xf44c00ee, 0x1f2c, 0x4a00, {0xaa, 0x9, 0x1c, 0x9f, 0x3e, 0x8, 0x0, 0xa3 } \ + } + +#define EFI_ARP_PROTOCOL_GUID \ + { \ + 0xf4b427bb, 0xba21, 0x4f16, {0xbc, 0x4e, 0x43, 0xe4, 0x16, 0xab, 0x61, 0x9c } \ + } + +typedef struct _EFI_ARP_PROTOCOL EFI_ARP_PROTOCOL; + +typedef struct { + /// + /// Length in bytes of this entry. + /// + UINT32 Size; + + /// + /// Set to TRUE if this entry is a "deny" entry. + /// Set to FALSE if this entry is a "normal" entry. + /// + BOOLEAN DenyFlag; + + /// + /// Set to TRUE if this entry will not time out. + /// Set to FALSE if this entry will time out. + /// + BOOLEAN StaticFlag; + + /// + /// 16-bit ARP hardware identifier number. + /// + UINT16 HwAddressType; + + /// + /// 16-bit protocol type number. + /// + UINT16 SwAddressType; + + /// + /// The length of the hardware address. + /// + UINT8 HwAddressLength; + + /// + /// The length of the protocol address. + /// + UINT8 SwAddressLength; +} EFI_ARP_FIND_DATA; + +typedef struct { + /// + /// 16-bit protocol type number in host byte order. + /// + UINT16 SwAddressType; + + /// + /// The length in bytes of the station's protocol address to register. + /// + UINT8 SwAddressLength; + + /// + /// The pointer to the first byte of the protocol address to register. 
For + /// example, if SwAddressType is 0x0800 (IP), then + /// StationAddress points to the first byte of this station's IP + /// address stored in network byte order. + /// + VOID *StationAddress; + + /// + /// The timeout value in 100-ns units that is associated with each + /// new dynamic ARP cache entry. If it is set to zero, the value is + /// implementation-specific. + /// + UINT32 EntryTimeOut; + + /// + /// The number of retries before a MAC address is resolved. If it is + /// set to zero, the value is implementation-specific. + /// + UINT32 RetryCount; + + /// + /// The timeout value in 100-ns units that is used to wait for the ARP + /// reply packet or the timeout value between two retries. Set to zero + /// to use implementation-specific value. + /// + UINT32 RetryTimeOut; +} EFI_ARP_CONFIG_DATA; + + +/** + This function is used to assign a station address to the ARP cache for this instance + of the ARP driver. + + Each ARP instance has one station address. The EFI_ARP_PROTOCOL driver will + respond to ARP requests that match this registered station address. A call to + this function with the ConfigData field set to NULL will reset this ARP instance. + + Once a protocol type and station address have been assigned to this ARP instance, + all the following ARP functions will use this information. Attempting to change + the protocol type or station address to a configured ARP instance will result in errors. + + @param This The pointer to the EFI_ARP_PROTOCOL instance. + @param ConfigData The pointer to the EFI_ARP_CONFIG_DATA structure. + + @retval EFI_SUCCESS The new station address was successfully + registered. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + * This is NULL. + * SwAddressLength is zero when ConfigData is not NULL. + * StationAddress is NULL when ConfigData is not NULL. 
+ @retval EFI_ACCESS_DENIED The SwAddressType, SwAddressLength, or + StationAddress is different from the one that is + already registered. + @retval EFI_OUT_OF_RESOURCES Storage for the new StationAddress could not be + allocated. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_CONFIGURE)( + IN EFI_ARP_PROTOCOL *This, + IN EFI_ARP_CONFIG_DATA *ConfigData OPTIONAL + ); + +/** + This function is used to insert entries into the ARP cache. + + ARP cache entries are typically inserted and updated by network protocol drivers + as network traffic is processed. Most ARP cache entries will time out and be + deleted if the network traffic stops. ARP cache entries that were inserted + by the Add() function may be static (will not time out) or dynamic (will time out). + Default ARP cache timeout values are not covered in most network protocol + specifications (although RFC 1122 comes pretty close) and will only be + discussed in general terms in this specification. The timeout values that are + used in the EFI Sample Implementation should be used only as a guideline. + Final product implementations of the EFI network stack should be tuned for + their expected network environments. + + @param This Pointer to the EFI_ARP_PROTOCOL instance. + @param DenyFlag Set to TRUE if this entry is a deny entry. Set to + FALSE if this entry is a normal entry. + @param TargetSwAddress Pointer to a protocol address to add (or deny). + May be set to NULL if DenyFlag is TRUE. + @param TargetHwAddress Pointer to a hardware address to add (or deny). + May be set to NULL if DenyFlag is TRUE. + @param TimeoutValue Time in 100-ns units that this entry will remain + in the ARP cache. A value of zero means that the + entry is permanent. A nonzero value will override + the one given by Configure() if the entry to be + added is a dynamic entry. + @param Overwrite If TRUE, the matching cache entry will be + overwritten with the supplied parameters. 
If + FALSE, EFI_ACCESS_DENIED is returned if the + corresponding cache entry already exists. + + @retval EFI_SUCCESS The entry has been added or updated. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + * This is NULL. + * DenyFlag is FALSE and TargetHwAddress is NULL. + * DenyFlag is FALSE and TargetSwAddress is NULL. + * TargetHwAddress is NULL and TargetSwAddress is NULL. + * Neither TargetSwAddress nor TargetHwAddress are NULL when DenyFlag is + TRUE. + @retval EFI_OUT_OF_RESOURCES The new ARP cache entry could not be allocated. + @retval EFI_ACCESS_DENIED The ARP cache entry already exists and Overwrite + is not true. + @retval EFI_NOT_STARTED The ARP driver instance has not been configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_ADD)( + IN EFI_ARP_PROTOCOL *This, + IN BOOLEAN DenyFlag, + IN VOID *TargetSwAddress OPTIONAL, + IN VOID *TargetHwAddress OPTIONAL, + IN UINT32 TimeoutValue, + IN BOOLEAN Overwrite + ); + +/** + This function searches the ARP cache for matching entries and allocates a buffer into + which those entries are copied. + + The first part of the allocated buffer is EFI_ARP_FIND_DATA, following which + are protocol address pairs and hardware address pairs. + When finding a specific protocol address (BySwAddress is TRUE and AddressBuffer + is not NULL), the ARP cache timeout for the found entry is reset if Refresh is + set to TRUE. If the found ARP cache entry is a permanent entry, it is not + affected by Refresh. + + @param This The pointer to the EFI_ARP_PROTOCOL instance. + @param BySwAddress Set to TRUE to look for matching software protocol + addresses. Set to FALSE to look for matching + hardware protocol addresses. + @param AddressBuffer The pointer to the address buffer. Set to NULL + to match all addresses. + @param EntryLength The size of an entry in the entries buffer. + @param EntryCount The number of ARP cache entries that are found by + the specified criteria. 
+ @param Entries The pointer to the buffer that will receive the ARP + cache entries. + @param Refresh Set to TRUE to refresh the timeout value of the + matching ARP cache entry. + + @retval EFI_SUCCESS The requested ARP cache entries were copied into + the buffer. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. Both EntryCount and EntryLength are + NULL, when Refresh is FALSE. + @retval EFI_NOT_FOUND No matching entries were found. + @retval EFI_NOT_STARTED The ARP driver instance has not been configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_FIND)( + IN EFI_ARP_PROTOCOL *This, + IN BOOLEAN BySwAddress, + IN VOID *AddressBuffer OPTIONAL, + OUT UINT32 *EntryLength OPTIONAL, + OUT UINT32 *EntryCount OPTIONAL, + OUT EFI_ARP_FIND_DATA **Entries OPTIONAL, + IN BOOLEAN Refresh + ); + + +/** + This function removes specified ARP cache entries. + + @param This The pointer to the EFI_ARP_PROTOCOL instance. + @param BySwAddress Set to TRUE to delete matching protocol addresses. + Set to FALSE to delete matching hardware + addresses. + @param AddressBuffer The pointer to the address buffer that is used as a + key to look for the cache entry. Set to NULL to + delete all entries. + + @retval EFI_SUCCESS The entry was removed from the ARP cache. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_FOUND The specified deletion key was not found. + @retval EFI_NOT_STARTED The ARP driver instance has not been configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_DELETE)( + IN EFI_ARP_PROTOCOL *This, + IN BOOLEAN BySwAddress, + IN VOID *AddressBuffer OPTIONAL + ); + +/** + This function delete all dynamic entries from the ARP cache that match the specified + software protocol type. + + @param This The pointer to the EFI_ARP_PROTOCOL instance. + + @retval EFI_SUCCESS The cache has been flushed. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_FOUND There are no matching dynamic cache entries. 
+ @retval EFI_NOT_STARTED The ARP driver instance has not been configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_FLUSH)( + IN EFI_ARP_PROTOCOL *This + ); + +/** + This function tries to resolve the TargetSwAddress and optionally returns a + TargetHwAddress if it already exists in the ARP cache. + + @param This The pointer to the EFI_ARP_PROTOCOL instance. + @param TargetSwAddress The pointer to the protocol address to resolve. + @param ResolvedEvent The pointer to the event that will be signaled when + the address is resolved or some error occurs. + @param TargetHwAddress The pointer to the buffer for the resolved hardware + address in network byte order. + + @retval EFI_SUCCESS The data is copied from the ARP cache into the + TargetHwAddress buffer. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. TargetHwAddress is NULL. + @retval EFI_ACCESS_DENIED The requested address is not present in the normal + ARP cache but is present in the deny address list. + Outgoing traffic to that address is forbidden. + @retval EFI_NOT_STARTED The ARP driver instance has not been configured. + @retval EFI_NOT_READY The request has been started and is not finished. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_REQUEST)( + IN EFI_ARP_PROTOCOL *This, + IN VOID *TargetSwAddress OPTIONAL, + IN EFI_EVENT ResolvedEvent OPTIONAL, + OUT VOID *TargetHwAddress + ); + +/** + This function aborts the previous ARP request (identified by This, TargetSwAddress + and ResolvedEvent) that is issued by EFI_ARP_PROTOCOL.Request(). + + If the request is in the internal ARP request queue, the request is aborted + immediately and its ResolvedEvent is signaled. Only an asynchronous address + request needs to be canceled. If TargeSwAddress and ResolveEvent are both + NULL, all the pending asynchronous requests that have been issued by This + instance will be cancelled and their corresponding events will be signaled. 
+ + @param This The pointer to the EFI_ARP_PROTOCOL instance. + @param TargetSwAddress The pointer to the protocol address in previous + request session. + @param ResolvedEvent Pointer to the event that is used as the + notification event in previous request session. + + @retval EFI_SUCCESS The pending request session(s) is/are aborted and + corresponding event(s) is/are signaled. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. TargetSwAddress is not NULL and + ResolvedEvent is NULL. TargetSwAddress is NULL and + ResolvedEvent is not NULL. + @retval EFI_NOT_STARTED The ARP driver instance has not been configured. + @retval EFI_NOT_FOUND The request is not issued by + EFI_ARP_PROTOCOL.Request(). + + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ARP_CANCEL)( + IN EFI_ARP_PROTOCOL *This, + IN VOID *TargetSwAddress OPTIONAL, + IN EFI_EVENT ResolvedEvent OPTIONAL + ); + +/// +/// ARP is used to resolve local network protocol addresses into +/// network hardware addresses. +/// +struct _EFI_ARP_PROTOCOL { + EFI_ARP_CONFIGURE Configure; + EFI_ARP_ADD Add; + EFI_ARP_FIND Find; + EFI_ARP_DELETE Delete; + EFI_ARP_FLUSH Flush; + EFI_ARP_REQUEST Request; + EFI_ARP_CANCEL Cancel; +}; + + +extern EFI_GUID gEfiArpServiceBindingProtocolGuid; +extern EFI_GUID gEfiArpProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/BlockIo2.h b/src/include/ipxe/efi/Protocol/BlockIo2.h new file mode 100644 index 00000000..0b9cf8eb --- /dev/null +++ b/src/include/ipxe/efi/Protocol/BlockIo2.h @@ -0,0 +1,208 @@ +/** @file + Block IO2 protocol as defined in the UEFI 2.3.1 specification. + + The Block IO2 protocol defines an extension to the Block IO protocol which + enables the ability to read and write data at a block level in a non-blocking + manner. + + Copyright (c) 2011, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __BLOCK_IO2_H__ +#define __BLOCK_IO2_H__ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_BLOCK_IO2_PROTOCOL_GUID \ + { \ + 0xa77b2472, 0xe282, 0x4e9f, {0xa2, 0x45, 0xc2, 0xc0, 0xe2, 0x7b, 0xbc, 0xc1} \ + } + +typedef struct _EFI_BLOCK_IO2_PROTOCOL EFI_BLOCK_IO2_PROTOCOL; + +/** + The struct of Block IO2 Token. +**/ +typedef struct { + + /// + /// If Event is NULL, then blocking I/O is performed.If Event is not NULL and + /// non-blocking I/O is supported, then non-blocking I/O is performed, and + /// Event will be signaled when the read request is completed. + /// + EFI_EVENT Event; + + /// + /// Defines whether or not the signaled event encountered an error. + /// + EFI_STATUS TransactionStatus; +} EFI_BLOCK_IO2_TOKEN; + + +/** + Reset the block device hardware. + + @param[in] This Indicates a pointer to the calling context. + @param[in] ExtendedVerification Indicates that the driver may perform a more + exhausive verification operation of the device + during reset. + + @retval EFI_SUCCESS The device was reset. + @retval EFI_DEVICE_ERROR The device is not functioning properly and could + not be reset. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_BLOCK_RESET_EX) ( + IN EFI_BLOCK_IO2_PROTOCOL *This, + IN BOOLEAN ExtendedVerification + ); + +/** + Read BufferSize bytes from Lba into Buffer. + + This function reads the requested number of blocks from the device. All the + blocks are read, or an error is returned. 
+ If EFI_DEVICE_ERROR, EFI_NO_MEDIA,_or EFI_MEDIA_CHANGED is returned and + non-blocking I/O is being used, the Event associated with this request will + not be signaled. + + @param[in] This Indicates a pointer to the calling context. + @param[in] MediaId Id of the media, changes every time the media is + replaced. + @param[in] Lba The starting Logical Block Address to read from. + @param[in, out] Token A pointer to the token associated with the transaction. + @param[in] BufferSize Size of Buffer, must be a multiple of device block size. + @param[out] Buffer A pointer to the destination buffer for the data. The + caller is responsible for either having implicit or + explicit ownership of the buffer. + + @retval EFI_SUCCESS The read request was queued if Token->Event is + not NULL.The data was read correctly from the + device if the Token->Event is NULL. + @retval EFI_DEVICE_ERROR The device reported an error while performing + the read. + @retval EFI_NO_MEDIA There is no media in the device. + @retval EFI_MEDIA_CHANGED The MediaId is not for the current media. + @retval EFI_BAD_BUFFER_SIZE The BufferSize parameter is not a multiple of the + intrinsic block size of the device. + @retval EFI_INVALID_PARAMETER The read request contains LBAs that are not valid, + or the buffer is not on proper alignment. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack + of resources. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_BLOCK_READ_EX) ( + IN EFI_BLOCK_IO2_PROTOCOL *This, + IN UINT32 MediaId, + IN EFI_LBA LBA, + IN OUT EFI_BLOCK_IO2_TOKEN *Token, + IN UINTN BufferSize, + OUT VOID *Buffer + ); + +/** + Write BufferSize bytes from Lba into Buffer. + + This function writes the requested number of blocks to the device. All blocks + are written, or an error is returned.If EFI_DEVICE_ERROR, EFI_NO_MEDIA, + EFI_WRITE_PROTECTED or EFI_MEDIA_CHANGED is returned and non-blocking I/O is + being used, the Event associated with this request will not be signaled. 
+ + @param[in] This Indicates a pointer to the calling context. + @param[in] MediaId The media ID that the write request is for. + @param[in] Lba The starting logical block address to be written. The + caller is responsible for writing to only legitimate + locations. + @param[in, out] Token A pointer to the token associated with the transaction. + @param[in] BufferSize Size of Buffer, must be a multiple of device block size. + @param[in] Buffer A pointer to the source buffer for the data. + + @retval EFI_SUCCESS The write request was queued if Event is not NULL. + The data was written correctly to the device if + the Event is NULL. + @retval EFI_WRITE_PROTECTED The device can not be written to. + @retval EFI_NO_MEDIA There is no media in the device. + @retval EFI_MEDIA_CHNAGED The MediaId does not matched the current device. + @retval EFI_DEVICE_ERROR The device reported an error while performing the write. + @retval EFI_BAD_BUFFER_SIZE The Buffer was not a multiple of the block size of the device. + @retval EFI_INVALID_PARAMETER The write request contains LBAs that are not valid, + or the buffer is not on proper alignment. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack + of resources. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_BLOCK_WRITE_EX) ( + IN EFI_BLOCK_IO2_PROTOCOL *This, + IN UINT32 MediaId, + IN EFI_LBA LBA, + IN OUT EFI_BLOCK_IO2_TOKEN *Token, + IN UINTN BufferSize, + IN VOID *Buffer + ); + +/** + Flush the Block Device. + + If EFI_DEVICE_ERROR, EFI_NO_MEDIA,_EFI_WRITE_PROTECTED or EFI_MEDIA_CHANGED + is returned and non-blocking I/O is being used, the Event associated with + this request will not be signaled. + + @param[in] This Indicates a pointer to the calling context. + @param[in,out] Token A pointer to the token associated with the transaction + + @retval EFI_SUCCESS The flush request was queued if Event is not NULL. + All outstanding data was written correctly to the + device if the Event is NULL. 
+ @retval EFI_DEVICE_ERROR The device reported an error while writting back + the data. + @retval EFI_WRITE_PROTECTED The device cannot be written to. + @retval EFI_NO_MEDIA There is no media in the device. + @retval EFI_MEDIA_CHANGED The MediaId is not for the current media. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack + of resources. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_BLOCK_FLUSH_EX) ( + IN EFI_BLOCK_IO2_PROTOCOL *This, + IN OUT EFI_BLOCK_IO2_TOKEN *Token + ); + +/// +/// The Block I/O2 protocol defines an extension to the Block I/O protocol which +/// enables the ability to read and write data at a block level in a non-blocking +// manner. +/// +struct _EFI_BLOCK_IO2_PROTOCOL { + /// + /// A pointer to the EFI_BLOCK_IO_MEDIA data for this device. + /// Type EFI_BLOCK_IO_MEDIA is defined in BlockIo.h. + /// + EFI_BLOCK_IO_MEDIA *Media; + + EFI_BLOCK_RESET_EX Reset; + EFI_BLOCK_READ_EX ReadBlocksEx; + EFI_BLOCK_WRITE_EX WriteBlocksEx; + EFI_BLOCK_FLUSH_EX FlushBlocksEx; +}; + +extern EFI_GUID gEfiBlockIo2ProtocolGuid; + +#endif + diff --git a/src/include/ipxe/efi/Protocol/BusSpecificDriverOverride.h b/src/include/ipxe/efi/Protocol/BusSpecificDriverOverride.h new file mode 100644 index 00000000..be92323f --- /dev/null +++ b/src/include/ipxe/efi/Protocol/BusSpecificDriverOverride.h @@ -0,0 +1,74 @@ +/** @file + Bus Specific Driver Override protocol as defined in the UEFI 2.0 specification. + + Bus drivers that have a bus specific algorithm for matching drivers to controllers are + required to produce this protocol for each controller. For example, a PCI Bus Driver will produce an + instance of this protocol for every PCI controller that has a PCI option ROM that contains one or + more UEFI drivers. The protocol instance is attached to the handle of the PCI controller. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_H_ +#define _EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_H_ + +FILE_LICENCE ( BSD3 ); + +/// +/// Global ID for the Bus Specific Driver Override Protocol +/// +#define EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_GUID \ + { \ + 0x3bc1b285, 0x8a15, 0x4a82, {0xaa, 0xbf, 0x4d, 0x7d, 0x13, 0xfb, 0x32, 0x65 } \ + } + +typedef struct _EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL; + +// +// Prototypes for the Bus Specific Driver Override Protocol +// + +/** + Uses a bus specific algorithm to retrieve a driver image handle for a controller. + + @param This A pointer to the EFI_BUS_SPECIFIC_DRIVER_ + OVERRIDE_PROTOCOL instance. + @param DriverImageHandle On input, a pointer to the previous driver image handle returned + by GetDriver(). On output, a pointer to the next driver + image handle. Passing in a NULL, will return the first driver + image handle. + + @retval EFI_SUCCESS A bus specific override driver is returned in DriverImageHandle. + @retval EFI_NOT_FOUND The end of the list of override drivers was reached. + A bus specific override driver is not returned in DriverImageHandle. + @retval EFI_INVALID_PARAMETER DriverImageHandle is not a handle that was returned on a + previous call to GetDriver(). + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_GET_DRIVER)( + IN EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL *This, + IN OUT EFI_HANDLE *DriverImageHandle + ); + +/// +/// This protocol matches one or more drivers to a controller. 
This protocol is produced by a bus driver, +/// and it is installed on the child handles of buses that require a bus specific algorithm for matching +/// drivers to controllers. +/// +struct _EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL { + EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_GET_DRIVER GetDriver; +}; + +extern EFI_GUID gEfiBusSpecificDriverOverrideProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/ComponentName.h b/src/include/ipxe/efi/Protocol/ComponentName.h new file mode 100644 index 00000000..87b6d61a --- /dev/null +++ b/src/include/ipxe/efi/Protocol/ComponentName.h @@ -0,0 +1,131 @@ +/** @file + EFI Component Name Protocol as defined in the EFI 1.1 specification. + This protocol is used to retrieve user readable names of EFI Drivers + and controllers managed by EFI Drivers. + +Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.
+                                This is the language of the driver name that the caller
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_COMPONENT_NAME_GET_DRIVER_NAME)( + IN EFI_COMPONENT_NAME_PROTOCOL *This, + IN CHAR8 *Language, + OUT CHAR16 **DriverName + ); + + +/** + Retrieves a Unicode string that is the user readable name of the controller + that is being managed by an EFI Driver. + + @param This A pointer to the EFI_COMPONENT_NAME_PROTOCOL instance. + @param ControllerHandle The handle of a controller that the driver specified by + This is managing. This handle specifies the controller + whose name is to be returned. + @param ChildHandle The handle of the child controller to retrieve the name + of. This is an optional parameter that may be NULL. It + will be NULL for device drivers. It will also be NULL + for a bus drivers that wish to retrieve the name of the + bus controller. It will not be NULL for a bus driver + that wishes to retrieve the name of a child controller. + @param Language A pointer to a three character ISO 639-2 language + identifier. This is the language of the controller name + that the caller is requesting, and it must match one + of the languages specified in SupportedLanguages. The + number of languages supported by a driver is up to the + driver writer. + @param ControllerName A pointer to the Unicode string to return. This Unicode + string is the name of the controller specified by + ControllerHandle and ChildHandle in the language specified + by Language, from the point of view of the driver specified + by This. + + @retval EFI_SUCCESS The Unicode string for the user-readable name in the + language specified by Language for the driver + specified by This was returned in DriverName. + @retval EFI_INVALID_PARAMETER ControllerHandle is NULL. + @retval EFI_INVALID_PARAMETER ChildHandle is not NULL and it is not a valid EFI_HANDLE. + @retval EFI_INVALID_PARAMETER Language is NULL. + @retval EFI_INVALID_PARAMETER ControllerName is NULL. 
+ @retval EFI_UNSUPPORTED The driver specified by This is not currently managing + the controller specified by ControllerHandle and + ChildHandle. + @retval EFI_UNSUPPORTED The driver specified by This does not support the + language specified by Language. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_COMPONENT_NAME_GET_CONTROLLER_NAME)( + IN EFI_COMPONENT_NAME_PROTOCOL *This, + IN EFI_HANDLE ControllerHandle, + IN EFI_HANDLE ChildHandle OPTIONAL, + IN CHAR8 *Language, + OUT CHAR16 **ControllerName + ); + +/// +/// This protocol is used to retrieve user readable names of drivers +/// and controllers managed by UEFI Drivers. +/// +struct _EFI_COMPONENT_NAME_PROTOCOL { + EFI_COMPONENT_NAME_GET_DRIVER_NAME GetDriverName; + EFI_COMPONENT_NAME_GET_CONTROLLER_NAME GetControllerName; + /// + /// A Null-terminated ASCII string that contains one or more + /// ISO 639-2 language codes. This is the list of language codes + /// that this protocol supports. + /// + CHAR8 *SupportedLanguages; +}; + +extern EFI_GUID gEfiComponentNameProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/ConsoleControl/ConsoleControl.h b/src/include/ipxe/efi/Protocol/ConsoleControl/ConsoleControl.h new file mode 100644 index 00000000..0bf5799e --- /dev/null +++ b/src/include/ipxe/efi/Protocol/ConsoleControl/ConsoleControl.h @@ -0,0 +1,124 @@ +/*++ + +Copyright (c) 2004 - 2010, Intel Corporation. All rights reserved.
+    Mode         - Are we in text or graphics mode.
+ +--*/ +; + + +typedef +EFI_STATUS +(EFIAPI *EFI_CONSOLE_CONTROL_PROTOCOL_SET_MODE) ( + IN EFI_CONSOLE_CONTROL_PROTOCOL *This, + IN EFI_CONSOLE_CONTROL_SCREEN_MODE Mode + ) +/*++ + + Routine Description: + Set the current mode to either text or graphics. Graphics is + for Quiet Boot. + + Arguments: + This - Protocol instance pointer. + Mode - Mode to set the + + Returns: + EFI_SUCCESS - Mode information returned. + +--*/ +; + + +typedef +EFI_STATUS +(EFIAPI *EFI_CONSOLE_CONTROL_PROTOCOL_LOCK_STD_IN) ( + IN EFI_CONSOLE_CONTROL_PROTOCOL *This, + IN CHAR16 *Password + ) +/*++ + + Routine Description: + Lock Std In devices until Password is typed. + + Arguments: + This - Protocol instance pointer. + Password - Password needed to unlock screen. NULL means unlock keyboard + + Returns: + EFI_SUCCESS - Mode information returned. + EFI_DEVICE_ERROR - Std In not locked + +--*/ +; + + + +struct _EFI_CONSOLE_CONTROL_PROTOCOL { + EFI_CONSOLE_CONTROL_PROTOCOL_GET_MODE GetMode; + EFI_CONSOLE_CONTROL_PROTOCOL_SET_MODE SetMode; + EFI_CONSOLE_CONTROL_PROTOCOL_LOCK_STD_IN LockStdIn; +}; + +extern EFI_GUID gEfiConsoleControlProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Dhcp4.h b/src/include/ipxe/efi/Protocol/Dhcp4.h new file mode 100644 index 00000000..560ee322 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Dhcp4.h @@ -0,0 +1,782 @@ +/** @file + EFI_DHCP4_PROTOCOL as defined in UEFI 2.0. + EFI_DHCP4_SERVICE_BINDING_PROTOCOL as defined in UEFI 2.0. + These protocols are used to collect configuration information for the EFI IPv4 Protocol + drivers and to provide DHCPv4 server and PXE boot server discovery services. + +Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol was introduced in UEFI Specification 2.0. + +**/ + +#ifndef __EFI_DHCP4_PROTOCOL_H__ +#define __EFI_DHCP4_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_DHCP4_PROTOCOL_GUID \ + { \ + 0x8a219718, 0x4ef5, 0x4761, {0x91, 0xc8, 0xc0, 0xf0, 0x4b, 0xda, 0x9e, 0x56 } \ + } + +#define EFI_DHCP4_SERVICE_BINDING_PROTOCOL_GUID \ + { \ + 0x9d9a39d8, 0xbd42, 0x4a73, {0xa4, 0xd5, 0x8e, 0xe9, 0x4b, 0xe1, 0x13, 0x80 } \ + } + +typedef struct _EFI_DHCP4_PROTOCOL EFI_DHCP4_PROTOCOL; + + +#pragma pack(1) +typedef struct { + /// + /// DHCP option code. + /// + UINT8 OpCode; + /// + /// Length of the DHCP option data. Not present if OpCode is 0 or 255. + /// + UINT8 Length; + /// + /// Start of the DHCP option data. Not present if OpCode is 0 or 255 or if Length is zero. + /// + UINT8 Data[1]; +} EFI_DHCP4_PACKET_OPTION; +#pragma pack() + + +#pragma pack(1) +/// +/// EFI_DHCP4_PACKET defines the format of DHCPv4 packets. See RFC 2131 for more information. +/// +typedef struct { + UINT8 OpCode; + UINT8 HwType; + UINT8 HwAddrLen; + UINT8 Hops; + UINT32 Xid; + UINT16 Seconds; + UINT16 Reserved; + EFI_IPv4_ADDRESS ClientAddr; ///< Client IP address from client. + EFI_IPv4_ADDRESS YourAddr; ///< Client IP address from server. + EFI_IPv4_ADDRESS ServerAddr; ///< IP address of next server in bootstrap. + EFI_IPv4_ADDRESS GatewayAddr; ///< Relay agent IP address. + UINT8 ClientHwAddr[16]; ///< Client hardware address. 
+ CHAR8 ServerName[64]; + CHAR8 BootFileName[128]; +}EFI_DHCP4_HEADER; +#pragma pack() + + +#pragma pack(1) +typedef struct { + /// + /// Size of the EFI_DHCP4_PACKET buffer. + /// + UINT32 Size; + /// + /// Length of the EFI_DHCP4_PACKET from the first byte of the Header field + /// to the last byte of the Option[] field. + /// + UINT32 Length; + + struct { + /// + /// DHCP packet header. + /// + EFI_DHCP4_HEADER Header; + /// + /// DHCP magik cookie in network byte order. + /// + UINT32 Magik; + /// + /// Start of the DHCP packed option data. + /// + UINT8 Option[1]; + } Dhcp4; +} EFI_DHCP4_PACKET; +#pragma pack() + + +typedef enum { + /// + /// The EFI DHCPv4 Protocol driver is stopped. + /// + Dhcp4Stopped = 0x0, + /// + /// The EFI DHCPv4 Protocol driver is inactive. + /// + Dhcp4Init = 0x1, + /// + /// The EFI DHCPv4 Protocol driver is collecting DHCP offer packets from DHCP servers. + /// + Dhcp4Selecting = 0x2, + /// + /// The EFI DHCPv4 Protocol driver has sent the request to the DHCP server and is waiting for a response. + /// + Dhcp4Requesting = 0x3, + /// + /// The DHCP configuration has completed. + /// + Dhcp4Bound = 0x4, + /// + /// The DHCP configuration is being renewed and another request has + /// been sent out, but it has not received a response from the server yet. + /// + Dhcp4Renewing = 0x5, + /// + /// The DHCP configuration has timed out and the EFI DHCPv4 + /// Protocol driver is trying to extend the lease time. + /// + Dhcp4Rebinding = 0x6, + /// + /// The EFI DHCPv4 Protocol driver was initialized with a previously + /// allocated or known IP address. + /// + Dhcp4InitReboot = 0x7, + /// + /// The EFI DHCPv4 Protocol driver is seeking to reuse the previously + /// allocated IP address by sending a request to the DHCP server. + /// + Dhcp4Rebooting = 0x8 +} EFI_DHCP4_STATE; + + +typedef enum{ + /// + /// The packet to start the configuration sequence is about to be sent. 
+ /// + Dhcp4SendDiscover = 0x01, + /// + /// A reply packet was just received. + /// + Dhcp4RcvdOffer = 0x02, + /// + /// It is time for Dhcp4Callback to select an offer. + /// + Dhcp4SelectOffer = 0x03, + /// + /// A request packet is about to be sent. + /// + Dhcp4SendRequest = 0x04, + /// + /// A DHCPACK packet was received and will be passed to Dhcp4Callback. + /// + Dhcp4RcvdAck = 0x05, + /// + /// A DHCPNAK packet was received and will be passed to Dhcp4Callback. + /// + Dhcp4RcvdNak = 0x06, + /// + /// A decline packet is about to be sent. + /// + Dhcp4SendDecline = 0x07, + /// + /// The DHCP configuration process has completed. No packet is associated with this event. + /// + Dhcp4BoundCompleted = 0x08, + /// + /// It is time to enter the Dhcp4Renewing state and to contact the server + /// that originally issued the network address. No packet is associated with this event. + /// + Dhcp4EnterRenewing = 0x09, + /// + /// It is time to enter the Dhcp4Rebinding state and to contact any server. + /// No packet is associated with this event. + /// + Dhcp4EnterRebinding = 0x0a, + /// + /// The configured IP address was lost either because the lease has expired, + /// the user released the configuration, or a DHCPNAK packet was received in + /// the Dhcp4Renewing or Dhcp4Rebinding state. No packet is associated with this event. + /// + Dhcp4AddressLost = 0x0b, + /// + /// The DHCP process failed because a DHCPNAK packet was received or the user + /// aborted the DHCP process at a time when the configuration was not available yet. + /// No packet is associated with this event. + /// + Dhcp4Fail = 0x0c +} EFI_DHCP4_EVENT; + +/** + Callback routine. + + EFI_DHCP4_CALLBACK is provided by the consumer of the EFI DHCPv4 Protocol driver + to intercept events that occurred in the configuration process. This structure + provides advanced control of each state transition of the DHCP process. 
The + returned status code determines the behavior of the EFI DHCPv4 Protocol driver. + There are three possible returned values, which are described in the following + table. + + @param This The pointer to the EFI DHCPv4 Protocol instance that is used to + configure this callback function. + @param Context The pointer to the context that is initialized by + EFI_DHCP4_PROTOCOL.Configure(). + @param CurrentState The current operational state of the EFI DHCPv4 Protocol + driver. + @param Dhcp4Event The event that occurs in the current state, which usually means a + state transition. + @param Packet The DHCP packet that is going to be sent or already received. + @param NewPacket The packet that is used to replace the above Packet. + + @retval EFI_SUCCESS Tells the EFI DHCPv4 Protocol driver to continue the DHCP process. + When it is in the Dhcp4Selecting state, it tells the EFI DHCPv4 Protocol + driver to stop collecting additional packets. The driver will exit + the Dhcp4Selecting state and enter the Dhcp4Requesting state. + @retval EFI_NOT_READY Only used in the Dhcp4Selecting state. The EFI DHCPv4 Protocol + driver will continue to wait for more packets until the retry + timeout expires. + @retval EFI_ABORTED Tells the EFI DHCPv4 Protocol driver to abort the current process and + return to the Dhcp4Init or Dhcp4InitReboot state. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_CALLBACK)( + IN EFI_DHCP4_PROTOCOL *This, + IN VOID *Context, + IN EFI_DHCP4_STATE CurrentState, + IN EFI_DHCP4_EVENT Dhcp4Event, + IN EFI_DHCP4_PACKET *Packet OPTIONAL, + OUT EFI_DHCP4_PACKET **NewPacket OPTIONAL + ); + +typedef struct { + /// + /// The number of times to try sending a packet during the Dhcp4SendDiscover + /// event and waiting for a response during the Dhcp4RcvdOffer event. + /// Set to zero to use the default try counts and timeout values. 
+ /// + UINT32 DiscoverTryCount; + /// + /// The maximum amount of time (in seconds) to wait for returned packets in each + /// of the retries. Timeout values of zero will default to a timeout value + /// of one second. Set to NULL to use default timeout values. + /// + UINT32 *DiscoverTimeout; + /// + /// The number of times to try sending a packet during the Dhcp4SendRequest event + /// and waiting for a response during the Dhcp4RcvdAck event before accepting + /// failure. Set to zero to use the default try counts and timeout values. + /// + UINT32 RequestTryCount; + /// + /// The maximum amount of time (in seconds) to wait for return packets in each of the retries. + /// Timeout values of zero will default to a timeout value of one second. + /// Set to NULL to use default timeout values. + /// + UINT32 *RequestTimeout; + /// + /// For a DHCPDISCOVER, setting this parameter to the previously allocated IP + /// address will cause the EFI DHCPv4 Protocol driver to enter the Dhcp4InitReboot state. + /// And set this field to 0.0.0.0 to enter the Dhcp4Init state. + /// For a DHCPINFORM this parameter should be set to the client network address + /// which was assigned to the client during a DHCPDISCOVER. + /// + EFI_IPv4_ADDRESS ClientAddress; + /// + /// The callback function to intercept various events that occurred in + /// the DHCP configuration process. Set to NULL to ignore all those events. + /// + EFI_DHCP4_CALLBACK Dhcp4Callback; + /// + /// The pointer to the context that will be passed to Dhcp4Callback when it is called. + /// + VOID *CallbackContext; + /// + /// Number of DHCP options in the OptionList. + /// + UINT32 OptionCount; + /// + /// List of DHCP options to be included in every packet that is sent during the + /// Dhcp4SendDiscover event. Pad options are appended automatically by DHCP driver + /// in outgoing DHCP packets. If OptionList itself contains pad option, they are + /// ignored by the driver. 
OptionList can be freed after EFI_DHCP4_PROTOCOL.Configure() + /// returns. Ignored if OptionCount is zero. + /// + EFI_DHCP4_PACKET_OPTION **OptionList; +} EFI_DHCP4_CONFIG_DATA; + + +typedef struct { + /// + /// The EFI DHCPv4 Protocol driver operating state. + /// + EFI_DHCP4_STATE State; + /// + /// The configuration data of the current EFI DHCPv4 Protocol driver instance. + /// + EFI_DHCP4_CONFIG_DATA ConfigData; + /// + /// The client IP address that was acquired from the DHCP server. If it is zero, + /// the DHCP acquisition has not completed yet and the following fields in this structure are undefined. + /// + EFI_IPv4_ADDRESS ClientAddress; + /// + /// The local hardware address. + /// + EFI_MAC_ADDRESS ClientMacAddress; + /// + /// The server IP address that is providing the DHCP service to this client. + /// + EFI_IPv4_ADDRESS ServerAddress; + /// + /// The router IP address that was acquired from the DHCP server. + /// May be zero if the server does not offer this address. + /// + EFI_IPv4_ADDRESS RouterAddress; + /// + /// The subnet mask of the connected network that was acquired from the DHCP server. + /// + EFI_IPv4_ADDRESS SubnetMask; + /// + /// The lease time (in 1-second units) of the configured IP address. + /// The value 0xFFFFFFFF means that the lease time is infinite. + /// A default lease of 7 days is used if the DHCP server does not provide a value. + /// + UINT32 LeaseTime; + /// + /// The cached latest DHCPACK or DHCPNAK or BOOTP REPLY packet. May be NULL if no packet is cached. + /// + EFI_DHCP4_PACKET *ReplyPacket; +} EFI_DHCP4_MODE_DATA; + + +typedef struct { + /// + /// Alternate listening address. It can be a unicast, multicast, or broadcast address. + /// + EFI_IPv4_ADDRESS ListenAddress; + /// + /// The subnet mask of above listening unicast/broadcast IP address. + /// Ignored if ListenAddress is a multicast address. + /// + EFI_IPv4_ADDRESS SubnetMask; + /// + /// Alternate station source (or listening) port number. 
+ /// If zero, then the default station port number (68) will be used. + /// + UINT16 ListenPort; +} EFI_DHCP4_LISTEN_POINT; + + +typedef struct { + /// + /// The completion status of transmitting and receiving. + /// + EFI_STATUS Status; + /// + /// If not NULL, the event that will be signaled when the collection process + /// completes. If NULL, this function will busy-wait until the collection process competes. + /// + EFI_EVENT CompletionEvent; + /// + /// The pointer to the server IP address. This address may be a unicast, multicast, or broadcast address. + /// + EFI_IPv4_ADDRESS RemoteAddress; + /// + /// The server listening port number. If zero, the default server listening port number (67) will be used. + /// + UINT16 RemotePort; + /// + /// The pointer to the gateway address to override the existing setting. + /// + EFI_IPv4_ADDRESS GatewayAddress; + /// + /// The number of entries in ListenPoints. If zero, the default station address and port number 68 are used. + /// + UINT32 ListenPointCount; + /// + /// An array of station address and port number pairs that are used as receiving filters. + /// The first entry is also used as the source address and source port of the outgoing packet. + /// + EFI_DHCP4_LISTEN_POINT *ListenPoints; + /// + /// The number of seconds to collect responses. Zero is invalid. + /// + UINT32 TimeoutValue; + /// + /// The pointer to the packet to be transmitted. + /// + EFI_DHCP4_PACKET *Packet; + /// + /// Number of received packets. + /// + UINT32 ResponseCount; + /// + /// The pointer to the allocated list of received packets. + /// + EFI_DHCP4_PACKET *ResponseList; +} EFI_DHCP4_TRANSMIT_RECEIVE_TOKEN; + + +/** + Returns the current operating mode and cached data packet for the EFI DHCPv4 Protocol driver. + + The GetModeData() function returns the current operating mode and cached data + packet for the EFI DHCPv4 Protocol driver. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. 
+ @param Dhcp4ModeData The pointer to storage for the EFI_DHCP4_MODE_DATA structure. + + @retval EFI_SUCCESS The mode data was returned. + @retval EFI_INVALID_PARAMETER This is NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_GET_MODE_DATA)( + IN EFI_DHCP4_PROTOCOL *This, + OUT EFI_DHCP4_MODE_DATA *Dhcp4ModeData + ); + +/** + Initializes, changes, or resets the operational settings for the EFI DHCPv4 Protocol driver. + + The Configure() function is used to initialize, change, or reset the operational + settings of the EFI DHCPv4 Protocol driver for the communication device on which + the EFI DHCPv4 Service Binding Protocol is installed. This function can be + successfully called only if both of the following are true: + * This instance of the EFI DHCPv4 Protocol driver is in the Dhcp4Stopped, Dhcp4Init, + Dhcp4InitReboot, or Dhcp4Bound states. + * No other EFI DHCPv4 Protocol driver instance that is controlled by this EFI + DHCPv4 Service Binding Protocol driver instance has configured this EFI DHCPv4 + Protocol driver. + When this driver is in the Dhcp4Stopped state, it can transfer into one of the + following two possible initial states: + * Dhcp4Init + * Dhcp4InitReboot. + The driver can transfer into these states by calling Configure() with a non-NULL + Dhcp4CfgData. The driver will transfer into the appropriate state based on the + supplied client network address in the ClientAddress parameter and DHCP options + in the OptionList parameter as described in RFC 2131. + When Configure() is called successfully while Dhcp4CfgData is set to NULL, the + default configuring data will be reset in the EFI DHCPv4 Protocol driver and + the state of the EFI DHCPv4 Protocol driver will not be changed. If one instance + wants to make it possible for another instance to configure the EFI DHCPv4 Protocol + driver, it must call this function with Dhcp4CfgData set to NULL. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. 
+                                Or another instance of this EFI DHCPv4 Protocol driver is already
At the + time when each event occurs in this process, the callback function that was set + by EFI_DHCP4_PROTOCOL.Configure() will be called and the user can take this + opportunity to control the process. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + @param CompletionEvent If not NULL, it indicates the event that will be signaled when the + EFI DHCPv4 Protocol driver is transferred into the + Dhcp4Bound state or when the DHCP process is aborted. + EFI_DHCP4_PROTOCOL.GetModeData() can be called to + check the completion status. If NULL, + EFI_DHCP4_PROTOCOL.Start() will wait until the driver + is transferred into the Dhcp4Bound state or the process fails. + + @retval EFI_SUCCESS The DHCP configuration process has started, or it has completed + when CompletionEvent is NULL. + @retval EFI_NOT_STARTED The EFI DHCPv4 Protocol driver is in the Dhcp4Stopped + state. EFI_DHCP4_PROTOCOL. Configure() needs to be called. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. + @retval EFI_TIMEOUT The DHCP configuration process failed because no response was + received from the server within the specified timeout value. + @retval EFI_ABORTED The user aborted the DHCP process. + @retval EFI_ALREADY_STARTED Some other EFI DHCPv4 Protocol instance already started the + DHCP process. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_NO_MEDIA There was a media error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_START)( + IN EFI_DHCP4_PROTOCOL *This, + IN EFI_EVENT CompletionEvent OPTIONAL + ); + +/** + Extends the lease time by sending a request packet. + + The RenewRebind() function is used to manually extend the lease time when the + EFI DHCPv4 Protocol driver is in the Dhcp4Bound state, and the lease time has + not expired yet. 
This function will send a request packet to the previously + found server (or to any server when RebindRequest is TRUE) and transfer the + state into the Dhcp4Renewing state (or Dhcp4Rebinding when RebindingRequest is + TRUE). When a response is received, the state is returned to Dhcp4Bound. + If no response is received before the try count is exceeded (the RequestTryCount + field that is specified in EFI_DHCP4_CONFIG_DATA) but before the lease time that + was issued by the previous server expires, the driver will return to the Dhcp4Bound + state, and the previous configuration is restored. The outgoing and incoming packets + can be captured by the EFI_DHCP4_CALLBACK function. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + @param RebindRequest If TRUE, this function broadcasts the request packets and enters + the Dhcp4Rebinding state. Otherwise, it sends a unicast + request packet and enters the Dhcp4Renewing state. + @param CompletionEvent If not NULL, this event is signaled when the renew/rebind phase + completes or some error occurs. + EFI_DHCP4_PROTOCOL.GetModeData() can be called to + check the completion status. If NULL, + EFI_DHCP4_PROTOCOL.RenewRebind() will busy-wait + until the DHCP process finishes. + + @retval EFI_SUCCESS The EFI DHCPv4 Protocol driver is now in the + Dhcp4Renewing state or is back to the Dhcp4Bound state. + @retval EFI_NOT_STARTED The EFI DHCPv4 Protocol driver is in the Dhcp4Stopped + state. EFI_DHCP4_PROTOCOL.Configure() needs to + be called. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_TIMEOUT There was no response from the server when the try count was + exceeded. + @retval EFI_ACCESS_DENIED The driver is not in the Dhcp4Bound state. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_RENEW_REBIND)( + IN EFI_DHCP4_PROTOCOL *This, + IN BOOLEAN RebindRequest, + IN EFI_EVENT CompletionEvent OPTIONAL + ); + +/** + Releases the current address configuration. + + The Release() function releases the current configured IP address by doing either + of the following: + * Sending a DHCPRELEASE packet when the EFI DHCPv4 Protocol driver is in the + Dhcp4Bound state + * Setting the previously assigned IP address that was provided with the + EFI_DHCP4_PROTOCOL.Configure() function to 0.0.0.0 when the driver is in + Dhcp4InitReboot state + After a successful call to this function, the EFI DHCPv4 Protocol driver returns + to the Dhcp4Init state, and any subsequent incoming packets will be discarded silently. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + + @retval EFI_SUCCESS The EFI DHCPv4 Protocol driver is now in the Dhcp4Init phase. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_ACCESS_DENIED The EFI DHCPv4 Protocol driver is not Dhcp4InitReboot state. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_RELEASE)( + IN EFI_DHCP4_PROTOCOL *This + ); + +/** + Stops the current address configuration. + + The Stop() function is used to stop the DHCP configuration process. After this + function is called successfully, the EFI DHCPv4 Protocol driver is transferred + into the Dhcp4Stopped state. EFI_DHCP4_PROTOCOL.Configure() needs to be called + before DHCP configuration process can be started again. This function can be + called when the EFI DHCPv4 Protocol driver is in any state. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + + @retval EFI_SUCCESS The EFI DHCPv4 Protocol driver is now in the Dhcp4Stopped phase. + @retval EFI_INVALID_PARAMETER This is NULL. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_STOP)( + IN EFI_DHCP4_PROTOCOL *This + ); + +/** + Builds a DHCP packet, given the options to be appended or deleted or replaced. + + The Build() function is used to assemble a new packet from the original packet + by replacing or deleting existing options or appending new options. This function + does not change any state of the EFI DHCPv4 Protocol driver and can be used at + any time. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + @param SeedPacket Initial packet to be used as a base for building new packet. + @param DeleteCount Number of opcodes in the DeleteList. + @param DeleteList List of opcodes to be deleted from the seed packet. + Ignored if DeleteCount is zero. + @param AppendCount Number of entries in the OptionList. + @param AppendList The pointer to a DHCP option list to be appended to SeedPacket. + If SeedPacket also contains options in this list, they are + replaced by new options (except pad option). Ignored if + AppendCount is zero. Type EFI_DHCP4_PACKET_OPTION + @param NewPacket The pointer to storage for the pointer to the new allocated packet. + Use the EFI Boot Service FreePool() on the resulting pointer + when done with the packet. + + @retval EFI_SUCCESS The new packet was built. + @retval EFI_OUT_OF_RESOURCES Storage for the new packet could not be allocated. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. + SeedPacket is NULL. + SeedPacket is not a well-formed DHCP packet. + AppendCount is not zero and AppendList is NULL. + DeleteCount is not zero and DeleteList is NULL. + NewPacket is NULL + Both DeleteCount and AppendCount are zero and + NewPacket is not NULL. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_BUILD)( + IN EFI_DHCP4_PROTOCOL *This, + IN EFI_DHCP4_PACKET *SeedPacket, + IN UINT32 DeleteCount, + IN UINT8 *DeleteList OPTIONAL, + IN UINT32 AppendCount, + IN EFI_DHCP4_PACKET_OPTION *AppendList[] OPTIONAL, + OUT EFI_DHCP4_PACKET **NewPacket + ); + + +/** + Transmits a DHCP formatted packet and optionally waits for responses. + + The TransmitReceive() function is used to transmit a DHCP packet and optionally + wait for the response from servers. This function does not change the state of + the EFI DHCPv4 Protocol driver. It can be used at any time because of this. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + @param Token The pointer to the EFI_DHCP4_TRANSMIT_RECEIVE_TOKEN structure. + + @retval EFI_SUCCESS The packet was successfully queued for transmission. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. + Token.RemoteAddress is zero. + Token.Packet is NULL. + Token.Packet is not a well-formed DHCP packet. + The transaction ID in Token.Packet is in use by another DHCP process. + @retval EFI_NOT_READY The previous call to this function has not finished yet. Try to call + this function after collection process completes. + @retval EFI_NO_MAPPING The default station address is not available yet. + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. + @retval EFI_UNSUPPORTED The implementation doesn't support this function + @retval Others Some other unexpected error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_TRANSMIT_RECEIVE)( + IN EFI_DHCP4_PROTOCOL *This, + IN EFI_DHCP4_TRANSMIT_RECEIVE_TOKEN *Token + ); + + +/** + Parses the packed DHCP option data. + + The Parse() function is used to retrieve the option list from a DHCP packet. 
+ If *OptionCount isn't zero, and there is enough space for all the DHCP options + in the Packet, each element of PacketOptionList is set to point to somewhere in + the Packet->Dhcp4.Option where a new DHCP option begins. If RFC3396 is supported, + the caller should reassemble the parsed DHCP options to get the final result. + If *OptionCount is zero or there isn't enough space for all of them, the number + of DHCP options in the Packet is returned in OptionCount. + + @param This The pointer to the EFI_DHCP4_PROTOCOL instance. + @param Packet The pointer to packet to be parsed. + @param OptionCount On input, the number of entries in the PacketOptionList. + On output, the number of entries that were written into the + PacketOptionList. + @param PacketOptionList A list of packet option entries to be filled in. End option or pad + options are not included. + + @retval EFI_SUCCESS The packet was successfully parsed. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. + The packet is NULL. + The packet is not a well-formed DHCP packet. + OptionCount is NULL. + @retval EFI_BUFFER_TOO_SMALL One or more of the following conditions is TRUE: + 1) *OptionCount is smaller than the number of options that + were found in the Packet. + 2) PacketOptionList is NULL. + @retval EFI_OUT_OF_RESOURCE The packet failed to parse because of a resource shortage. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DHCP4_PARSE)( + IN EFI_DHCP4_PROTOCOL *This, + IN EFI_DHCP4_PACKET *Packet, + IN OUT UINT32 *OptionCount, + OUT EFI_DHCP4_PACKET_OPTION *PacketOptionList[] OPTIONAL + ); + +/// +/// This protocol is used to collect configuration information for the EFI IPv4 Protocol drivers +/// and to provide DHCPv4 server and PXE boot server discovery services. 
+/// +struct _EFI_DHCP4_PROTOCOL { + EFI_DHCP4_GET_MODE_DATA GetModeData; + EFI_DHCP4_CONFIGURE Configure; + EFI_DHCP4_START Start; + EFI_DHCP4_RENEW_REBIND RenewRebind; + EFI_DHCP4_RELEASE Release; + EFI_DHCP4_STOP Stop; + EFI_DHCP4_BUILD Build; + EFI_DHCP4_TRANSMIT_RECEIVE TransmitReceive; + EFI_DHCP4_PARSE Parse; +}; + +extern EFI_GUID gEfiDhcp4ProtocolGuid; +extern EFI_GUID gEfiDhcp4ServiceBindingProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/DiskIo.h b/src/include/ipxe/efi/Protocol/DiskIo.h new file mode 100644 index 00000000..1b47ce52 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/DiskIo.h @@ -0,0 +1,119 @@ +/** @file + Disk IO protocol as defined in the UEFI 2.0 specification. + + The Disk IO protocol is used to convert block oriented devices into byte + oriented devices. The Disk IO protocol is intended to layer on top of the + Block IO protocol. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __DISK_IO_H__ +#define __DISK_IO_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_DISK_IO_PROTOCOL_GUID \ + { \ + 0xce345171, 0xba0b, 0x11d2, {0x8e, 0x4f, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b } \ + } + +/// +/// Protocol GUID name defined in EFI1.1. +/// +#define DISK_IO_PROTOCOL EFI_DISK_IO_PROTOCOL_GUID + +typedef struct _EFI_DISK_IO_PROTOCOL EFI_DISK_IO_PROTOCOL; + +/// +/// Protocol defined in EFI1.1. +/// +typedef EFI_DISK_IO_PROTOCOL EFI_DISK_IO; + +/** + Read BufferSize bytes from Offset into Buffer. + + @param This Protocol instance pointer. + @param MediaId Id of the media, changes every time the media is replaced. + @param Offset The starting byte offset to read from + @param BufferSize Size of Buffer + @param Buffer Buffer containing read data + + @retval EFI_SUCCESS The data was read correctly from the device. + @retval EFI_DEVICE_ERROR The device reported an error while performing the read. + @retval EFI_NO_MEDIA There is no media in the device. + @retval EFI_MEDIA_CHNAGED The MediaId does not matched the current device. + @retval EFI_INVALID_PARAMETER The read request contains device addresses that are not + valid for the device. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DISK_READ)( + IN EFI_DISK_IO_PROTOCOL *This, + IN UINT32 MediaId, + IN UINT64 Offset, + IN UINTN BufferSize, + OUT VOID *Buffer + ); + +/** + Writes a specified number of bytes to a device. + + @param This Indicates a pointer to the calling context. + @param MediaId ID of the medium to be written. 
+ @param Offset The starting byte offset on the logical block I/O device to write. + @param BufferSize The size in bytes of Buffer. The number of bytes to write to the device. + @param Buffer A pointer to the buffer containing the data to be written. + + @retval EFI_SUCCESS The data was written correctly to the device. + @retval EFI_WRITE_PROTECTED The device can not be written to. + @retval EFI_DEVICE_ERROR The device reported an error while performing the write. + @retval EFI_NO_MEDIA There is no media in the device. + @retval EFI_MEDIA_CHNAGED The MediaId does not matched the current device. + @retval EFI_INVALID_PARAMETER The write request contains device addresses that are not + valid for the device. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_DISK_WRITE)( + IN EFI_DISK_IO_PROTOCOL *This, + IN UINT32 MediaId, + IN UINT64 Offset, + IN UINTN BufferSize, + IN VOID *Buffer + ); + +#define EFI_DISK_IO_PROTOCOL_REVISION 0x00010000 + +/// +/// Revision defined in EFI1.1 +/// +#define EFI_DISK_IO_INTERFACE_REVISION EFI_DISK_IO_PROTOCOL_REVISION + +/// +/// This protocol is used to abstract Block I/O interfaces. +/// +struct _EFI_DISK_IO_PROTOCOL { + /// + /// The revision to which the disk I/O interface adheres. All future + /// revisions must be backwards compatible. If a future version is not + /// backwards compatible, it is not the same GUID. + /// + UINT64 Revision; + EFI_DISK_READ ReadDisk; + EFI_DISK_WRITE WriteDisk; +}; + +extern EFI_GUID gEfiDiskIoProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/GraphicsOutput.h b/src/include/ipxe/efi/Protocol/GraphicsOutput.h new file mode 100644 index 00000000..98ca8c9c --- /dev/null +++ b/src/include/ipxe/efi/Protocol/GraphicsOutput.h @@ -0,0 +1,278 @@ +/** @file + Graphics Output Protocol from the UEFI 2.0 specification. + + Abstraction of a very simple graphics device. + + Copyright (c) 2006 - 2012, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __GRAPHICS_OUTPUT_H__ +#define __GRAPHICS_OUTPUT_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \ + { \ + 0x9042a9de, 0x23dc, 0x4a38, {0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a } \ + } + +typedef struct _EFI_GRAPHICS_OUTPUT_PROTOCOL EFI_GRAPHICS_OUTPUT_PROTOCOL; + +typedef struct { + UINT32 RedMask; + UINT32 GreenMask; + UINT32 BlueMask; + UINT32 ReservedMask; +} EFI_PIXEL_BITMASK; + +typedef enum { + /// + /// A pixel is 32-bits and byte zero represents red, byte one represents green, + /// byte two represents blue, and byte three is reserved. This is the definition + /// for the physical frame buffer. The byte values for the red, green, and blue + /// components represent the color intensity. This color intensity value range + /// from a minimum intensity of 0 to maximum intensity of 255. + /// + PixelRedGreenBlueReserved8BitPerColor, + /// + /// A pixel is 32-bits and byte zero represents blue, byte one represents green, + /// byte two represents red, and byte three is reserved. This is the definition + /// for the physical frame buffer. The byte values for the red, green, and blue + /// components represent the color intensity. This color intensity value range + /// from a minimum intensity of 0 to maximum intensity of 255. + /// + PixelBlueGreenRedReserved8BitPerColor, + /// + /// The Pixel definition of the physical frame buffer. + /// + PixelBitMask, + /// + /// This mode does not support a physical frame buffer. 
+ /// + PixelBltOnly, + /// + /// Valid EFI_GRAPHICS_PIXEL_FORMAT enum values are less than this value. + /// + PixelFormatMax +} EFI_GRAPHICS_PIXEL_FORMAT; + +typedef struct { + /// + /// The version of this data structure. A value of zero represents the + /// EFI_GRAPHICS_OUTPUT_MODE_INFORMATION structure as defined in this specification. + /// + UINT32 Version; + /// + /// The size of video screen in pixels in the X dimension. + /// + UINT32 HorizontalResolution; + /// + /// The size of video screen in pixels in the Y dimension. + /// + UINT32 VerticalResolution; + /// + /// Enumeration that defines the physical format of the pixel. A value of PixelBltOnly + /// implies that a linear frame buffer is not available for this mode. + /// + EFI_GRAPHICS_PIXEL_FORMAT PixelFormat; + /// + /// This bit-mask is only valid if PixelFormat is set to PixelPixelBitMask. + /// A bit being set defines what bits are used for what purpose such as Red, Green, Blue, or Reserved. + /// + EFI_PIXEL_BITMASK PixelInformation; + /// + /// Defines the number of pixel elements per video memory line. + /// + UINT32 PixelsPerScanLine; +} EFI_GRAPHICS_OUTPUT_MODE_INFORMATION; + +/** + Returns information for an available graphics mode that the graphics device + and the set of active video output devices supports. + + @param This The EFI_GRAPHICS_OUTPUT_PROTOCOL instance. + @param ModeNumber The mode number to return information on. + @param SizeOfInfo A pointer to the size, in bytes, of the Info buffer. + @param Info A pointer to callee allocated buffer that returns information about ModeNumber. + + @retval EFI_SUCCESS Valid mode information was returned. + @retval EFI_DEVICE_ERROR A hardware error occurred trying to retrieve the video mode. + @retval EFI_INVALID_PARAMETER ModeNumber is not valid. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_GRAPHICS_OUTPUT_PROTOCOL_QUERY_MODE)( + IN EFI_GRAPHICS_OUTPUT_PROTOCOL *This, + IN UINT32 ModeNumber, + OUT UINTN *SizeOfInfo, + OUT EFI_GRAPHICS_OUTPUT_MODE_INFORMATION **Info + ); + +/** + Set the video device into the specified mode and clears the visible portions of + the output display to black. + + @param This The EFI_GRAPHICS_OUTPUT_PROTOCOL instance. + @param ModeNumber Abstraction that defines the current video mode. + + @retval EFI_SUCCESS The graphics mode specified by ModeNumber was selected. + @retval EFI_DEVICE_ERROR The device had an error and could not complete the request. + @retval EFI_UNSUPPORTED ModeNumber is not supported by this device. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_GRAPHICS_OUTPUT_PROTOCOL_SET_MODE)( + IN EFI_GRAPHICS_OUTPUT_PROTOCOL *This, + IN UINT32 ModeNumber + ); + +typedef struct { + UINT8 Blue; + UINT8 Green; + UINT8 Red; + UINT8 Reserved; +} EFI_GRAPHICS_OUTPUT_BLT_PIXEL; + +typedef union { + EFI_GRAPHICS_OUTPUT_BLT_PIXEL Pixel; + UINT32 Raw; +} EFI_GRAPHICS_OUTPUT_BLT_PIXEL_UNION; + +/// +/// actions for BltOperations +/// +typedef enum { + /// + /// Write data from the BltBuffer pixel (0, 0) + /// directly to every pixel of the video display rectangle + /// (DestinationX, DestinationY) (DestinationX + Width, DestinationY + Height). + /// Only one pixel will be used from the BltBuffer. Delta is NOT used. + /// + EfiBltVideoFill, + + /// + /// Read data from the video display rectangle + /// (SourceX, SourceY) (SourceX + Width, SourceY + Height) and place it in + /// the BltBuffer rectangle (DestinationX, DestinationY ) + /// (DestinationX + Width, DestinationY + Height). If DestinationX or + /// DestinationY is not zero then Delta must be set to the length in bytes + /// of a row in the BltBuffer. 
+ /// + EfiBltVideoToBltBuffer, + + /// + /// Write data from the BltBuffer rectangle + /// (SourceX, SourceY) (SourceX + Width, SourceY + Height) directly to the + /// video display rectangle (DestinationX, DestinationY) + /// (DestinationX + Width, DestinationY + Height). If SourceX or SourceY is + /// not zero then Delta must be set to the length in bytes of a row in the + /// BltBuffer. + /// + EfiBltBufferToVideo, + + /// + /// Copy from the video display rectangle (SourceX, SourceY) + /// (SourceX + Width, SourceY + Height) to the video display rectangle + /// (DestinationX, DestinationY) (DestinationX + Width, DestinationY + Height). + /// The BltBuffer and Delta are not used in this mode. + /// + EfiBltVideoToVideo, + + EfiGraphicsOutputBltOperationMax +} EFI_GRAPHICS_OUTPUT_BLT_OPERATION; + +/** + Blt a rectangle of pixels on the graphics screen. Blt stands for BLock Transfer. + + @param This Protocol instance pointer. + @param BltBuffer The data to transfer to the graphics screen. + Size is at least Width*Height*sizeof(EFI_GRAPHICS_OUTPUT_BLT_PIXEL). + @param BltOperation The operation to perform when copying BltBuffer on to the graphics screen. + @param SourceX The X coordinate of source for the BltOperation. + @param SourceY The Y coordinate of source for the BltOperation. + @param DestinationX The X coordinate of destination for the BltOperation. + @param DestinationY The Y coordinate of destination for the BltOperation. + @param Width The width of a rectangle in the blt rectangle in pixels. + @param Height The height of a rectangle in the blt rectangle in pixels. + @param Delta Not used for EfiBltVideoFill or the EfiBltVideoToVideo operation. + If a Delta of zero is used, the entire BltBuffer is being operated on. + If a subrectangle of the BltBuffer is being used then Delta + represents the number of bytes in a row of the BltBuffer. + + @retval EFI_SUCCESS BltBuffer was drawn to the graphics screen. 
+ @retval EFI_INVALID_PARAMETER BltOperation is not valid. + @retval EFI_DEVICE_ERROR The device had an error and could not complete the request. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_GRAPHICS_OUTPUT_PROTOCOL_BLT)( + IN EFI_GRAPHICS_OUTPUT_PROTOCOL *This, + IN EFI_GRAPHICS_OUTPUT_BLT_PIXEL *BltBuffer, OPTIONAL + IN EFI_GRAPHICS_OUTPUT_BLT_OPERATION BltOperation, + IN UINTN SourceX, + IN UINTN SourceY, + IN UINTN DestinationX, + IN UINTN DestinationY, + IN UINTN Width, + IN UINTN Height, + IN UINTN Delta OPTIONAL + ); + +typedef struct { + /// + /// The number of modes supported by QueryMode() and SetMode(). + /// + UINT32 MaxMode; + /// + /// Current Mode of the graphics device. Valid mode numbers are 0 to MaxMode -1. + /// + UINT32 Mode; + /// + /// Pointer to read-only EFI_GRAPHICS_OUTPUT_MODE_INFORMATION data. + /// + EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *Info; + /// + /// Size of Info structure in bytes. + /// + UINTN SizeOfInfo; + /// + /// Base address of graphics linear frame buffer. + /// Offset zero in FrameBufferBase represents the upper left pixel of the display. + /// + EFI_PHYSICAL_ADDRESS FrameBufferBase; + /// + /// Amount of frame buffer needed to support the active mode as defined by + /// PixelsPerScanLine xVerticalResolution x PixelElementSize. + /// + UINTN FrameBufferSize; +} EFI_GRAPHICS_OUTPUT_PROTOCOL_MODE; + +/// +/// Provides a basic abstraction to set video modes and copy pixels to and from +/// the graphics controller's frame buffer. The linear address of the hardware +/// frame buffer is also exposed so software can write directly to the video hardware. +/// +struct _EFI_GRAPHICS_OUTPUT_PROTOCOL { + EFI_GRAPHICS_OUTPUT_PROTOCOL_QUERY_MODE QueryMode; + EFI_GRAPHICS_OUTPUT_PROTOCOL_SET_MODE SetMode; + EFI_GRAPHICS_OUTPUT_PROTOCOL_BLT Blt; + /// + /// Pointer to EFI_GRAPHICS_OUTPUT_PROTOCOL_MODE data. 
+ /// + EFI_GRAPHICS_OUTPUT_PROTOCOL_MODE *Mode; +}; + +extern EFI_GUID gEfiGraphicsOutputProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/HiiFont.h b/src/include/ipxe/efi/Protocol/HiiFont.h new file mode 100644 index 00000000..f2b72dc1 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/HiiFont.h @@ -0,0 +1,474 @@ +/** @file + The file provides services to retrieve font information. + +Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __HII_FONT_H__ +#define __HII_FONT_H__ + +FILE_LICENCE ( BSD3 ); + +#include +#include + +#define EFI_HII_FONT_PROTOCOL_GUID \ +{ 0xe9ca4775, 0x8657, 0x47fc, { 0x97, 0xe7, 0x7e, 0xd6, 0x5a, 0x8, 0x43, 0x24 } } + +typedef struct _EFI_HII_FONT_PROTOCOL EFI_HII_FONT_PROTOCOL; + +typedef VOID *EFI_FONT_HANDLE; + +/// +/// EFI_HII_OUT_FLAGS. +/// +typedef UINT32 EFI_HII_OUT_FLAGS; + +#define EFI_HII_OUT_FLAG_CLIP 0x00000001 +#define EFI_HII_OUT_FLAG_WRAP 0x00000002 +#define EFI_HII_OUT_FLAG_CLIP_CLEAN_Y 0x00000004 +#define EFI_HII_OUT_FLAG_CLIP_CLEAN_X 0x00000008 +#define EFI_HII_OUT_FLAG_TRANSPARENT 0x00000010 +#define EFI_HII_IGNORE_IF_NO_GLYPH 0x00000020 +#define EFI_HII_IGNORE_LINE_BREAK 0x00000040 +#define EFI_HII_DIRECT_TO_SCREEN 0x00000080 + +/** + Definition of EFI_HII_ROW_INFO. +**/ +typedef struct _EFI_HII_ROW_INFO { + /// + /// The index of the first character in the string which is displayed on the line. + /// + UINTN StartIndex; + /// + /// The index of the last character in the string which is displayed on the line. + /// If this is the same as StartIndex, then no characters are displayed. + /// + UINTN EndIndex; + UINTN LineHeight; ///< The height of the line, in pixels. + UINTN LineWidth; ///< The width of the text on the line, in pixels. + + /// + /// The font baseline offset in pixels from the bottom of the row, or 0 if none. + /// + UINTN BaselineOffset; +} EFI_HII_ROW_INFO; + +/// +/// Font info flag. All flags (FONT, SIZE, STYLE, and COLOR) are defined. 
+/// They are defined as EFI_FONT_INFO_*** +/// +typedef UINT32 EFI_FONT_INFO_MASK; + +#define EFI_FONT_INFO_SYS_FONT 0x00000001 +#define EFI_FONT_INFO_SYS_SIZE 0x00000002 +#define EFI_FONT_INFO_SYS_STYLE 0x00000004 +#define EFI_FONT_INFO_SYS_FORE_COLOR 0x00000010 +#define EFI_FONT_INFO_SYS_BACK_COLOR 0x00000020 +#define EFI_FONT_INFO_RESIZE 0x00001000 +#define EFI_FONT_INFO_RESTYLE 0x00002000 +#define EFI_FONT_INFO_ANY_FONT 0x00010000 +#define EFI_FONT_INFO_ANY_SIZE 0x00020000 +#define EFI_FONT_INFO_ANY_STYLE 0x00040000 + +// +// EFI_FONT_INFO +// +typedef struct { + EFI_HII_FONT_STYLE FontStyle; + UINT16 FontSize; ///< character cell height in pixels + CHAR16 FontName[1]; +} EFI_FONT_INFO; + +/** + Describes font output-related information. + + This structure is used for describing the way in which a string + should be rendered in a particular font. FontInfo specifies the + basic font information and ForegroundColor and BackgroundColor + specify the color in which they should be displayed. The flags + in FontInfoMask describe where the system default should be + supplied instead of the specified information. The flags also + describe what options can be used to make a match between the + font requested and the font available. +**/ +typedef struct _EFI_FONT_DISPLAY_INFO { + EFI_GRAPHICS_OUTPUT_BLT_PIXEL ForegroundColor; + EFI_GRAPHICS_OUTPUT_BLT_PIXEL BackgroundColor; + EFI_FONT_INFO_MASK FontInfoMask; + EFI_FONT_INFO FontInfo; +} EFI_FONT_DISPLAY_INFO; + +/** + + This function renders a string to a bitmap or the screen using + the specified font, color and options. It either draws the + string and glyphs on an existing bitmap, allocates a new bitmap, + or uses the screen. The strings can be clipped or wrapped. + Optionally, the function also returns the information about each + row and the character position on that row. 
If + EFI_HII_OUT_FLAG_CLIP is set, then text will be formatted only + based on explicit line breaks and all pixels which would lie + outside the bounding box specified by Width and Height are + ignored. The information in the RowInfoArray only describes + characters which are at least partially displayed. For the final + row, the LineHeight and BaseLine may describe pixels that are + outside the limit specified by Height (unless + EFI_HII_OUT_FLAG_CLIP_CLEAN_Y is specified) even though those + pixels were not drawn. The LineWidth may describe pixels which + are outside the limit specified by Width (unless + EFI_HII_OUT_FLAG_CLIP_CLEAN_X is specified) even though those + pixels were not drawn. If EFI_HII_OUT_FLAG_CLIP_CLEAN_X is set, + then it modifies the behavior of EFI_HII_OUT_FLAG_CLIP so that + if a character's right-most on pixel cannot fit, then it will + not be drawn at all. This flag requires that + EFI_HII_OUT_FLAG_CLIP be set. If EFI_HII_OUT_FLAG_CLIP_CLEAN_Y + is set, then it modifies the behavior of EFI_HII_OUT_FLAG_CLIP + so that if a row's bottom-most pixel cannot fit, then it will + not be drawn at all. This flag requires that + EFI_HII_OUT_FLAG_CLIP be set. If EFI_HII_OUT_FLAG_WRAP is set, + then text will be wrapped at the right-most line-break + opportunity prior to a character whose right-most extent would + exceed Width. If no line-break opportunity can be found, then + the text will behave as if EFI_HII_OUT_FLAG_CLIP_CLEAN_X is set. + This flag cannot be used with EFI_HII_OUT_FLAG_CLIP_CLEAN_X. If + EFI_HII_OUT_FLAG_TRANSPARENT is set, then BackgroundColor is + ignored and all 'off' pixels in the character's drawn + will use the pixel value from Blt. This flag cannot be used if + Blt is NULL upon entry. If EFI_HII_IGNORE_IF_NO_GLYPH is set, + then characters which have no glyphs are not drawn. Otherwise, + they are replaced with Unicode character code 0xFFFD (REPLACEMENT + CHARACTER). 
If EFI_HII_IGNORE_LINE_BREAK is set, then explicit + line break characters will be ignored. If + EFI_HII_DIRECT_TO_SCREEN is set, then the string will be written + directly to the output device specified by Screen. Otherwise the + string will be rendered to the bitmap specified by Bitmap. + + @param This A pointer to the EFI_HII_FONT_PROTOCOL instance. + + @param Flags Describes how the string is to be drawn. + + @param String Points to the null-terminated string to be + + @param StringInfo Points to the string output information, + including the color and font. If NULL, then + the string will be output in the default + system font and color. + + @param Blt If this points to a non-NULL on entry, this points + to the image, which is Width pixels wide and + Height pixels high. The string will be drawn onto + this image and EFI_HII_OUT_FLAG_CLIP is implied. + If this points to a NULL on entry, then a buffer + will be allocated to hold the generated image and + the pointer updated on exit. It is the caller's + responsibility to free this buffer. + + @param BltX, BltY Specifies the offset from the left and top + edge of the image of the first character + cell in the image. + + @param RowInfoArray If this is non-NULL on entry, then on + exit, this will point to an allocated buffer + containing row information and + RowInfoArraySize will be updated to contain + the number of elements. This array describes + the characters that were at least partially + drawn and the heights of the rows. It is the + caller's responsibility to free this buffer. + + @param RowInfoArraySize If this is non-NULL on entry, then on + exit it contains the number of + elements in RowInfoArray. + + @param ColumnInfoArray If this is non-NULL, then on return it + will be filled with the horizontal + offset for each character in the + string on the row where it is + displayed. Non-printing characters + will have the offset ~0. 
The caller is + responsible for allocating a buffer large + enough so that there is one entry for + each character in the string, not + including the null-terminator. It is + possible when character display is + normalized that some character cells + overlap. + + @retval EFI_SUCCESS The string was successfully updated. + + @retval EFI_OUT_OF_RESOURCES Unable to allocate an output buffer for RowInfoArray or Blt. + + @retval EFI_INVALID_PARAMETER The String or Blt was NULL. + + @retval EFI_INVALID_PARAMETER Flags were invalid combination. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_STRING_TO_IMAGE)( + IN CONST EFI_HII_FONT_PROTOCOL *This, + IN EFI_HII_OUT_FLAGS Flags, + IN CONST EFI_STRING String, + IN CONST EFI_FONT_DISPLAY_INFO *StringInfo, + IN OUT EFI_IMAGE_OUTPUT **Blt, + IN UINTN BltX, + IN UINTN BltY, + OUT EFI_HII_ROW_INFO **RowInfoArray OPTIONAL, + OUT UINTN *RowInfoArraySize OPTIONAL, + OUT UINTN *ColumnInfoArray OPTIONAL +); + + + +/** + + This function renders a string as a bitmap or to the screen + and can clip or wrap the string. The bitmap is either supplied + by the caller or allocated by the function. The + strings are drawn with the font, size and style specified and + can be drawn transparently or opaquely. The function can also + return information about each row and each character's + position on the row. If EFI_HII_OUT_FLAG_CLIP is set, then + text will be formatted based only on explicit line breaks, and + all pixels that would lie outside the bounding box specified + by Width and Height are ignored. The information in the + RowInfoArray only describes characters which are at least + partially displayed. For the final row, the LineHeight and + BaseLine may describe pixels which are outside the limit + specified by Height (unless EFI_HII_OUT_FLAG_CLIP_CLEAN_Y is + specified) even though those pixels were not drawn. 
If + EFI_HII_OUT_FLAG_CLIP_CLEAN_X is set, then it modifies the + behavior of EFI_HII_OUT_FLAG_CLIP so that if a character's + right-most on pixel cannot fit, then it will not be drawn at + all. This flag requires that EFI_HII_OUT_FLAG_CLIP be set. If + EFI_HII_OUT_FLAG_CLIP_CLEAN_Y is set, then it modifies the + behavior of EFI_HII_OUT_FLAG_CLIP so that if a row's bottom + most pixel cannot fit, then it will not be drawn at all. This + flag requires that EFI_HII_OUT_FLAG_CLIP be set. If + EFI_HII_OUT_FLAG_WRAP is set, then text will be wrapped at the + right-most line-break opportunity prior to a character whose + right-most extent would exceed Width. If no line-break + opportunity can be found, then the text will behave as if + EFI_HII_OUT_FLAG_CLIP_CLEAN_X is set. This flag cannot be used + with EFI_HII_OUT_FLAG_CLIP_CLEAN_X. If + EFI_HII_OUT_FLAG_TRANSPARENT is set, then BackgroundColor is + ignored and all off" pixels in the character's glyph will + use the pixel value from Blt. This flag cannot be used if Blt + is NULL upon entry. If EFI_HII_IGNORE_IF_NO_GLYPH is set, then + characters which have no glyphs are not drawn. Otherwise, they + are replaced with Unicode character code 0xFFFD (REPLACEMENT + CHARACTER). If EFI_HII_IGNORE_LINE_BREAK is set, then explicit + line break characters will be ignored. If + EFI_HII_DIRECT_TO_SCREEN is set, then the string will be + written directly to the output device specified by Screen. + Otherwise the string will be rendered to the bitmap specified + by Bitmap. + + + @param This A pointer to the EFI_HII_FONT_PROTOCOL instance. + + @param Flags Describes how the string is to be drawn. + + @param PackageList + The package list in the HII database to + search for the specified string. + + @param StringId The string's id, which is unique within + PackageList. + + @param Language Points to the language for the retrieved + string. If NULL, then the current system + language is used. 
+ + @param StringInfo Points to the string output information, + including the color and font. If NULL, then + the string will be output in the default + system font and color. + + @param Blt If this points to a non-NULL on entry, this points + to the image, which is Width pixels wide and + Height pixels high. The string will be drawn onto + this image and EFI_HII_OUT_FLAG_CLIP is implied. + If this points to a NULL on entry, then a buffer + will be allocated to hold the generated image and + the pointer updated on exit. It is the caller's + responsibility to free this buffer. + + @param BltX, BltY Specifies the offset from the left and top + edge of the output image of the first + character cell in the image. + + @param RowInfoArray If this is non-NULL on entry, then on + exit, this will point to an allocated + buffer containing row information and + RowInfoArraySize will be updated to + contain the number of elements. This array + describes the characters which were at + least partially drawn and the heights of + the rows. It is the caller's + responsibility to free this buffer. + + @param RowInfoArraySize If this is non-NULL on entry, then on + exit it contains the number of + elements in RowInfoArray. + + @param ColumnInfoArray If non-NULL, on return it is filled + with the horizontal offset for each + character in the string on the row + where it is displayed. Non-printing + characters will have the offset ~0. + The caller is responsible to allocate + a buffer large enough so that there is + one entry for each character in the + string, not including the + null-terminator. It is possible when + character display is normalized that + some character cells overlap. + + + @retval EFI_SUCCESS The string was successfully updated. + + @retval EFI_OUT_OF_RESOURCES Unable to allocate an output + buffer for RowInfoArray or Blt. + + @retval EFI_INVALID_PARAMETER The String, or Blt, or Height, or + Width was NULL. 
+ @retval EFI_INVALID_PARAMETER The Blt or PackageList was NULL. + @retval EFI_INVALID_PARAMETER Flags were invalid combination. + @retval EFI_NOT_FOUND The specified PackageList is not in the Database, + or the stringid is not in the specified PackageList. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_STRING_ID_TO_IMAGE)( + IN CONST EFI_HII_FONT_PROTOCOL *This, + IN EFI_HII_OUT_FLAGS Flags, + IN EFI_HII_HANDLE PackageList, + IN EFI_STRING_ID StringId, + IN CONST CHAR8 *Language, + IN CONST EFI_FONT_DISPLAY_INFO *StringInfo OPTIONAL, + IN OUT EFI_IMAGE_OUTPUT **Blt, + IN UINTN BltX, + IN UINTN BltY, + OUT EFI_HII_ROW_INFO **RowInfoArray OPTIONAL, + OUT UINTN *RowInfoArraySize OPTIONAL, + OUT UINTN *ColumnInfoArray OPTIONAL +); + + +/** + + Convert the glyph for a single character into a bitmap. + + @param This A pointer to the EFI_HII_FONT_PROTOCOL instance. + + @param Char The character to retrieve. + + @param StringInfo Points to the string font and color + information or NULL if the string should use + the default system font and color. + + @param Blt This must point to a NULL on entry. A buffer will + be allocated to hold the output and the pointer + updated on exit. It is the caller's responsibility + to free this buffer. + + @param Baseline The number of pixels from the bottom of the bitmap + to the baseline. + + + @retval EFI_SUCCESS The glyph bitmap created. + + @retval EFI_OUT_OF_RESOURCES Unable to allocate the output buffer Blt. + + @retval EFI_WARN_UNKNOWN_GLYPH The glyph was unknown and was + replaced with the glyph for + Unicode character code 0xFFFD. 
+ + @retval EFI_INVALID_PARAMETER Blt is NULL, or Width is NULL, or + Height is NULL + + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_GET_GLYPH)( + IN CONST EFI_HII_FONT_PROTOCOL *This, + IN CONST CHAR16 Char, + IN CONST EFI_FONT_DISPLAY_INFO *StringInfo, + OUT EFI_IMAGE_OUTPUT **Blt, + OUT UINTN *Baseline OPTIONAL +); + +/** + + This function iterates through fonts which match the specified + font, using the specified criteria. If String is non-NULL, then + all of the characters in the string must exist in order for a + candidate font to be returned. + + @param This A pointer to the EFI_HII_FONT_PROTOCOL instance. + + @param FontHandle On entry, points to the font handle returned + by a previous call to GetFontInfo() or NULL + to start with the first font. On return, + points to the returned font handle or points + to NULL if there are no more matching fonts. + + @param StringInfoIn Upon entry, points to the font to return + information about. If NULL, then the information + about the system default font will be returned. + + @param StringInfoOut Upon return, contains the matching font's information. + If NULL, then no information is returned. This buffer + is allocated with a call to the Boot Service AllocatePool(). + It is the caller's responsibility to call the Boot + Service FreePool() when the caller no longer requires + the contents of StringInfoOut. + + @param String Points to the string which will be tested to + determine if all characters are available. If + NULL, then any font is acceptable. + + @retval EFI_SUCCESS Matching font returned successfully. + + @retval EFI_NOT_FOUND No matching font was found. + + @retval EFI_OUT_OF_RESOURCES There were insufficient resources to complete the request. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_GET_FONT_INFO)( + IN CONST EFI_HII_FONT_PROTOCOL *This, + IN OUT EFI_FONT_HANDLE *FontHandle, + IN CONST EFI_FONT_DISPLAY_INFO *StringInfoIn, OPTIONAL + OUT EFI_FONT_DISPLAY_INFO **StringInfoOut, + IN CONST EFI_STRING String OPTIONAL +); + +/// +/// The protocol provides the service to retrieve the font informations. +/// +struct _EFI_HII_FONT_PROTOCOL { + EFI_HII_STRING_TO_IMAGE StringToImage; + EFI_HII_STRING_ID_TO_IMAGE StringIdToImage; + EFI_HII_GET_GLYPH GetGlyph; + EFI_HII_GET_FONT_INFO GetFontInfo; +}; + +extern EFI_GUID gEfiHiiFontProtocolGuid; + + +#endif + diff --git a/src/include/ipxe/efi/Protocol/HiiImage.h b/src/include/ipxe/efi/Protocol/HiiImage.h new file mode 100644 index 00000000..ba934a9f --- /dev/null +++ b/src/include/ipxe/efi/Protocol/HiiImage.h @@ -0,0 +1,358 @@ +/** @file + The file provides services to access to images in the images database. + + Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __HII_IMAGE_H__ +#define __HII_IMAGE_H__ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_HII_IMAGE_PROTOCOL_GUID \ + { 0x31a6406a, 0x6bdf, 0x4e46, { 0xb2, 0xa2, 0xeb, 0xaa, 0x89, 0xc4, 0x9, 0x20 } } + +typedef struct _EFI_HII_IMAGE_PROTOCOL EFI_HII_IMAGE_PROTOCOL; + + +/// +/// Flags in EFI_IMAGE_INPUT +/// +#define EFI_IMAGE_TRANSPARENT 0x00000001 + +/** + + Definition of EFI_IMAGE_INPUT. + + @param Flags Describe image characteristics. If + EFI_IMAGE_TRANSPARENT is set, then the image was + designed for transparent display. + + @param Width Image width, in pixels. + + @param Height Image height, in pixels. + + @param Bitmap A pointer to the actual bitmap, organized left-to-right, + top-to-bottom. The size of the bitmap is + Width*Height*sizeof(EFI_GRAPHICS_OUTPUT_BLT_PIXEL). + + +**/ +typedef struct _EFI_IMAGE_INPUT { + UINT32 Flags; + UINT16 Width; + UINT16 Height; + EFI_GRAPHICS_OUTPUT_BLT_PIXEL *Bitmap; +} EFI_IMAGE_INPUT; + + +/** + + This function adds the image Image to the group of images + owned by PackageList, and returns a new image identifier + (ImageId). + + @param This A pointer to the EFI_HII_IMAGE_PROTOCOL instance. + + @param PackageList Handle of the package list where this image will be added. + + @param ImageId On return, contains the new image id, which is + unique within PackageList. + + @param Image Points to the image. + + @retval EFI_SUCCESS The new image was added + successfully + + @retval EFI_OUT_OF_RESOURCES Could not add the image. + + @retval EFI_INVALID_PARAMETER Image is NULL or ImageId is + NULL. 
+ + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_NEW_IMAGE)( + IN CONST EFI_HII_IMAGE_PROTOCOL *This, + IN EFI_HII_HANDLE PackageList, + OUT EFI_IMAGE_ID *ImageId, + IN CONST EFI_IMAGE_INPUT *Image +); + +/** + + This function retrieves the image specified by ImageId which + is associated with the specified PackageList and copies it + into the buffer specified by Image. If the image specified by + ImageId is not present in the specified PackageList, then + EFI_NOT_FOUND is returned. If the buffer specified by + ImageSize is too small to hold the image, then + EFI_BUFFER_TOO_SMALL will be returned. ImageSize will be + updated to the size of buffer actually required to hold the + image. + + @param This A pointer to the EFI_HII_IMAGE_PROTOCOL instance. + + @param PackageList The package list in the HII database to + search for the specified image. + + @param ImageId The image's id, which is unique within + PackageList. + + @param Image Points to the new image. + + @retval EFI_SUCCESS The image was returned successfully. + + @retval EFI_NOT_FOUND The image specified by ImageId is not + available. Or The specified PackageList is not in the database. + + @retval EFI_INVALID_PARAMETER The Image or Language was NULL. + @retval EFI_OUT_OF_RESOURCES The bitmap could not be retrieved because there was not + enough memory. + + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_GET_IMAGE)( + IN CONST EFI_HII_IMAGE_PROTOCOL *This, + IN EFI_HII_HANDLE PackageList, + IN EFI_IMAGE_ID ImageId, + OUT EFI_IMAGE_INPUT *Image +); + +/** + + This function updates the image specified by ImageId in the + specified PackageListHandle to the image specified by Image. + + + @param This A pointer to the EFI_HII_IMAGE_PROTOCOL instance. + + @param PackageList The package list containing the images. + + @param ImageId The image id, which is unique within PackageList. + + @param Image Points to the image. + + @retval EFI_SUCCESS The image was successfully updated. 
+ + @retval EFI_NOT_FOUND The image specified by ImageId is not in the database. + The specified PackageList is not in the database. + + @retval EFI_INVALID_PARAMETER The Image or Language was NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_SET_IMAGE)( + IN CONST EFI_HII_IMAGE_PROTOCOL *This, + IN EFI_HII_HANDLE PackageList, + IN EFI_IMAGE_ID ImageId, + IN CONST EFI_IMAGE_INPUT *Image +); + + +/// +/// EFI_HII_DRAW_FLAGS describes how the image is to be drawn. +/// These flags are defined as EFI_HII_DRAW_FLAG_*** +/// +typedef UINT32 EFI_HII_DRAW_FLAGS; + +#define EFI_HII_DRAW_FLAG_CLIP 0x00000001 +#define EFI_HII_DRAW_FLAG_TRANSPARENT 0x00000030 +#define EFI_HII_DRAW_FLAG_DEFAULT 0x00000000 +#define EFI_HII_DRAW_FLAG_FORCE_TRANS 0x00000010 +#define EFI_HII_DRAW_FLAG_FORCE_OPAQUE 0x00000020 +#define EFI_HII_DIRECT_TO_SCREEN 0x00000080 + +/** + + Definition of EFI_IMAGE_OUTPUT. + + @param Width Width of the output image. + + @param Height Height of the output image. + + @param Bitmap Points to the output bitmap. + + @param Screen Points to the EFI_GRAPHICS_OUTPUT_PROTOCOL which + describes the screen on which to draw the + specified image. + +**/ +typedef struct _EFI_IMAGE_OUTPUT { + UINT16 Width; + UINT16 Height; + union { + EFI_GRAPHICS_OUTPUT_BLT_PIXEL *Bitmap; + EFI_GRAPHICS_OUTPUT_PROTOCOL *Screen; + } Image; +} EFI_IMAGE_OUTPUT; + + +/** + + This function renders an image to a bitmap or the screen using + the specified color and options. It draws the image on an + existing bitmap, allocates a new bitmap or uses the screen. The + images can be clipped. If EFI_HII_DRAW_FLAG_CLIP is set, then + all pixels drawn outside the bounding box specified by Width and + Height are ignored. If EFI_HII_DRAW_FLAG_TRANSPARENT is set, + then all 'off' pixels in the images drawn will use the + pixel value from Blt. This flag cannot be used if Blt is NULL + upon entry. 
If EFI_HII_DIRECT_TO_SCREEN is set, then the image + will be written directly to the output device specified by + Screen. Otherwise the image will be rendered to the bitmap + specified by Bitmap. + + + @param This A pointer to the EFI_HII_IMAGE_PROTOCOL instance. + + @param Flags Describes how the image is to be drawn. + EFI_HII_DRAW_FLAGS is defined in Related + Definitions, below. + + @param Image Points to the image to be displayed. + + @param Blt If this points to a non-NULL on entry, this points + to the image, which is Width pixels wide and + Height pixels high. The image will be drawn onto + this image and EFI_HII_DRAW_FLAG_CLIP is implied. + If this points to a NULL on entry, then a buffer + will be allocated to hold the generated image and + the pointer updated on exit. It is the caller's + responsibility to free this buffer. + + @param BltX, BltY Specifies the offset from the left and top + edge of the image of the first pixel in + the image. + + @retval EFI_SUCCESS The image was successfully updated. + + @retval EFI_OUT_OF_RESOURCES Unable to allocate an output + buffer for RowInfoArray or Blt. + + @retval EFI_INVALID_PARAMETER The Image or Blt or Height or + Width was NULL. + + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_DRAW_IMAGE)( + IN CONST EFI_HII_IMAGE_PROTOCOL *This, + IN EFI_HII_DRAW_FLAGS Flags, + IN CONST EFI_IMAGE_INPUT *Image, + IN OUT EFI_IMAGE_OUTPUT **Blt, + IN UINTN BltX, + IN UINTN BltY +); + +/** + + This function renders an image as a bitmap or to the screen and + can clip the image. The bitmap is either supplied by the caller + or else is allocated by the function. The images can be drawn + transparently or opaquely. If EFI_HII_DRAW_FLAG_CLIP is set, + then all pixels drawn outside the bounding box specified by + Width and Height are ignored. If EFI_HII_DRAW_FLAG_TRANSPARENT + is set, then all "off" pixels in the character's glyph will + use the pixel value from Blt. This flag cannot be used if Blt + is NULL upon entry. 
If EFI_HII_DIRECT_TO_SCREEN is set, then + the image will be written directly to the output device + specified by Screen. Otherwise the image will be rendered to + the bitmap specified by Bitmap. + This function renders an image to a bitmap or the screen using + the specified color and options. It draws the image on an + existing bitmap, allocates a new bitmap or uses the screen. The + images can be clipped. If EFI_HII_DRAW_FLAG_CLIP is set, then + all pixels drawn outside the bounding box specified by Width and + Height are ignored. The EFI_HII_DRAW_FLAG_TRANSPARENT flag + determines whether the image will be drawn transparent or + opaque. If EFI_HII_DRAW_FLAG_FORCE_TRANS is set, then the image + will be drawn so that all 'off' pixels in the image will + be drawn using the pixel value from Blt and all other pixels + will be copied. If EFI_HII_DRAW_FLAG_FORCE_OPAQUE is set, then + the image's pixels will be copied directly to the + destination. If EFI_HII_DRAW_FLAG_DEFAULT is set, then the image + will be drawn transparently or opaque, depending on the + image's transparency setting (see EFI_IMAGE_TRANSPARENT). + Images cannot be drawn transparently if Blt is NULL. If + EFI_HII_DIRECT_TO_SCREEN is set, then the image will be written + directly to the output device specified by Screen. Otherwise the + image will be rendered to the bitmap specified by Bitmap. + + @param This A pointer to the EFI_HII_IMAGE_PROTOCOL instance. + + @param Flags Describes how the image is to be drawn. + + @param PackageList The package list in the HII database to + search for the specified image. + + @param ImageId The image's id, which is unique within PackageList. + + @param Blt If this points to a non-NULL on entry, this points + to the image, which is Width pixels wide and + Height pixels high. The image will be drawn onto + this image and EFI_HII_DRAW_FLAG_CLIP is implied. 
+ If this points to a NULL on entry, then a buffer + will be allocated to hold the generated image and + the pointer updated on exit. It is the caller's + responsibility to free this buffer. + + @param BltX, BltY Specifies the offset from the left and top + edge of the output image of the first + pixel in the image. + + @retval EFI_SUCCESS The image was successfully updated. + + @retval EFI_OUT_OF_RESOURCES Unable to allocate an output + buffer for RowInfoArray or Blt. + + @retval EFI_NOT_FOUND The image specified by ImageId is not in the database. + Or The specified PackageList is not in the database. + + @retval EFI_INVALID_PARAMETER The Blt was NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_HII_DRAW_IMAGE_ID)( +IN CONST EFI_HII_IMAGE_PROTOCOL *This, +IN EFI_HII_DRAW_FLAGS Flags, +IN EFI_HII_HANDLE PackageList, +IN EFI_IMAGE_ID ImageId, +IN OUT EFI_IMAGE_OUTPUT **Blt, +IN UINTN BltX, +IN UINTN BltY +); + + +/// +/// Services to access to images in the images database. +/// +struct _EFI_HII_IMAGE_PROTOCOL { + EFI_HII_NEW_IMAGE NewImage; + EFI_HII_GET_IMAGE GetImage; + EFI_HII_SET_IMAGE SetImage; + EFI_HII_DRAW_IMAGE DrawImage; + EFI_HII_DRAW_IMAGE_ID DrawImageId; +}; + +extern EFI_GUID gEfiHiiImageProtocolGuid; + +#endif + + diff --git a/src/include/ipxe/efi/Protocol/Ip4.h b/src/include/ipxe/efi/Protocol/Ip4.h new file mode 100644 index 00000000..f174c0cf --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Ip4.h @@ -0,0 +1,614 @@ +/** @file + This file defines the EFI IPv4 (Internet Protocol version 4) + Protocol interface. It is split into the following three main + sections: + - EFI IPv4 Service Binding Protocol + - EFI IPv4 Variable (deprecated in UEFI 2.4B) + - EFI IPv4 Protocol. + The EFI IPv4 Protocol provides basic network IPv4 packet I/O services, + which includes support for a subset of the Internet Control Message + Protocol (ICMP) and may include support for the Internet Group Management + Protocol (IGMP). + +Copyright (c) 2006 - 2014, Intel Corporation. 
All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in UEFI Specification 2.0. + +**/ + +#ifndef __EFI_IP4_PROTOCOL_H__ +#define __EFI_IP4_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#include <ipxe/efi/Protocol/ManagedNetwork.h> + +#define EFI_IP4_SERVICE_BINDING_PROTOCOL_GUID \ + { \ + 0xc51711e7, 0xb4bf, 0x404a, {0xbf, 0xb8, 0x0a, 0x04, 0x8e, 0xf1, 0xff, 0xe4 } \ + } + +#define EFI_IP4_PROTOCOL_GUID \ + { \ + 0x41d94cd2, 0x35b6, 0x455a, {0x82, 0x58, 0xd4, 0xe5, 0x13, 0x34, 0xaa, 0xdd } \ + } + +typedef struct _EFI_IP4_PROTOCOL EFI_IP4_PROTOCOL; + +/// +/// EFI_IP4_ADDRESS_PAIR is deprecated in the UEFI 2.4B and should not be used any more. +/// The definition in here is only present to provide backwards compatibility. +/// +typedef struct { + EFI_HANDLE InstanceHandle; + EFI_IPv4_ADDRESS Ip4Address; + EFI_IPv4_ADDRESS SubnetMask; +} EFI_IP4_ADDRESS_PAIR; + +/// +/// EFI_IP4_VARIABLE_DATA is deprecated in the UEFI 2.4B and should not be used any more. +/// The definition in here is only present to provide backwards compatibility. +/// +typedef struct { + EFI_HANDLE DriverHandle; + UINT32 AddressCount; + EFI_IP4_ADDRESS_PAIR AddressPairs[1]; +} EFI_IP4_VARIABLE_DATA; + +typedef struct { + /// + /// The default IPv4 protocol packets to send and receive. Ignored + /// when AcceptPromiscuous is TRUE. + /// + UINT8 DefaultProtocol; + /// + /// Set to TRUE to receive all IPv4 packets that get through the receive filters. + /// Set to FALSE to receive only the DefaultProtocol IPv4 + /// packets that get through the receive filters. 
+ /// + BOOLEAN AcceptAnyProtocol; + /// + /// Set to TRUE to receive ICMP error report packets. Ignored when + /// AcceptPromiscuous or AcceptAnyProtocol is TRUE. + /// + BOOLEAN AcceptIcmpErrors; + /// + /// Set to TRUE to receive broadcast IPv4 packets. Ignored when + /// AcceptPromiscuous is TRUE. + /// Set to FALSE to stop receiving broadcast IPv4 packets. + /// + BOOLEAN AcceptBroadcast; + /// + /// Set to TRUE to receive all IPv4 packets that are sent to any + /// hardware address or any protocol address. + /// Set to FALSE to stop receiving all promiscuous IPv4 packets + /// + BOOLEAN AcceptPromiscuous; + /// + /// Set to TRUE to use the default IPv4 address and default routing table. + /// + BOOLEAN UseDefaultAddress; + /// + /// The station IPv4 address that will be assigned to this EFI IPv4Protocol instance. + /// + EFI_IPv4_ADDRESS StationAddress; + /// + /// The subnet address mask that is associated with the station address. + /// + EFI_IPv4_ADDRESS SubnetMask; + /// + /// TypeOfService field in transmitted IPv4 packets. + /// + UINT8 TypeOfService; + /// + /// TimeToLive field in transmitted IPv4 packets. + /// + UINT8 TimeToLive; + /// + /// State of the DoNotFragment bit in transmitted IPv4 packets. + /// + BOOLEAN DoNotFragment; + /// + /// Set to TRUE to send and receive unformatted packets. The other + /// IPv4 receive filters are still applied. Fragmentation is disabled for RawData mode. + /// + BOOLEAN RawData; + /// + /// The timer timeout value (number of microseconds) for the + /// receive timeout event to be associated with each assembled + /// packet. Zero means do not drop assembled packets. + /// + UINT32 ReceiveTimeout; + /// + /// The timer timeout value (number of microseconds) for the + /// transmit timeout event to be associated with each outgoing + /// packet. Zero means do not drop outgoing packets. 
+ /// + UINT32 TransmitTimeout; +} EFI_IP4_CONFIG_DATA; + + +typedef struct { + EFI_IPv4_ADDRESS SubnetAddress; + EFI_IPv4_ADDRESS SubnetMask; + EFI_IPv4_ADDRESS GatewayAddress; +} EFI_IP4_ROUTE_TABLE; + +typedef struct { + UINT8 Type; + UINT8 Code; +} EFI_IP4_ICMP_TYPE; + +typedef struct { + /// + /// Set to TRUE after this EFI IPv4 Protocol instance has been successfully configured. + /// + BOOLEAN IsStarted; + /// + /// The maximum packet size, in bytes, of the packet which the upper layer driver could feed. + /// + UINT32 MaxPacketSize; + /// + /// Current configuration settings. + /// + EFI_IP4_CONFIG_DATA ConfigData; + /// + /// Set to TRUE when the EFI IPv4 Protocol instance has a station address and subnet mask. + /// + BOOLEAN IsConfigured; + /// + /// Number of joined multicast groups. + /// + UINT32 GroupCount; + /// + /// List of joined multicast group addresses. + /// + EFI_IPv4_ADDRESS *GroupTable; + /// + /// Number of entries in the routing table. + /// + UINT32 RouteCount; + /// + /// Routing table entries. + /// + EFI_IP4_ROUTE_TABLE *RouteTable; + /// + /// Number of entries in the supported ICMP types list. 
+ /// + UINT32 IcmpTypeCount; + /// + /// Array of ICMP types and codes that are supported by this EFI IPv4 Protocol driver + /// + EFI_IP4_ICMP_TYPE *IcmpTypeList; +} EFI_IP4_MODE_DATA; + +#pragma pack(1) + +typedef struct { + UINT8 HeaderLength:4; + UINT8 Version:4; + UINT8 TypeOfService; + UINT16 TotalLength; + UINT16 Identification; + UINT16 Fragmentation; + UINT8 TimeToLive; + UINT8 Protocol; + UINT16 Checksum; + EFI_IPv4_ADDRESS SourceAddress; + EFI_IPv4_ADDRESS DestinationAddress; +} EFI_IP4_HEADER; +#pragma pack() + + +typedef struct { + UINT32 FragmentLength; + VOID *FragmentBuffer; +} EFI_IP4_FRAGMENT_DATA; + + +typedef struct { + EFI_TIME TimeStamp; + EFI_EVENT RecycleSignal; + UINT32 HeaderLength; + EFI_IP4_HEADER *Header; + UINT32 OptionsLength; + VOID *Options; + UINT32 DataLength; + UINT32 FragmentCount; + EFI_IP4_FRAGMENT_DATA FragmentTable[1]; +} EFI_IP4_RECEIVE_DATA; + + +typedef struct { + EFI_IPv4_ADDRESS SourceAddress; + EFI_IPv4_ADDRESS GatewayAddress; + UINT8 Protocol; + UINT8 TypeOfService; + UINT8 TimeToLive; + BOOLEAN DoNotFragment; +} EFI_IP4_OVERRIDE_DATA; + +typedef struct { + EFI_IPv4_ADDRESS DestinationAddress; + EFI_IP4_OVERRIDE_DATA *OverrideData; //OPTIONAL + UINT32 OptionsLength; //OPTIONAL + VOID *OptionsBuffer; //OPTIONAL + UINT32 TotalDataLength; + UINT32 FragmentCount; + EFI_IP4_FRAGMENT_DATA FragmentTable[1]; +} EFI_IP4_TRANSMIT_DATA; + +typedef struct { + /// + /// This Event will be signaled after the Status field is updated + /// by the EFI IPv4 Protocol driver. The type of Event must be + /// EFI_NOTIFY_SIGNAL. The Task Priority Level (TPL) of + /// Event must be lower than or equal to TPL_CALLBACK. + /// + EFI_EVENT Event; + /// + /// The status that is returned to the caller at the end of the operation + /// to indicate whether this operation completed successfully. + /// + EFI_STATUS Status; + union { + /// + /// When this token is used for receiving, RxData is a pointer to the EFI_IP4_RECEIVE_DATA. 
+ /// + EFI_IP4_RECEIVE_DATA *RxData; + /// + /// When this token is used for transmitting, TxData is a pointer to the EFI_IP4_TRANSMIT_DATA. + /// + EFI_IP4_TRANSMIT_DATA *TxData; + } Packet; +} EFI_IP4_COMPLETION_TOKEN; + +/** + Gets the current operational settings for this instance of the EFI IPv4 Protocol driver. + + The GetModeData() function returns the current operational mode data for this + driver instance. The data fields in EFI_IP4_MODE_DATA are read only. This + function is used optionally to retrieve the operational mode data of underlying + networks or drivers. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + @param Ip4ModeData The pointer to the EFI IPv4 Protocol mode data structure. + @param MnpConfigData The pointer to the managed network configuration data structure. + @param SnpModeData The pointer to the simple network mode data structure. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_OUT_OF_RESOURCES The required mode data could not be allocated. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_GET_MODE_DATA)( + IN CONST EFI_IP4_PROTOCOL *This, + OUT EFI_IP4_MODE_DATA *Ip4ModeData OPTIONAL, + OUT EFI_MANAGED_NETWORK_CONFIG_DATA *MnpConfigData OPTIONAL, + OUT EFI_SIMPLE_NETWORK_MODE *SnpModeData OPTIONAL + ); + +/** + Assigns an IPv4 address and subnet mask to this EFI IPv4 Protocol driver instance. + + The Configure() function is used to set, change, or reset the operational + parameters and filter settings for this EFI IPv4 Protocol instance. Until these + parameters have been set, no network traffic can be sent or received by this + instance. Once the parameters have been reset (by calling this function with + IpConfigData set to NULL), no more traffic can be sent or received until these + parameters have been set again. 
Each EFI IPv4 Protocol instance can be started + and stopped independently of each other by enabling or disabling their receive + filter settings with the Configure() function. + + When IpConfigData.UseDefaultAddress is set to FALSE, the new station address will + be appended as an alias address into the addresses list in the EFI IPv4 Protocol + driver. While set to TRUE, Configure() will trigger the EFI_IP4_CONFIG_PROTOCOL + to retrieve the default IPv4 address if it is not available yet. Clients could + frequently call GetModeData() to check the status to ensure that the default IPv4 + address is ready. + + If operational parameters are reset or changed, any pending transmit and receive + requests will be cancelled. Their completion token status will be set to EFI_ABORTED + and their events will be signaled. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + @param IpConfigData The pointer to the EFI IPv4 Protocol configuration data structure. + + @retval EFI_SUCCESS The driver instance was successfully opened. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + This is NULL. + IpConfigData.StationAddress is not a unicast IPv4 address. + IpConfigData.SubnetMask is not a valid IPv4 subnet + @retval EFI_UNSUPPORTED One or more of the following conditions is TRUE: + A configuration protocol (DHCP, BOOTP, RARP, etc.) could + not be located when clients choose to use the default IPv4 + address. This EFI IPv4 Protocol implementation does not + support this requested filter or timeout setting. + @retval EFI_OUT_OF_RESOURCES The EFI IPv4 Protocol driver instance data could not be allocated. + @retval EFI_ALREADY_STARTED The interface is already open and must be stopped before the + IPv4 address or subnet mask can be changed. The interface must + also be stopped when switching to/from raw packet mode. 
+ @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. The EFI IPv4 + Protocol driver instance is not opened. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_CONFIGURE)( + IN EFI_IP4_PROTOCOL *This, + IN EFI_IP4_CONFIG_DATA *IpConfigData OPTIONAL + ); + +/** + Joins and leaves multicast groups. + + The Groups() function is used to join and leave multicast group sessions. Joining + a group will enable reception of matching multicast packets. Leaving a group will + disable the multicast packet reception. + + If JoinFlag is FALSE and GroupAddress is NULL, all joined groups will be left. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + @param JoinFlag Set to TRUE to join the multicast group session and FALSE to leave. + @param GroupAddress The pointer to the IPv4 multicast address. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER One or more of the following is TRUE: + - This is NULL. + - JoinFlag is TRUE and GroupAddress is NULL. + - GroupAddress is not NULL and *GroupAddress is + not a multicast IPv4 address. + @retval EFI_NOT_STARTED This instance has not been started. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_OUT_OF_RESOURCES System resources could not be allocated. + @retval EFI_UNSUPPORTED This EFI IPv4 Protocol implementation does not support multicast groups. + @retval EFI_ALREADY_STARTED The group address is already in the group table (when + JoinFlag is TRUE). + @retval EFI_NOT_FOUND The group address is not in the group table (when JoinFlag is FALSE). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_GROUPS)( + IN EFI_IP4_PROTOCOL *This, + IN BOOLEAN JoinFlag, + IN EFI_IPv4_ADDRESS *GroupAddress OPTIONAL + ); + +/** + Adds and deletes routing table entries. 
+ + The Routes() function adds a route to or deletes a route from the routing table. + + Routes are determined by comparing the SubnetAddress with the destination IPv4 + address arithmetically AND-ed with the SubnetMask. The gateway address must be + on the same subnet as the configured station address. + + The default route is added with SubnetAddress and SubnetMask both set to 0.0.0.0. + The default route matches all destination IPv4 addresses that do not match any + other routes. + + A GatewayAddress that is zero is a nonroute. Packets are sent to the destination + IP address if it can be found in the ARP cache or on the local subnet. One automatic + nonroute entry will be inserted into the routing table for outgoing packets that + are addressed to a local subnet (gateway address of 0.0.0.0). + + Each EFI IPv4 Protocol instance has its own independent routing table. Those EFI + IPv4 Protocol instances that use the default IPv4 address will also have copies + of the routing table that was provided by the EFI_IP4_CONFIG_PROTOCOL, and these + copies will be updated whenever the EFI IPv4 Protocol driver reconfigures its + instances. As a result, client modification to the routing table will be lost. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + @param DeleteRoute Set to TRUE to delete this route from the routing table. Set to + FALSE to add this route to the routing table. SubnetAddress + and SubnetMask are used as the key to each route entry. + @param SubnetAddress The address of the subnet that needs to be routed. + @param SubnetMask The subnet mask of SubnetAddress. + @param GatewayAddress The unicast gateway IPv4 address for this route. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_NOT_STARTED The driver instance has not been started. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. 
+ @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE:
+ - This is NULL.
+ - SubnetAddress is NULL.
+ - SubnetMask is NULL.
+ - GatewayAddress is NULL.
+ - *SubnetAddress is not a valid subnet address.
+ - *SubnetMask is not a valid subnet mask.
+ - *GatewayAddress is not a valid unicast IPv4 address.
+ @retval EFI_OUT_OF_RESOURCES Could not add the entry to the routing table.
+ @retval EFI_NOT_FOUND This route is not in the routing table (when DeleteRoute is TRUE).
+ @retval EFI_ACCESS_DENIED The route is already defined in the routing table (when
+ DeleteRoute is FALSE).
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_IP4_ROUTES)(
+ IN EFI_IP4_PROTOCOL *This,
+ IN BOOLEAN DeleteRoute,
+ IN EFI_IPv4_ADDRESS *SubnetAddress,
+ IN EFI_IPv4_ADDRESS *SubnetMask,
+ IN EFI_IPv4_ADDRESS *GatewayAddress
+ );
+
+/**
+ Places outgoing data packets into the transmit queue.
+
+ The Transmit() function places a sending request in the transmit queue of this
+ EFI IPv4 Protocol instance. Whenever the packet in the token is sent out or some
+ errors occur, the event in the token will be signaled and the status is updated.
+
+ @param This The pointer to the EFI_IP4_PROTOCOL instance.
+ @param Token The pointer to the transmit token.
+
+ @retval EFI_SUCCESS The data has been queued for transmission.
+ @retval EFI_NOT_STARTED This instance has not been started.
+ @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP,
+ RARP, etc.) is not finished yet.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_ACCESS_DENIED The transmit completion token with the same Token.Event
+ was already in the transmit queue.
+ @retval EFI_NOT_READY The completion token could not be queued because the transmit
+ queue is full.
+ @retval EFI_NOT_FOUND No route is found to the destination address.
+ @retval EFI_OUT_OF_RESOURCES Could not queue the transmit data.
+ @retval EFI_BUFFER_TOO_SMALL Token.Packet.TxData.TotalDataLength is too + short to transmit. + @retval EFI_BAD_BUFFER_SIZE The length of the IPv4 header + option length + total data length is + greater than MTU (or greater than the maximum packet size if + Token.Packet.TxData.OverrideData. + DoNotFragment is TRUE.) + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_TRANSMIT)( + IN EFI_IP4_PROTOCOL *This, + IN EFI_IP4_COMPLETION_TOKEN *Token + ); + +/** + Places a receiving request into the receiving queue. + + The Receive() function places a completion token into the receive packet queue. + This function is always asynchronous. + + The Token.Event field in the completion token must be filled in by the caller + and cannot be NULL. When the receive operation completes, the EFI IPv4 Protocol + driver updates the Token.Status and Token.Packet.RxData fields and the Token.Event + is signaled. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + @param Token The pointer to a token that is associated with the receive data descriptor. + + @retval EFI_SUCCESS The receive completion token was cached. + @retval EFI_NOT_STARTED This EFI IPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, RARP, etc.) + is not finished yet. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - Token is NULL. + - Token.Event is NULL. + @retval EFI_OUT_OF_RESOURCES The receive completion token could not be queued due to a lack of system + resources (usually memory). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + The EFI IPv4 Protocol instance has been reset to startup defaults. + @retval EFI_ACCESS_DENIED The receive completion token with the same Token.Event was already + in the receive queue. + @retval EFI_NOT_READY The receive request could not be queued because the receive queue is full. 
+ @retval EFI_ICMP_ERROR An ICMP error packet was received. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_RECEIVE)( + IN EFI_IP4_PROTOCOL *This, + IN EFI_IP4_COMPLETION_TOKEN *Token + ); + +/** + Abort an asynchronous transmit or receive request. + + The Cancel() function is used to abort a pending transmit or receive request. + If the token is in the transmit or receive request queues, after calling this + function, Token->Status will be set to EFI_ABORTED and then Token->Event will + be signaled. If the token is not in one of the queues, which usually means the + asynchronous operation has completed, this function will not signal the token + and EFI_NOT_FOUND is returned. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + @param Token The pointer to a token that has been issued by + EFI_IP4_PROTOCOL.Transmit() or + EFI_IP4_PROTOCOL.Receive(). If NULL, all pending + tokens are aborted. Type EFI_IP4_COMPLETION_TOKEN is + defined in EFI_IP4_PROTOCOL.Transmit(). + + @retval EFI_SUCCESS The asynchronous I/O request was aborted and + Token->Event was signaled. When Token is NULL, all + pending requests were aborted and their events were signaled. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_STARTED This instance has not been started. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_NOT_FOUND When Token is not NULL, the asynchronous I/O request was + not found in the transmit or receive queue. It has either completed + or was not issued by Transmit() and Receive(). + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_CANCEL)( + IN EFI_IP4_PROTOCOL *This, + IN EFI_IP4_COMPLETION_TOKEN *Token OPTIONAL + ); + +/** + Polls for incoming data packets and processes outgoing data packets. + + The Poll() function polls for incoming data packets and processes outgoing data + packets. 
Network drivers and applications can call the EFI_IP4_PROTOCOL.Poll() + function to increase the rate that data packets are moved between the communications + device and the transmit and receive queues. + + In some systems the periodic timer event may not poll the underlying communications + device fast enough to transmit and/or receive all data packets without missing + incoming packets or dropping outgoing packets. Drivers and applications that are + experiencing packet loss should try calling the EFI_IP4_PROTOCOL.Poll() function + more often. + + @param This The pointer to the EFI_IP4_PROTOCOL instance. + + @retval EFI_SUCCESS Incoming or outgoing data was processed. + @retval EFI_NOT_STARTED This EFI IPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_NOT_READY No incoming or outgoing data is processed. + @retval EFI_TIMEOUT Data was dropped out of the transmit and/or receive queue. + Consider increasing the polling rate. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_POLL)( + IN EFI_IP4_PROTOCOL *This + ); + +/// +/// The EFI IPv4 Protocol implements a simple packet-oriented interface that can be +/// used by drivers, daemons, and applications to transmit and receive network packets. 
+/// +struct _EFI_IP4_PROTOCOL { + EFI_IP4_GET_MODE_DATA GetModeData; + EFI_IP4_CONFIGURE Configure; + EFI_IP4_GROUPS Groups; + EFI_IP4_ROUTES Routes; + EFI_IP4_TRANSMIT Transmit; + EFI_IP4_RECEIVE Receive; + EFI_IP4_CANCEL Cancel; + EFI_IP4_POLL Poll; +}; + +extern EFI_GUID gEfiIp4ServiceBindingProtocolGuid; +extern EFI_GUID gEfiIp4ProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Ip4Config.h b/src/include/ipxe/efi/Protocol/Ip4Config.h new file mode 100644 index 00000000..227ae039 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Ip4Config.h @@ -0,0 +1,184 @@ +/** @file + This file provides a definition of the EFI IPv4 Configuration + Protocol. + +Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in UEFI Specification 2.0. + +**/ +#ifndef __EFI_IP4CONFIG_PROTOCOL_H__ +#define __EFI_IP4CONFIG_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_IP4_CONFIG_PROTOCOL_GUID \ + { \ + 0x3b95aa31, 0x3793, 0x434b, {0x86, 0x67, 0xc8, 0x07, 0x08, 0x92, 0xe0, 0x5e } \ + } + +typedef struct _EFI_IP4_CONFIG_PROTOCOL EFI_IP4_CONFIG_PROTOCOL; + +#define IP4_CONFIG_VARIABLE_ATTRIBUTES \ + (EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS) + +/// +/// EFI_IP4_IPCONFIG_DATA contains the minimum IPv4 configuration data +/// that is needed to start basic network communication. The StationAddress +/// and SubnetMask must be a valid unicast IP address and subnet mask. +/// If RouteTableSize is not zero, then RouteTable contains a properly +/// formatted routing table for the StationAddress/SubnetMask, with the +/// last entry in the table being the default route. +/// +typedef struct { + /// + /// Default station IP address, stored in network byte order. + /// + EFI_IPv4_ADDRESS StationAddress; + /// + /// Default subnet mask, stored in network byte order. + /// + EFI_IPv4_ADDRESS SubnetMask; + /// + /// Number of entries in the following RouteTable. May be zero. + /// + UINT32 RouteTableSize; + /// + /// Default routing table data (stored in network byte order). + /// Ignored if RouteTableSize is zero. + /// + EFI_IP4_ROUTE_TABLE *RouteTable; +} EFI_IP4_IPCONFIG_DATA; + + +/** + Starts running the configuration policy for the EFI IPv4 Protocol driver. 
+ + The Start() function is called to determine and to begin the platform + configuration policy by the EFI IPv4 Protocol driver. This determination may + be as simple as returning EFI_UNSUPPORTED if there is no EFI IPv4 Protocol + driver configuration policy. It may be as involved as loading some defaults + from nonvolatile storage, downloading dynamic data from a DHCP server, and + checking permissions with a site policy server. + Starting the configuration policy is just the beginning. It may finish almost + instantly or it may take several minutes before it fails to retrieve configuration + information from one or more servers. Once the policy is started, drivers + should use the DoneEvent parameter to determine when the configuration policy + has completed. EFI_IP4_CONFIG_PROTOCOL.GetData() must then be called to + determine if the configuration succeeded or failed. + Until the configuration completes successfully, EFI IPv4 Protocol driver instances + that are attempting to use default configurations must return EFI_NO_MAPPING. + Once the configuration is complete, the EFI IPv4 Configuration Protocol driver + signals DoneEvent. The configuration may need to be updated in the future. + Note that in this case the EFI IPv4 Configuration Protocol driver must signal + ReconfigEvent, and all EFI IPv4 Protocol driver instances that are using default + configurations must return EFI_NO_MAPPING until the configuration policy has + been rerun. + + @param This The pointer to the EFI_IP4_CONFIG_PROTOCOL instance. + @param DoneEvent Event that will be signaled when the EFI IPv4 + Protocol driver configuration policy completes + execution. This event must be of type EVT_NOTIFY_SIGNAL. + @param ReconfigEvent Event that will be signaled when the EFI IPv4 + Protocol driver configuration needs to be updated. + This event must be of type EVT_NOTIFY_SIGNAL. + + @retval EFI_SUCCESS The configuration policy for the EFI IPv4 Protocol + driver is now running. 
+ @retval EFI_INVALID_PARAMETER One or more of the following parameters is NULL: + This + DoneEvent + ReconfigEvent + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. + @retval EFI_ALREADY_STARTED The configuration policy for the EFI IPv4 Protocol + driver was already started. + @retval EFI_DEVICE_ERROR An unexpected system error or network error occurred. + @retval EFI_UNSUPPORTED This interface does not support the EFI IPv4 Protocol + driver configuration. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_CONFIG_START)( + IN EFI_IP4_CONFIG_PROTOCOL *This, + IN EFI_EVENT DoneEvent, + IN EFI_EVENT ReconfigEvent + ); + +/** + Stops running the configuration policy for the EFI IPv4 Protocol driver. + + The Stop() function stops the configuration policy for the EFI IPv4 Protocol driver. + All configuration data will be lost after calling Stop(). + + @param This The pointer to the EFI_IP4_CONFIG_PROTOCOL instance. + + @retval EFI_SUCCESS The configuration policy for the EFI IPv4 Protocol + driver has been stopped. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_STARTED The configuration policy for the EFI IPv4 Protocol + driver was not started. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_CONFIG_STOP)( + IN EFI_IP4_CONFIG_PROTOCOL *This + ); + +/** + Returns the default configuration data (if any) for the EFI IPv4 Protocol driver. + + The GetData() function returns the current configuration data for the EFI IPv4 + Protocol driver after the configuration policy has completed. + + @param This The pointer to the EFI_IP4_CONFIG_PROTOCOL instance. + @param IpConfigDataSize On input, the size of the IpConfigData buffer. + On output, the count of bytes that were written + into the IpConfigData buffer. + @param IpConfigData The pointer to the EFI IPv4 Configuration Protocol + driver configuration data structure. + Type EFI_IP4_IPCONFIG_DATA is defined in + "Related Definitions" below. 
+ + @retval EFI_SUCCESS The EFI IPv4 Protocol driver configuration has been returned. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_STARTED The configuration policy for the EFI IPv4 Protocol + driver is not running. + @retval EFI_NOT_READY EFI IPv4 Protocol driver configuration is still running. + @retval EFI_ABORTED EFI IPv4 Protocol driver configuration could not complete. + @retval EFI_BUFFER_TOO_SMALL *IpConfigDataSize is smaller than the configuration + data buffer or IpConfigData is NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_IP4_CONFIG_GET_DATA)( + IN EFI_IP4_CONFIG_PROTOCOL *This, + IN OUT UINTN *IpConfigDataSize, + OUT EFI_IP4_IPCONFIG_DATA *IpConfigData OPTIONAL + ); + +/// +/// The EFI_IP4_CONFIG_PROTOCOL driver performs platform-dependent and policy-dependent +/// configurations for the EFI IPv4 Protocol driver. +/// +struct _EFI_IP4_CONFIG_PROTOCOL { + EFI_IP4_CONFIG_START Start; + EFI_IP4_CONFIG_STOP Stop; + EFI_IP4_CONFIG_GET_DATA GetData; +}; + +extern EFI_GUID gEfiIp4ConfigProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/LoadFile2.h b/src/include/ipxe/efi/Protocol/LoadFile2.h new file mode 100644 index 00000000..6cb26fff --- /dev/null +++ b/src/include/ipxe/efi/Protocol/LoadFile2.h @@ -0,0 +1,87 @@ +/** @file + Load File protocol as defined in the UEFI 2.0 specification. + + Load file protocol exists to supports the addition of new boot devices, + and to support booting from devices that do not map well to file system. + Network boot is done via a LoadFile protocol. + + UEFI 2.0 can boot from any device that produces a LoadFile protocol. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __EFI_LOAD_FILE2_PROTOCOL_H__
+#define __EFI_LOAD_FILE2_PROTOCOL_H__
+
+FILE_LICENCE ( BSD3 );
+
+#define EFI_LOAD_FILE2_PROTOCOL_GUID \
+ { \
+ 0x4006c0c1, 0xfcb3, 0x403e, {0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d } \
+ }
+
+///
+/// Protocol Guid defined by UEFI2.1.
+///
+#define LOAD_FILE2_PROTOCOL EFI_LOAD_FILE2_PROTOCOL_GUID
+
+typedef struct _EFI_LOAD_FILE2_PROTOCOL EFI_LOAD_FILE2_PROTOCOL;
+
+
+/**
+ Causes the driver to load a specified file.
+
+ @param This Protocol instance pointer.
+ @param FilePath The device specific path of the file to load.
+ @param BootPolicy Should always be FALSE.
+ @param BufferSize On input the size of Buffer in bytes. On output with a return
+ code of EFI_SUCCESS, the amount of data transferred to
+ Buffer. On output with a return code of EFI_BUFFER_TOO_SMALL,
+ the size of Buffer required to retrieve the requested file.
+ @param Buffer The memory buffer to transfer the file to. If Buffer is NULL,
+ then the size of the requested file is returned in
+ BufferSize.
+
+ @retval EFI_SUCCESS The file was loaded.
+ @retval EFI_UNSUPPORTED BootPolicy is TRUE.
+ @retval EFI_INVALID_PARAMETER FilePath is not a valid device path, or
+ BufferSize is NULL.
+ @retval EFI_NO_MEDIA No medium was present to load the file.
+ @retval EFI_DEVICE_ERROR The file was not loaded due to a device error.
+ @retval EFI_NO_RESPONSE The remote system did not respond.
+ @retval EFI_NOT_FOUND The file was not found.
+ @retval EFI_ABORTED The file load process was manually canceled.
+ @retval EFI_BUFFER_TOO_SMALL The BufferSize is too small to read the current + directory entry. BufferSize has been updated with + the size needed to complete the request. + + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_LOAD_FILE2)( + IN EFI_LOAD_FILE2_PROTOCOL *This, + IN EFI_DEVICE_PATH_PROTOCOL *FilePath, + IN BOOLEAN BootPolicy, + IN OUT UINTN *BufferSize, + IN VOID *Buffer OPTIONAL + ); + +/// +/// The EFI_LOAD_FILE_PROTOCOL is a simple protocol used to obtain files from arbitrary devices. +/// +struct _EFI_LOAD_FILE2_PROTOCOL { + EFI_LOAD_FILE2 LoadFile; +}; + +extern EFI_GUID gEfiLoadFile2ProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/ManagedNetwork.h b/src/include/ipxe/efi/Protocol/ManagedNetwork.h new file mode 100644 index 00000000..2bd09226 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/ManagedNetwork.h @@ -0,0 +1,374 @@ +/** @file + EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL as defined in UEFI 2.0. + EFI_MANAGED_NETWORK_PROTOCOL as defined in UEFI 2.0. + +Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in UEFI Specification 2.0 + +**/ + +#ifndef __EFI_MANAGED_NETWORK_PROTOCOL_H__ +#define __EFI_MANAGED_NETWORK_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL_GUID \ + { \ + 0xf36ff770, 0xa7e1, 0x42cf, {0x9e, 0xd2, 0x56, 0xf0, 0xf2, 0x71, 0xf4, 0x4c } \ + } + +#define EFI_MANAGED_NETWORK_PROTOCOL_GUID \ + { \ + 0x7ab33a91, 0xace5, 0x4326, { 0xb5, 0x72, 0xe7, 0xee, 0x33, 0xd3, 0x9f, 0x16 } \ + } + +typedef struct _EFI_MANAGED_NETWORK_PROTOCOL EFI_MANAGED_NETWORK_PROTOCOL; + +typedef struct { + /// + /// Timeout value for a UEFI one-shot timer event. A packet that has not been removed + /// from the MNP receive queue will be dropped if its receive timeout expires. + /// + UINT32 ReceivedQueueTimeoutValue; + /// + /// Timeout value for a UEFI one-shot timer event. A packet that has not been removed + /// from the MNP transmit queue will be dropped if its receive timeout expires. + /// + UINT32 TransmitQueueTimeoutValue; + /// + /// Ethernet type II 16-bit protocol type in host byte order. Valid + /// values are zero and 1,500 to 65,535. + /// + UINT16 ProtocolTypeFilter; + /// + /// Set to TRUE to receive packets that are sent to the network + /// device MAC address. The startup default value is FALSE. + /// + BOOLEAN EnableUnicastReceive; + /// + /// Set to TRUE to receive packets that are sent to any of the + /// active multicast groups. The startup default value is FALSE. 
+ /// + BOOLEAN EnableMulticastReceive; + /// + /// Set to TRUE to receive packets that are sent to the network + /// device broadcast address. The startup default value is FALSE. + /// + BOOLEAN EnableBroadcastReceive; + /// + /// Set to TRUE to receive packets that are sent to any MAC address. + /// The startup default value is FALSE. + /// + BOOLEAN EnablePromiscuousReceive; + /// + /// Set to TRUE to drop queued packets when the configuration + /// is changed. The startup default value is FALSE. + /// + BOOLEAN FlushQueuesOnReset; + /// + /// Set to TRUE to timestamp all packets when they are received + /// by the MNP. Note that timestamps may be unsupported in some + /// MNP implementations. The startup default value is FALSE. + /// + BOOLEAN EnableReceiveTimestamps; + /// + /// Set to TRUE to disable background polling in this MNP + /// instance. Note that background polling may not be supported in + /// all MNP implementations. The startup default value is FALSE, + /// unless background polling is not supported. 
+ /// + BOOLEAN DisableBackgroundPolling; +} EFI_MANAGED_NETWORK_CONFIG_DATA; + +typedef struct { + EFI_TIME Timestamp; + EFI_EVENT RecycleEvent; + UINT32 PacketLength; + UINT32 HeaderLength; + UINT32 AddressLength; + UINT32 DataLength; + BOOLEAN BroadcastFlag; + BOOLEAN MulticastFlag; + BOOLEAN PromiscuousFlag; + UINT16 ProtocolType; + VOID *DestinationAddress; + VOID *SourceAddress; + VOID *MediaHeader; + VOID *PacketData; +} EFI_MANAGED_NETWORK_RECEIVE_DATA; + +typedef struct { + UINT32 FragmentLength; + VOID *FragmentBuffer; +} EFI_MANAGED_NETWORK_FRAGMENT_DATA; + +typedef struct { + EFI_MAC_ADDRESS *DestinationAddress; //OPTIONAL + EFI_MAC_ADDRESS *SourceAddress; //OPTIONAL + UINT16 ProtocolType; //OPTIONAL + UINT32 DataLength; + UINT16 HeaderLength; //OPTIONAL + UINT16 FragmentCount; + EFI_MANAGED_NETWORK_FRAGMENT_DATA FragmentTable[1]; +} EFI_MANAGED_NETWORK_TRANSMIT_DATA; + + +typedef struct { + /// + /// This Event will be signaled after the Status field is updated + /// by the MNP. The type of Event must be + /// EFI_NOTIFY_SIGNAL. The Task Priority Level (TPL) of + /// Event must be lower than or equal to TPL_CALLBACK. + /// + EFI_EVENT Event; + /// + /// The status that is returned to the caller at the end of the operation + /// to indicate whether this operation completed successfully. + /// + EFI_STATUS Status; + union { + /// + /// When this token is used for receiving, RxData is a pointer to the EFI_MANAGED_NETWORK_RECEIVE_DATA. + /// + EFI_MANAGED_NETWORK_RECEIVE_DATA *RxData; + /// + /// When this token is used for transmitting, TxData is a pointer to the EFI_MANAGED_NETWORK_TRANSMIT_DATA. + /// + EFI_MANAGED_NETWORK_TRANSMIT_DATA *TxData; + } Packet; +} EFI_MANAGED_NETWORK_COMPLETION_TOKEN; + +/** + Returns the operational parameters for the current MNP child driver. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param MnpConfigData The pointer to storage for MNP operational parameters. 
+ @param SnpModeData The pointer to storage for SNP operational parameters. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_UNSUPPORTED The requested feature is unsupported in this MNP implementation. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. The default + values are returned in MnpConfigData if it is not NULL. + @retval Other The mode data could not be read. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_GET_MODE_DATA)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + OUT EFI_MANAGED_NETWORK_CONFIG_DATA *MnpConfigData OPTIONAL, + OUT EFI_SIMPLE_NETWORK_MODE *SnpModeData OPTIONAL + ); + +/** + Sets or clears the operational parameters for the MNP child driver. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param MnpConfigData The pointer to configuration data that will be assigned to the MNP + child driver instance. If NULL, the MNP child driver instance is + reset to startup defaults and all pending transmit and receive + requests are flushed. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_OUT_OF_RESOURCES Required system resources (usually memory) could not be + allocated. + @retval EFI_UNSUPPORTED The requested feature is unsupported in this [MNP] + implementation. + @retval EFI_DEVICE_ERROR An unexpected network or system error occurred. + @retval Other The MNP child driver instance has been reset to startup defaults. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_CONFIGURE)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + IN EFI_MANAGED_NETWORK_CONFIG_DATA *MnpConfigData OPTIONAL + ); + +/** + Translates an IP multicast address to a hardware (MAC) multicast address. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param Ipv6Flag Set to TRUE to if IpAddress is an IPv6 multicast address. 
+ Set to FALSE if IpAddress is an IPv4 multicast address. + @param IpAddress The pointer to the multicast IP address (in network byte order) to convert. + @param MacAddress The pointer to the resulting multicast MAC address. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER One of the following conditions is TRUE: + - This is NULL. + - IpAddress is NULL. + - *IpAddress is not a valid multicast IP address. + - MacAddress is NULL. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. + @retval EFI_UNSUPPORTED The requested feature is unsupported in this MNP implementation. + @retval EFI_DEVICE_ERROR An unexpected network or system error occurred. + @retval Other The address could not be converted. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_MCAST_IP_TO_MAC)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + IN BOOLEAN Ipv6Flag, + IN EFI_IP_ADDRESS *IpAddress, + OUT EFI_MAC_ADDRESS *MacAddress + ); + +/** + Enables and disables receive filters for multicast address. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param JoinFlag Set to TRUE to join this multicast group. + Set to FALSE to leave this multicast group. + @param MacAddress The pointer to the multicast MAC group (address) to join or leave. + + @retval EFI_SUCCESS The requested operation completed successfully. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - JoinFlag is TRUE and MacAddress is NULL. + - *MacAddress is not a valid multicast MAC address. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. + @retval EFI_ALREADY_STARTED The supplied multicast group is already joined. + @retval EFI_NOT_FOUND The supplied multicast group is not joined. + @retval EFI_DEVICE_ERROR An unexpected network or system error occurred. + @retval EFI_UNSUPPORTED The requested feature is unsupported in this MNP implementation. 
+ @retval Other The requested operation could not be completed. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_GROUPS)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + IN BOOLEAN JoinFlag, + IN EFI_MAC_ADDRESS *MacAddress OPTIONAL + ); + +/** + Places asynchronous outgoing data packets into the transmit queue. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param Token The pointer to a token associated with the transmit data descriptor. + + @retval EFI_SUCCESS The transmit completion token was cached. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_ACCESS_DENIED The transmit completion token is already in the transmit queue. + @retval EFI_OUT_OF_RESOURCES The transmit data could not be queued due to a lack of system resources + (usually memory). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_NOT_READY The transmit request could not be queued because the transmit queue is full. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_TRANSMIT)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + IN EFI_MANAGED_NETWORK_COMPLETION_TOKEN *Token + ); + +/** + Places an asynchronous receiving request into the receiving queue. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param Token The pointer to a token associated with the receive data descriptor. + + @retval EFI_SUCCESS The receive completion token was cached. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - Token is NULL. + - Token.Event is NULL. + @retval EFI_OUT_OF_RESOURCES The transmit data could not be queued due to a lack of system resources + (usually memory). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. 
+ @retval EFI_ACCESS_DENIED The receive completion token was already in the receive queue. + @retval EFI_NOT_READY The receive request could not be queued because the receive queue is full. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_RECEIVE)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + IN EFI_MANAGED_NETWORK_COMPLETION_TOKEN *Token + ); + + +/** + Aborts an asynchronous transmit or receive request. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + @param Token The pointer to a token that has been issued by + EFI_MANAGED_NETWORK_PROTOCOL.Transmit() or + EFI_MANAGED_NETWORK_PROTOCOL.Receive(). If + NULL, all pending tokens are aborted. + + @retval EFI_SUCCESS The asynchronous I/O request was aborted and Token.Event + was signaled. When Token is NULL, all pending requests were + aborted and their events were signaled. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_FOUND When Token is not NULL, the asynchronous I/O request was + not found in the transmit or receive queue. It has either completed + or was not issued by Transmit() and Receive(). + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_CANCEL)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This, + IN EFI_MANAGED_NETWORK_COMPLETION_TOKEN *Token OPTIONAL + ); + +/** + Polls for incoming data packets and processes outgoing data packets. + + @param This The pointer to the EFI_MANAGED_NETWORK_PROTOCOL instance. + + @retval EFI_SUCCESS Incoming or outgoing data was processed. + @retval EFI_NOT_STARTED This MNP child driver instance has not been configured. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_NOT_READY No incoming or outgoing data was processed. Consider increasing + the polling rate. + @retval EFI_TIMEOUT Data was dropped out of the transmit and/or receive queue. + Consider increasing the polling rate. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MANAGED_NETWORK_POLL)( + IN EFI_MANAGED_NETWORK_PROTOCOL *This + ); + +/// +/// The MNP is used by network applications (and drivers) to +/// perform raw (unformatted) asynchronous network packet I/O. +/// +struct _EFI_MANAGED_NETWORK_PROTOCOL { + EFI_MANAGED_NETWORK_GET_MODE_DATA GetModeData; + EFI_MANAGED_NETWORK_CONFIGURE Configure; + EFI_MANAGED_NETWORK_MCAST_IP_TO_MAC McastIpToMac; + EFI_MANAGED_NETWORK_GROUPS Groups; + EFI_MANAGED_NETWORK_TRANSMIT Transmit; + EFI_MANAGED_NETWORK_RECEIVE Receive; + EFI_MANAGED_NETWORK_CANCEL Cancel; + EFI_MANAGED_NETWORK_POLL Poll; +}; + +extern EFI_GUID gEfiManagedNetworkServiceBindingProtocolGuid; +extern EFI_GUID gEfiManagedNetworkProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Mtftp4.h b/src/include/ipxe/efi/Protocol/Mtftp4.h new file mode 100644 index 00000000..bc0a8396 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Mtftp4.h @@ -0,0 +1,595 @@ +/** @file + EFI Multicast Trivial File Transfer Protocol Definition + +Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under
+the terms and conditions of the BSD License that accompanies this distribution.
+The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php.
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+ @par Revision Reference:
+ This Protocol is introduced in UEFI Specification 2.0
+
+**/
+
+#ifndef __EFI_MTFTP4_PROTOCOL_H__
+#define __EFI_MTFTP4_PROTOCOL_H__
+
+FILE_LICENCE ( BSD3 );
+
+#define EFI_MTFTP4_SERVICE_BINDING_PROTOCOL_GUID \
+ { \
+ 0x2FE800BE, 0x8F01, 0x4aa6, {0x94, 0x6B, 0xD7, 0x13, 0x88, 0xE1, 0x83, 0x3F } \
+ }
+
+#define EFI_MTFTP4_PROTOCOL_GUID \
+ { \
+ 0x78247c57, 0x63db, 0x4708, {0x99, 0xc2, 0xa8, 0xb4, 0xa9, 0xa6, 0x1f, 0x6b } \
+ }
+
+typedef struct _EFI_MTFTP4_PROTOCOL EFI_MTFTP4_PROTOCOL;
+typedef struct _EFI_MTFTP4_TOKEN EFI_MTFTP4_TOKEN;
+
+//
+// MTFTP4 packet opcode definition
+//
+#define EFI_MTFTP4_OPCODE_RRQ 1
+#define EFI_MTFTP4_OPCODE_WRQ 2
+#define EFI_MTFTP4_OPCODE_DATA 3
+#define EFI_MTFTP4_OPCODE_ACK 4
+#define EFI_MTFTP4_OPCODE_ERROR 5
+#define EFI_MTFTP4_OPCODE_OACK 6
+#define EFI_MTFTP4_OPCODE_DIR 7
+#define EFI_MTFTP4_OPCODE_DATA8 8
+#define EFI_MTFTP4_OPCODE_ACK8 9
+
+//
+// MTFTP4 error code definition
+//
+#define EFI_MTFTP4_ERRORCODE_NOT_DEFINED 0
+#define EFI_MTFTP4_ERRORCODE_FILE_NOT_FOUND 1
+#define EFI_MTFTP4_ERRORCODE_ACCESS_VIOLATION 2
+#define EFI_MTFTP4_ERRORCODE_DISK_FULL 3
+#define EFI_MTFTP4_ERRORCODE_ILLEGAL_OPERATION 4
+#define EFI_MTFTP4_ERRORCODE_UNKNOWN_TRANSFER_ID 5
+#define EFI_MTFTP4_ERRORCODE_FILE_ALREADY_EXISTS 6
+#define EFI_MTFTP4_ERRORCODE_NO_SUCH_USER 7
+#define EFI_MTFTP4_ERRORCODE_REQUEST_DENIED 8
+
+//
+// MTFTP4 packet definitions
+//
+#pragma pack(1)
+
+typedef struct {
+ UINT16 OpCode;
+ UINT8 Filename[1];
+} EFI_MTFTP4_REQ_HEADER;
+
+typedef struct {
+ UINT16 OpCode;
+ UINT8 
Data[1]; +} EFI_MTFTP4_OACK_HEADER; + +typedef struct { + UINT16 OpCode; + UINT16 Block; + UINT8 Data[1]; +} EFI_MTFTP4_DATA_HEADER; + +typedef struct { + UINT16 OpCode; + UINT16 Block[1]; +} EFI_MTFTP4_ACK_HEADER; + +typedef struct { + UINT16 OpCode; + UINT64 Block; + UINT8 Data[1]; +} EFI_MTFTP4_DATA8_HEADER; + +typedef struct { + UINT16 OpCode; + UINT64 Block[1]; +} EFI_MTFTP4_ACK8_HEADER; + +typedef struct { + UINT16 OpCode; + UINT16 ErrorCode; + UINT8 ErrorMessage[1]; +} EFI_MTFTP4_ERROR_HEADER; + +typedef union { + /// + /// Type of packets as defined by the MTFTPv4 packet opcodes. + /// + UINT16 OpCode; + /// + /// Read request packet header. + /// + EFI_MTFTP4_REQ_HEADER Rrq; + /// + /// Write request packet header. + /// + EFI_MTFTP4_REQ_HEADER Wrq; + /// + /// Option acknowledge packet header. + /// + EFI_MTFTP4_OACK_HEADER Oack; + /// + /// Data packet header. + /// + EFI_MTFTP4_DATA_HEADER Data; + /// + /// Acknowledgement packet header. + /// + EFI_MTFTP4_ACK_HEADER Ack; + /// + /// Data packet header with big block number. + /// + EFI_MTFTP4_DATA8_HEADER Data8; + /// + /// Acknowledgement header with big block num. + /// + EFI_MTFTP4_ACK8_HEADER Ack8; + /// + /// Error packet header. + /// + EFI_MTFTP4_ERROR_HEADER Error; +} EFI_MTFTP4_PACKET; + +#pragma pack() + +/// +/// MTFTP4 option definition. 
+/// +typedef struct { + UINT8 *OptionStr; + UINT8 *ValueStr; +} EFI_MTFTP4_OPTION; + + +typedef struct { + BOOLEAN UseDefaultSetting; + EFI_IPv4_ADDRESS StationIp; + EFI_IPv4_ADDRESS SubnetMask; + UINT16 LocalPort; + EFI_IPv4_ADDRESS GatewayIp; + EFI_IPv4_ADDRESS ServerIp; + UINT16 InitialServerPort; + UINT16 TryCount; + UINT16 TimeoutValue; +} EFI_MTFTP4_CONFIG_DATA; + + +typedef struct { + EFI_MTFTP4_CONFIG_DATA ConfigData; + UINT8 SupportedOptionCount; + UINT8 **SupportedOptoins; + UINT8 UnsupportedOptionCount; + UINT8 **UnsupportedOptoins; +} EFI_MTFTP4_MODE_DATA; + + +typedef struct { + EFI_IPv4_ADDRESS GatewayIp; + EFI_IPv4_ADDRESS ServerIp; + UINT16 ServerPort; + UINT16 TryCount; + UINT16 TimeoutValue; +} EFI_MTFTP4_OVERRIDE_DATA; + +// +// Protocol interfaces definition +// + +/** + A callback function that is provided by the caller to intercept + the EFI_MTFTP4_OPCODE_DATA or EFI_MTFTP4_OPCODE_DATA8 packets processed in the + EFI_MTFTP4_PROTOCOL.ReadFile() function, and alternatively to intercept + EFI_MTFTP4_OPCODE_OACK or EFI_MTFTP4_OPCODE_ERROR packets during a call to + EFI_MTFTP4_PROTOCOL.ReadFile(), WriteFile() or ReadDirectory(). + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + @param Token The token that the caller provided in the + EFI_MTFTP4_PROTOCOL.ReadFile(), WriteFile() + or ReadDirectory() function. + @param PacketLen Indicates the length of the packet. + @param Packet The pointer to an MTFTPv4 packet. + + @retval EFI_SUCCESS The operation was successful. + @retval Others Aborts the transfer process. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_CHECK_PACKET)( + IN EFI_MTFTP4_PROTOCOL *This, + IN EFI_MTFTP4_TOKEN *Token, + IN UINT16 PacketLen, + IN EFI_MTFTP4_PACKET *Paket + ); + +/** + Timeout callback function. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. 
+ @param Token The token that is provided in the
+ EFI_MTFTP4_PROTOCOL.ReadFile() or
+ EFI_MTFTP4_PROTOCOL.WriteFile() or
+ EFI_MTFTP4_PROTOCOL.ReadDirectory() functions
+ by the caller.
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval Others Aborts download process.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_MTFTP4_TIMEOUT_CALLBACK)(
+ IN EFI_MTFTP4_PROTOCOL *This,
+ IN EFI_MTFTP4_TOKEN *Token
+ );
+
+/**
+ A callback function that the caller provides to feed data to the
+ EFI_MTFTP4_PROTOCOL.WriteFile() function.
+
+ @param This The pointer to the EFI_MTFTP4_PROTOCOL instance.
+ @param Token The token provided in the
+ EFI_MTFTP4_PROTOCOL.WriteFile() by the caller.
+ @param Length Indicates the length of the raw data wanted on input, and the
+ length the data available on output.
+ @param Buffer The pointer to the buffer where the data is stored.
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval Others Aborts session.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_MTFTP4_PACKET_NEEDED)(
+ IN EFI_MTFTP4_PROTOCOL *This,
+ IN EFI_MTFTP4_TOKEN *Token,
+ IN OUT UINT16 *Length,
+ OUT VOID **Buffer
+ );
+
+
+/**
+ Reads the current operational settings of this EFI MTFTPv4 Protocol driver instance.
+
+ @param This The pointer to the EFI_MTFTP4_PROTOCOL instance.
+ @param ModeData The pointer to storage for the EFI MTFTPv4 Protocol driver mode data.
+
+ @retval EFI_SUCCESS The configuration data was successfully returned.
+ @retval EFI_OUT_OF_RESOURCES The required mode data could not be allocated.
+ @retval EFI_INVALID_PARAMETER This is NULL or ModeData is NULL.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_MTFTP4_GET_MODE_DATA)(
+ IN EFI_MTFTP4_PROTOCOL *This,
+ OUT EFI_MTFTP4_MODE_DATA *ModeData
+ );
+
+
+/**
+ Initializes, changes, or resets the default operational setting for this
+ EFI MTFTPv4 Protocol driver instance.
+
+ @param This The pointer to the EFI_MTFTP4_PROTOCOL instance.
+ @param MtftpConfigData The pointer to the configuration data structure. + + @retval EFI_SUCCESS The EFI MTFTPv4 Protocol driver was configured successfully. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_ACCESS_DENIED The EFI configuration could not be changed at this time because + there is one MTFTP background operation in progress. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) has not finished yet. + @retval EFI_UNSUPPORTED A configuration protocol (DHCP, BOOTP, RARP, etc.) could not + be located when clients choose to use the default address + settings. + @retval EFI_OUT_OF_RESOURCES The EFI MTFTPv4 Protocol driver instance data could not be + allocated. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. The EFI + MTFTPv4 Protocol driver instance is not configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_CONFIGURE)( + IN EFI_MTFTP4_PROTOCOL *This, + IN EFI_MTFTP4_CONFIG_DATA *MtftpConfigData OPTIONAL + ); + + +/** + Gets information about a file from an MTFTPv4 server. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + @param OverrideData Data that is used to override the existing parameters. If NULL, + the default parameters that were set in the + EFI_MTFTP4_PROTOCOL.Configure() function are used. + @param Filename The pointer to null-terminated ASCII file name string. + @param ModeStr The pointer to null-terminated ASCII mode string. If NULL, "octet" will be used. + @param OptionCount Number of option/value string pairs in OptionList. + @param OptionList The pointer to array of option/value string pairs. Ignored if + OptionCount is zero. + @param PacketLength The number of bytes in the returned packet. + @param Packet The pointer to the received packet. This buffer must be freed by + the caller. + + @retval EFI_SUCCESS An MTFTPv4 OACK packet was received and is in the Packet. 
+ @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - Filename is NULL. + - OptionCount is not zero and OptionList is NULL. + - One or more options in OptionList have wrong format. + - PacketLength is NULL. + - One or more IPv4 addresses in OverrideData are not valid + unicast IPv4 addresses if OverrideData is not NULL. + @retval EFI_UNSUPPORTED One or more options in the OptionList are in the + unsupported list of structure EFI_MTFTP4_MODE_DATA. + @retval EFI_NOT_STARTED The EFI MTFTPv4 Protocol driver has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) has not finished yet. + @retval EFI_ACCESS_DENIED The previous operation has not completed yet. + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. + @retval EFI_TFTP_ERROR An MTFTPv4 ERROR packet was received and is in the Packet. + @retval EFI_NETWORK_UNREACHABLE An ICMP network unreachable error packet was received and the Packet is set to NULL. + @retval EFI_HOST_UNREACHABLE An ICMP host unreachable error packet was received and the Packet is set to NULL. + @retval EFI_PROTOCOL_UNREACHABLE An ICMP protocol unreachable error packet was received and the Packet is set to NULL. + @retval EFI_PORT_UNREACHABLE An ICMP port unreachable error packet was received and the Packet is set to NULL. + @retval EFI_ICMP_ERROR Some other ICMP ERROR packet was received and is in the Buffer. + @retval EFI_PROTOCOL_ERROR An unexpected MTFTPv4 packet was received and is in the Packet. + @retval EFI_TIMEOUT No responses were received from the MTFTPv4 server. + @retval EFI_DEVICE_ERROR An unexpected network error or system error occurred. + @retval EFI_NO_MEDIA There was a media error. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_GET_INFO)( + IN EFI_MTFTP4_PROTOCOL *This, + IN EFI_MTFTP4_OVERRIDE_DATA *OverrideData OPTIONAL, + IN UINT8 *Filename, + IN UINT8 *ModeStr OPTIONAL, + IN UINT8 OptionCount, + IN EFI_MTFTP4_OPTION *OptionList, + OUT UINT32 *PacketLength, + OUT EFI_MTFTP4_PACKET **Packet OPTIONAL + ); + +/** + Parses the options in an MTFTPv4 OACK packet. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + @param PacketLen Length of the OACK packet to be parsed. + @param Packet The pointer to the OACK packet to be parsed. + @param OptionCount The pointer to the number of options in following OptionList. + @param OptionList The pointer to EFI_MTFTP4_OPTION storage. Call the EFI Boot + Service FreePool() to release the OptionList if the options + in this OptionList are not needed any more. + + @retval EFI_SUCCESS The OACK packet was valid and the OptionCount and + OptionList parameters have been updated. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - PacketLen is 0. + - Packet is NULL or Packet is not a valid MTFTPv4 packet. + - OptionCount is NULL. + @retval EFI_NOT_FOUND No options were found in the OACK packet. + @retval EFI_OUT_OF_RESOURCES Storage for the OptionList array cannot be allocated. + @retval EFI_PROTOCOL_ERROR One or more of the option fields is invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_PARSE_OPTIONS)( + IN EFI_MTFTP4_PROTOCOL *This, + IN UINT32 PacketLen, + IN EFI_MTFTP4_PACKET *Packet, + OUT UINT32 *OptionCount, + OUT EFI_MTFTP4_OPTION **OptionList OPTIONAL + ); + + +/** + Downloads a file from an MTFTPv4 server. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + @param Token The pointer to the token structure to provide the parameters that are + used in this operation. + + @retval EFI_SUCCESS The data file has been transferred successfully. + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. 
+ @retval EFI_BUFFER_TOO_SMALL BufferSize is not zero but not large enough to hold the + downloaded data in downloading process. + @retval EFI_ABORTED Current operation is aborted by user. + @retval EFI_NETWORK_UNREACHABLE An ICMP network unreachable error packet was received. + @retval EFI_HOST_UNREACHABLE An ICMP host unreachable error packet was received. + @retval EFI_PROTOCOL_UNREACHABLE An ICMP protocol unreachable error packet was received. + @retval EFI_PORT_UNREACHABLE An ICMP port unreachable error packet was received. + @retval EFI_ICMP_ERROR Some other ICMP ERROR packet was received. + @retval EFI_TIMEOUT No responses were received from the MTFTPv4 server. + @retval EFI_TFTP_ERROR An MTFTPv4 ERROR packet was received. + @retval EFI_DEVICE_ERROR An unexpected network error or system error occurred. + @retval EFI_NO_MEDIA There was a media error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_READ_FILE)( + IN EFI_MTFTP4_PROTOCOL *This, + IN EFI_MTFTP4_TOKEN *Token + ); + + + +/** + Sends a file to an MTFTPv4 server. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + @param Token The pointer to the token structure to provide the parameters that are + used in this operation. + + @retval EFI_SUCCESS The upload session has started. + @retval EFI_UNSUPPORTED The operation is not supported by this implementation. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_UNSUPPORTED One or more options in the Token.OptionList are in + the unsupported list of structure EFI_MTFTP4_MODE_DATA. + @retval EFI_NOT_STARTED The EFI MTFTPv4 Protocol driver has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_ALREADY_STARTED This Token is already being used in another MTFTPv4 session. + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. 
+ @retval EFI_ACCESS_DENIED The previous operation has not completed yet. + @retval EFI_DEVICE_ERROR An unexpected network error or system error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_WRITE_FILE)( + IN EFI_MTFTP4_PROTOCOL *This, + IN EFI_MTFTP4_TOKEN *Token + ); + + +/** + Downloads a data file "directory" from an MTFTPv4 server. May be unsupported in some EFI + implementations. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + @param Token The pointer to the token structure to provide the parameters that are + used in this operation. + + @retval EFI_SUCCESS The MTFTPv4 related file "directory" has been downloaded. + @retval EFI_UNSUPPORTED The operation is not supported by this implementation. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_UNSUPPORTED One or more options in the Token.OptionList are in + the unsupported list of structure EFI_MTFTP4_MODE_DATA. + @retval EFI_NOT_STARTED The EFI MTFTPv4 Protocol driver has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_ALREADY_STARTED This Token is already being used in another MTFTPv4 session. + @retval EFI_OUT_OF_RESOURCES Required system resources could not be allocated. + @retval EFI_ACCESS_DENIED The previous operation has not completed yet. + @retval EFI_DEVICE_ERROR An unexpected network error or system error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_READ_DIRECTORY)( + IN EFI_MTFTP4_PROTOCOL *This, + IN EFI_MTFTP4_TOKEN *Token + ); + +/** + Polls for incoming data packets and processes outgoing data packets. + + @param This The pointer to the EFI_MTFTP4_PROTOCOL instance. + + @retval EFI_SUCCESS Incoming or outgoing data was processed. + @retval EFI_NOT_STARTED This EFI MTFTPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) 
is not finished yet. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_TIMEOUT Data was dropped out of the transmit and/or receive queue. + Consider increasing the polling rate. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_MTFTP4_POLL)( + IN EFI_MTFTP4_PROTOCOL *This + ); + +/// +/// The EFI_MTFTP4_PROTOCOL is designed to be used by UEFI drivers and applications +/// to transmit and receive data files. The EFI MTFTPv4 Protocol driver uses +/// the underlying EFI UDPv4 Protocol driver and EFI IPv4 Protocol driver. +/// +struct _EFI_MTFTP4_PROTOCOL { + EFI_MTFTP4_GET_MODE_DATA GetModeData; + EFI_MTFTP4_CONFIGURE Configure; + EFI_MTFTP4_GET_INFO GetInfo; + EFI_MTFTP4_PARSE_OPTIONS ParseOptions; + EFI_MTFTP4_READ_FILE ReadFile; + EFI_MTFTP4_WRITE_FILE WriteFile; + EFI_MTFTP4_READ_DIRECTORY ReadDirectory; + EFI_MTFTP4_POLL Poll; +}; + +struct _EFI_MTFTP4_TOKEN { + /// + /// The status that is returned to the caller at the end of the operation + /// to indicate whether this operation completed successfully. + /// + EFI_STATUS Status; + /// + /// The event that will be signaled when the operation completes. If + /// set to NULL, the corresponding function will wait until the read or + /// write operation finishes. The type of Event must be + /// EVT_NOTIFY_SIGNAL. The Task Priority Level (TPL) of + /// Event must be lower than or equal to TPL_CALLBACK. + /// + EFI_EVENT Event; + /// + /// If not NULL, the data that will be used to override the existing configure data. + /// + EFI_MTFTP4_OVERRIDE_DATA *OverrideData; + /// + /// The pointer to the null-terminated ASCII file name string. + /// + UINT8 *Filename; + /// + /// The pointer to the null-terminated ASCII mode string. If NULL, "octet" is used. + /// + UINT8 *ModeStr; + /// + /// Number of option/value string pairs. + /// + UINT32 OptionCount; + /// + /// The pointer to an array of option/value string pairs. 
Ignored if OptionCount is zero. + /// + EFI_MTFTP4_OPTION *OptionList; + /// + /// The size of the data buffer. + /// + UINT64 BufferSize; + /// + /// The pointer to the data buffer. Data that is downloaded from the + /// MTFTPv4 server is stored here. Data that is uploaded to the + /// MTFTPv4 server is read from here. Ignored if BufferSize is zero. + /// + VOID *Buffer; + /// + /// The pointer to the context that will be used by CheckPacket, + /// TimeoutCallback and PacketNeeded. + /// + VOID *Context; + /// + /// The pointer to the callback function to check the contents of the received packet. + /// + EFI_MTFTP4_CHECK_PACKET CheckPacket; + /// + /// The pointer to the function to be called when a timeout occurs. + /// + EFI_MTFTP4_TIMEOUT_CALLBACK TimeoutCallback; + /// + /// The pointer to the function to provide the needed packet contents. + /// + EFI_MTFTP4_PACKET_NEEDED PacketNeeded; +}; + +extern EFI_GUID gEfiMtftp4ServiceBindingProtocolGuid; +extern EFI_GUID gEfiMtftp4ProtocolGuid; + +#endif + diff --git a/src/include/ipxe/efi/Protocol/PxeBaseCode.h b/src/include/ipxe/efi/Protocol/PxeBaseCode.h new file mode 100644 index 00000000..26447987 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/PxeBaseCode.h @@ -0,0 +1,936 @@ +/** @file + EFI PXE Base Code Protocol definitions, which is used to access PXE-compatible + devices for network access and network booting. + +Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in EFI Specification 1.10. + +**/ +#ifndef __PXE_BASE_CODE_PROTOCOL_H__ +#define __PXE_BASE_CODE_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +/// +/// PXE Base Code protocol. +/// +#define EFI_PXE_BASE_CODE_PROTOCOL_GUID \ + { \ + 0x03c4e603, 0xac28, 0x11d3, {0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d } \ + } + +typedef struct _EFI_PXE_BASE_CODE_PROTOCOL EFI_PXE_BASE_CODE_PROTOCOL; + +/// +/// Protocol defined in EFI1.1. +/// +typedef EFI_PXE_BASE_CODE_PROTOCOL EFI_PXE_BASE_CODE; + +/// +/// Default IP TTL and ToS. +/// +#define DEFAULT_TTL 16 +#define DEFAULT_ToS 0 + +/// +/// ICMP error format. +/// +typedef struct { + UINT8 Type; + UINT8 Code; + UINT16 Checksum; + union { + UINT32 reserved; + UINT32 Mtu; + UINT32 Pointer; + struct { + UINT16 Identifier; + UINT16 Sequence; + } Echo; + } u; + UINT8 Data[494]; +} EFI_PXE_BASE_CODE_ICMP_ERROR; + +/// +/// TFTP error format. +/// +typedef struct { + UINT8 ErrorCode; + CHAR8 ErrorString[127]; +} EFI_PXE_BASE_CODE_TFTP_ERROR; + +/// +/// IP Receive Filter definitions. +/// +#define EFI_PXE_BASE_CODE_MAX_IPCNT 8 + +/// +/// IP Receive Filter structure. 
+/// +typedef struct { + UINT8 Filters; + UINT8 IpCnt; + UINT16 reserved; + EFI_IP_ADDRESS IpList[EFI_PXE_BASE_CODE_MAX_IPCNT]; +} EFI_PXE_BASE_CODE_IP_FILTER; + +#define EFI_PXE_BASE_CODE_IP_FILTER_STATION_IP 0x0001 +#define EFI_PXE_BASE_CODE_IP_FILTER_BROADCAST 0x0002 +#define EFI_PXE_BASE_CODE_IP_FILTER_PROMISCUOUS 0x0004 +#define EFI_PXE_BASE_CODE_IP_FILTER_PROMISCUOUS_MULTICAST 0x0008 + +/// +/// ARP cache entries. +/// +typedef struct { + EFI_IP_ADDRESS IpAddr; + EFI_MAC_ADDRESS MacAddr; +} EFI_PXE_BASE_CODE_ARP_ENTRY; + +/// +/// ARP route table entries. +/// +typedef struct { + EFI_IP_ADDRESS IpAddr; + EFI_IP_ADDRESS SubnetMask; + EFI_IP_ADDRESS GwAddr; +} EFI_PXE_BASE_CODE_ROUTE_ENTRY; + +// +// UDP definitions +// +typedef UINT16 EFI_PXE_BASE_CODE_UDP_PORT; + +#define EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_IP 0x0001 +#define EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_PORT 0x0002 +#define EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_DEST_IP 0x0004 +#define EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_DEST_PORT 0x0008 +#define EFI_PXE_BASE_CODE_UDP_OPFLAGS_USE_FILTER 0x0010 +#define EFI_PXE_BASE_CODE_UDP_OPFLAGS_MAY_FRAGMENT 0x0020 + +// +// Discover() definitions +// +#define EFI_PXE_BASE_CODE_BOOT_TYPE_BOOTSTRAP 0 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_MS_WINNT_RIS 1 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_INTEL_LCM 2 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_DOSUNDI 3 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_NEC_ESMPRO 4 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_IBM_WSoD 5 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_IBM_LCCM 6 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_CA_UNICENTER_TNG 7 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_HP_OPENVIEW 8 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_ALTIRIS_9 9 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_ALTIRIS_10 10 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_ALTIRIS_11 11 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_NOT_USED_12 12 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_REDHAT_INSTALL 13 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_REDHAT_BOOT 14 +#define EFI_PXE_BASE_CODE_BOOT_TYPE_REMBO 15 +#define 
EFI_PXE_BASE_CODE_BOOT_TYPE_BEOBOOT 16 +// +// 17 through 32767 are reserved +// 32768 through 65279 are for vendor use +// 65280 through 65534 are reserved +// +#define EFI_PXE_BASE_CODE_BOOT_TYPE_PXETEST 65535 + +#define EFI_PXE_BASE_CODE_BOOT_LAYER_MASK 0x7FFF +#define EFI_PXE_BASE_CODE_BOOT_LAYER_INITIAL 0x0000 + +// +// PXE Tag definition that identifies the processor +// and programming environment of the client system. +// These identifiers are defined by IETF: +// http://www.ietf.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xml +// +#if defined (MDE_CPU_IA32) +#define EFI_PXE_CLIENT_SYSTEM_ARCHITECTURE 0x0006 +#elif defined (MDE_CPU_IPF) +#define EFI_PXE_CLIENT_SYSTEM_ARCHITECTURE 0x0002 +#elif defined (MDE_CPU_X64) +#define EFI_PXE_CLIENT_SYSTEM_ARCHITECTURE 0x0007 +#elif defined (MDE_CPU_ARM) +#define EFI_PXE_CLIENT_SYSTEM_ARCHITECTURE 0x000A +#elif defined (MDE_CPU_AARCH64) +#define EFI_PXE_CLIENT_SYSTEM_ARCHITECTURE 0x000B +#endif + + +/// +/// Discover() server list structure. +/// +typedef struct { + UINT16 Type; + BOOLEAN AcceptAnyResponse; + UINT8 Reserved; + EFI_IP_ADDRESS IpAddr; +} EFI_PXE_BASE_CODE_SRVLIST; + +/// +/// Discover() information override structure. +/// +typedef struct { + BOOLEAN UseMCast; + BOOLEAN UseBCast; + BOOLEAN UseUCast; + BOOLEAN MustUseList; + EFI_IP_ADDRESS ServerMCastIp; + UINT16 IpCnt; + EFI_PXE_BASE_CODE_SRVLIST SrvList[1]; +} EFI_PXE_BASE_CODE_DISCOVER_INFO; + +/// +/// TFTP opcode definitions. +/// +typedef enum { + EFI_PXE_BASE_CODE_TFTP_FIRST, + EFI_PXE_BASE_CODE_TFTP_GET_FILE_SIZE, + EFI_PXE_BASE_CODE_TFTP_READ_FILE, + EFI_PXE_BASE_CODE_TFTP_WRITE_FILE, + EFI_PXE_BASE_CODE_TFTP_READ_DIRECTORY, + EFI_PXE_BASE_CODE_MTFTP_GET_FILE_SIZE, + EFI_PXE_BASE_CODE_MTFTP_READ_FILE, + EFI_PXE_BASE_CODE_MTFTP_READ_DIRECTORY, + EFI_PXE_BASE_CODE_MTFTP_LAST +} EFI_PXE_BASE_CODE_TFTP_OPCODE; + +/// +/// MTFTP information. This information is required +/// to start or join a multicast TFTP session. 
It is also required to +/// perform the "get file size" and "read directory" operations of MTFTP. +/// +typedef struct { + EFI_IP_ADDRESS MCastIp; + EFI_PXE_BASE_CODE_UDP_PORT CPort; + EFI_PXE_BASE_CODE_UDP_PORT SPort; + UINT16 ListenTimeout; + UINT16 TransmitTimeout; +} EFI_PXE_BASE_CODE_MTFTP_INFO; + +/// +/// DHCPV4 Packet structure. +/// +typedef struct { + UINT8 BootpOpcode; + UINT8 BootpHwType; + UINT8 BootpHwAddrLen; + UINT8 BootpGateHops; + UINT32 BootpIdent; + UINT16 BootpSeconds; + UINT16 BootpFlags; + UINT8 BootpCiAddr[4]; + UINT8 BootpYiAddr[4]; + UINT8 BootpSiAddr[4]; + UINT8 BootpGiAddr[4]; + UINT8 BootpHwAddr[16]; + UINT8 BootpSrvName[64]; + UINT8 BootpBootFile[128]; + UINT32 DhcpMagik; + UINT8 DhcpOptions[56]; +} EFI_PXE_BASE_CODE_DHCPV4_PACKET; + +/// +/// DHCPV6 Packet structure. +/// +typedef struct { + UINT32 MessageType:8; + UINT32 TransactionId:24; + UINT8 DhcpOptions[1024]; +} EFI_PXE_BASE_CODE_DHCPV6_PACKET; + +/// +/// Packet structure. +/// +typedef union { + UINT8 Raw[1472]; + EFI_PXE_BASE_CODE_DHCPV4_PACKET Dhcpv4; + EFI_PXE_BASE_CODE_DHCPV6_PACKET Dhcpv6; +} EFI_PXE_BASE_CODE_PACKET; + +// +// PXE Base Code Mode structure +// +#define EFI_PXE_BASE_CODE_MAX_ARP_ENTRIES 8 +#define EFI_PXE_BASE_CODE_MAX_ROUTE_ENTRIES 8 + +/// +/// EFI_PXE_BASE_CODE_MODE. +/// The data values in this structure are read-only and +/// are updated by the code that produces the +/// EFI_PXE_BASE_CODE_PROTOCOL functions. 
+/// +typedef struct { + BOOLEAN Started; + BOOLEAN Ipv6Available; + BOOLEAN Ipv6Supported; + BOOLEAN UsingIpv6; + BOOLEAN BisSupported; + BOOLEAN BisDetected; + BOOLEAN AutoArp; + BOOLEAN SendGUID; + BOOLEAN DhcpDiscoverValid; + BOOLEAN DhcpAckReceived; + BOOLEAN ProxyOfferReceived; + BOOLEAN PxeDiscoverValid; + BOOLEAN PxeReplyReceived; + BOOLEAN PxeBisReplyReceived; + BOOLEAN IcmpErrorReceived; + BOOLEAN TftpErrorReceived; + BOOLEAN MakeCallbacks; + UINT8 TTL; + UINT8 ToS; + EFI_IP_ADDRESS StationIp; + EFI_IP_ADDRESS SubnetMask; + EFI_PXE_BASE_CODE_PACKET DhcpDiscover; + EFI_PXE_BASE_CODE_PACKET DhcpAck; + EFI_PXE_BASE_CODE_PACKET ProxyOffer; + EFI_PXE_BASE_CODE_PACKET PxeDiscover; + EFI_PXE_BASE_CODE_PACKET PxeReply; + EFI_PXE_BASE_CODE_PACKET PxeBisReply; + EFI_PXE_BASE_CODE_IP_FILTER IpFilter; + UINT32 ArpCacheEntries; + EFI_PXE_BASE_CODE_ARP_ENTRY ArpCache[EFI_PXE_BASE_CODE_MAX_ARP_ENTRIES]; + UINT32 RouteTableEntries; + EFI_PXE_BASE_CODE_ROUTE_ENTRY RouteTable[EFI_PXE_BASE_CODE_MAX_ROUTE_ENTRIES]; + EFI_PXE_BASE_CODE_ICMP_ERROR IcmpError; + EFI_PXE_BASE_CODE_TFTP_ERROR TftpError; +} EFI_PXE_BASE_CODE_MODE; + +// +// PXE Base Code Interface Function definitions +// + +/** + Enables the use of the PXE Base Code Protocol functions. + + This function enables the use of the PXE Base Code Protocol functions. If the + Started field of the EFI_PXE_BASE_CODE_MODE structure is already TRUE, then + EFI_ALREADY_STARTED will be returned. If UseIpv6 is TRUE, then IPv6 formatted + addresses will be used in this session. If UseIpv6 is FALSE, then IPv4 formatted + addresses will be used in this session. If UseIpv6 is TRUE, and the Ipv6Supported + field of the EFI_PXE_BASE_CODE_MODE structure is FALSE, then EFI_UNSUPPORTED will + be returned. If there is not enough memory or other resources to start the PXE + Base Code Protocol, then EFI_OUT_OF_RESOURCES will be returned. 
Otherwise, the
+ PXE Base Code Protocol will be started, and all of the fields of the EFI_PXE_BASE_CODE_MODE
+ structure will be initialized as follows:
+ Started: Set to TRUE.
+ Ipv6Supported: Unchanged.
+ Ipv6Available: Unchanged.
+ UsingIpv6: Set to UseIpv6.
+ BisSupported: Unchanged.
+ BisDetected: Unchanged.
+ AutoArp: Set to TRUE.
+ SendGUID: Set to FALSE.
+ TTL: Set to DEFAULT_TTL.
+ ToS: Set to DEFAULT_ToS.
+ DhcpCompleted: Set to FALSE.
+ ProxyOfferReceived: Set to FALSE.
+ StationIp: Set to an address of all zeros.
+ SubnetMask: Set to a subnet mask of all zeros.
+ DhcpDiscover: Zero-filled.
+ DhcpAck: Zero-filled.
+ ProxyOffer: Zero-filled.
+ PxeDiscoverValid: Set to FALSE.
+ PxeDiscover: Zero-filled.
+ PxeReplyValid: Set to FALSE.
+ PxeReply: Zero-filled.
+ PxeBisReplyValid: Set to FALSE.
+ PxeBisReply: Zero-filled.
+ IpFilter: Set the Filters field to 0 and the IpCnt field to 0.
+ ArpCacheEntries: Set to 0.
+ ArpCache: Zero-filled.
+ RouteTableEntries: Set to 0.
+ RouteTable: Zero-filled.
+ IcmpErrorReceived: Set to FALSE.
+ IcmpError: Zero-filled.
+ TftpErrorReceived: Set to FALSE.
+ TftpError: Zero-filled.
+ MakeCallbacks: Set to TRUE if the PXE Base Code Callback Protocol is available.
+ Set to FALSE if the PXE Base Code Callback Protocol is not available.
+
+ @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance.
+ @param UseIpv6 Specifies the type of IP addresses that are to be used during the session
+ that is being started. Set to TRUE for IPv6 addresses, and FALSE for
+ IPv4 addresses.
+
+ @retval EFI_SUCCESS The PXE Base Code Protocol was started.
+ @retval EFI_DEVICE_ERROR The network device encountered an error during this operation.
+ @retval EFI_UNSUPPORTED UseIpv6 is TRUE, but the Ipv6Supported field of the
+ EFI_PXE_BASE_CODE_MODE structure is FALSE.
+ @retval EFI_ALREADY_STARTED The PXE Base Code Protocol is already in the started state.
+ @retval EFI_INVALID_PARAMETER The This parameter is NULL or does not point to a valid
+ EFI_PXE_BASE_CODE_PROTOCOL structure. 
+ @retval EFI_OUT_OF_RESOURCES Could not allocate enough memory or other resources to start the + PXE Base Code Protocol. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_START)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN BOOLEAN UseIpv6 + ); + +/** + Disables the use of the PXE Base Code Protocol functions. + + This function stops all activity on the network device. All the resources allocated + in Start() are released, the Started field of the EFI_PXE_BASE_CODE_MODE structure is + set to FALSE and EFI_SUCCESS is returned. If the Started field of the EFI_PXE_BASE_CODE_MODE + structure is already FALSE, then EFI_NOT_STARTED will be returned. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + + @retval EFI_SUCCESS The PXE Base Code Protocol was stopped. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is already in the stopped state. + @retval EFI_INVALID_PARAMETER The This parameter is NULL or does not point to a valid + EFI_PXE_BASE_CODE_PROTOCOL structure. + @retval EFI_DEVICE_ERROR The network device encountered an error during this operation. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_STOP)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This + ); + +/** + Attempts to complete a DHCPv4 D.O.R.A. (discover / offer / request / acknowledge) or DHCPv6 + S.A.R.R (solicit / advertise / request / reply) sequence. + + This function attempts to complete the DHCP sequence. If this sequence is completed, + then EFI_SUCCESS is returned, and the DhcpCompleted, ProxyOfferReceived, StationIp, + SubnetMask, DhcpDiscover, DhcpAck, and ProxyOffer fields of the EFI_PXE_BASE_CODE_MODE + structure are filled in. + If SortOffers is TRUE, then the cached DHCP offer packets will be sorted before + they are tried. If SortOffers is FALSE, then the cached DHCP offer packets will + be tried in the order in which they are received. Please see the Preboot Execution + Environment (PXE) Specification for additional details on the implementation of DHCP. 
+ This function can take at least 31 seconds to timeout and return control to the + caller. If the DHCP sequence does not complete, then EFI_TIMEOUT will be returned. + If the Callback Protocol does not return EFI_PXE_BASE_CODE_CALLBACK_STATUS_CONTINUE, + then the DHCP sequence will be stopped and EFI_ABORTED will be returned. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param SortOffers TRUE if the offers received should be sorted. Set to FALSE to try the + offers in the order that they are received. + + @retval EFI_SUCCESS Valid DHCP has completed. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER The This parameter is NULL or does not point to a valid + EFI_PXE_BASE_CODE_PROTOCOL structure. + @retval EFI_DEVICE_ERROR The network device encountered an error during this operation. + @retval EFI_OUT_OF_RESOURCES Could not allocate enough memory to complete the DHCP Protocol. + @retval EFI_ABORTED The callback function aborted the DHCP Protocol. + @retval EFI_TIMEOUT The DHCP Protocol timed out. + @retval EFI_ICMP_ERROR An ICMP error packet was received during the DHCP session. + @retval EFI_NO_RESPONSE Valid PXE offer was not received. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_DHCP)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN BOOLEAN SortOffers + ); + +/** + Attempts to complete the PXE Boot Server and/or boot image discovery sequence. + + This function attempts to complete the PXE Boot Server and/or boot image discovery + sequence. If this sequence is completed, then EFI_SUCCESS is returned, and the + PxeDiscoverValid, PxeDiscover, PxeReplyReceived, and PxeReply fields of the + EFI_PXE_BASE_CODE_MODE structure are filled in. If UseBis is TRUE, then the + PxeBisReplyReceived and PxeBisReply fields of the EFI_PXE_BASE_CODE_MODE structure + will also be filled in. If UseBis is FALSE, then PxeBisReplyValid will be set to FALSE. 
+ In the structure referenced by parameter Info, the PXE Boot Server list, SrvList[], + has two uses: It is the Boot Server IP address list used for unicast discovery + (if the UseUCast field is TRUE), and it is the list used for Boot Server verification + (if the MustUseList field is TRUE). Also, if the MustUseList field in that structure + is TRUE and the AcceptAnyResponse field in the SrvList[] array is TRUE, any Boot + Server reply of that type will be accepted. If the AcceptAnyResponse field is + FALSE, only responses from Boot Servers with matching IP addresses will be accepted. + This function can take at least 10 seconds to timeout and return control to the + caller. If the Discovery sequence does not complete, then EFI_TIMEOUT will be + returned. Please see the Preboot Execution Environment (PXE) Specification for + additional details on the implementation of the Discovery sequence. + If the Callback Protocol does not return EFI_PXE_BASE_CODE_CALLBACK_STATUS_CONTINUE, + then the Discovery sequence is stopped and EFI_ABORTED will be returned. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param Type The type of bootstrap to perform. + @param Layer The pointer to the boot server layer number to discover, which must be + PXE_BOOT_LAYER_INITIAL when a new server type is being + discovered. + @param UseBis TRUE if Boot Integrity Services are to be used. FALSE otherwise. + @param Info The pointer to a data structure that contains additional information on the + type of discovery operation that is to be performed. + + @retval EFI_SUCCESS The Discovery sequence has been completed. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_DEVICE_ERROR The network device encountered an error during this operation. + @retval EFI_OUT_OF_RESOURCES Could not allocate enough memory to complete Discovery. 
+ @retval EFI_ABORTED The callback function aborted the Discovery sequence. + @retval EFI_TIMEOUT The Discovery sequence timed out. + @retval EFI_ICMP_ERROR An ICMP error packet was received during the PXE discovery + session. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_DISCOVER)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN UINT16 Type, + IN UINT16 *Layer, + IN BOOLEAN UseBis, + IN EFI_PXE_BASE_CODE_DISCOVER_INFO *Info OPTIONAL + ); + +/** + Used to perform TFTP and MTFTP services. + + This function is used to perform TFTP and MTFTP services. This includes the + TFTP operations to get the size of a file, read a directory, read a file, and + write a file. It also includes the MTFTP operations to get the size of a file, + read a directory, and read a file. The type of operation is specified by Operation. + If the callback function that is invoked during the TFTP/MTFTP operation does + not return EFI_PXE_BASE_CODE_CALLBACK_STATUS_CONTINUE, then EFI_ABORTED will + be returned. + For read operations, the return data will be placed in the buffer specified by + BufferPtr. If BufferSize is too small to contain the entire downloaded file, + then EFI_BUFFER_TOO_SMALL will be returned and BufferSize will be set to zero + or the size of the requested file (the size of the requested file is only returned + if the TFTP server supports TFTP options). If BufferSize is large enough for the + read operation, then BufferSize will be set to the size of the downloaded file, + and EFI_SUCCESS will be returned. Applications using the PxeBc.Mtftp() services + should use the get-file-size operations to determine the size of the downloaded + file prior to using the read-file operations--especially when downloading large + (greater than 64 MB) files--instead of making two calls to the read-file operation. + Following this recommendation will save time if the file is larger than expected + and the TFTP server does not support TFTP option extensions. 
Without TFTP option + extension support, the client has to download the entire file, counting and discarding + the received packets, to determine the file size. + For write operations, the data to be sent is in the buffer specified by BufferPtr. + BufferSize specifies the number of bytes to send. If the write operation completes + successfully, then EFI_SUCCESS will be returned. + For TFTP "get file size" operations, the size of the requested file or directory + is returned in BufferSize, and EFI_SUCCESS will be returned. If the TFTP server + does not support options, the file will be downloaded into a bit bucket and the + length of the downloaded file will be returned. For MTFTP "get file size" operations, + if the MTFTP server does not support the "get file size" option, EFI_UNSUPPORTED + will be returned. + This function can take up to 10 seconds to timeout and return control to the caller. + If the TFTP sequence does not complete, EFI_TIMEOUT will be returned. + If the Callback Protocol does not return EFI_PXE_BASE_CODE_CALLBACK_STATUS_CONTINUE, + then the TFTP sequence is stopped and EFI_ABORTED will be returned. + The format of the data returned from a TFTP read directory operation is a null-terminated + filename followed by a null-terminated information string, of the form + "size year-month-day hour:minute:second" (i.e. %d %d-%d-%d %d:%d:%f - note that + the seconds field can be a decimal number), where the date and time are UTC. For + an MTFTP read directory command, there is additionally a null-terminated multicast + IP address preceding the filename of the form %d.%d.%d.%d for IP v4. The final + entry is itself null-terminated, so that the final information string is terminated + with two null octets. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param Operation The type of operation to perform. + @param BufferPtr A pointer to the data buffer. + @param Overwrite Only used on write file operations. 
TRUE if a file on a remote server can + be overwritten. + @param BufferSize For get-file-size operations, *BufferSize returns the size of the + requested file. + @param BlockSize The requested block size to be used during a TFTP transfer. + @param ServerIp The TFTP / MTFTP server IP address. + @param Filename A Null-terminated ASCII string that specifies a directory name or a file + name. + @param Info The pointer to the MTFTP information. + @param DontUseBuffer Set to FALSE for normal TFTP and MTFTP read file operation. + + @retval EFI_SUCCESS The TFTP/MTFTP operation was completed. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_DEVICE_ERROR The network device encountered an error during this operation. + @retval EFI_BUFFER_TOO_SMALL The buffer is not large enough to complete the read operation. + @retval EFI_ABORTED The callback function aborted the TFTP/MTFTP operation. + @retval EFI_TIMEOUT The TFTP/MTFTP operation timed out. + @retval EFI_ICMP_ERROR An ICMP error packet was received during the MTFTP session. + @retval EFI_TFTP_ERROR A TFTP error packet was received during the MTFTP session. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_MTFTP)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN EFI_PXE_BASE_CODE_TFTP_OPCODE Operation, + IN OUT VOID *BufferPtr OPTIONAL, + IN BOOLEAN Overwrite, + IN OUT UINT64 *BufferSize, + IN UINTN *BlockSize OPTIONAL, + IN EFI_IP_ADDRESS *ServerIp, + IN UINT8 *Filename OPTIONAL, + IN EFI_PXE_BASE_CODE_MTFTP_INFO *Info OPTIONAL, + IN BOOLEAN DontUseBuffer + ); + +/** + Writes a UDP packet to the network interface. + + This function writes a UDP packet specified by the (optional HeaderPtr and) + BufferPtr parameters to the network interface. The UDP header is automatically + built by this routine. It uses the parameters OpFlags, DestIp, DestPort, GatewayIp, + SrcIp, and SrcPort to build this header. 
If the packet is successfully built and + transmitted through the network interface, then EFI_SUCCESS will be returned. + If a timeout occurs during the transmission of the packet, then EFI_TIMEOUT will + be returned. If an ICMP error occurs during the transmission of the packet, then + the IcmpErrorReceived field is set to TRUE, the IcmpError field is filled in and + EFI_ICMP_ERROR will be returned. If the Callback Protocol does not return + EFI_PXE_BASE_CODE_CALLBACK_STATUS_CONTINUE, then EFI_ABORTED will be returned. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param OpFlags The UDP operation flags. + @param DestIp The destination IP address. + @param DestPort The destination UDP port number. + @param GatewayIp The gateway IP address. + @param SrcIp The source IP address. + @param SrcPort The source UDP port number. + @param HeaderSize An optional field which may be set to the length of a header at + HeaderPtr to be prefixed to the data at BufferPtr. + @param HeaderPtr If HeaderSize is not NULL, a pointer to a header to be prefixed to the + data at BufferPtr. + @param BufferSize A pointer to the size of the data at BufferPtr. + @param BufferPtr A pointer to the data to be written. + + @retval EFI_SUCCESS The UDP Write operation was completed. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_BAD_BUFFER_SIZE The buffer is too long to be transmitted. + @retval EFI_ABORTED The callback function aborted the UDP Write operation. + @retval EFI_TIMEOUT The UDP Write operation timed out. + @retval EFI_ICMP_ERROR An ICMP error packet was received during the UDP write session. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_UDP_WRITE)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN UINT16 OpFlags, + IN EFI_IP_ADDRESS *DestIp, + IN EFI_PXE_BASE_CODE_UDP_PORT *DestPort, + IN EFI_IP_ADDRESS *GatewayIp, OPTIONAL + IN EFI_IP_ADDRESS *SrcIp, OPTIONAL + IN OUT EFI_PXE_BASE_CODE_UDP_PORT *SrcPort, OPTIONAL + IN UINTN *HeaderSize, OPTIONAL + IN VOID *HeaderPtr, OPTIONAL + IN UINTN *BufferSize, + IN VOID *BufferPtr + ); + +/** + Reads a UDP packet from the network interface. + + This function reads a UDP packet from a network interface. The data contents + are returned in (the optional HeaderPtr and) BufferPtr, and the size of the + buffer received is returned in BufferSize. If the input BufferSize is smaller + than the UDP packet received (less optional HeaderSize), it will be set to the + required size, and EFI_BUFFER_TOO_SMALL will be returned. In this case, the + contents of BufferPtr are undefined, and the packet is lost. If a UDP packet is + successfully received, then EFI_SUCCESS will be returned, and the information + from the UDP header will be returned in DestIp, DestPort, SrcIp, and SrcPort if + they are not NULL. + Depending on the values of OpFlags and the DestIp, DestPort, SrcIp, and SrcPort + input values, different types of UDP packet receive filtering will be performed. + The following tables summarize these receive filter operations. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param OpFlags The UDP operation flags. + @param DestIp The destination IP address. + @param DestPort The destination UDP port number. + @param SrcIp The source IP address. + @param SrcPort The source UDP port number. + @param HeaderSize An optional field which may be set to the length of a header at + HeaderPtr to be prefixed to the data at BufferPtr. + @param HeaderPtr If HeaderSize is not NULL, a pointer to a header to be prefixed to the + data at BufferPtr. 
+ @param BufferSize A pointer to the size of the data at BufferPtr. + @param BufferPtr A pointer to the data to be read. + + @retval EFI_SUCCESS The UDP Read operation was completed. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_DEVICE_ERROR The network device encountered an error during this operation. + @retval EFI_BUFFER_TOO_SMALL The packet is larger than Buffer can hold. + @retval EFI_ABORTED The callback function aborted the UDP Read operation. + @retval EFI_TIMEOUT The UDP Read operation timed out. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_UDP_READ)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN UINT16 OpFlags, + IN OUT EFI_IP_ADDRESS *DestIp, OPTIONAL + IN OUT EFI_PXE_BASE_CODE_UDP_PORT *DestPort, OPTIONAL + IN OUT EFI_IP_ADDRESS *SrcIp, OPTIONAL + IN OUT EFI_PXE_BASE_CODE_UDP_PORT *SrcPort, OPTIONAL + IN UINTN *HeaderSize, OPTIONAL + IN VOID *HeaderPtr, OPTIONAL + IN OUT UINTN *BufferSize, + IN VOID *BufferPtr + ); + +/** + Updates the IP receive filters of a network device and enables software filtering. + + The NewFilter field is used to modify the network device's current IP receive + filter settings and to enable a software filter. This function updates the IpFilter + field of the EFI_PXE_BASE_CODE_MODE structure with the contents of NewIpFilter. + The software filter is used when the USE_FILTER in OpFlags is set to UdpRead(). + The current hardware filter remains in effect no matter what the settings of OpFlags + are, so that the meaning of ANY_DEST_IP set in OpFlags to UdpRead() is from those + packets whose reception is enabled in hardware - physical NIC address (unicast), + broadcast address, logical address or addresses (multicast), or all (promiscuous). + UdpRead() does not modify the IP filter settings. 
+ Dhcp(), Discover(), and Mtftp() set the IP filter, and return with the IP receive + filter list emptied and the filter set to EFI_PXE_BASE_CODE_IP_FILTER_STATION_IP. + If an application or driver wishes to preserve the IP receive filter settings, + it will have to preserve the IP receive filter settings before these calls, and + use SetIpFilter() to restore them after the calls. If incompatible filtering is + requested (for example, PROMISCUOUS with anything else), or if the device does not + support a requested filter setting and it cannot be accommodated in software + (for example, PROMISCUOUS not supported), EFI_INVALID_PARAMETER will be returned. + The IPlist field is used to enable IPs other than the StationIP. They may be + multicast or unicast. If IPcnt is set as well as EFI_PXE_BASE_CODE_IP_FILTER_STATION_IP, + then both the StationIP and the IPs from the IPlist will be used. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param NewFilter The pointer to the new set of IP receive filters. + + @retval EFI_SUCCESS The IP receive filter settings were updated. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_SET_IP_FILTER)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN EFI_PXE_BASE_CODE_IP_FILTER *NewFilter + ); + +/** + Uses the ARP protocol to resolve a MAC address. + + This function uses the ARP protocol to resolve a MAC address. The UsingIpv6 field + of the EFI_PXE_BASE_CODE_MODE structure is used to determine if IPv4 or IPv6 + addresses are being used. The IP address specified by IpAddr is used to resolve + a MAC address. If the ARP protocol succeeds in resolving the specified address, + then the ArpCacheEntries and ArpCache fields of the EFI_PXE_BASE_CODE_MODE structure + are updated, and EFI_SUCCESS is returned. 
If MacAddr is not NULL, the resolved + MAC address is placed there as well. + If the PXE Base Code protocol is in the stopped state, then EFI_NOT_STARTED is + returned. If the ARP protocol encounters a timeout condition while attempting + to resolve an address, then EFI_TIMEOUT is returned. If the Callback Protocol + does not return EFI_PXE_BASE_CODE_CALLBACK_STATUS_CONTINUE, then EFI_ABORTED is + returned. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param IpAddr The pointer to the IP address that is used to resolve a MAC address. + @param MacAddr If not NULL, a pointer to the MAC address that was resolved with the + ARP protocol. + + @retval EFI_SUCCESS The IP or MAC address was resolved. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_DEVICE_ERROR The network device encountered an error during this operation. + @retval EFI_ABORTED The callback function aborted the ARP Protocol. + @retval EFI_TIMEOUT The ARP Protocol encountered a timeout condition. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_ARP)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN EFI_IP_ADDRESS *IpAddr, + IN EFI_MAC_ADDRESS *MacAddr OPTIONAL + ); + +/** + Updates the parameters that affect the operation of the PXE Base Code Protocol. + + This function sets parameters that affect the operation of the PXE Base Code Protocol. + The parameter specified by NewAutoArp is used to control the generation of ARP + protocol packets. If NewAutoArp is TRUE, then ARP Protocol packets will be generated + as required by the PXE Base Code Protocol. If NewAutoArp is FALSE, then no ARP + Protocol packets will be generated. In this case, the only mappings that are + available are those stored in the ArpCache of the EFI_PXE_BASE_CODE_MODE structure. 
+ If there are not enough mappings in the ArpCache to perform a PXE Base Code Protocol + service, then the service will fail. This function updates the AutoArp field of + the EFI_PXE_BASE_CODE_MODE structure to NewAutoArp. + The SetParameters() call must be invoked after a Callback Protocol is installed + to enable the use of callbacks. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param NewAutoArp If not NULL, a pointer to a value that specifies whether to replace the + current value of AutoARP. + @param NewSendGUID If not NULL, a pointer to a value that specifies whether to replace the + current value of SendGUID. + @param NewTTL If not NULL, a pointer to be used in place of the current value of TTL, + the "time to live" field of the IP header. + @param NewToS If not NULL, a pointer to be used in place of the current value of ToS, + the "type of service" field of the IP header. + @param NewMakeCallback If not NULL, a pointer to a value that specifies whether to replace the + current value of the MakeCallback field of the Mode structure. + + @retval EFI_SUCCESS The new parameters values were updated. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_SET_PARAMETERS)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN BOOLEAN *NewAutoArp, OPTIONAL + IN BOOLEAN *NewSendGUID, OPTIONAL + IN UINT8 *NewTTL, OPTIONAL + IN UINT8 *NewToS, OPTIONAL + IN BOOLEAN *NewMakeCallback OPTIONAL + ); + +/** + Updates the station IP address and/or subnet mask values of a network device. + + This function updates the station IP address and/or subnet mask values of a network + device. + The NewStationIp field is used to modify the network device's current IP address. + If NewStationIP is NULL, then the current IP address will not be modified. 
Otherwise, + this function updates the StationIp field of the EFI_PXE_BASE_CODE_MODE structure + with NewStationIp. + The NewSubnetMask field is used to modify the network device's current subnet + mask. If NewSubnetMask is NULL, then the current subnet mask will not be modified. + Otherwise, this function updates the SubnetMask field of the EFI_PXE_BASE_CODE_MODE + structure with NewSubnetMask. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param NewStationIp The pointer to the new IP address to be used by the network device. + @param NewSubnetMask The pointer to the new subnet mask to be used by the network device. + + @retval EFI_SUCCESS The new station IP address and/or subnet mask were updated. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_SET_STATION_IP)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + IN EFI_IP_ADDRESS *NewStationIp, OPTIONAL + IN EFI_IP_ADDRESS *NewSubnetMask OPTIONAL + ); + +/** + Updates the contents of the cached DHCP and Discover packets. + + The pointers to the new packets are used to update the contents of the cached + packets in the EFI_PXE_BASE_CODE_MODE structure. + + @param This The pointer to the EFI_PXE_BASE_CODE_PROTOCOL instance. + @param NewDhcpDiscoverValid The pointer to a value that will replace the current + DhcpDiscoverValid field. + @param NewDhcpAckReceived The pointer to a value that will replace the current + DhcpAckReceived field. + @param NewProxyOfferReceived The pointer to a value that will replace the current + ProxyOfferReceived field. + @param NewPxeDiscoverValid The pointer to a value that will replace the current + ProxyOfferReceived field. + @param NewPxeReplyReceived The pointer to a value that will replace the current + PxeReplyReceived field. 
+ @param NewPxeBisReplyReceived The pointer to a value that will replace the current + PxeBisReplyReceived field. + @param NewDhcpDiscover The pointer to the new cached DHCP Discover packet contents. + @param NewDhcpAck The pointer to the new cached DHCP Ack packet contents. + @param NewProxyOffer The pointer to the new cached Proxy Offer packet contents. + @param NewPxeDiscover The pointer to the new cached PXE Discover packet contents. + @param NewPxeReply The pointer to the new cached PXE Reply packet contents. + @param NewPxeBisReply The pointer to the new cached PXE BIS Reply packet contents. + + @retval EFI_SUCCESS The cached packet contents were updated. + @retval EFI_NOT_STARTED The PXE Base Code Protocol is in the stopped state. + @retval EFI_INVALID_PARAMETER This is NULL or does not point to a valid EFI_PXE_BASE_CODE_PROTOCOL structure. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_PXE_BASE_CODE_SET_PACKETS)( + IN EFI_PXE_BASE_CODE_PROTOCOL *This, + BOOLEAN *NewDhcpDiscoverValid, OPTIONAL + BOOLEAN *NewDhcpAckReceived, OPTIONAL + BOOLEAN *NewProxyOfferReceived, OPTIONAL + BOOLEAN *NewPxeDiscoverValid, OPTIONAL + BOOLEAN *NewPxeReplyReceived, OPTIONAL + BOOLEAN *NewPxeBisReplyReceived, OPTIONAL + IN EFI_PXE_BASE_CODE_PACKET *NewDhcpDiscover, OPTIONAL + IN EFI_PXE_BASE_CODE_PACKET *NewDhcpAck, OPTIONAL + IN EFI_PXE_BASE_CODE_PACKET *NewProxyOffer, OPTIONAL + IN EFI_PXE_BASE_CODE_PACKET *NewPxeDiscover, OPTIONAL + IN EFI_PXE_BASE_CODE_PACKET *NewPxeReply, OPTIONAL + IN EFI_PXE_BASE_CODE_PACKET *NewPxeBisReply OPTIONAL + ); + +// +// PXE Base Code Protocol structure +// +#define EFI_PXE_BASE_CODE_PROTOCOL_REVISION 0x00010000 + +// +// Revision defined in EFI1.1 +// +#define EFI_PXE_BASE_CODE_INTERFACE_REVISION EFI_PXE_BASE_CODE_PROTOCOL_REVISION + +/// +/// The EFI_PXE_BASE_CODE_PROTOCOL is used to control PXE-compatible devices. 
+/// An EFI_PXE_BASE_CODE_PROTOCOL will be layered on top of an +/// EFI_MANAGED_NETWORK_PROTOCOL protocol in order to perform packet level transactions. +/// The EFI_PXE_BASE_CODE_PROTOCOL handle also supports the +/// EFI_LOAD_FILE_PROTOCOL protocol. This provides a clean way to obtain control from the +/// boot manager if the boot path is from the remote device. +/// +struct _EFI_PXE_BASE_CODE_PROTOCOL { + /// + /// The revision of the EFI_PXE_BASE_CODE_PROTOCOL. All future revisions must + /// be backwards compatible. If a future version is not backwards compatible + /// it is not the same GUID. + /// + UINT64 Revision; + EFI_PXE_BASE_CODE_START Start; + EFI_PXE_BASE_CODE_STOP Stop; + EFI_PXE_BASE_CODE_DHCP Dhcp; + EFI_PXE_BASE_CODE_DISCOVER Discover; + EFI_PXE_BASE_CODE_MTFTP Mtftp; + EFI_PXE_BASE_CODE_UDP_WRITE UdpWrite; + EFI_PXE_BASE_CODE_UDP_READ UdpRead; + EFI_PXE_BASE_CODE_SET_IP_FILTER SetIpFilter; + EFI_PXE_BASE_CODE_ARP Arp; + EFI_PXE_BASE_CODE_SET_PARAMETERS SetParameters; + EFI_PXE_BASE_CODE_SET_STATION_IP SetStationIp; + EFI_PXE_BASE_CODE_SET_PACKETS SetPackets; + /// + /// The pointer to the EFI_PXE_BASE_CODE_MODE data for this device. + /// + EFI_PXE_BASE_CODE_MODE *Mode; +}; + +extern EFI_GUID gEfiPxeBaseCodeProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Rng.h b/src/include/ipxe/efi/Protocol/Rng.h new file mode 100644 index 00000000..f04efbb0 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Rng.h @@ -0,0 +1,158 @@ +/** @file + EFI_RNG_PROTOCOL as defined in UEFI 2.4. + The UEFI Random Number Generator Protocol is used to provide random bits for use + in applications, or entropy for seeding other random number generators. + +Copyright (c) 2013, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __EFI_RNG_PROTOCOL_H__ +#define __EFI_RNG_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +/// +/// Global ID for the Random Number Generator Protocol +/// +#define EFI_RNG_PROTOCOL_GUID \ + { \ + 0x3152bca5, 0xeade, 0x433d, {0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44 } \ + } + +typedef struct _EFI_RNG_PROTOCOL EFI_RNG_PROTOCOL; + +/// +/// A selection of EFI_RNG_PROTOCOL algorithms. +/// The algorithms listed are optional, not meant to be exhaustive and may be augmented by +/// vendors or other industry standards. +/// + +typedef EFI_GUID EFI_RNG_ALGORITHM; + +/// +/// The algorithms correspond to SP800-90 as defined in +/// NIST SP 800-90, "Recommendation for Random Number Generation Using Deterministic Random +/// Bit Generators", March 2007. +/// +#define EFI_RNG_ALGORITHM_SP800_90_HASH_256_GUID \ + { \ + 0xa7af67cb, 0x603b, 0x4d42, {0xba, 0x21, 0x70, 0xbf, 0xb6, 0x29, 0x3f, 0x96 } \ + } +#define EFI_RNG_ALGORITHM_SP800_90_HMAC_256_GUID \ + { \ + 0xc5149b43, 0xae85, 0x4f53, {0x99, 0x82, 0xb9, 0x43, 0x35, 0xd3, 0xa9, 0xe7 } \ + } +#define EFI_RNG_ALGORITHM_SP800_90_CTR_256_GUID \ + { \ + 0x44f0de6e, 0x4d8c, 0x4045, {0xa8, 0xc7, 0x4d, 0xd1, 0x68, 0x85, 0x6b, 0x9e } \ + } +/// +/// The algorithms correspond to X9.31 as defined in +/// NIST, "Recommended Random Number Generator Based on ANSI X9.31 Appendix A.2.4 Using +/// the 3-Key Triple DES and AES Algorithm", January 2005. 
+/// +#define EFI_RNG_ALGORITHM_X9_31_3DES_GUID \ + { \ + 0x63c4785a, 0xca34, 0x4012, {0xa3, 0xc8, 0x0b, 0x6a, 0x32, 0x4f, 0x55, 0x46 } \ + } +#define EFI_RNG_ALGORITHM_X9_31_AES_GUID \ + { \ + 0xacd03321, 0x777e, 0x4d3d, {0xb1, 0xc8, 0x20, 0xcf, 0xd8, 0x88, 0x20, 0xc9 } \ + } +/// +/// The "raw" algorithm, when supported, is intended to provide entropy directly from +/// the source, without it going through some deterministic random bit generator. +/// +#define EFI_RNG_ALGORITHM_RAW \ + { \ + 0xe43176d7, 0xb6e8, 0x4827, {0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61 } \ + } + +/** + Returns information about the random number generation implementation. + + @param[in] This A pointer to the EFI_RNG_PROTOCOL instance. + @param[in,out] RNGAlgorithmListSize On input, the size in bytes of RNGAlgorithmList. + On output with a return code of EFI_SUCCESS, the size + in bytes of the data returned in RNGAlgorithmList. On output + with a return code of EFI_BUFFER_TOO_SMALL, + the size of RNGAlgorithmList required to obtain the list. + @param[out] RNGAlgorithmList A caller-allocated memory buffer filled by the driver + with one EFI_RNG_ALGORITHM element for each supported + RNG algorithm. The list must not change across multiple + calls to the same driver. The first algorithm in the list + is the default algorithm for the driver. + + @retval EFI_SUCCESS The RNG algorithm list was returned successfully. + @retval EFI_UNSUPPORTED The services is not supported by this driver. + @retval EFI_DEVICE_ERROR The list of algorithms could not be retrieved due to a + hardware or firmware error. + @retval EFI_INVALID_PARAMETER One or more of the parameters are incorrect. + @retval EFI_BUFFER_TOO_SMALL The buffer RNGAlgorithmList is too small to hold the result. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_RNG_GET_INFO) ( + IN EFI_RNG_PROTOCOL *This, + IN OUT UINTN *RNGAlgorithmListSize, + OUT EFI_RNG_ALGORITHM *RNGAlgorithmList + ); + +/** + Produces and returns an RNG value using either the default or specified RNG algorithm. + + @param[in] This A pointer to the EFI_RNG_PROTOCOL instance. + @param[in] RNGAlgorithm A pointer to the EFI_RNG_ALGORITHM that identifies the RNG + algorithm to use. May be NULL in which case the function will + use its default RNG algorithm. + @param[in] RNGValueLength The length in bytes of the memory buffer pointed to by + RNGValue. The driver shall return exactly this number of bytes. + @param[out] RNGValue A caller-allocated memory buffer filled by the driver with the + resulting RNG value. + + @retval EFI_SUCCESS The RNG value was returned successfully. + @retval EFI_UNSUPPORTED The algorithm specified by RNGAlgorithm is not supported by + this driver. + @retval EFI_DEVICE_ERROR An RNG value could not be retrieved due to a hardware or + firmware error. + @retval EFI_NOT_READY There is not enough random data available to satisfy the length + requested by RNGValueLength. + @retval EFI_INVALID_PARAMETER RNGValue is NULL or RNGValueLength is zero. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_RNG_GET_RNG) ( + IN EFI_RNG_PROTOCOL *This, + IN EFI_RNG_ALGORITHM *RNGAlgorithm, OPTIONAL + IN UINTN RNGValueLength, + OUT UINT8 *RNGValue + ); + +/// +/// The Random Number Generator (RNG) protocol provides random bits for use in +/// applications, or entropy for seeding other random number generators. 
+/// +struct _EFI_RNG_PROTOCOL { + EFI_RNG_GET_INFO GetInfo; + EFI_RNG_GET_RNG GetRNG; +}; + +extern EFI_GUID gEfiRngProtocolGuid; +extern EFI_GUID gEfiRngAlgorithmSp80090Hash256Guid; +extern EFI_GUID gEfiRngAlgorithmSp80090Hmac256Guid; +extern EFI_GUID gEfiRngAlgorithmSp80090Ctr256Guid; +extern EFI_GUID gEfiRngAlgorithmX9313DesGuid; +extern EFI_GUID gEfiRngAlgorithmX931AesGuid; +extern EFI_GUID gEfiRngAlgorithmRaw; + +#endif diff --git a/src/include/ipxe/efi/Protocol/SerialIo.h b/src/include/ipxe/efi/Protocol/SerialIo.h new file mode 100644 index 00000000..130a6ecd --- /dev/null +++ b/src/include/ipxe/efi/Protocol/SerialIo.h @@ -0,0 +1,301 @@ +/** @file + Serial IO protocol as defined in the UEFI 2.0 specification. + + Abstraction of a basic serial device. Targeted at 16550 UART, but + could be much more generic. + + Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __SERIAL_IO_PROTOCOL_H__ +#define __SERIAL_IO_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_SERIAL_IO_PROTOCOL_GUID \ + { \ + 0xBB25CF6F, 0xF1D4, 0x11D2, {0x9A, 0x0C, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0xFD } \ + } + +/// +/// Protocol GUID defined in EFI1.1. +/// +#define SERIAL_IO_PROTOCOL EFI_SERIAL_IO_PROTOCOL_GUID + +typedef struct _EFI_SERIAL_IO_PROTOCOL EFI_SERIAL_IO_PROTOCOL; + + +/// +/// Backward-compatible with EFI1.1. +/// +typedef EFI_SERIAL_IO_PROTOCOL SERIAL_IO_INTERFACE; + +/// +/// Parity type that is computed or checked as each character is transmitted or received. If the +/// device does not support parity, the value is the default parity value. 
+/// +typedef enum { + DefaultParity, + NoParity, + EvenParity, + OddParity, + MarkParity, + SpaceParity +} EFI_PARITY_TYPE; + +/// +/// Stop bits type +/// +typedef enum { + DefaultStopBits, + OneStopBit, + OneFiveStopBits, + TwoStopBits +} EFI_STOP_BITS_TYPE; + +// +// define for Control bits, grouped by read only, write only, and read write +// +// +// Read Only +// +#define EFI_SERIAL_CLEAR_TO_SEND 0x00000010 +#define EFI_SERIAL_DATA_SET_READY 0x00000020 +#define EFI_SERIAL_RING_INDICATE 0x00000040 +#define EFI_SERIAL_CARRIER_DETECT 0x00000080 +#define EFI_SERIAL_INPUT_BUFFER_EMPTY 0x00000100 +#define EFI_SERIAL_OUTPUT_BUFFER_EMPTY 0x00000200 + +// +// Write Only +// +#define EFI_SERIAL_REQUEST_TO_SEND 0x00000002 +#define EFI_SERIAL_DATA_TERMINAL_READY 0x00000001 + +// +// Read Write +// +#define EFI_SERIAL_HARDWARE_LOOPBACK_ENABLE 0x00001000 +#define EFI_SERIAL_SOFTWARE_LOOPBACK_ENABLE 0x00002000 +#define EFI_SERIAL_HARDWARE_FLOW_CONTROL_ENABLE 0x00004000 + +// +// Serial IO Member Functions +// +/** + Reset the serial device. + + @param This Protocol instance pointer. + + @retval EFI_SUCCESS The device was reset. + @retval EFI_DEVICE_ERROR The serial device could not be reset. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_SERIAL_RESET)( + IN EFI_SERIAL_IO_PROTOCOL *This + ); + +/** + Sets the baud rate, receive FIFO depth, transmit/receice time out, parity, + data bits, and stop bits on a serial device. + + @param This Protocol instance pointer. + @param BaudRate The requested baud rate. A BaudRate value of 0 will use the + device's default interface speed. + @param ReveiveFifoDepth The requested depth of the FIFO on the receive side of the + serial interface. A ReceiveFifoDepth value of 0 will use + the device's default FIFO depth. + @param Timeout The requested time out for a single character in microseconds. + This timeout applies to both the transmit and receive side of the + interface. A Timeout value of 0 will use the device's default time + out value. 
+ @param Parity The type of parity to use on this serial device. A Parity value of
+ DefaultParity will use the device's default parity value.
+ @param DataBits The number of data bits to use on the serial device. A DataBits
+ value of 0 will use the device's default data bit setting.
+ @param StopBits The number of stop bits to use on this serial device. A StopBits
+ value of DefaultStopBits will use the device's default number of
+ stop bits.
+
+ @retval EFI_SUCCESS The device was reset.
+ @retval EFI_DEVICE_ERROR The serial device could not be reset.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_SERIAL_SET_ATTRIBUTES)(
+ IN EFI_SERIAL_IO_PROTOCOL *This,
+ IN UINT64 BaudRate,
+ IN UINT32 ReceiveFifoDepth,
+ IN UINT32 Timeout,
+ IN EFI_PARITY_TYPE Parity,
+ IN UINT8 DataBits,
+ IN EFI_STOP_BITS_TYPE StopBits
+ );
+
+/**
+ Set the control bits on a serial device
+
+ @param This Protocol instance pointer.
+ @param Control Set the bits of Control that are settable.
+
+ @retval EFI_SUCCESS The new control bits were set on the serial device.
+ @retval EFI_UNSUPPORTED The serial device does not support this operation.
+ @retval EFI_DEVICE_ERROR The serial device is not functioning correctly.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_SERIAL_SET_CONTROL_BITS)(
+ IN EFI_SERIAL_IO_PROTOCOL *This,
+ IN UINT32 Control
+ );
+
+/**
+ Retrieves the status of the control bits on a serial device
+
+ @param This Protocol instance pointer.
+ @param Control A pointer to return the current Control signals from the serial device.
+
+ @retval EFI_SUCCESS The control bits were read from the serial device.
+ @retval EFI_DEVICE_ERROR The serial device is not functioning correctly.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_SERIAL_GET_CONTROL_BITS)(
+ IN EFI_SERIAL_IO_PROTOCOL *This,
+ OUT UINT32 *Control
+ );
+
+/**
+ Writes data to a serial device.
+
+ @param This Protocol instance pointer.
+ @param BufferSize On input, the size of the Buffer. On output, the amount of
+ data actually written.
+ @param Buffer The buffer of data to write
+
+ @retval EFI_SUCCESS The data was written.
+ @retval EFI_DEVICE_ERROR The device reported an error.
+ @retval EFI_TIMEOUT The data write was stopped due to a timeout.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_SERIAL_WRITE)(
+ IN EFI_SERIAL_IO_PROTOCOL *This,
+ IN OUT UINTN *BufferSize,
+ IN VOID *Buffer
+ );
+
+/**
+ Reads data from a serial device.
+
+ @param This Protocol instance pointer.
+ @param BufferSize On input, the size of the Buffer. On output, the amount of
+ data returned in Buffer.
+ @param Buffer The buffer to return the data into.
+
+ @retval EFI_SUCCESS The data was read.
+ @retval EFI_DEVICE_ERROR The device reported an error.
+ @retval EFI_TIMEOUT The data read was stopped due to a timeout.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_SERIAL_READ)(
+ IN EFI_SERIAL_IO_PROTOCOL *This,
+ IN OUT UINTN *BufferSize,
+ OUT VOID *Buffer
+ );
+
+/**
+ @par Data Structure Description:
+ The data values in SERIAL_IO_MODE are read-only and are updated by the code
+ that produces the SERIAL_IO_PROTOCOL member functions.
+
+ @param ControlMask
+ A mask for the Control bits that the device supports. The device
+ must always support the Input Buffer Empty control bit.
+
+ @param TimeOut
+ If applicable, the number of microseconds to wait before timing out
+ a Read or Write operation.
+
+ @param BaudRate
+ If applicable, the current baud rate setting of the device; otherwise,
+ baud rate has the value of zero to indicate that device runs at the
+ device's designed speed.
+
+ @param ReceiveFifoDepth
+ The number of characters the device will buffer on input
+
+ @param DataBits
+ The number of data bits in each character
+
+ @param Parity
+ If applicable, this is the EFI_PARITY_TYPE that is computed or
+ checked as each character is transmitted or received. If the device
+ does not support parity the value is the default parity value.
+ + @param StopBits + If applicable, the EFI_STOP_BITS_TYPE number of stop bits per + character. If the device does not support stop bits the value is + the default stop bit values. + +**/ +typedef struct { + UINT32 ControlMask; + + // + // current Attributes + // + UINT32 Timeout; + UINT64 BaudRate; + UINT32 ReceiveFifoDepth; + UINT32 DataBits; + UINT32 Parity; + UINT32 StopBits; +} EFI_SERIAL_IO_MODE; + +#define EFI_SERIAL_IO_PROTOCOL_REVISION 0x00010000 +#define SERIAL_IO_INTERFACE_REVISION EFI_SERIAL_IO_PROTOCOL_REVISION + +/// +/// The Serial I/O protocol is used to communicate with UART-style serial devices. +/// These can be standard UART serial ports in PC-AT systems, serial ports attached +/// to a USB interface, or potentially any character-based I/O device. +/// +struct _EFI_SERIAL_IO_PROTOCOL { + /// + /// The revision to which the EFI_SERIAL_IO_PROTOCOL adheres. All future revisions + /// must be backwards compatible. If a future version is not backwards compatible, + /// it is not the same GUID. + /// + UINT32 Revision; + EFI_SERIAL_RESET Reset; + EFI_SERIAL_SET_ATTRIBUTES SetAttributes; + EFI_SERIAL_SET_CONTROL_BITS SetControl; + EFI_SERIAL_GET_CONTROL_BITS GetControl; + EFI_SERIAL_WRITE Write; + EFI_SERIAL_READ Read; + /// + /// Pointer to SERIAL_IO_MODE data. + /// + EFI_SERIAL_IO_MODE *Mode; +}; + +extern EFI_GUID gEfiSerialIoProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/SimplePointer.h b/src/include/ipxe/efi/Protocol/SimplePointer.h new file mode 100644 index 00000000..3b1e3057 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/SimplePointer.h @@ -0,0 +1,145 @@ +/** @file + Simple Pointer protocol from the UEFI 2.0 specification. + + Abstraction of a very simple pointer device like a mouse or trackball. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __SIMPLE_POINTER_H__ +#define __SIMPLE_POINTER_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_SIMPLE_POINTER_PROTOCOL_GUID \ + { \ + 0x31878c87, 0xb75, 0x11d5, {0x9a, 0x4f, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d } \ + } + +typedef struct _EFI_SIMPLE_POINTER_PROTOCOL EFI_SIMPLE_POINTER_PROTOCOL; + +// +// Data structures +// +typedef struct { + /// + /// The signed distance in counts that the pointer device has been moved along the x-axis. + /// + INT32 RelativeMovementX; + /// + /// The signed distance in counts that the pointer device has been moved along the y-axis. + /// + INT32 RelativeMovementY; + /// + /// The signed distance in counts that the pointer device has been moved along the z-axis. + /// + INT32 RelativeMovementZ; + /// + /// If TRUE, then the left button of the pointer device is being + /// pressed. If FALSE, then the left button of the pointer device is not being pressed. + /// + BOOLEAN LeftButton; + /// + /// If TRUE, then the right button of the pointer device is being + /// pressed. If FALSE, then the right button of the pointer device is not being pressed. + /// + BOOLEAN RightButton; +} EFI_SIMPLE_POINTER_STATE; + +typedef struct { + /// + /// The resolution of the pointer device on the x-axis in counts/mm. + /// If 0, then the pointer device does not support an x-axis. + /// + UINT64 ResolutionX; + /// + /// The resolution of the pointer device on the y-axis in counts/mm. + /// If 0, then the pointer device does not support an x-axis. 
+ /// + UINT64 ResolutionY; + /// + /// The resolution of the pointer device on the z-axis in counts/mm. + /// If 0, then the pointer device does not support an x-axis. + /// + UINT64 ResolutionZ; + /// + /// TRUE if a left button is present on the pointer device. Otherwise FALSE. + /// + BOOLEAN LeftButton; + /// + /// TRUE if a right button is present on the pointer device. Otherwise FALSE. + /// + BOOLEAN RightButton; +} EFI_SIMPLE_POINTER_MODE; + +/** + Resets the pointer device hardware. + + @param This A pointer to the EFI_SIMPLE_POINTER_PROTOCOL + instance. + @param ExtendedVerification Indicates that the driver may perform a more exhaustive + verification operation of the device during reset. + + @retval EFI_SUCCESS The device was reset. + @retval EFI_DEVICE_ERROR The device is not functioning correctly and could not be reset. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_SIMPLE_POINTER_RESET)( + IN EFI_SIMPLE_POINTER_PROTOCOL *This, + IN BOOLEAN ExtendedVerification + ); + +/** + Retrieves the current state of a pointer device. + + @param This A pointer to the EFI_SIMPLE_POINTER_PROTOCOL + instance. + @param State A pointer to the state information on the pointer device. + + @retval EFI_SUCCESS The state of the pointer device was returned in State. + @retval EFI_NOT_READY The state of the pointer device has not changed since the last call to + GetState(). + @retval EFI_DEVICE_ERROR A device error occurred while attempting to retrieve the pointer device's + current state. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_SIMPLE_POINTER_GET_STATE)( + IN EFI_SIMPLE_POINTER_PROTOCOL *This, + IN OUT EFI_SIMPLE_POINTER_STATE *State + ); + +/// +/// The EFI_SIMPLE_POINTER_PROTOCOL provides a set of services for a pointer +/// device that can use used as an input device from an application written +/// to this specification. 
The services include the ability to reset the +/// pointer device, retrieve get the state of the pointer device, and +/// retrieve the capabilities of the pointer device. +/// +struct _EFI_SIMPLE_POINTER_PROTOCOL { + EFI_SIMPLE_POINTER_RESET Reset; + EFI_SIMPLE_POINTER_GET_STATE GetState; + /// + /// Event to use with WaitForEvent() to wait for input from the pointer device. + /// + EFI_EVENT WaitForInput; + /// + /// Pointer to EFI_SIMPLE_POINTER_MODE data. + /// + EFI_SIMPLE_POINTER_MODE *Mode; +}; + +extern EFI_GUID gEfiSimplePointerProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/TcgService.h b/src/include/ipxe/efi/Protocol/TcgService.h new file mode 100644 index 00000000..86c69a84 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/TcgService.h @@ -0,0 +1,203 @@ +/** @file + TCG Service Protocol as defined in TCG_EFI_Protocol_1_22_Final + See http://trustedcomputinggroup.org for the latest specification + +Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _TCG_SERVICE_PROTOCOL_H_ +#define _TCG_SERVICE_PROTOCOL_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_TCG_PROTOCOL_GUID \ + {0xf541796d, 0xa62e, 0x4954, { 0xa7, 0x75, 0x95, 0x84, 0xf6, 0x1b, 0x9c, 0xdd } } + +typedef struct _EFI_TCG_PROTOCOL EFI_TCG_PROTOCOL; + +typedef struct { + UINT8 Major; + UINT8 Minor; + UINT8 RevMajor; + UINT8 RevMinor; +} TCG_VERSION; + +typedef struct _TCG_EFI_BOOT_SERVICE_CAPABILITY { + UINT8 Size; /// Size of this structure. + TCG_VERSION StructureVersion; + TCG_VERSION ProtocolSpecVersion; + UINT8 HashAlgorithmBitmap; /// Hash algorithms . + /// This protocol is capable of : 01=SHA-1. + BOOLEAN TPMPresentFlag; /// 00h = TPM not present. + BOOLEAN TPMDeactivatedFlag; /// 01h = TPM currently deactivated. +} TCG_EFI_BOOT_SERVICE_CAPABILITY; + +typedef UINT32 TCG_ALGORITHM_ID; + +/** + This service provides EFI protocol capability information, state information + about the TPM, and Event Log state information. + + @param This Indicates the calling context + @param ProtocolCapability The callee allocates memory for a TCG_BOOT_SERVICE_CAPABILITY + structure and fills in the fields with the EFI protocol + capability information and the current TPM state information. + @param TCGFeatureFlags This is a pointer to the feature flags. No feature + flags are currently defined so this parameter + MUST be set to 0. However, in the future, + feature flags may be defined that, for example, + enable hash algorithm agility. + @param EventLogLocation This is a pointer to the address of the event log in memory. 
+ @param EventLogLastEntry If the Event Log contains more than one entry, + this is a pointer to the address of the start of + the last entry in the event log in memory. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER ProtocolCapability does not match TCG capability. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCG_STATUS_CHECK)( + IN EFI_TCG_PROTOCOL *This, + OUT TCG_EFI_BOOT_SERVICE_CAPABILITY + *ProtocolCapability, + OUT UINT32 *TCGFeatureFlags, + OUT EFI_PHYSICAL_ADDRESS *EventLogLocation, + OUT EFI_PHYSICAL_ADDRESS *EventLogLastEntry + ); + +/** + This service abstracts the capability to do a hash operation on a data buffer. + + @param This Indicates the calling context. + @param HashData The pointer to the data buffer to be hashed. + @param HashDataLen The length of the data buffer to be hashed. + @param AlgorithmId Identification of the Algorithm to use for the hashing operation. + @param HashedDataLen Resultant length of the hashed data. + @param HashedDataResult Resultant buffer of the hashed data. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER HashDataLen is NULL. + @retval EFI_INVALID_PARAMETER HashDataLenResult is NULL. + @retval EFI_OUT_OF_RESOURCES Cannot allocate buffer of size *HashedDataLen. + @retval EFI_UNSUPPORTED AlgorithmId not supported. + @retval EFI_BUFFER_TOO_SMALL *HashedDataLen < sizeof (TCG_DIGEST). +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCG_HASH_ALL)( + IN EFI_TCG_PROTOCOL *This, + IN UINT8 *HashData, + IN UINT64 HashDataLen, + IN TCG_ALGORITHM_ID AlgorithmId, + IN OUT UINT64 *HashedDataLen, + IN OUT UINT8 **HashedDataResult + ); + +/** + This service abstracts the capability to add an entry to the Event Log. + + @param This Indicates the calling context + @param TCGLogData The pointer to the start of the data buffer containing + the TCG_PCR_EVENT data structure. All fields in + this structure are properly filled by the caller. 
+ @param EventNumber The event number of the event just logged. + @param Flags Indicates additional flags. Only one flag has been + defined at this time, which is 0x01 and means the + extend operation should not be performed. All + other bits are reserved. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_OUT_OF_RESOURCES Insufficient memory in the event log to complete this action. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCG_LOG_EVENT)( + IN EFI_TCG_PROTOCOL *This, + IN TCG_PCR_EVENT *TCGLogData, + IN OUT UINT32 *EventNumber, + IN UINT32 Flags + ); + +/** + This service is a proxy for commands to the TPM. + + @param This Indicates the calling context. + @param TpmInputParameterBlockSize Size of the TPM input parameter block. + @param TpmInputParameterBlock The pointer to the TPM input parameter block. + @param TpmOutputParameterBlockSize Size of the TPM output parameter block. + @param TpmOutputParameterBlock The pointer to the TPM output parameter block. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_INVALID_PARAMETER Invalid ordinal. + @retval EFI_UNSUPPORTED Current Task Priority Level >= EFI_TPL_CALLBACK. + @retval EFI_TIMEOUT The TIS timed-out. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCG_PASS_THROUGH_TO_TPM)( + IN EFI_TCG_PROTOCOL *This, + IN UINT32 TpmInputParameterBlockSize, + IN UINT8 *TpmInputParameterBlock, + IN UINT32 TpmOutputParameterBlockSize, + IN UINT8 *TpmOutputParameterBlock + ); + +/** + This service abstracts the capability to do a hash operation on a data buffer, extend a specific TPM PCR with the hash result, and add an entry to the Event Log + + @param This Indicates the calling context + @param HashData The physical address of the start of the data buffer + to be hashed, extended, and logged. 
+ @param HashDataLen The length, in bytes, of the buffer referenced by HashData + @param AlgorithmId Identification of the Algorithm to use for the hashing operation + @param TCGLogData The physical address of the start of the data + buffer containing the TCG_PCR_EVENT data structure. + @param EventNumber The event number of the event just logged. + @param EventLogLastEntry The physical address of the first byte of the entry + just placed in the Event Log. If the Event Log was + empty when this function was called then this physical + address will be the same as the physical address of + the start of the Event Log. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_UNSUPPORTED AlgorithmId != TPM_ALG_SHA. + @retval EFI_UNSUPPORTED Current TPL >= EFI_TPL_CALLBACK. + @retval EFI_DEVICE_ERROR The command was unsuccessful. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCG_HASH_LOG_EXTEND_EVENT)( + IN EFI_TCG_PROTOCOL *This, + IN EFI_PHYSICAL_ADDRESS HashData, + IN UINT64 HashDataLen, + IN TCG_ALGORITHM_ID AlgorithmId, + IN OUT TCG_PCR_EVENT *TCGLogData, + IN OUT UINT32 *EventNumber, + OUT EFI_PHYSICAL_ADDRESS *EventLogLastEntry + ); + +/// +/// The EFI_TCG Protocol abstracts TCG activity. +/// +struct _EFI_TCG_PROTOCOL { + EFI_TCG_STATUS_CHECK StatusCheck; + EFI_TCG_HASH_ALL HashAll; + EFI_TCG_LOG_EVENT LogEvent; + EFI_TCG_PASS_THROUGH_TO_TPM PassThroughToTpm; + EFI_TCG_HASH_LOG_EXTEND_EVENT HashLogExtendEvent; +}; + +extern EFI_GUID gEfiTcgProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Tcp4.h b/src/include/ipxe/efi/Protocol/Tcp4.h new file mode 100644 index 00000000..1771bc55 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Tcp4.h @@ -0,0 +1,579 @@ +/** @file + EFI TCPv4(Transmission Control Protocol version 4) Protocol Definition + The EFI TCPv4 Service Binding Protocol is used to locate EFI TCPv4 Protocol drivers to create + and destroy child of the driver to communicate with other host using TCP protocol. 
+ The EFI TCPv4 Protocol provides services to send and receive data stream. + +Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in UEFI Specification 2.0. + +**/ + +#ifndef __EFI_TCP4_PROTOCOL_H__ +#define __EFI_TCP4_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_TCP4_SERVICE_BINDING_PROTOCOL_GUID \ + { \ + 0x00720665, 0x67EB, 0x4a99, {0xBA, 0xF7, 0xD3, 0xC3, 0x3A, 0x1C, 0x7C, 0xC9 } \ + } + +#define EFI_TCP4_PROTOCOL_GUID \ + { \ + 0x65530BC7, 0xA359, 0x410f, {0xB0, 0x10, 0x5A, 0xAD, 0xC7, 0xEC, 0x2B, 0x62 } \ + } + +typedef struct _EFI_TCP4_PROTOCOL EFI_TCP4_PROTOCOL; + +/// +/// EFI_TCP4_SERVICE_POINT is deprecated in the UEFI 2.4B and should not be used any more. +/// The definition in here is only present to provide backwards compatability. +/// +typedef struct { + EFI_HANDLE InstanceHandle; + EFI_IPv4_ADDRESS LocalAddress; + UINT16 LocalPort; + EFI_IPv4_ADDRESS RemoteAddress; + UINT16 RemotePort; +} EFI_TCP4_SERVICE_POINT; + +/// +/// EFI_TCP4_VARIABLE_DATA is deprecated in the UEFI 2.4B and should not be used any more. +/// The definition in here is only present to provide backwards compatability. 
+/// +typedef struct { + EFI_HANDLE DriverHandle; + UINT32 ServiceCount; + EFI_TCP4_SERVICE_POINT Services[1]; +} EFI_TCP4_VARIABLE_DATA; + +typedef struct { + BOOLEAN UseDefaultAddress; + EFI_IPv4_ADDRESS StationAddress; + EFI_IPv4_ADDRESS SubnetMask; + UINT16 StationPort; + EFI_IPv4_ADDRESS RemoteAddress; + UINT16 RemotePort; + BOOLEAN ActiveFlag; +} EFI_TCP4_ACCESS_POINT; + +typedef struct { + UINT32 ReceiveBufferSize; + UINT32 SendBufferSize; + UINT32 MaxSynBackLog; + UINT32 ConnectionTimeout; + UINT32 DataRetries; + UINT32 FinTimeout; + UINT32 TimeWaitTimeout; + UINT32 KeepAliveProbes; + UINT32 KeepAliveTime; + UINT32 KeepAliveInterval; + BOOLEAN EnableNagle; + BOOLEAN EnableTimeStamp; + BOOLEAN EnableWindowScaling; + BOOLEAN EnableSelectiveAck; + BOOLEAN EnablePathMtuDiscovery; +} EFI_TCP4_OPTION; + +typedef struct { + // + // I/O parameters + // + UINT8 TypeOfService; + UINT8 TimeToLive; + + // + // Access Point + // + EFI_TCP4_ACCESS_POINT AccessPoint; + + // + // TCP Control Options + // + EFI_TCP4_OPTION *ControlOption; +} EFI_TCP4_CONFIG_DATA; + +/// +/// TCP4 connnection state +/// +typedef enum { + Tcp4StateClosed = 0, + Tcp4StateListen = 1, + Tcp4StateSynSent = 2, + Tcp4StateSynReceived = 3, + Tcp4StateEstablished = 4, + Tcp4StateFinWait1 = 5, + Tcp4StateFinWait2 = 6, + Tcp4StateClosing = 7, + Tcp4StateTimeWait = 8, + Tcp4StateCloseWait = 9, + Tcp4StateLastAck = 10 +} EFI_TCP4_CONNECTION_STATE; + +typedef struct { + EFI_EVENT Event; + EFI_STATUS Status; +} EFI_TCP4_COMPLETION_TOKEN; + +typedef struct { + /// + /// The Status in the CompletionToken will be set to one of + /// the following values if the active open succeeds or an unexpected + /// error happens: + /// EFI_SUCCESS: The active open succeeds and the instance's + /// state is Tcp4StateEstablished. + /// EFI_CONNECTION_RESET: The connect fails because the connection is reset + /// either by instance itself or the communication peer. 
+ /// EFI_CONNECTION_REFUSED: The connect fails because this connection is initiated with + /// an active open and the connection is refused. + /// EFI_ABORTED: The active open is aborted. + /// EFI_TIMEOUT: The connection establishment timer expires and + /// no more specific information is available. + /// EFI_NETWORK_UNREACHABLE: The active open fails because + /// an ICMP network unreachable error is received. + /// EFI_HOST_UNREACHABLE: The active open fails because an + /// ICMP host unreachable error is received. + /// EFI_PROTOCOL_UNREACHABLE: The active open fails + /// because an ICMP protocol unreachable error is received. + /// EFI_PORT_UNREACHABLE: The connection establishment + /// timer times out and an ICMP port unreachable error is received. + /// EFI_ICMP_ERROR: The connection establishment timer timeout and some other ICMP + /// error is received. + /// EFI_DEVICE_ERROR: An unexpected system or network error occurred. + /// EFI_NO_MEDIA: There was a media error. + /// + EFI_TCP4_COMPLETION_TOKEN CompletionToken; +} EFI_TCP4_CONNECTION_TOKEN; + +typedef struct { + EFI_TCP4_COMPLETION_TOKEN CompletionToken; + EFI_HANDLE NewChildHandle; +} EFI_TCP4_LISTEN_TOKEN; + +typedef struct { + UINT32 FragmentLength; + VOID *FragmentBuffer; +} EFI_TCP4_FRAGMENT_DATA; + +typedef struct { + BOOLEAN UrgentFlag; + UINT32 DataLength; + UINT32 FragmentCount; + EFI_TCP4_FRAGMENT_DATA FragmentTable[1]; +} EFI_TCP4_RECEIVE_DATA; + +typedef struct { + BOOLEAN Push; + BOOLEAN Urgent; + UINT32 DataLength; + UINT32 FragmentCount; + EFI_TCP4_FRAGMENT_DATA FragmentTable[1]; +} EFI_TCP4_TRANSMIT_DATA; + +typedef struct { + /// + /// When transmission finishes or meets any unexpected error it will + /// be set to one of the following values: + /// EFI_SUCCESS: The receiving or transmission operation + /// completes successfully. 
+ /// EFI_CONNECTION_FIN: The receiving operation fails because the communication peer + /// has closed the connection and there is no more data in the + /// receive buffer of the instance. + /// EFI_CONNECTION_RESET: The receiving or transmission operation fails + /// because this connection is reset either by instance + /// itself or the communication peer. + /// EFI_ABORTED: The receiving or transmission is aborted. + /// EFI_TIMEOUT: The transmission timer expires and no more + /// specific information is available. + /// EFI_NETWORK_UNREACHABLE: The transmission fails + /// because an ICMP network unreachable error is received. + /// EFI_HOST_UNREACHABLE: The transmission fails because an + /// ICMP host unreachable error is received. + /// EFI_PROTOCOL_UNREACHABLE: The transmission fails + /// because an ICMP protocol unreachable error is received. + /// EFI_PORT_UNREACHABLE: The transmission fails and an + /// ICMP port unreachable error is received. + /// EFI_ICMP_ERROR: The transmission fails and some other + /// ICMP error is received. + /// EFI_DEVICE_ERROR: An unexpected system or network error occurs. + /// EFI_NO_MEDIA: There was a media error. + /// + EFI_TCP4_COMPLETION_TOKEN CompletionToken; + union { + /// + /// When this token is used for receiving, RxData is a pointer to EFI_TCP4_RECEIVE_DATA. + /// + EFI_TCP4_RECEIVE_DATA *RxData; + /// + /// When this token is used for transmitting, TxData is a pointer to EFI_TCP4_TRANSMIT_DATA. + /// + EFI_TCP4_TRANSMIT_DATA *TxData; + } Packet; +} EFI_TCP4_IO_TOKEN; + +typedef struct { + EFI_TCP4_COMPLETION_TOKEN CompletionToken; + BOOLEAN AbortOnClose; +} EFI_TCP4_CLOSE_TOKEN; + +// +// Interface definition for TCP4 protocol +// + +/** + Get the current operational status. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param Tcp4State The pointer to the buffer to receive the current TCP state. + @param Tcp4ConfigData The pointer to the buffer to receive the current TCP configuration. 
+ @param Ip4ModeData The pointer to the buffer to receive the current IPv4 configuration + data used by the TCPv4 instance. + @param MnpConfigData The pointer to the buffer to receive the current MNP configuration + data used indirectly by the TCPv4 instance. + @param SnpModeData The pointer to the buffer to receive the current SNP configuration + data used indirectly by the TCPv4 instance. + + @retval EFI_SUCCESS The mode data was read. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_STARTED No configuration data is available because this instance hasn't + been started. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_GET_MODE_DATA)( + IN EFI_TCP4_PROTOCOL *This, + OUT EFI_TCP4_CONNECTION_STATE *Tcp4State OPTIONAL, + OUT EFI_TCP4_CONFIG_DATA *Tcp4ConfigData OPTIONAL, + OUT EFI_IP4_MODE_DATA *Ip4ModeData OPTIONAL, + OUT EFI_MANAGED_NETWORK_CONFIG_DATA *MnpConfigData OPTIONAL, + OUT EFI_SIMPLE_NETWORK_MODE *SnpModeData OPTIONAL + ); + +/** + Initialize or brutally reset the operational parameters for this EFI TCPv4 instance. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param Tcp4ConfigData The pointer to the configure data to configure the instance. + + @retval EFI_SUCCESS The operational settings are set, changed, or reset + successfully. + @retval EFI_INVALID_PARAMETER Some parameter is invalid. + @retval EFI_NO_MAPPING When using a default address, configuration (through + DHCP, BOOTP, RARP, etc.) is not finished yet. + @retval EFI_ACCESS_DENIED Configuring TCP instance when it is configured without + calling Configure() with NULL to reset it. + @retval EFI_DEVICE_ERROR An unexpected network or system error occurred. + @retval EFI_UNSUPPORTED One or more of the control options are not supported in + the implementation. + @retval EFI_OUT_OF_RESOURCES Could not allocate enough system resources when + executing Configure(). 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_CONFIGURE)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_CONFIG_DATA *TcpConfigData OPTIONAL + ); + + +/** + Add or delete a route entry to the route table + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param DeleteRoute Set it to TRUE to delete this route from the routing table. Set it to + FALSE to add this route to the routing table. + DestinationAddress and SubnetMask are used as the + keywords to search route entry. + @param SubnetAddress The destination network. + @param SubnetMask The subnet mask of the destination network. + @param GatewayAddress The gateway address for this route. It must be on the same + subnet with the station address unless a direct route is specified. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_NOT_STARTED The EFI TCPv4 Protocol instance has not been configured. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - SubnetAddress is NULL. + - SubnetMask is NULL. + - GatewayAddress is NULL. + - *SubnetAddress is not NULL a valid subnet address. + - *SubnetMask is not a valid subnet mask. + - *GatewayAddress is not a valid unicast IP address or it + is not in the same subnet. + @retval EFI_OUT_OF_RESOURCES Could not allocate enough resources to add the entry to the + routing table. + @retval EFI_NOT_FOUND This route is not in the routing table. + @retval EFI_ACCESS_DENIED The route is already defined in the routing table. + @retval EFI_UNSUPPORTED The TCP driver does not support this operation. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_ROUTES)( + IN EFI_TCP4_PROTOCOL *This, + IN BOOLEAN DeleteRoute, + IN EFI_IPv4_ADDRESS *SubnetAddress, + IN EFI_IPv4_ADDRESS *SubnetMask, + IN EFI_IPv4_ADDRESS *GatewayAddress + ); + +/** + Initiate a nonblocking TCP connection request for an active TCP instance. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param ConnectionToken The pointer to the connection token to return when the TCP three + way handshake finishes. + + @retval EFI_SUCCESS The connection request is successfully initiated and the state + of this TCPv4 instance has been changed to Tcp4StateSynSent. + @retval EFI_NOT_STARTED This EFI TCPv4 Protocol instance has not been configured. + @retval EFI_ACCESS_DENIED One or more of the following conditions are TRUE: + - This instance is not configured as an active one. + - This instance is not in Tcp4StateClosed state. + @retval EFI_INVALID_PARAMETER One or more of the following are TRUE: + - This is NULL. + - ConnectionToken is NULL. + - ConnectionToken->CompletionToken.Event is NULL. + @retval EFI_OUT_OF_RESOURCES The driver can't allocate enough resource to initiate the activ eopen. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_CONNECT)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_CONNECTION_TOKEN *ConnectionToken + ); + + +/** + Listen on the passive instance to accept an incoming connection request. This is a nonblocking operation. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param ListenToken The pointer to the listen token to return when operation finishes. + + @retval EFI_SUCCESS The listen token has been queued successfully. + @retval EFI_NOT_STARTED This EFI TCPv4 Protocol instance has not been configured. + @retval EFI_ACCESS_DENIED One or more of the following are TRUE: + - This instance is not a passive instance. + - This instance is not in Tcp4StateListen state. 
+ - The same listen token has already existed in the listen + token queue of this TCP instance. + @retval EFI_INVALID_PARAMETER One or more of the following are TRUE: + - This is NULL. + - ListenToken is NULL. + - ListentToken->CompletionToken.Event is NULL. + @retval EFI_OUT_OF_RESOURCES Could not allocate enough resource to finish the operation. + @retval EFI_DEVICE_ERROR Any unexpected and not belonged to above category error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_ACCEPT)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_LISTEN_TOKEN *ListenToken + ); + +/** + Queues outgoing data into the transmit queue. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param Token The pointer to the completion token to queue to the transmit queue. + + @retval EFI_SUCCESS The data has been queued for transmission. + @retval EFI_NOT_STARTED This EFI TCPv4 Protocol instance has not been configured. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER One or more of the following are TRUE: + - This is NULL. + - Token is NULL. + - Token->CompletionToken.Event is NULL. + - Token->Packet.TxData is NULL L. + - Token->Packet.FragmentCount is zero. + - Token->Packet.DataLength is not equal to the sum of fragment lengths. + @retval EFI_ACCESS_DENIED One or more of the following conditions is TRUE: + - A transmit completion token with the same Token->CompletionToken.Event + was already in the transmission queue. + - The current instance is in Tcp4StateClosed state. + - The current instance is a passive one and it is in + Tcp4StateListen state. + - User has called Close() to disconnect this connection. + @retval EFI_NOT_READY The completion token could not be queued because the + transmit queue is full. + @retval EFI_OUT_OF_RESOURCES Could not queue the transmit data because of resource + shortage. 
+ @retval EFI_NETWORK_UNREACHABLE There is no route to the destination network or address. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_TRANSMIT)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_IO_TOKEN *Token + ); + + +/** + Places an asynchronous receive request into the receiving queue. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param Token The pointer to a token that is associated with the receive data + descriptor. + + @retval EFI_SUCCESS The receive completion token was cached. + @retval EFI_NOT_STARTED This EFI TCPv4 Protocol instance has not been configured. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, RARP, + etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - Token is NULL. + - Token->CompletionToken.Event is NULL. + - Token->Packet.RxData is NULL. + - Token->Packet.RxData->DataLength is 0. + - The Token->Packet.RxData->DataLength is not + the sum of all FragmentBuffer length in FragmentTable. + @retval EFI_OUT_OF_RESOURCES The receive completion token could not be queued due to a lack of + system resources (usually memory). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_ACCESS_DENIED One or more of the following conditions is TRUE: + - A receive completion token with the same Token- + >CompletionToken.Event was already in the receive + queue. + - The current instance is in Tcp4StateClosed state. + - The current instance is a passive one and it is in + Tcp4StateListen state. + - User has called Close() to disconnect this connection. + @retval EFI_CONNECTION_FIN The communication peer has closed the connection and there is + no any buffered data in the receive buffer of this instance. + @retval EFI_NOT_READY The receive request could not be queued because the receive queue is full. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_RECEIVE)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_IO_TOKEN *Token + ); + +/** + Disconnecting a TCP connection gracefully or reset a TCP connection. This function is a + nonblocking operation. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param CloseToken The pointer to the close token to return when operation finishes. + + @retval EFI_SUCCESS The Close() is called successfully. + @retval EFI_NOT_STARTED This EFI TCPv4 Protocol instance has not been configured. + @retval EFI_ACCESS_DENIED One or more of the following are TRUE: + - Configure() has been called with + TcpConfigData set to NULL and this function has + not returned. + - Previous Close() call on this instance has not + finished. + @retval EFI_INVALID_PARAMETER One or more of the following are TRUE: + - This is NULL. + - CloseToken is NULL. + - CloseToken->CompletionToken.Event is NULL. + @retval EFI_OUT_OF_RESOURCES Could not allocate enough resource to finish the operation. + @retval EFI_DEVICE_ERROR Any unexpected and not belonged to above category error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_CLOSE)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_CLOSE_TOKEN *CloseToken + ); + +/** + Abort an asynchronous connection, listen, transmission or receive request. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + @param Token The pointer to a token that has been issued by + EFI_TCP4_PROTOCOL.Connect(), + EFI_TCP4_PROTOCOL.Accept(), + EFI_TCP4_PROTOCOL.Transmit() or + EFI_TCP4_PROTOCOL.Receive(). If NULL, all pending + tokens issued by above four functions will be aborted. Type + EFI_TCP4_COMPLETION_TOKEN is defined in + EFI_TCP4_PROTOCOL.Connect(). + + @retval EFI_SUCCESS The asynchronous I/O request is aborted and Token->Event + is signaled. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_STARTED This instance hasn't been configured. 
+ @retval EFI_NO_MAPPING When using the default address, configuration + (DHCP, BOOTP,RARP, etc.) hasn't finished yet. + @retval EFI_NOT_FOUND The asynchronous I/O request isn't found in the + transmission or receive queue. It has either + completed or wasn't issued by Transmit() and Receive(). + @retval EFI_UNSUPPORTED The implementation does not support this function. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_CANCEL)( + IN EFI_TCP4_PROTOCOL *This, + IN EFI_TCP4_COMPLETION_TOKEN *Token OPTIONAL + ); + + +/** + Poll to receive incoming data and transmit outgoing segments. + + @param This The pointer to the EFI_TCP4_PROTOCOL instance. + + @retval EFI_SUCCESS Incoming or outgoing data was processed. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_NOT_READY No incoming or outgoing data is processed. + @retval EFI_TIMEOUT Data was dropped out of the transmission or receive queue. + Consider increasing the polling rate. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_TCP4_POLL)( + IN EFI_TCP4_PROTOCOL *This + ); + +/// +/// The EFI_TCP4_PROTOCOL defines the EFI TCPv4 Protocol child to be used by +/// any network drivers or applications to send or receive data stream. +/// It can either listen on a specified port as a service or actively connected +/// to remote peer as a client. Each instance has its own independent settings, +/// such as the routing table. 
+/// +struct _EFI_TCP4_PROTOCOL { + EFI_TCP4_GET_MODE_DATA GetModeData; + EFI_TCP4_CONFIGURE Configure; + EFI_TCP4_ROUTES Routes; + EFI_TCP4_CONNECT Connect; + EFI_TCP4_ACCEPT Accept; + EFI_TCP4_TRANSMIT Transmit; + EFI_TCP4_RECEIVE Receive; + EFI_TCP4_CLOSE Close; + EFI_TCP4_CANCEL Cancel; + EFI_TCP4_POLL Poll; +}; + +extern EFI_GUID gEfiTcp4ServiceBindingProtocolGuid; +extern EFI_GUID gEfiTcp4ProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Udp4.h b/src/include/ipxe/efi/Protocol/Udp4.h new file mode 100644 index 00000000..3c61db8c --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Udp4.h @@ -0,0 +1,447 @@ +/** @file + UDP4 Service Binding Protocol as defined in UEFI specification. + + The EFI UDPv4 Protocol provides simple packet-oriented services + to transmit and receive UDP packets. + +Copyright (c) 2006 - 2014, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in UEFI Specification 2.0. + +**/ + +#ifndef __EFI_UDP4_PROTOCOL_H__ +#define __EFI_UDP4_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + +#include +// +//GUID definitions +// +#define EFI_UDP4_SERVICE_BINDING_PROTOCOL_GUID \ + { \ + 0x83f01464, 0x99bd, 0x45e5, {0xb3, 0x83, 0xaf, 0x63, 0x05, 0xd8, 0xe9, 0xe6 } \ + } + +#define EFI_UDP4_PROTOCOL_GUID \ + { \ + 0x3ad9df29, 0x4501, 0x478d, {0xb1, 0xf8, 0x7f, 0x7f, 0xe7, 0x0e, 0x50, 0xf3 } \ + } + +typedef struct _EFI_UDP4_PROTOCOL EFI_UDP4_PROTOCOL; + +/// +/// EFI_UDP4_SERVICE_POINT is deprecated in the UEFI 2.4B and should not be used any more. +/// The definition in here is only present to provide backwards compatability. +/// +typedef struct { + EFI_HANDLE InstanceHandle; + EFI_IPv4_ADDRESS LocalAddress; + UINT16 LocalPort; + EFI_IPv4_ADDRESS RemoteAddress; + UINT16 RemotePort; +} EFI_UDP4_SERVICE_POINT; + +/// +/// EFI_UDP4_VARIABLE_DATA is deprecated in the UEFI 2.4B and should not be used any more. +/// The definition in here is only present to provide backwards compatability. 
+/// +typedef struct { + EFI_HANDLE DriverHandle; + UINT32 ServiceCount; + EFI_UDP4_SERVICE_POINT Services[1]; +} EFI_UDP4_VARIABLE_DATA; + +typedef struct { + UINT32 FragmentLength; + VOID *FragmentBuffer; +} EFI_UDP4_FRAGMENT_DATA; + +typedef struct { + EFI_IPv4_ADDRESS SourceAddress; + UINT16 SourcePort; + EFI_IPv4_ADDRESS DestinationAddress; + UINT16 DestinationPort; +} EFI_UDP4_SESSION_DATA; +typedef struct { + // + // Receiving Filters + // + BOOLEAN AcceptBroadcast; + BOOLEAN AcceptPromiscuous; + BOOLEAN AcceptAnyPort; + BOOLEAN AllowDuplicatePort; + // + // I/O parameters + // + UINT8 TypeOfService; + UINT8 TimeToLive; + BOOLEAN DoNotFragment; + UINT32 ReceiveTimeout; + UINT32 TransmitTimeout; + // + // Access Point + // + BOOLEAN UseDefaultAddress; + EFI_IPv4_ADDRESS StationAddress; + EFI_IPv4_ADDRESS SubnetMask; + UINT16 StationPort; + EFI_IPv4_ADDRESS RemoteAddress; + UINT16 RemotePort; +} EFI_UDP4_CONFIG_DATA; + +typedef struct { + EFI_UDP4_SESSION_DATA *UdpSessionData; //OPTIONAL + EFI_IPv4_ADDRESS *GatewayAddress; //OPTIONAL + UINT32 DataLength; + UINT32 FragmentCount; + EFI_UDP4_FRAGMENT_DATA FragmentTable[1]; +} EFI_UDP4_TRANSMIT_DATA; + +typedef struct { + EFI_TIME TimeStamp; + EFI_EVENT RecycleSignal; + EFI_UDP4_SESSION_DATA UdpSession; + UINT32 DataLength; + UINT32 FragmentCount; + EFI_UDP4_FRAGMENT_DATA FragmentTable[1]; +} EFI_UDP4_RECEIVE_DATA; + + +typedef struct { + EFI_EVENT Event; + EFI_STATUS Status; + union { + EFI_UDP4_RECEIVE_DATA *RxData; + EFI_UDP4_TRANSMIT_DATA *TxData; + } Packet; +} EFI_UDP4_COMPLETION_TOKEN; + +/** + Reads the current operational settings. + + The GetModeData() function copies the current operational settings of this EFI + UDPv4 Protocol instance into user-supplied buffers. This function is used + optionally to retrieve the operational mode data of underlying networks or + drivers. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. 
+ @param Udp4ConfigData The pointer to the buffer to receive the current configuration data. + @param Ip4ModeData The pointer to the EFI IPv4 Protocol mode data structure. + @param MnpConfigData The pointer to the managed network configuration data structure. + @param SnpModeData The pointer to the simple network mode data structure. + + @retval EFI_SUCCESS The mode data was read. + @retval EFI_NOT_STARTED When Udp4ConfigData is queried, no configuration data is + available because this instance has not been started. + @retval EFI_INVALID_PARAMETER This is NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_GET_MODE_DATA)( + IN EFI_UDP4_PROTOCOL *This, + OUT EFI_UDP4_CONFIG_DATA *Udp4ConfigData OPTIONAL, + OUT EFI_IP4_MODE_DATA *Ip4ModeData OPTIONAL, + OUT EFI_MANAGED_NETWORK_CONFIG_DATA *MnpConfigData OPTIONAL, + OUT EFI_SIMPLE_NETWORK_MODE *SnpModeData OPTIONAL + ); + + +/** + Initializes, changes, or resets the operational parameters for this instance of the EFI UDPv4 + Protocol. + + The Configure() function is used to do the following: + * Initialize and start this instance of the EFI UDPv4 Protocol. + * Change the filtering rules and operational parameters. + * Reset this instance of the EFI UDPv4 Protocol. + Until these parameters are initialized, no network traffic can be sent or + received by this instance. This instance can be also reset by calling Configure() + with UdpConfigData set to NULL. Once reset, the receiving queue and transmitting + queue are flushed and no traffic is allowed through this instance. + With different parameters in UdpConfigData, Configure() can be used to bind + this instance to specified port. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + @param Udp4ConfigData The pointer to the buffer to receive the current configuration data. + + @retval EFI_SUCCESS The configuration settings were set, changed, or reset successfully. 
+ @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_INVALID_PARAMETER UdpConfigData.StationAddress is not a valid unicast IPv4 address. + @retval EFI_INVALID_PARAMETER UdpConfigData.SubnetMask is not a valid IPv4 address mask. The subnet + mask must be contiguous. + @retval EFI_INVALID_PARAMETER UdpConfigData.RemoteAddress is not a valid unicast IPv4 address if it + is not zero. + @retval EFI_ALREADY_STARTED The EFI UDPv4 Protocol instance is already started/configured + and must be stopped/reset before it can be reconfigured. + @retval EFI_ACCESS_DENIED UdpConfigData. AllowDuplicatePort is FALSE + and UdpConfigData.StationPort is already used by + other instance. + @retval EFI_OUT_OF_RESOURCES The EFI UDPv4 Protocol driver cannot allocate memory for this + EFI UDPv4 Protocol instance. + @retval EFI_DEVICE_ERROR An unexpected network or system error occurred and this instance + was not opened. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_CONFIGURE)( + IN EFI_UDP4_PROTOCOL *This, + IN EFI_UDP4_CONFIG_DATA *UdpConfigData OPTIONAL + ); + +/** + Joins and leaves multicast groups. + + The Groups() function is used to enable and disable the multicast group + filtering. If the JoinFlag is FALSE and the MulticastAddress is NULL, then all + currently joined groups are left. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + @param JoinFlag Set to TRUE to join a multicast group. Set to FALSE to leave one + or all multicast groups. + @param MulticastAddress The pointer to multicast group address to join or leave. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_NOT_STARTED The EFI UDPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. 
+ @retval EFI_OUT_OF_RESOURCES Could not allocate resources to join the group. + @retval EFI_INVALID_PARAMETER One or more of the following conditions is TRUE: + - This is NULL. + - JoinFlag is TRUE and MulticastAddress is NULL. + - JoinFlag is TRUE and *MulticastAddress is not + a valid multicast address. + @retval EFI_ALREADY_STARTED The group address is already in the group table (when + JoinFlag is TRUE). + @retval EFI_NOT_FOUND The group address is not in the group table (when JoinFlag is + FALSE). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_GROUPS)( + IN EFI_UDP4_PROTOCOL *This, + IN BOOLEAN JoinFlag, + IN EFI_IPv4_ADDRESS *MulticastAddress OPTIONAL + ); + +/** + Adds and deletes routing table entries. + + The Routes() function adds a route to or deletes a route from the routing table. + Routes are determined by comparing the SubnetAddress with the destination IP + address and arithmetically AND-ing it with the SubnetMask. The gateway address + must be on the same subnet as the configured station address. + The default route is added with SubnetAddress and SubnetMask both set to 0.0.0.0. + The default route matches all destination IP addresses that do not match any + other routes. + A zero GatewayAddress is a nonroute. Packets are sent to the destination IP + address if it can be found in the Address Resolution Protocol (ARP) cache or + on the local subnet. One automatic nonroute entry will be inserted into the + routing table for outgoing packets that are addressed to a local subnet + (gateway address of 0.0.0.0). + Each instance of the EFI UDPv4 Protocol has its own independent routing table. + Instances of the EFI UDPv4 Protocol that use the default IP address will also + have copies of the routing table provided by the EFI_IP4_CONFIG_PROTOCOL. 
These + copies will be updated automatically whenever the IP driver reconfigures its + instances; as a result, the previous modification to these copies will be lost. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + @param DeleteRoute Set to TRUE to delete this route from the routing table. + Set to FALSE to add this route to the routing table. + @param SubnetAddress The destination network address that needs to be routed. + @param SubnetMask The subnet mask of SubnetAddress. + @param GatewayAddress The gateway IP address for this route. + + @retval EFI_SUCCESS The operation completed successfully. + @retval EFI_NOT_STARTED The EFI UDPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + - RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_OUT_OF_RESOURCES Could not add the entry to the routing table. + @retval EFI_NOT_FOUND This route is not in the routing table. + @retval EFI_ACCESS_DENIED The route is already defined in the routing table. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_ROUTES)( + IN EFI_UDP4_PROTOCOL *This, + IN BOOLEAN DeleteRoute, + IN EFI_IPv4_ADDRESS *SubnetAddress, + IN EFI_IPv4_ADDRESS *SubnetMask, + IN EFI_IPv4_ADDRESS *GatewayAddress + ); + +/** + Polls for incoming data packets and processes outgoing data packets. + + The Poll() function can be used by network drivers and applications to increase + the rate that data packets are moved between the communications device and the + transmit and receive queues. + In some systems, the periodic timer event in the managed network driver may not + poll the underlying communications device fast enough to transmit and/or receive + all data packets without missing incoming packets or dropping outgoing packets. + Drivers and applications that are experiencing packet loss should try calling + the Poll() function more often. 
+ + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + + @retval EFI_SUCCESS Incoming or outgoing data was processed. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_TIMEOUT Data was dropped out of the transmit and/or receive queue. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_POLL)( + IN EFI_UDP4_PROTOCOL *This + ); + +/** + Places an asynchronous receive request into the receiving queue. + + The Receive() function places a completion token into the receive packet queue. + This function is always asynchronous. + The caller must fill in the Token.Event field in the completion token, and this + field cannot be NULL. When the receive operation completes, the EFI UDPv4 Protocol + driver updates the Token.Status and Token.Packet.RxData fields and the Token.Event + is signaled. Providing a proper notification function and context for the event + will enable the user to receive the notification and receiving status. That + notification function is guaranteed to not be re-entered. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + @param Token The pointer to a token that is associated with the receive data + descriptor. + + @retval EFI_SUCCESS The receive completion token was cached. + @retval EFI_NOT_STARTED This EFI UDPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, RARP, etc.) + is not finished yet. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_INVALID_PARAMETER Token is NULL. + @retval EFI_INVALID_PARAMETER Token.Event is NULL. + @retval EFI_OUT_OF_RESOURCES The receive completion token could not be queued due to a lack of system + resources (usually memory). + @retval EFI_DEVICE_ERROR An unexpected system or network error occurred. + @retval EFI_ACCESS_DENIED A receive completion token with the same Token.Event was already in + the receive queue. 
+ @retval EFI_NOT_READY The receive request could not be queued because the receive queue is full. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_RECEIVE)( + IN EFI_UDP4_PROTOCOL *This, + IN EFI_UDP4_COMPLETION_TOKEN *Token + ); + +/** + Queues outgoing data packets into the transmit queue. + + The Transmit() function places a sending request to this instance of the EFI + UDPv4 Protocol, alongside the transmit data that was filled by the user. Whenever + the packet in the token is sent out or some errors occur, the Token.Event will + be signaled and Token.Status is updated. Providing a proper notification function + and context for the event will enable the user to receive the notification and + transmitting status. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + @param Token The pointer to the completion token that will be placed into the + transmit queue. + + @retval EFI_SUCCESS The data has been queued for transmission. + @retval EFI_NOT_STARTED This EFI UDPv4 Protocol instance has not been started. + @retval EFI_NO_MAPPING When using a default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_ACCESS_DENIED The transmit completion token with the same + Token.Event was already in the transmit queue. + @retval EFI_NOT_READY The completion token could not be queued because the + transmit queue is full. + @retval EFI_OUT_OF_RESOURCES Could not queue the transmit data. + @retval EFI_NOT_FOUND There is no route to the destination network or address. + @retval EFI_BAD_BUFFER_SIZE The data length is greater than the maximum UDP packet + size. Or the length of the IP header + UDP header + data + length is greater than MTU if DoNotFragment is TRUE. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_TRANSMIT)( + IN EFI_UDP4_PROTOCOL *This, + IN EFI_UDP4_COMPLETION_TOKEN *Token + ); + +/** + Aborts an asynchronous transmit or receive request. 
+ + The Cancel() function is used to abort a pending transmit or receive request. + If the token is in the transmit or receive request queues, after calling this + function, Token.Status will be set to EFI_ABORTED and then Token.Event will be + signaled. If the token is not in one of the queues, which usually means that + the asynchronous operation has completed, this function will not signal the + token and EFI_NOT_FOUND is returned. + + @param This The pointer to the EFI_UDP4_PROTOCOL instance. + @param Token The pointer to a token that has been issued by + EFI_UDP4_PROTOCOL.Transmit() or + EFI_UDP4_PROTOCOL.Receive().If NULL, all pending + tokens are aborted. + + @retval EFI_SUCCESS The asynchronous I/O request was aborted and Token.Event + was signaled. When Token is NULL, all pending requests are + aborted and their events are signaled. + @retval EFI_INVALID_PARAMETER This is NULL. + @retval EFI_NOT_STARTED This instance has not been started. + @retval EFI_NO_MAPPING When using the default address, configuration (DHCP, BOOTP, + RARP, etc.) is not finished yet. + @retval EFI_NOT_FOUND When Token is not NULL, the asynchronous I/O request was + not found in the transmit or receive queue. It has either completed + or was not issued by Transmit() and Receive(). + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UDP4_CANCEL)( + IN EFI_UDP4_PROTOCOL *This, + IN EFI_UDP4_COMPLETION_TOKEN *Token OPTIONAL + ); + +/// +/// The EFI_UDP4_PROTOCOL defines an EFI UDPv4 Protocol session that can be used +/// by any network drivers, applications, or daemons to transmit or receive UDP packets. +/// This protocol instance can either be bound to a specified port as a service or +/// connected to some remote peer as an active client. Each instance has its own settings, +/// such as the routing table and group table, which are independent from each other. 
+/// +struct _EFI_UDP4_PROTOCOL { + EFI_UDP4_GET_MODE_DATA GetModeData; + EFI_UDP4_CONFIGURE Configure; + EFI_UDP4_GROUPS Groups; + EFI_UDP4_ROUTES Routes; + EFI_UDP4_TRANSMIT Transmit; + EFI_UDP4_RECEIVE Receive; + EFI_UDP4_CANCEL Cancel; + EFI_UDP4_POLL Poll; +}; + +extern EFI_GUID gEfiUdp4ServiceBindingProtocolGuid; +extern EFI_GUID gEfiUdp4ProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/UgaDraw.h b/src/include/ipxe/efi/Protocol/UgaDraw.h new file mode 100644 index 00000000..56502068 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/UgaDraw.h @@ -0,0 +1,168 @@ +/** @file + UGA Draw protocol from the EFI 1.10 specification. + + Abstraction of a very simple graphics device. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __UGA_DRAW_H__ +#define __UGA_DRAW_H__ + +FILE_LICENCE ( BSD3 ); + + +#define EFI_UGA_DRAW_PROTOCOL_GUID \ + { \ + 0x982c298b, 0xf4fa, 0x41cb, {0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39 } \ + } + +typedef struct _EFI_UGA_DRAW_PROTOCOL EFI_UGA_DRAW_PROTOCOL; + +/** + Return the current video mode information. + + @param This The EFI_UGA_DRAW_PROTOCOL instance. + @param HorizontalResolution The size of video screen in pixels in the X dimension. + @param VerticalResolution The size of video screen in pixels in the Y dimension. + @param ColorDepth Number of bits per pixel, currently defined to be 32. + @param RefreshRate The refresh rate of the monitor in Hertz. + + @retval EFI_SUCCESS Mode information returned. + @retval EFI_NOT_STARTED Video display is not initialized. Call SetMode () + @retval EFI_INVALID_PARAMETER One of the input args was NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UGA_DRAW_PROTOCOL_GET_MODE)( + IN EFI_UGA_DRAW_PROTOCOL *This, + OUT UINT32 *HorizontalResolution, + OUT UINT32 *VerticalResolution, + OUT UINT32 *ColorDepth, + OUT UINT32 *RefreshRate + ); + +/** + Set the current video mode information. + + @param This The EFI_UGA_DRAW_PROTOCOL instance. + @param HorizontalResolution The size of video screen in pixels in the X dimension. + @param VerticalResolution The size of video screen in pixels in the Y dimension. + @param ColorDepth Number of bits per pixel, currently defined to be 32. + @param RefreshRate The refresh rate of the monitor in Hertz. + + @retval EFI_SUCCESS Mode information returned. 
+ @retval EFI_NOT_STARTED Video display is not initialized. Call SetMode () + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UGA_DRAW_PROTOCOL_SET_MODE)( + IN EFI_UGA_DRAW_PROTOCOL *This, + IN UINT32 HorizontalResolution, + IN UINT32 VerticalResolution, + IN UINT32 ColorDepth, + IN UINT32 RefreshRate + ); + +typedef struct { + UINT8 Blue; + UINT8 Green; + UINT8 Red; + UINT8 Reserved; +} EFI_UGA_PIXEL; + +typedef union { + EFI_UGA_PIXEL Pixel; + UINT32 Raw; +} EFI_UGA_PIXEL_UNION; + +/// +/// Enumration value for actions of Blt operations. +/// +typedef enum { + EfiUgaVideoFill, ///< Write data from the BltBuffer pixel (SourceX, SourceY) + ///< directly to every pixel of the video display rectangle + ///< (DestinationX, DestinationY) (DestinationX + Width, DestinationY + Height). + ///< Only one pixel will be used from the BltBuffer. Delta is NOT used. + + EfiUgaVideoToBltBuffer, ///< Read data from the video display rectangle + ///< (SourceX, SourceY) (SourceX + Width, SourceY + Height) and place it in + ///< the BltBuffer rectangle (DestinationX, DestinationY ) + ///< (DestinationX + Width, DestinationY + Height). If DestinationX or + ///< DestinationY is not zero then Delta must be set to the length in bytes + ///< of a row in the BltBuffer. + + EfiUgaBltBufferToVideo, ///< Write data from the BltBuffer rectangle + ///< (SourceX, SourceY) (SourceX + Width, SourceY + Height) directly to the + ///< video display rectangle (DestinationX, DestinationY) + ///< (DestinationX + Width, DestinationY + Height). If SourceX or SourceY is + ///< not zero then Delta must be set to the length in bytes of a row in the + ///< BltBuffer. + + EfiUgaVideoToVideo, ///< Copy from the video display rectangle (SourceX, SourceY) + ///< (SourceX + Width, SourceY + Height) .to the video display rectangle + ///< (DestinationX, DestinationY) (DestinationX + Width, DestinationY + Height). + ///< The BltBuffer and Delta are not used in this mode. 
+ + EfiUgaBltMax ///< Maxmimum value for enumration value of Blt operation. If a Blt operation + ///< larger or equal to this enumration value, it is invalid. +} EFI_UGA_BLT_OPERATION; + +/** + Blt a rectangle of pixels on the graphics screen. + + @param[in] This - Protocol instance pointer. + @param[in] BltBuffer - Buffer containing data to blit into video buffer. This + buffer has a size of Width*Height*sizeof(EFI_UGA_PIXEL) + @param[in] BltOperation - Operation to perform on BlitBuffer and video memory + @param[in] SourceX - X coordinate of source for the BltBuffer. + @param[in] SourceY - Y coordinate of source for the BltBuffer. + @param[in] DestinationX - X coordinate of destination for the BltBuffer. + @param[in] DestinationY - Y coordinate of destination for the BltBuffer. + @param[in] Width - Width of rectangle in BltBuffer in pixels. + @param[in] Height - Hight of rectangle in BltBuffer in pixels. + @param[in] Delta - OPTIONAL + + @retval EFI_SUCCESS - The Blt operation completed. + @retval EFI_INVALID_PARAMETER - BltOperation is not valid. + @retval EFI_DEVICE_ERROR - A hardware error occured writting to the video buffer. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_UGA_DRAW_PROTOCOL_BLT)( + IN EFI_UGA_DRAW_PROTOCOL * This, + IN EFI_UGA_PIXEL * BltBuffer, OPTIONAL + IN EFI_UGA_BLT_OPERATION BltOperation, + IN UINTN SourceX, + IN UINTN SourceY, + IN UINTN DestinationX, + IN UINTN DestinationY, + IN UINTN Width, + IN UINTN Height, + IN UINTN Delta OPTIONAL + ); + +/// +/// This protocol provides a basic abstraction to set video modes and +/// copy pixels to and from the graphics controller's frame buffer. 
+/// +struct _EFI_UGA_DRAW_PROTOCOL { + EFI_UGA_DRAW_PROTOCOL_GET_MODE GetMode; + EFI_UGA_DRAW_PROTOCOL_SET_MODE SetMode; + EFI_UGA_DRAW_PROTOCOL_BLT Blt; +}; + +extern EFI_GUID gEfiUgaDrawProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/UnicodeCollation.h b/src/include/ipxe/efi/Protocol/UnicodeCollation.h new file mode 100644 index 00000000..870428c2 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/UnicodeCollation.h @@ -0,0 +1,194 @@ +/** @file + Unicode Collation protocol that follows the UEFI 2.0 specification. + This protocol is used to allow code running in the boot services environment + to perform lexical comparison functions on Unicode strings for given languages. + +Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.
+This program and the accompanying materials are licensed and made available under +the terms and conditions of the BSD License that accompanies this distribution. +The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php. + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __UNICODE_COLLATION_H__ +#define __UNICODE_COLLATION_H__ + +FILE_LICENCE ( BSD3 ); + +#define EFI_UNICODE_COLLATION_PROTOCOL_GUID \ + { \ + 0x1d85cd7f, 0xf43d, 0x11d2, {0x9a, 0xc, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d } \ + } + +#define EFI_UNICODE_COLLATION_PROTOCOL2_GUID \ + { \ + 0xa4c751fc, 0x23ae, 0x4c3e, {0x92, 0xe9, 0x49, 0x64, 0xcf, 0x63, 0xf3, 0x49 } \ + } + +typedef struct _EFI_UNICODE_COLLATION_PROTOCOL EFI_UNICODE_COLLATION_PROTOCOL; + + +/// +/// Protocol GUID name defined in EFI1.1. +/// +#define UNICODE_COLLATION_PROTOCOL EFI_UNICODE_COLLATION_PROTOCOL_GUID + +/// +/// Protocol defined in EFI1.1. +/// +typedef EFI_UNICODE_COLLATION_PROTOCOL UNICODE_COLLATION_INTERFACE; + +/// +/// Protocol data structures and defines +/// +#define EFI_UNICODE_BYTE_ORDER_MARK (CHAR16) (0xfeff) + +// +// Protocol member functions +// +/** + Performs a case-insensitive comparison of two Null-terminated strings. + + @param This A pointer to the EFI_UNICODE_COLLATION_PROTOCOL instance. + @param Str1 A pointer to a Null-terminated string. + @param Str2 A pointer to a Null-terminated string. + + @retval 0 Str1 is equivalent to Str2. + @retval >0 Str1 is lexically greater than Str2. + @retval <0 Str1 is lexically less than Str2. + +**/ +typedef +INTN +(EFIAPI *EFI_UNICODE_COLLATION_STRICOLL)( + IN EFI_UNICODE_COLLATION_PROTOCOL *This, + IN CHAR16 *Str1, + IN CHAR16 *Str2 + ); + +/** + Performs a case-insensitive comparison of a Null-terminated + pattern string and a Null-terminated string. 
+ + @param This A pointer to the EFI_UNICODE_COLLATION_PROTOCOL instance. + @param String A pointer to a Null-terminated string. + @param Pattern A pointer to a Null-terminated pattern string. + + @retval TRUE Pattern was found in String. + @retval FALSE Pattern was not found in String. + +**/ +typedef +BOOLEAN +(EFIAPI *EFI_UNICODE_COLLATION_METAIMATCH)( + IN EFI_UNICODE_COLLATION_PROTOCOL *This, + IN CHAR16 *String, + IN CHAR16 *Pattern + ); + +/** + Converts all the characters in a Null-terminated string to + lower case characters. + + @param This A pointer to the EFI_UNICODE_COLLATION_PROTOCOL instance. + @param String A pointer to a Null-terminated string. + +**/ +typedef +VOID +(EFIAPI *EFI_UNICODE_COLLATION_STRLWR)( + IN EFI_UNICODE_COLLATION_PROTOCOL *This, + IN OUT CHAR16 *Str + ); + +/** + Converts all the characters in a Null-terminated string to upper + case characters. + + @param This A pointer to the EFI_UNICODE_COLLATION_PROTOCOL instance. + @param String A pointer to a Null-terminated string. + +**/ +typedef +VOID +(EFIAPI *EFI_UNICODE_COLLATION_STRUPR)( + IN EFI_UNICODE_COLLATION_PROTOCOL *This, + IN OUT CHAR16 *Str + ); + +/** + Converts an 8.3 FAT file name in an OEM character set to a Null-terminated + string. + + @param This A pointer to the EFI_UNICODE_COLLATION_PROTOCOL instance. + @param FatSize The size of the string Fat in bytes. + @param Fat A pointer to a Null-terminated string that contains an 8.3 file + name using an 8-bit OEM character set. + @param String A pointer to a Null-terminated string. The string must + be allocated in advance to hold FatSize characters. + +**/ +typedef +VOID +(EFIAPI *EFI_UNICODE_COLLATION_FATTOSTR)( + IN EFI_UNICODE_COLLATION_PROTOCOL *This, + IN UINTN FatSize, + IN CHAR8 *Fat, + OUT CHAR16 *String + ); + +/** + Converts a Null-terminated string to legal characters in a FAT + filename using an OEM character set. + + @param This A pointer to the EFI_UNICODE_COLLATION_PROTOCOL instance. 
+ @param String A pointer to a Null-terminated string. + @param FatSize The size of the string Fat in bytes. + @param Fat A pointer to a string that contains the converted version of + String using legal FAT characters from an OEM character set. + + @retval TRUE One or more conversions failed and were substituted with '_' + @retval FALSE None of the conversions failed. + +**/ +typedef +BOOLEAN +(EFIAPI *EFI_UNICODE_COLLATION_STRTOFAT)( + IN EFI_UNICODE_COLLATION_PROTOCOL *This, + IN CHAR16 *String, + IN UINTN FatSize, + OUT CHAR8 *Fat + ); + +/// +/// The EFI_UNICODE_COLLATION_PROTOCOL is used to perform case-insensitive +/// comparisons of strings. +/// +struct _EFI_UNICODE_COLLATION_PROTOCOL { + EFI_UNICODE_COLLATION_STRICOLL StriColl; + EFI_UNICODE_COLLATION_METAIMATCH MetaiMatch; + EFI_UNICODE_COLLATION_STRLWR StrLwr; + EFI_UNICODE_COLLATION_STRUPR StrUpr; + + // + // for supporting fat volumes + // + EFI_UNICODE_COLLATION_FATTOSTR FatToStr; + EFI_UNICODE_COLLATION_STRTOFAT StrToFat; + + /// + /// A Null-terminated ASCII string array that contains one or more language codes. + /// When this field is used for UnicodeCollation2, it is specified in RFC 4646 format. + /// When it is used for UnicodeCollation, it is specified in ISO 639-2 format. + /// + CHAR8 *SupportedLanguages; +}; + +extern EFI_GUID gEfiUnicodeCollationProtocolGuid; +extern EFI_GUID gEfiUnicodeCollation2ProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/Usb2HostController.h b/src/include/ipxe/efi/Protocol/Usb2HostController.h new file mode 100644 index 00000000..8308e8f1 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/Usb2HostController.h @@ -0,0 +1,666 @@ +/** @file + EFI_USB2_HC_PROTOCOL as defined in UEFI 2.0. + The USB Host Controller Protocol is used by code, typically USB bus drivers, + running in the EFI boot services environment, to perform data transactions over + a USB bus. In addition, it provides an abstraction for the root hub of the USB bus. 
+ + Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _USB2_HOSTCONTROLLER_H_ +#define _USB2_HOSTCONTROLLER_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_USB2_HC_PROTOCOL_GUID \ + { \ + 0x3e745226, 0x9818, 0x45b6, {0xa2, 0xac, 0xd7, 0xcd, 0xe, 0x8b, 0xa2, 0xbc } \ + } + +/// +/// Forward reference for pure ANSI compatability +/// +typedef struct _EFI_USB2_HC_PROTOCOL EFI_USB2_HC_PROTOCOL; + + +typedef struct { + UINT16 PortStatus; ///< Contains current port status bitmap. + UINT16 PortChangeStatus; ///< Contains current port status change bitmap. +} EFI_USB_PORT_STATUS; + +/// +/// EFI_USB_PORT_STATUS.PortStatus bit definition +/// +#define USB_PORT_STAT_CONNECTION 0x0001 +#define USB_PORT_STAT_ENABLE 0x0002 +#define USB_PORT_STAT_SUSPEND 0x0004 +#define USB_PORT_STAT_OVERCURRENT 0x0008 +#define USB_PORT_STAT_RESET 0x0010 +#define USB_PORT_STAT_POWER 0x0100 +#define USB_PORT_STAT_LOW_SPEED 0x0200 +#define USB_PORT_STAT_HIGH_SPEED 0x0400 +#define USB_PORT_STAT_SUPER_SPEED 0x0800 +#define USB_PORT_STAT_OWNER 0x2000 + +/// +/// EFI_USB_PORT_STATUS.PortChangeStatus bit definition +/// +#define USB_PORT_STAT_C_CONNECTION 0x0001 +#define USB_PORT_STAT_C_ENABLE 0x0002 +#define USB_PORT_STAT_C_SUSPEND 0x0004 +#define USB_PORT_STAT_C_OVERCURRENT 0x0008 +#define USB_PORT_STAT_C_RESET 0x0010 + + +/// +/// Usb port features value +/// Each value indicates its bit index in the port status and status change bitmaps, +/// if combines these two bitmaps into a 32-bit bitmap. 
+/// +typedef enum { + EfiUsbPortEnable = 1, + EfiUsbPortSuspend = 2, + EfiUsbPortReset = 4, + EfiUsbPortPower = 8, + EfiUsbPortOwner = 13, + EfiUsbPortConnectChange = 16, + EfiUsbPortEnableChange = 17, + EfiUsbPortSuspendChange = 18, + EfiUsbPortOverCurrentChange = 19, + EfiUsbPortResetChange = 20 +} EFI_USB_PORT_FEATURE; + +#define EFI_USB_SPEED_FULL 0x0000 ///< 12 Mb/s, USB 1.1 OHCI and UHCI HC. +#define EFI_USB_SPEED_LOW 0x0001 ///< 1 Mb/s, USB 1.1 OHCI and UHCI HC. +#define EFI_USB_SPEED_HIGH 0x0002 ///< 480 Mb/s, USB 2.0 EHCI HC. +#define EFI_USB_SPEED_SUPER 0x0003 ///< 4.8 Gb/s, USB 3.0 XHCI HC. + +typedef struct { + UINT8 TranslatorHubAddress; ///< device address + UINT8 TranslatorPortNumber; ///< the port number of the hub that device is connected to. +} EFI_USB2_HC_TRANSACTION_TRANSLATOR; + +// +// Protocol definitions +// + +/** + Retrieves the Host Controller capabilities. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param MaxSpeed Host controller data transfer speed. + @param PortNumber Number of the root hub ports. + @param Is64BitCapable TRUE if controller supports 64-bit memory addressing, + FALSE otherwise. + + @retval EFI_SUCCESS The host controller capabilities were retrieved successfully. + @retval EFI_INVALID_PARAMETER One of the input args was NULL. + @retval EFI_DEVICE_ERROR An error was encountered while attempting to + retrieve the capabilities. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_GET_CAPABILITY)( + IN EFI_USB2_HC_PROTOCOL *This, + OUT UINT8 *MaxSpeed, + OUT UINT8 *PortNumber, + OUT UINT8 *Is64BitCapable + ); + +#define EFI_USB_HC_RESET_GLOBAL 0x0001 +#define EFI_USB_HC_RESET_HOST_CONTROLLER 0x0002 +#define EFI_USB_HC_RESET_GLOBAL_WITH_DEBUG 0x0004 +#define EFI_USB_HC_RESET_HOST_WITH_DEBUG 0x0008 +/** + Provides software reset for the USB host controller. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param Attributes A bit mask of the reset operation to perform. 
+ + @retval EFI_SUCCESS The reset operation succeeded. + @retval EFI_INVALID_PARAMETER Attributes is not valid. + @retval EFI_UNSUPPORTED The type of reset specified by Attributes is not currently + supported by the host controller hardware. + @retval EFI_ACCESS_DENIED Reset operation is rejected due to the debug port being configured + and active; only EFI_USB_HC_RESET_GLOBAL_WITH_DEBUG or + EFI_USB_HC_RESET_HOST_WITH_DEBUG reset Attributes can be used to + perform reset operation for this host controller. + @retval EFI_DEVICE_ERROR An error was encountered while attempting to + retrieve the capabilities. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_RESET)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT16 Attributes + ); + +/** + Enumration value for status of USB HC. +**/ +typedef enum { + EfiUsbHcStateHalt, ///< The host controller is in halt + ///< state. No USB transactions can occur + ///< while in this state. The host + ///< controller can enter this state for + ///< three reasons: 1) After host + ///< controller hardware reset. 2) + ///< Explicitly set by software. 3) + ///< Triggered by a fatal error such as + ///< consistency check failure. + + EfiUsbHcStateOperational, ///< The host controller is in an + ///< operational state. When in + ///< this state, the host + ///< controller can execute bus + ///< traffic. This state must be + ///< explicitly set to enable the + ///< USB bus traffic. + + EfiUsbHcStateSuspend, ///< The host controller is in the + ///< suspend state. No USB + ///< transactions can occur while in + ///< this state. The host controller + ///< enters this state for the + ///< following reasons: 1) Explicitly + ///< set by software. 2) Triggered + ///< when there is no bus traffic for + ///< 3 microseconds. + + EfiUsbHcStateMaximum ///< Maximum value for enumration value of HC status. +} EFI_USB_HC_STATE; + +/** + Retrieves current state of the USB host controller. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. 
+ @param State A pointer to the EFI_USB_HC_STATE data structure that + indicates current state of the USB host controller. + + @retval EFI_SUCCESS The state information of the host controller was returned in State. + @retval EFI_INVALID_PARAMETER State is NULL. + @retval EFI_DEVICE_ERROR An error was encountered while attempting to retrieve the + host controller's current state. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_GET_STATE)( + IN EFI_USB2_HC_PROTOCOL *This, + OUT EFI_USB_HC_STATE *State +); + +/** + Sets the USB host controller to a specific state. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param State Indicates the state of the host controller that will be set. + + @retval EFI_SUCCESS The USB host controller was successfully placed in the state + specified by State. + @retval EFI_INVALID_PARAMETER State is not valid. + @retval EFI_DEVICE_ERROR Failed to set the state specified by State due to device error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_SET_STATE)( + IN EFI_USB2_HC_PROTOCOL *This, + IN EFI_USB_HC_STATE State + ); + +/** + Submits control transfer to a target USB device. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB. + @param DeviceSpeed Indicates device speed. + @param MaximumPacketLength Indicates the maximum packet size that the default control transfer + endpoint is capable of sending or receiving. + @param Request A pointer to the USB device request that will be sent to the USB device. + @param TransferDirection Specifies the data direction for the transfer. There are three values + available, EfiUsbDataIn, EfiUsbDataOut and EfiUsbNoData. + @param Data A pointer to the buffer of data that will be transmitted to USB device or + received from USB device. + @param DataLength On input, indicates the size, in bytes, of the data buffer specified by Data. 
+ On output, indicates the amount of data actually transferred. + @param TimeOut Indicates the maximum time, in milliseconds, which the transfer is + allowed to complete. + @param Translator A pointer to the transaction translator data. + @param TransferResult A pointer to the detailed result information generated by this control + transfer. + + @retval EFI_SUCCESS The control transfer was completed successfully. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_OUT_OF_RESOURCES The control transfer could not be completed due to a lack of resources. + @retval EFI_TIMEOUT The control transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The control transfer failed due to host controller or device error. + Caller should check TransferResult for detailed error information. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_CONTROL_TRANSFER)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 DeviceSpeed, + IN UINTN MaximumPacketLength, + IN EFI_USB_DEVICE_REQUEST *Request, + IN EFI_USB_DATA_DIRECTION TransferDirection, + IN OUT VOID *Data OPTIONAL, + IN OUT UINTN *DataLength OPTIONAL, + IN UINTN TimeOut, + IN EFI_USB2_HC_TRANSACTION_TRANSLATOR *Translator, + OUT UINT32 *TransferResult + ); + +#define EFI_USB_MAX_BULK_BUFFER_NUM 10 + +/** + Submits bulk transfer to a bulk endpoint of a USB device. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB. + @param EndPointAddress The combination of an endpoint number and an endpoint direction of the + target USB device. + @param DeviceSpeed Indicates device speed. + @param MaximumPacketLength Indicates the maximum packet size the target endpoint is capable of + sending or receiving. + @param DataBuffersNumber Number of data buffers prepared for the transfer. + @param Data Array of pointers to the buffers of data that will be transmitted to USB + device or received from USB device. 
+ @param DataLength When input, indicates the size, in bytes, of the data buffers specified by + Data. When output, indicates the actually transferred data size. + @param DataToggle A pointer to the data toggle value. + @param TimeOut Indicates the maximum time, in milliseconds, which the transfer is + allowed to complete. + @param Translator A pointer to the transaction translator data. + @param TransferResult A pointer to the detailed result information of the bulk transfer. + + @retval EFI_SUCCESS The bulk transfer was completed successfully. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_OUT_OF_RESOURCES The bulk transfer could not be submitted due to a lack of resources. + @retval EFI_TIMEOUT The bulk transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The bulk transfer failed due to host controller or device error. + Caller should check TransferResult for detailed error information. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_BULK_TRANSFER)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 DeviceSpeed, + IN UINTN MaximumPacketLength, + IN UINT8 DataBuffersNumber, + IN OUT VOID *Data[EFI_USB_MAX_BULK_BUFFER_NUM], + IN OUT UINTN *DataLength, + IN OUT UINT8 *DataToggle, + IN UINTN TimeOut, + IN EFI_USB2_HC_TRANSACTION_TRANSLATOR *Translator, + OUT UINT32 *TransferResult + ); + +/** + Submits an asynchronous interrupt transfer to an interrupt endpoint of a USB device. + Translator parameter doesn't exist in UEFI2.0 spec, but it will be updated in the following specification version. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB. + @param EndPointAddress The combination of an endpoint number and an endpoint direction of the + target USB device. + @param DeviceSpeed Indicates device speed. 
+ @param MaximumPacketLength Indicates the maximum packet size the target endpoint is capable of + sending or receiving. + @param IsNewTransfer If TRUE, an asynchronous interrupt pipe is built between the host and the + target interrupt endpoint. If FALSE, the specified asynchronous interrupt + pipe is canceled. If TRUE, and an interrupt transfer exists for the target + end point, then EFI_INVALID_PARAMETER is returned. + @param DataToggle A pointer to the data toggle value. + @param PollingInterval Indicates the interval, in milliseconds, that the asynchronous interrupt + transfer is polled. + @param DataLength Indicates the length of data to be received at the rate specified by + PollingInterval from the target asynchronous interrupt endpoint. + @param Translator A pointr to the transaction translator data. + @param CallBackFunction The Callback function. This function is called at the rate specified by + PollingInterval. + @param Context The context that is passed to the CallBackFunction. This is an + optional parameter and may be NULL. + + @retval EFI_SUCCESS The asynchronous interrupt transfer request has been successfully + submitted or canceled. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_ASYNC_INTERRUPT_TRANSFER)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 DeviceSpeed, + IN UINTN MaxiumPacketLength, + IN BOOLEAN IsNewTransfer, + IN OUT UINT8 *DataToggle, + IN UINTN PollingInterval OPTIONAL, + IN UINTN DataLength OPTIONAL, + IN EFI_USB2_HC_TRANSACTION_TRANSLATOR *Translator OPTIONAL, + IN EFI_ASYNC_USB_TRANSFER_CALLBACK CallBackFunction OPTIONAL, + IN VOID *Context OPTIONAL + ); + +/** + Submits synchronous interrupt transfer to an interrupt endpoint of a USB device. 
+ Translator parameter doesn't exist in UEFI2.0 spec, but it will be updated in the following specification version.
+
+ @param This A pointer to the EFI_USB2_HC_PROTOCOL instance.
+ @param DeviceAddress Represents the address of the target device on the USB.
+ @param EndPointAddress The combination of an endpoint number and an endpoint direction of the
+ target USB device.
+ @param DeviceSpeed Indicates device speed.
+ @param MaximumPacketLength Indicates the maximum packet size the target endpoint is capable of
+ sending or receiving.
+ @param Data A pointer to the buffer of data that will be transmitted to USB device or
+ received from USB device.
+ @param DataLength On input, the size, in bytes, of the data buffer specified by Data. On
+ output, the number of bytes transferred.
+ @param DataToggle A pointer to the data toggle value.
+ @param TimeOut Indicates the maximum time, in milliseconds, which the transfer is
+ allowed to complete.
+ @param Translator A pointer to the transaction translator data.
+ @param TransferResult A pointer to the detailed result information from the synchronous
+ interrupt transfer.
+
+ @retval EFI_SUCCESS The synchronous interrupt transfer was completed successfully.
+ @retval EFI_INVALID_PARAMETER Some parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The synchronous interrupt transfer could not be submitted due to a lack of resources.
+ @retval EFI_TIMEOUT The synchronous interrupt transfer failed due to timeout.
+ @retval EFI_DEVICE_ERROR The synchronous interrupt transfer failed due to host controller or device error.
+ Caller should check TransferResult for detailed error information.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_USB2_HC_PROTOCOL_SYNC_INTERRUPT_TRANSFER)(
+ IN EFI_USB2_HC_PROTOCOL *This,
+ IN UINT8 DeviceAddress,
+ IN UINT8 EndPointAddress,
+ IN UINT8 DeviceSpeed,
+ IN UINTN MaximumPacketLength,
+ IN OUT VOID *Data,
+ IN OUT UINTN *DataLength,
+ IN OUT UINT8 *DataToggle,
+ IN UINTN TimeOut,
+ IN EFI_USB2_HC_TRANSACTION_TRANSLATOR *Translator,
+ OUT UINT32 *TransferResult
+ );
+
+#define EFI_USB_MAX_ISO_BUFFER_NUM 7
+#define EFI_USB_MAX_ISO_BUFFER_NUM1 2
+
+/**
+ Submits isochronous transfer to an isochronous endpoint of a USB device.
+
+ This function is used to submit isochronous transfer to a target endpoint of a USB device.
+ The target endpoint is specified by DeviceAddress and EndpointAddress. Isochronous transfers are
+ used when working with isochronous data. It provides periodic, continuous communication between
+ the host and a device. Isochronous transfers can be used only by full-speed, high-speed, and
+ super-speed devices.
+
+ High-speed isochronous transfers can be performed using multiple data buffers. The number of
+ buffers that are actually prepared for the transfer is specified by DataBuffersNumber. For
+ full-speed isochronous transfers this value is ignored.
+
+ Data represents a list of pointers to the data buffers. For full-speed isochronous transfers
+ only the data pointed by Data[0] shall be used. For high-speed isochronous transfers and for
+ the split transactions depending on DataLength there several data buffers can be used. For the
+ high-speed isochronous transfers the total number of buffers must not exceed EFI_USB_MAX_ISO_BUFFER_NUM.
+
+ For split transactions performed on full-speed device by high-speed host controller the total
+ number of buffers is limited to EFI_USB_MAX_ISO_BUFFER_NUM1.
+ If the isochronous transfer is successful, then EFI_SUCCESS is returned. The isochronous transfer
+ is designed to be completed within one USB frame time, if it cannot be completed, EFI_TIMEOUT
+ is returned. 
If an error other than timeout occurs during the USB transfer, then EFI_DEVICE_ERROR
+ is returned and the detailed status code will be returned in TransferResult.
+
+ EFI_INVALID_PARAMETER is returned if one of the following conditions is satisfied:
+ - Data is NULL.
+ - DataLength is 0.
+ - DeviceSpeed is not one of the supported values listed above.
+ - MaximumPacketLength is invalid. MaximumPacketLength must be 1023 or less for full-speed devices,
+ and 1024 or less for high-speed and super-speed devices.
+ - TransferResult is NULL.
+
+ @param This A pointer to the EFI_USB2_HC_PROTOCOL instance.
+ @param DeviceAddress Represents the address of the target device on the USB.
+ @param EndPointAddress The combination of an endpoint number and an endpoint direction of the
+ target USB device.
+ @param DeviceSpeed Indicates device speed. The supported values are EFI_USB_SPEED_FULL,
+ EFI_USB_SPEED_HIGH, or EFI_USB_SPEED_SUPER.
+ @param MaximumPacketLength Indicates the maximum packet size the target endpoint is capable of
+ sending or receiving.
+ @param DataBuffersNumber Number of data buffers prepared for the transfer.
+ @param Data Array of pointers to the buffers of data that will be transmitted to USB
+ device or received from USB device.
+ @param DataLength Specifies the length, in bytes, of the data to be sent to or received from
+ the USB device.
+ @param Translator A pointer to the transaction translator data.
+ @param TransferResult A pointer to the detailed result information of the isochronous transfer.
+
+ @retval EFI_SUCCESS The isochronous transfer was completed successfully.
+ @retval EFI_INVALID_PARAMETER Some parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The isochronous transfer could not be submitted due to a lack of resources.
+ @retval EFI_TIMEOUT The isochronous transfer cannot be completed within the one USB frame time.
+ @retval EFI_DEVICE_ERROR The isochronous transfer failed due to host controller or device error. 
+ Caller should check TransferResult for detailed error information. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_ISOCHRONOUS_TRANSFER)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 DeviceSpeed, + IN UINTN MaximumPacketLength, + IN UINT8 DataBuffersNumber, + IN OUT VOID *Data[EFI_USB_MAX_ISO_BUFFER_NUM], + IN UINTN DataLength, + IN EFI_USB2_HC_TRANSACTION_TRANSLATOR *Translator, + OUT UINT32 *TransferResult + ); + +/** + Submits nonblocking isochronous transfer to an isochronous endpoint of a USB device. + + This is an asynchronous type of USB isochronous transfer. If the caller submits a USB + isochronous transfer request through this function, this function will return immediately. + + When the isochronous transfer completes, the IsochronousCallbackfunction will be triggered, + the caller can know the transfer results. If the transfer is successful, the caller can get + the data received or sent in this callback function. + + The target endpoint is specified by DeviceAddressand EndpointAddress. Isochronous transfers + are used when working with isochronous date. It provides periodic, continuous communication + between the host and a device. Isochronous transfers can be used only by full-speed, high-speed, + and super-speed devices. + + High-speed isochronous transfers can be performed using multiple data buffers. The number of + buffers that are actually prepared for the transfer is specified by DataBuffersNumber. For + full-speed isochronous transfers this value is ignored. + + Data represents a list of pointers to the data buffers. For full-speed isochronous transfers + only the data pointed by Data[0] shall be used. For high-speed isochronous transfers and for + the split transactions depending on DataLength there several data buffers can be used. For + the high-speed isochronous transfers the total number of buffers must not exceed EFI_USB_MAX_ISO_BUFFER_NUM. 
+ + For split transactions performed on full-speed device by high-speed host controller the total + number of buffers is limited to EFI_USB_MAX_ISO_BUFFER_NUM1. + + EFI_INVALID_PARAMETER is returned if one of the following conditionsis satisfied: + - Data is NULL. + - DataLength is 0. + - DeviceSpeed is not one of the supported values listed above. + - MaximumPacketLength is invalid. MaximumPacketLength must be 1023 or less for full-speed + devices and 1024 or less for high-speed and super-speed devices. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB. + @param EndPointAddress The combination of an endpoint number and an endpoint direction of the + target USB device. + @param DeviceSpeed Indicates device speed. The supported values are EFI_USB_SPEED_FULL, + EFI_USB_SPEED_HIGH, or EFI_USB_SPEED_SUPER. + @param MaximumPacketLength Indicates the maximum packet size the target endpoint is capable of + sending or receiving. + @param DataBuffersNumber Number of data buffers prepared for the transfer. + @param Data Array of pointers to the buffers of data that will be transmitted to USB + device or received from USB device. + @param DataLength Specifies the length, in bytes, of the data to be sent to or received from + the USB device. + @param Translator A pointer to the transaction translator data. + @param IsochronousCallback The Callback function. This function is called if the requested + isochronous transfer is completed. + @param Context Data passed to the IsochronousCallback function. This is an + optional parameter and may be NULL. + + @retval EFI_SUCCESS The asynchronous isochronous transfer request has been successfully + submitted or canceled. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_OUT_OF_RESOURCES The asynchronous isochronous transfer could not be submitted due to + a lack of resources. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_ASYNC_ISOCHRONOUS_TRANSFER)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 DeviceSpeed, + IN UINTN MaximumPacketLength, + IN UINT8 DataBuffersNumber, + IN OUT VOID *Data[EFI_USB_MAX_ISO_BUFFER_NUM], + IN UINTN DataLength, + IN EFI_USB2_HC_TRANSACTION_TRANSLATOR *Translator, + IN EFI_ASYNC_USB_TRANSFER_CALLBACK IsochronousCallBack, + IN VOID *Context OPTIONAL + ); + +/** + Retrieves the current status of a USB root hub port. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param PortNumber Specifies the root hub port from which the status is to be retrieved. + This value is zero based. + @param PortStatus A pointer to the current port status bits and port status change bits. + + @retval EFI_SUCCESS The status of the USB root hub port specified by PortNumber + was returned in PortStatus. + @retval EFI_INVALID_PARAMETER PortNumber is invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_GET_ROOTHUB_PORT_STATUS)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 PortNumber, + OUT EFI_USB_PORT_STATUS *PortStatus + ); + +/** + Sets a feature for the specified root hub port. + + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param PortNumber Specifies the root hub port whose feature is requested to be set. This + value is zero based. + @param PortFeature Indicates the feature selector associated with the feature set request. + + @retval EFI_SUCCESS The feature specified by PortFeature was set for the USB + root hub port specified by PortNumber. + @retval EFI_INVALID_PARAMETER PortNumber is invalid or PortFeature is invalid for this function. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_SET_ROOTHUB_PORT_FEATURE)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 PortNumber, + IN EFI_USB_PORT_FEATURE PortFeature + ); + +/** + Clears a feature for the specified root hub port. 
+ + @param This A pointer to the EFI_USB2_HC_PROTOCOL instance. + @param PortNumber Specifies the root hub port whose feature is requested to be cleared. This + value is zero based. + @param PortFeature Indicates the feature selector associated with the feature clear request. + + @retval EFI_SUCCESS The feature specified by PortFeature was cleared for the USB + root hub port specified by PortNumber. + @retval EFI_INVALID_PARAMETER PortNumber is invalid or PortFeature is invalid for this function. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB2_HC_PROTOCOL_CLEAR_ROOTHUB_PORT_FEATURE)( + IN EFI_USB2_HC_PROTOCOL *This, + IN UINT8 PortNumber, + IN EFI_USB_PORT_FEATURE PortFeature + ); + +/// +/// The EFI_USB2_HC_PROTOCOL provides USB host controller management, basic +/// data transactions over a USB bus, and USB root hub access. A device driver +/// that wishes to manage a USB bus in a system retrieves the EFI_USB2_HC_PROTOCOL +/// instance that is associated with the USB bus to be managed. A device handle +/// for a USB host controller will minimally contain an EFI_DEVICE_PATH_PROTOCOL +/// instance, and an EFI_USB2_HC_PROTOCOL instance. 
+/// +struct _EFI_USB2_HC_PROTOCOL { + EFI_USB2_HC_PROTOCOL_GET_CAPABILITY GetCapability; + EFI_USB2_HC_PROTOCOL_RESET Reset; + EFI_USB2_HC_PROTOCOL_GET_STATE GetState; + EFI_USB2_HC_PROTOCOL_SET_STATE SetState; + EFI_USB2_HC_PROTOCOL_CONTROL_TRANSFER ControlTransfer; + EFI_USB2_HC_PROTOCOL_BULK_TRANSFER BulkTransfer; + EFI_USB2_HC_PROTOCOL_ASYNC_INTERRUPT_TRANSFER AsyncInterruptTransfer; + EFI_USB2_HC_PROTOCOL_SYNC_INTERRUPT_TRANSFER SyncInterruptTransfer; + EFI_USB2_HC_PROTOCOL_ISOCHRONOUS_TRANSFER IsochronousTransfer; + EFI_USB2_HC_PROTOCOL_ASYNC_ISOCHRONOUS_TRANSFER AsyncIsochronousTransfer; + EFI_USB2_HC_PROTOCOL_GET_ROOTHUB_PORT_STATUS GetRootHubPortStatus; + EFI_USB2_HC_PROTOCOL_SET_ROOTHUB_PORT_FEATURE SetRootHubPortFeature; + EFI_USB2_HC_PROTOCOL_CLEAR_ROOTHUB_PORT_FEATURE ClearRootHubPortFeature; + + /// + /// The major revision number of the USB host controller. The revision information + /// indicates the release of the Universal Serial Bus Specification with which the + /// host controller is compliant. + /// + UINT16 MajorRevision; + + /// + /// The minor revision number of the USB host controller. The revision information + /// indicates the release of the Universal Serial Bus Specification with which the + /// host controller is compliant. + /// + UINT16 MinorRevision; +}; + +extern EFI_GUID gEfiUsb2HcProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/UsbHostController.h b/src/include/ipxe/efi/Protocol/UsbHostController.h new file mode 100644 index 00000000..a29088c6 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/UsbHostController.h @@ -0,0 +1,510 @@ +/** @file + EFI_USB_HC_PROTOCOL as defined in EFI 1.10. + + The USB Host Controller Protocol is used by code, typically USB bus drivers, + running in the EFI boot services environment, to perform data transactions + over a USB bus. In addition, it provides an abstraction for the root hub of the USB bus. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _USB_HOSTCONTROLLER_H_ +#define _USB_HOSTCONTROLLER_H_ + +FILE_LICENCE ( BSD3 ); + +#include + +#define EFI_USB_HC_PROTOCOL_GUID \ + { \ + 0xf5089266, 0x1aa0, 0x4953, {0x97, 0xd8, 0x56, 0x2f, 0x8a, 0x73, 0xb5, 0x19 } \ + } + +/// +/// Forward reference for pure ANSI compatability +/// +typedef struct _EFI_USB_HC_PROTOCOL EFI_USB_HC_PROTOCOL; + +// +// Protocol definitions +// + +/** + Provides software reset for the USB host controller. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param Attributes A bit mask of the reset operation to perform. + + @retval EFI_SUCCESS The reset operation succeeded. + @retval EFI_UNSUPPORTED The type of reset specified by Attributes is not currently supported + by the host controller hardware. + @retval EFI_INVALID_PARAMETER Attributes is not valid. + @retval EFI_DEVICE_ERROR An error was encountered while attempting to perform the reset operation. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_RESET)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT16 Attributes + ); + +/** + Retrieves current state of the USB host controller. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param State A pointer to the EFI_USB_HC_STATE data structure that + indicates current state of the USB host controller. + + @retval EFI_SUCCESS The state information of the host controller was returned in State. + @retval EFI_INVALID_PARAMETER State is NULL. + @retval EFI_DEVICE_ERROR An error was encountered while attempting to retrieve the host controller's + current state. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_GET_STATE)( + IN EFI_USB_HC_PROTOCOL *This, + OUT EFI_USB_HC_STATE *State + ); + +/** + Sets the USB host controller to a specific state. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param State Indicates the state of the host controller that will be set. + + @retval EFI_SUCCESS The USB host controller was successfully placed in the state specified by + State. + @retval EFI_INVALID_PARAMETER State is NULL. + @retval EFI_DEVICE_ERROR Failed to set the state specified by State due to device error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_SET_STATE)( + IN EFI_USB_HC_PROTOCOL *This, + IN EFI_USB_HC_STATE State + ); + +/** + Submits control transfer to a target USB device. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB, which is + assigned during USB enumeration. + @param IsSlowDevice Indicates whether the target device is slow device or full-speed + device. + @param MaximumPacketLength Indicates the maximum packet size that the default control + transfer endpoint is capable of sending or receiving. + @param Request A pointer to the USB device request that will be sent to the USB + device. + @param TransferDirection Specifies the data direction for the transfer. There are three + values available, EfiUsbDataIn, EfiUsbDataOut and EfiUsbNoData. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength On input, indicates the size, in bytes, of the data buffer specified + by Data. On output, indicates the amount of data actually + transferred. + @param TimeOut Indicates the maximum time, in milliseconds, which the transfer + is allowed to complete. + @param TransferResult A pointer to the detailed result information generated by this + control transfer. 
+ + @retval EFI_SUCCESS The control transfer was completed successfully. + @retval EFI_OUT_OF_RESOURCES The control transfer could not be completed due to a lack of resources. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_TIMEOUT The control transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The control transfer failed due to host controller or device error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_CONTROL_TRANSFER)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN BOOLEAN IsSlowDevice, + IN UINT8 MaximumPacketLength, + IN EFI_USB_DEVICE_REQUEST *Request, + IN EFI_USB_DATA_DIRECTION TransferDirection, + IN OUT VOID *Data OPTIONAL, + IN OUT UINTN *DataLength OPTIONAL, + IN UINTN TimeOut, + OUT UINT32 *TransferResult + ); + +/** + Submits bulk transfer to a bulk endpoint of a USB device. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB, which is + assigned during USB enumeration. + @param EndPointAddress The combination of an endpoint number and an endpoint + direction of the target USB device. Each endpoint address + supports data transfer in one direction except the control + endpoint (whose default endpoint address is 0). It is the + caller's responsibility to make sure that the EndPointAddress + represents a bulk endpoint. + @param MaximumPacketLength Indicates the maximum packet size that the default control + transfer endpoint is capable of sending or receiving. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength On input, indicates the size, in bytes, of the data buffer specified + by Data. On output, indicates the amount of data actually + transferred. + @param DataToggle A pointer to the data toggle value. + @param TimeOut Indicates the maximum time, in milliseconds, which the transfer + is allowed to complete. 
+ @param TransferResult A pointer to the detailed result information of the bulk transfer. + + @retval EFI_SUCCESS The bulk transfer was completed successfully. + @retval EFI_OUT_OF_RESOURCES The bulk transfer could not be completed due to a lack of resources. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_TIMEOUT The bulk transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The bulk transfer failed due to host controller or device error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_BULK_TRANSFER)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 MaximumPacketLength, + IN OUT VOID *Data, + IN OUT UINTN *DataLength, + IN OUT UINT8 *DataToggle, + IN UINTN TimeOut, + OUT UINT32 *TransferResult + ); + +/** + Submits an asynchronous interrupt transfer to an interrupt endpoint of a USB device. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB, which is + assigned during USB enumeration. + @param EndPointAddress The combination of an endpoint number and an endpoint + direction of the target USB device. Each endpoint address + supports data transfer in one direction except the control + endpoint (whose default endpoint address is zero). It is the + caller's responsibility to make sure that the + EndPointAddress represents an interrupt endpoint. + @param IsSlowDevice Indicates whether the target device is slow device or full-speed + device. + @param MaximumPacketLength Indicates the maximum packet size that the default control + transfer endpoint is capable of sending or receiving. + @param IsNewTransfer If TRUE, an asynchronous interrupt pipe is built between the host + and the target interrupt endpoint. If FALSE, the specified asynchronous + interrupt pipe is canceled. If TRUE, and an interrupt transfer exists + for the target end point, then EFI_INVALID_PARAMETER is returned. 
+ @param DataToggle A pointer to the data toggle value. On input, it is valid when + IsNewTransfer is TRUE, and it indicates the initial data toggle + value the asynchronous interrupt transfer should adopt. On output, + it is valid when IsNewTransfer is FALSE, and it is updated to indicate + the data toggle value of the subsequent asynchronous interrupt transfer. + @param PollingInterval Indicates the interval, in milliseconds, that the asynchronous + interrupt transfer is polled. + @param DataLength Indicates the length of data to be received at the rate specified by + PollingInterval from the target asynchronous interrupt + endpoint. This parameter is only required when IsNewTransfer is TRUE. + @param CallBackFunction The Callback function. This function is called at the rate specified by + PollingInterval. This parameter is only required when IsNewTransfer is TRUE. + @param Context The context that is passed to the CallBackFunction. + + @retval EFI_SUCCESS The asynchronous interrupt transfer request has been successfully + submitted or canceled. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_TIMEOUT The bulk transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The bulk transfer failed due to host controller or device error. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_ASYNC_INTERRUPT_TRANSFER)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN BOOLEAN IsSlowDevice, + IN UINT8 MaxiumPacketLength, + IN BOOLEAN IsNewTransfer, + IN OUT UINT8 *DataToggle, + IN UINTN PollingInterval OPTIONAL, + IN UINTN DataLength OPTIONAL, + IN EFI_ASYNC_USB_TRANSFER_CALLBACK CallBackFunction OPTIONAL, + IN VOID *Context OPTIONAL + ); + +/** + Submits synchronous interrupt transfer to an interrupt endpoint of a USB device. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. 
+ @param DeviceAddress Represents the address of the target device on the USB, which is + assigned during USB enumeration. + @param EndPointAddress The combination of an endpoint number and an endpoint + direction of the target USB device. Each endpoint address + supports data transfer in one direction except the control + endpoint (whose default endpoint address is zero). It is the + caller's responsibility to make sure that the + EndPointAddress represents an interrupt endpoint. + @param IsSlowDevice Indicates whether the target device is slow device or full-speed + device. + @param MaximumPacketLength Indicates the maximum packet size that the default control + transfer endpoint is capable of sending or receiving. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. asynchronous interrupt pipe is canceled. + @param DataLength On input, the size, in bytes, of the data buffer specified by Data. + On output, the number of bytes transferred. + @param DataToggle A pointer to the data toggle value. On input, it indicates the initial + data toggle value the synchronous interrupt transfer should adopt; + on output, it is updated to indicate the data toggle value of the + subsequent synchronous interrupt transfer. + @param TimeOut Indicates the maximum time, in milliseconds, which the transfer + is allowed to complete. + @param TransferResult A pointer to the detailed result information from the synchronous + interrupt transfer. + + @retval EFI_SUCCESS The synchronous interrupt transfer was completed successfully. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_TIMEOUT The synchronous interrupt transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The synchronous interrupt transfer failed due to host controller or device error. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_SYNC_INTERRUPT_TRANSFER)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN BOOLEAN IsSlowDevice, + IN UINT8 MaximumPacketLength, + IN OUT VOID *Data, + IN OUT UINTN *DataLength, + IN OUT UINT8 *DataToggle, + IN UINTN TimeOut, + OUT UINT32 *TransferResult + ); + +/** + Submits isochronous transfer to an isochronous endpoint of a USB device. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB, which is + assigned during USB enumeration. + @param EndPointAddress The combination of an endpoint number and an endpoint + direction of the target USB device. Each endpoint address + supports data transfer in one direction except the control + endpoint (whose default endpoint address is 0). It is the caller's + responsibility to make sure that the EndPointAddress + represents an isochronous endpoint. + @param MaximumPacketLength Indicates the maximum packet size that the default control + transfer endpoint is capable of sending or receiving. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. asynchronous interrupt pipe is canceled. + @param DataLength Specifies the length, in bytes, of the data to be sent to or received + from the USB device. + @param TransferResult A pointer to the detailed result information from the isochronous + transfer. + + @retval EFI_SUCCESS The isochronous transfer was completed successfully. + @retval EFI_OUT_OF_RESOURCES The isochronous could not be completed due to a lack of resources. + @retval EFI_INVALID_PARAMETER Some parameters are invalid. + @retval EFI_TIMEOUT The isochronous transfer failed due to timeout. + @retval EFI_DEVICE_ERROR The isochronous transfer failed due to host controller or device error. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_ISOCHRONOUS_TRANSFER)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 MaximumPacketLength, + IN OUT VOID *Data, + IN UINTN DataLength, + OUT UINT32 *TransferResult + ); + +/** + Submits nonblocking isochronous transfer to an isochronous endpoint of a USB device. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param DeviceAddress Represents the address of the target device on the USB, which is + assigned during USB enumeration. + @param EndPointAddress The combination of an endpoint number and an endpoint + direction of the target USB device. Each endpoint address + supports data transfer in one direction except the control + endpoint (whose default endpoint address is zero). It is the + caller's responsibility to make sure that the + EndPointAddress represents an isochronous endpoint. + @param MaximumPacketLength Indicates the maximum packet size that the default control + transfer endpoint is capable of sending or receiving. For isochronous + endpoints, this value is used to reserve the bus time in the schedule, + required for the perframe data payloads. The pipe may, on an ongoing basis, + actually use less bandwidth than that reserved. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. asynchronous interrupt pipe is canceled. + @param DataLength Specifies the length, in bytes, of the data to be sent to or received + from the USB device. + @param IsochronousCallback The Callback function.This function is called if the requested + isochronous transfer is completed. + @param Context Data passed to the IsochronousCallback function. This is + an optional parameter and may be NULL. + + @retval EFI_SUCCESS The asynchronous isochronous transfer was completed successfully. + @retval EFI_OUT_OF_RESOURCES The asynchronous isochronous could not be completed due to a lack of resources. 
+ @retval EFI_INVALID_PARAMETER Some parameters are invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_ASYNC_ISOCHRONOUS_TRANSFER)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 DeviceAddress, + IN UINT8 EndPointAddress, + IN UINT8 MaximumPacketLength, + IN OUT VOID *Data, + IN UINTN DataLength, + IN EFI_ASYNC_USB_TRANSFER_CALLBACK IsochronousCallBack, + IN VOID *Context OPTIONAL + ); + +/** + Retrieves the number of root hub ports. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param PortNumber A pointer to the number of the root hub ports. + + @retval EFI_SUCCESS The port number was retrieved successfully. + @retval EFI_DEVICE_ERROR An error was encountered while attempting to retrieve the port number. + @retval EFI_INVALID_PARAMETER PortNumber is NULL. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_GET_ROOTHUB_PORT_NUMBER)( + IN EFI_USB_HC_PROTOCOL *This, + OUT UINT8 *PortNumber + ); + +/** + Retrieves the current status of a USB root hub port. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param PortNumber Specifies the root hub port from which the status is to be retrieved. + This value is zero based. For example, if a root hub has two ports, + then the first port is numbered 0, and the second port is + numbered 1. + @param PortStatus A pointer to the current port status bits and port status change bits. + + @retval EFI_SUCCESS The status of the USB root hub port specified by PortNumber + was returned in PortStatus. + @retval EFI_INVALID_PARAMETER PortNumber is invalid. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_GET_ROOTHUB_PORT_STATUS)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 PortNumber, + OUT EFI_USB_PORT_STATUS *PortStatus + ); + +/** + Sets a feature for the specified root hub port. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param PortNumber Specifies the root hub port from which the status is to be retrieved. + This value is zero based. 
For example, if a root hub has two ports, + then the first port is numbered 0, and the second port is + numbered 1. + @param PortFeature Indicates the feature selector associated with the feature set + request. + + @retval EFI_SUCCESS The feature specified by PortFeature was set for the USB + root hub port specified by PortNumber. + @retval EFI_INVALID_PARAMETER PortNumber is invalid or PortFeature is invalid for this function. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_SET_ROOTHUB_PORT_FEATURE)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 PortNumber, + IN EFI_USB_PORT_FEATURE PortFeature + ); + +/** + Clears a feature for the specified root hub port. + + @param This A pointer to the EFI_USB_HC_PROTOCOL instance. + @param PortNumber Specifies the root hub port from which the status is to be retrieved. + This value is zero based. For example, if a root hub has two ports, + then the first port is numbered 0, and the second port is + numbered 1. + @param PortFeature Indicates the feature selector associated with the feature clear + request. + + @retval EFI_SUCCESS The feature specified by PortFeature was cleared for the USB + root hub port specified by PortNumber. + @retval EFI_INVALID_PARAMETER PortNumber is invalid or PortFeature is invalid for this function. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_HC_PROTOCOL_CLEAR_ROOTHUB_PORT_FEATURE)( + IN EFI_USB_HC_PROTOCOL *This, + IN UINT8 PortNumber, + IN EFI_USB_PORT_FEATURE PortFeature + ); + + +/// +/// The EFI_USB_HC_PROTOCOL provides USB host controller management, basic data transactions +/// over a USB bus, and USB root hub access. A device driver that wishes to manage a USB bus in a +/// system retrieves the EFI_USB_HC_PROTOCOL instance that is associated with the USB bus to be +/// managed. A device handle for a USB host controller will minimally contain an +/// EFI_DEVICE_PATH_PROTOCOL instance, and an EFI_USB_HC_PROTOCOL instance. 
+/// +struct _EFI_USB_HC_PROTOCOL { + EFI_USB_HC_PROTOCOL_RESET Reset; + EFI_USB_HC_PROTOCOL_GET_STATE GetState; + EFI_USB_HC_PROTOCOL_SET_STATE SetState; + EFI_USB_HC_PROTOCOL_CONTROL_TRANSFER ControlTransfer; + EFI_USB_HC_PROTOCOL_BULK_TRANSFER BulkTransfer; + EFI_USB_HC_PROTOCOL_ASYNC_INTERRUPT_TRANSFER AsyncInterruptTransfer; + EFI_USB_HC_PROTOCOL_SYNC_INTERRUPT_TRANSFER SyncInterruptTransfer; + EFI_USB_HC_PROTOCOL_ISOCHRONOUS_TRANSFER IsochronousTransfer; + EFI_USB_HC_PROTOCOL_ASYNC_ISOCHRONOUS_TRANSFER AsyncIsochronousTransfer; + EFI_USB_HC_PROTOCOL_GET_ROOTHUB_PORT_NUMBER GetRootHubPortNumber; + EFI_USB_HC_PROTOCOL_GET_ROOTHUB_PORT_STATUS GetRootHubPortStatus; + EFI_USB_HC_PROTOCOL_SET_ROOTHUB_PORT_FEATURE SetRootHubPortFeature; + EFI_USB_HC_PROTOCOL_CLEAR_ROOTHUB_PORT_FEATURE ClearRootHubPortFeature; + /// + /// The major revision number of the USB host controller. The revision information + /// indicates the release of the Universal Serial Bus Specification with which the + /// host controller is compliant. + /// + UINT16 MajorRevision; + /// + /// The minor revision number of the USB host controller. The revision information + /// indicates the release of the Universal Serial Bus Specification with which the + /// host controller is compliant. + /// + UINT16 MinorRevision; +}; + +extern EFI_GUID gEfiUsbHcProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/UsbIo.h b/src/include/ipxe/efi/Protocol/UsbIo.h new file mode 100644 index 00000000..b8d33ee0 --- /dev/null +++ b/src/include/ipxe/efi/Protocol/UsbIo.h @@ -0,0 +1,514 @@ +/** @file + EFI Usb I/O Protocol as defined in UEFI specification. + This protocol is used by code, typically drivers, running in the EFI + boot services environment to access USB devices like USB keyboards, + mice and mass storage devices. In particular, functions for managing devices + on USB buses are defined here. + + Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef __USB_IO_H__ +#define __USB_IO_H__ + +FILE_LICENCE ( BSD3 ); + +#include + +// +// Global ID for the USB I/O Protocol +// +#define EFI_USB_IO_PROTOCOL_GUID \ + { \ + 0x2B2F68D6, 0x0CD2, 0x44cf, {0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75 } \ + } + +typedef struct _EFI_USB_IO_PROTOCOL EFI_USB_IO_PROTOCOL; + +// +// Related Definition for EFI USB I/O protocol +// + +// +// USB standard descriptors and reqeust +// +typedef USB_DEVICE_REQUEST EFI_USB_DEVICE_REQUEST; +typedef USB_DEVICE_DESCRIPTOR EFI_USB_DEVICE_DESCRIPTOR; +typedef USB_CONFIG_DESCRIPTOR EFI_USB_CONFIG_DESCRIPTOR; +typedef USB_INTERFACE_DESCRIPTOR EFI_USB_INTERFACE_DESCRIPTOR; +typedef USB_ENDPOINT_DESCRIPTOR EFI_USB_ENDPOINT_DESCRIPTOR; + +/// +/// USB data transfer direction +/// +typedef enum { + EfiUsbDataIn, + EfiUsbDataOut, + EfiUsbNoData +} EFI_USB_DATA_DIRECTION; + +// +// USB Transfer Results +// +#define EFI_USB_NOERROR 0x00 +#define EFI_USB_ERR_NOTEXECUTE 0x01 +#define EFI_USB_ERR_STALL 0x02 +#define EFI_USB_ERR_BUFFER 0x04 +#define EFI_USB_ERR_BABBLE 0x08 +#define EFI_USB_ERR_NAK 0x10 +#define EFI_USB_ERR_CRC 0x20 +#define EFI_USB_ERR_TIMEOUT 0x40 +#define EFI_USB_ERR_BITSTUFF 0x80 +#define EFI_USB_ERR_SYSTEM 0x100 + +/** + Async USB transfer callback routine. + + @param Data Data received or sent via the USB Asynchronous Transfer, if the + transfer completed successfully. + @param DataLength The length of Data received or sent via the Asynchronous + Transfer, if transfer successfully completes. 
+ @param Context Data passed from UsbAsyncInterruptTransfer() request. + @param Status Indicates the result of the asynchronous transfer. + + @retval EFI_SUCCESS The asynchronous USB transfer request has been successfully executed. + @retval EFI_DEVICE_ERROR The asynchronous USB transfer request failed. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_ASYNC_USB_TRANSFER_CALLBACK)( + IN VOID *Data, + IN UINTN DataLength, + IN VOID *Context, + IN UINT32 Status + ); + +// +// Prototype for EFI USB I/O protocol +// + + +/** + This function is used to manage a USB device with a control transfer pipe. A control transfer is + typically used to perform device initialization and configuration. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param Request A pointer to the USB device request that will be sent to the USB + device. + @param Direction Indicates the data direction. + @param Timeout Indicating the transfer should be completed within this time frame. + The units are in milliseconds. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength The size, in bytes, of the data buffer specified by Data. + @param Status A pointer to the result of the USB transfer. + + @retval EFI_SUCCESS The control transfer has been successfully executed. + @retval EFI_DEVICE_ERROR The transfer failed. The transfer status is returned in Status. + @retval EFI_INVALID_PARAMETE One or more parameters are invalid. + @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources. + @retval EFI_TIMEOUT The control transfer fails due to timeout. 
+ +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_CONTROL_TRANSFER)( + IN EFI_USB_IO_PROTOCOL *This, + IN EFI_USB_DEVICE_REQUEST *Request, + IN EFI_USB_DATA_DIRECTION Direction, + IN UINT32 Timeout, + IN OUT VOID *Data OPTIONAL, + IN UINTN DataLength OPTIONAL, + OUT UINT32 *Status + ); + +/** + This function is used to manage a USB device with the bulk transfer pipe. Bulk Transfers are + typically used to transfer large amounts of data to/from USB devices. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param DeviceEndpoint The destination USB device endpoint to which the + device request is being sent. DeviceEndpoint must + be between 0x01 and 0x0F or between 0x81 and 0x8F, + otherwise EFI_INVALID_PARAMETER is returned. If + the endpoint is not a BULK endpoint, EFI_INVALID_PARAMETER + is returned. The MSB of this parameter indicates + the endpoint direction. The number "1" stands for + an IN endpoint, and "0" stands for an OUT endpoint. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength The size, in bytes, of the data buffer specified by Data. + On input, the size, in bytes, of the data buffer specified by Data. + On output, the number of bytes that were actually transferred. + @param Timeout Indicating the transfer should be completed within this time frame. + The units are in milliseconds. If Timeout is 0, then the + caller must wait for the function to be completed until + EFI_SUCCESS or EFI_DEVICE_ERROR is returned. + @param Status This parameter indicates the USB transfer status. + + @retval EFI_SUCCESS The bulk transfer has been successfully executed. + @retval EFI_DEVICE_ERROR The transfer failed. The transfer status is returned in Status. + @retval EFI_INVALID_PARAMETE One or more parameters are invalid. + @retval EFI_OUT_OF_RESOURCES The request could not be submitted due to a lack of resources. 
+ @retval EFI_TIMEOUT The control transfer fails due to timeout. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_BULK_TRANSFER)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT8 DeviceEndpoint, + IN OUT VOID *Data, + IN OUT UINTN *DataLength, + IN UINTN Timeout, + OUT UINT32 *Status + ); + +/** + This function is used to manage a USB device with an interrupt transfer pipe. An Asynchronous + Interrupt Transfer is typically used to query a device's status at a fixed rate. For example, + keyboard, mouse, and hub devices use this type of transfer to query their interrupt endpoints at + a fixed rate. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param DeviceEndpoint The destination USB device endpoint to which the + device request is being sent. DeviceEndpoint must + be between 0x01 and 0x0F or between 0x81 and 0x8F, + otherwise EFI_INVALID_PARAMETER is returned. If + the endpoint is not a BULK endpoint, EFI_INVALID_PARAMETER + is returned. The MSB of this parameter indicates + the endpoint direction. The number "1" stands for + an IN endpoint, and "0" stands for an OUT endpoint. + @param IsNewTransfer If TRUE, a new transfer will be submitted to USB controller. If + FALSE, the interrupt transfer is deleted from the device's interrupt + transfer queue. + @param PollingInterval Indicates the periodic rate, in milliseconds, that the transfer is to be + executed.This parameter is required when IsNewTransfer is TRUE. The + value must be between 1 to 255, otherwise EFI_INVALID_PARAMETER is returned. + The units are in milliseconds. + @param DataLength Specifies the length, in bytes, of the data to be received from the + USB device. This parameter is only required when IsNewTransfer is TRUE. + @param InterruptCallback The Callback function. This function is called if the asynchronous + interrupt transfer is completed. This parameter is required + when IsNewTransfer is TRUE. + @param Context Data passed to the InterruptCallback function. 
This is an optional + parameter and may be NULL. + + @retval EFI_SUCCESS The asynchronous USB transfer request transfer has been successfully executed. + @retval EFI_DEVICE_ERROR The asynchronous USB transfer request failed. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_ASYNC_INTERRUPT_TRANSFER)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT8 DeviceEndpoint, + IN BOOLEAN IsNewTransfer, + IN UINTN PollingInterval OPTIONAL, + IN UINTN DataLength OPTIONAL, + IN EFI_ASYNC_USB_TRANSFER_CALLBACK InterruptCallBack OPTIONAL, + IN VOID *Context OPTIONAL + ); + +/** + This function is used to manage a USB device with an interrupt transfer pipe. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param DeviceEndpoint The destination USB device endpoint to which the + device request is being sent. DeviceEndpoint must + be between 0x01 and 0x0F or between 0x81 and 0x8F, + otherwise EFI_INVALID_PARAMETER is returned. If + the endpoint is not a BULK endpoint, EFI_INVALID_PARAMETER + is returned. The MSB of this parameter indicates + the endpoint direction. The number "1" stands for + an IN endpoint, and "0" stands for an OUT endpoint. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength On input, then size, in bytes, of the buffer Data. On output, the + amount of data actually transferred. + @param Timeout The time out, in seconds, for this transfer. If Timeout is 0, + then the caller must wait for the function to be completed + until EFI_SUCCESS or EFI_DEVICE_ERROR is returned. If the + transfer is not completed in this time frame, then EFI_TIMEOUT is returned. + @param Status This parameter indicates the USB transfer status. + + @retval EFI_SUCCESS The sync interrupt transfer has been successfully executed. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_DEVICE_ERROR The sync interrupt transfer request failed. 
+ @retval EFI_OUT_OF_RESOURCES The request could not be submitted due to a lack of resources. + @retval EFI_TIMEOUT The transfer fails due to timeout. +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_SYNC_INTERRUPT_TRANSFER)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT8 DeviceEndpoint, + IN OUT VOID *Data, + IN OUT UINTN *DataLength, + IN UINTN Timeout, + OUT UINT32 *Status + ); + +/** + This function is used to manage a USB device with an isochronous transfer pipe. An Isochronous + transfer is typically used to transfer streaming data. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param DeviceEndpoint The destination USB device endpoint to which the + device request is being sent. DeviceEndpoint must + be between 0x01 and 0x0F or between 0x81 and 0x8F, + otherwise EFI_INVALID_PARAMETER is returned. If + the endpoint is not a BULK endpoint, EFI_INVALID_PARAMETER + is returned. The MSB of this parameter indicates + the endpoint direction. The number "1" stands for + an IN endpoint, and "0" stands for an OUT endpoint. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength The size, in bytes, of the data buffer specified by Data. + @param Status This parameter indicates the USB transfer status. + + @retval EFI_SUCCESS The isochronous transfer has been successfully executed. + @retval EFI_INVALID_PARAMETER The parameter DeviceEndpoint is not valid. + @retval EFI_DEVICE_ERROR The transfer failed due to the reason other than timeout, The error status + is returned in Status. + @retval EFI_OUT_OF_RESOURCES The request could not be submitted due to a lack of resources. + @retval EFI_TIMEOUT The transfer fails due to timeout. 
+**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_ISOCHRONOUS_TRANSFER)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT8 DeviceEndpoint, + IN OUT VOID *Data, + IN UINTN DataLength, + OUT UINT32 *Status + ); + +/** + This function is used to manage a USB device with an isochronous transfer pipe. An Isochronous + transfer is typically used to transfer streaming data. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param DeviceEndpoint The destination USB device endpoint to which the + device request is being sent. DeviceEndpoint must + be between 0x01 and 0x0F or between 0x81 and 0x8F, + otherwise EFI_INVALID_PARAMETER is returned. If + the endpoint is not a BULK endpoint, EFI_INVALID_PARAMETER + is returned. The MSB of this parameter indicates + the endpoint direction. The number "1" stands for + an IN endpoint, and "0" stands for an OUT endpoint. + @param Data A pointer to the buffer of data that will be transmitted to USB + device or received from USB device. + @param DataLength The size, in bytes, of the data buffer specified by Data. + This is an optional parameter and may be NULL. + @param IsochronousCallback The IsochronousCallback() function.This function is + called if the requested isochronous transfer is completed. + @param Context Data passed to the IsochronousCallback() function. + + @retval EFI_SUCCESS The asynchronous isochronous transfer has been successfully submitted + to the system. + @retval EFI_INVALID_PARAMETER The parameter DeviceEndpoint is not valid. + @retval EFI_OUT_OF_RESOURCES The request could not be submitted due to a lack of resources. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_ASYNC_ISOCHRONOUS_TRANSFER)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT8 DeviceEndpoint, + IN OUT VOID *Data, + IN UINTN DataLength, + IN EFI_ASYNC_USB_TRANSFER_CALLBACK IsochronousCallBack, + IN VOID *Context OPTIONAL + ); + +/** + Resets and reconfigures the USB controller. 
This function will work for all USB devices except
+  USB Hub Controllers.
+
+  @param This            A pointer to the EFI_USB_IO_PROTOCOL instance.
+
+  @retval EFI_SUCCESS           The USB controller was reset.
+  @retval EFI_INVALID_PARAMETER If the controller specified by This is a USB hub.
+  @retval EFI_DEVICE_ERROR      An error occurred during the reconfiguration process.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_USB_IO_PORT_RESET)(
+  IN EFI_USB_IO_PROTOCOL    *This
+  );
+
+/**
+  Retrieves the USB Device Descriptor.
+
+  @param This              A pointer to the EFI_USB_IO_PROTOCOL instance.
+  @param DeviceDescriptor  A pointer to the caller allocated USB Device Descriptor.
+
+  @retval EFI_SUCCESS           The device descriptor was retrieved successfully.
+  @retval EFI_INVALID_PARAMETER DeviceDescriptor is NULL.
+  @retval EFI_NOT_FOUND         The device descriptor was not found. The device may not be configured.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_USB_IO_GET_DEVICE_DESCRIPTOR)(
+  IN EFI_USB_IO_PROTOCOL        *This,
+  OUT EFI_USB_DEVICE_DESCRIPTOR *DeviceDescriptor
+  );
+
+/**
+  Retrieves the USB Active Configuration Descriptor.
+
+  @param This                    A pointer to the EFI_USB_IO_PROTOCOL instance.
+  @param ConfigurationDescriptor A pointer to the caller allocated USB Active Configuration
+                                 Descriptor.
+  @retval EFI_SUCCESS           The active configuration descriptor was retrieved successfully.
+  @retval EFI_INVALID_PARAMETER ConfigurationDescriptor is NULL.
+  @retval EFI_NOT_FOUND         An active configuration descriptor cannot be found. The device may not
+                                be configured.
+
+**/
+typedef
+EFI_STATUS
+(EFIAPI *EFI_USB_IO_GET_CONFIG_DESCRIPTOR)(
+  IN EFI_USB_IO_PROTOCOL        *This,
+  OUT EFI_USB_CONFIG_DESCRIPTOR *ConfigurationDescriptor
+  );
+
+/**
+  Retrieves the Interface Descriptor for a USB Device Controller. As stated earlier, an interface
+  within a USB device is equivalent to a USB Controller within the current configuration.
+
+  @param This                   A pointer to the EFI_USB_IO_PROTOCOL instance. 
+ @param InterfaceDescriptor A pointer to the caller allocated USB Interface Descriptor within + the configuration setting. + @retval EFI_SUCCESS The interface descriptor retrieved successfully. + @retval EFI_INVALID_PARAMETER InterfaceDescriptor is NULL. + @retval EFI_NOT_FOUND The interface descriptor cannot be found. The device may not be + correctly configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_GET_INTERFACE_DESCRIPTOR)( + IN EFI_USB_IO_PROTOCOL *This, + OUT EFI_USB_INTERFACE_DESCRIPTOR *InterfaceDescriptor + ); + +/** + Retrieves an Endpoint Descriptor within a USB Controller. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param EndpointIndex Indicates which endpoint descriptor to retrieve. + @param EndpointDescriptor A pointer to the caller allocated USB Endpoint Descriptor of + a USB controller. + + @retval EFI_SUCCESS The endpoint descriptor was retrieved successfully. + @retval EFI_INVALID_PARAMETER One or more parameters are invalid. + @retval EFI_NOT_FOUND The endpoint descriptor cannot be found. The device may not be + correctly configured. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_GET_ENDPOINT_DESCRIPTOR)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT8 EndpointIndex, + OUT EFI_USB_ENDPOINT_DESCRIPTOR *EndpointDescriptor + ); + +/** + Retrieves a string stored in a USB Device. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param LangID The Language ID for the string being retrieved. + @param StringID The ID of the string being retrieved. + @param String A pointer to a buffer allocated by this function with + AllocatePool() to store the string.If this function + returns EFI_SUCCESS, it stores the string the caller + wants to get. The caller should release the string + buffer with FreePool() after the string is not used any more. + + @retval EFI_SUCCESS The string was retrieved successfully. + @retval EFI_NOT_FOUND The string specified by LangID and StringID was not found. 
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources to allocate the return buffer String. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_GET_STRING_DESCRIPTOR)( + IN EFI_USB_IO_PROTOCOL *This, + IN UINT16 LangID, + IN UINT8 StringID, + OUT CHAR16 **String + ); + +/** + Retrieves all the language ID codes that the USB device supports. + + @param This A pointer to the EFI_USB_IO_PROTOCOL instance. + @param LangIDTable Language ID for the string the caller wants to get. + This is a 16-bit ID defined by Microsoft. This + buffer pointer is allocated and maintained by + the USB Bus Driver, the caller should not modify + its contents. + @param TableSize The size, in bytes, of the table LangIDTable. + + @retval EFI_SUCCESS The support languages were retrieved successfully. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_USB_IO_GET_SUPPORTED_LANGUAGE)( + IN EFI_USB_IO_PROTOCOL *This, + OUT UINT16 **LangIDTable, + OUT UINT16 *TableSize + ); + +/// +/// The EFI_USB_IO_PROTOCOL provides four basic transfers types described +/// in the USB 1.1 Specification. These include control transfer, interrupt +/// transfer, bulk transfer and isochronous transfer. The EFI_USB_IO_PROTOCOL +/// also provides some basic USB device/controller management and configuration +/// interfaces. A USB device driver uses the services of this protocol to manage USB devices. 
+/// +struct _EFI_USB_IO_PROTOCOL { + // + // IO transfer + // + EFI_USB_IO_CONTROL_TRANSFER UsbControlTransfer; + EFI_USB_IO_BULK_TRANSFER UsbBulkTransfer; + EFI_USB_IO_ASYNC_INTERRUPT_TRANSFER UsbAsyncInterruptTransfer; + EFI_USB_IO_SYNC_INTERRUPT_TRANSFER UsbSyncInterruptTransfer; + EFI_USB_IO_ISOCHRONOUS_TRANSFER UsbIsochronousTransfer; + EFI_USB_IO_ASYNC_ISOCHRONOUS_TRANSFER UsbAsyncIsochronousTransfer; + + // + // Common device request + // + EFI_USB_IO_GET_DEVICE_DESCRIPTOR UsbGetDeviceDescriptor; + EFI_USB_IO_GET_CONFIG_DESCRIPTOR UsbGetConfigDescriptor; + EFI_USB_IO_GET_INTERFACE_DESCRIPTOR UsbGetInterfaceDescriptor; + EFI_USB_IO_GET_ENDPOINT_DESCRIPTOR UsbGetEndpointDescriptor; + EFI_USB_IO_GET_STRING_DESCRIPTOR UsbGetStringDescriptor; + EFI_USB_IO_GET_SUPPORTED_LANGUAGE UsbGetSupportedLanguages; + + // + // Reset controller's parent port + // + EFI_USB_IO_PORT_RESET UsbPortReset; +}; + +extern EFI_GUID gEfiUsbIoProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/Protocol/VlanConfig.h b/src/include/ipxe/efi/Protocol/VlanConfig.h new file mode 100644 index 00000000..928faded --- /dev/null +++ b/src/include/ipxe/efi/Protocol/VlanConfig.h @@ -0,0 +1,145 @@ +/** @file + EFI VLAN Config protocol is to provide manageability interface for VLAN configuration. + + Copyright (c) 2009, Intel Corporation. All rights reserved.
+ This program and the accompanying materials + are licensed and made available under the terms and conditions of the BSD License + which accompanies this distribution. The full text of the license may be found at + http://opensource.org/licenses/bsd-license.php + + THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + + @par Revision Reference: + This Protocol is introduced in UEFI Specification 2.2 + +**/ + +#ifndef __EFI_VLANCONFIG_PROTOCOL_H__ +#define __EFI_VLANCONFIG_PROTOCOL_H__ + +FILE_LICENCE ( BSD3 ); + + +#define EFI_VLAN_CONFIG_PROTOCOL_GUID \ + { \ + 0x9e23d768, 0xd2f3, 0x4366, {0x9f, 0xc3, 0x3a, 0x7a, 0xba, 0x86, 0x43, 0x74 } \ + } + +typedef struct _EFI_VLAN_CONFIG_PROTOCOL EFI_VLAN_CONFIG_PROTOCOL; + + +/// +/// EFI_VLAN_FIND_DATA +/// +typedef struct { + UINT16 VlanId; ///< Vlan Identifier. + UINT8 Priority; ///< Priority of this VLAN. +} EFI_VLAN_FIND_DATA; + + +/** + Create a VLAN device or modify the configuration parameter of an + already-configured VLAN. + + The Set() function is used to create a new VLAN device or change the VLAN + configuration parameters. If the VlanId hasn't been configured in the + physical Ethernet device, a new VLAN device will be created. If a VLAN with + this VlanId is already configured, then related configuration will be updated + as the input parameters. + + If VlanId is zero, the VLAN device will send and receive untagged frames. + Otherwise, the VLAN device will send and receive VLAN-tagged frames containing the VlanId. + If VlanId is out of scope of (0-4094), EFI_INVALID_PARAMETER is returned. + If Priority is out of the scope of (0-7), then EFI_INVALID_PARAMETER is returned. + If there is not enough system memory to perform the registration, then + EFI_OUT_OF_RESOURCES is returned. + + @param[in] This Points to the EFI_VLAN_CONFIG_PROTOCOL. 
+ @param[in] VlanId A unique identifier (1-4094) of the VLAN which is being created + or modified, or zero (0). + @param[in] Priority 3 bit priority in VLAN header. Priority 0 is default value. If + VlanId is zero (0), Priority is ignored. + + @retval EFI_SUCCESS The VLAN is successfully configured. + @retval EFI_INVALID_PARAMETER One or more of following conditions is TRUE: + - This is NULL. + - VlanId is an invalid VLAN Identifier. + - Priority is invalid. + @retval EFI_OUT_OF_RESOURCES There is not enough system memory to perform the registration. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_VLAN_CONFIG_SET)( + IN EFI_VLAN_CONFIG_PROTOCOL *This, + IN UINT16 VlanId, + IN UINT8 Priority + ); + +/** + Find configuration information for specified VLAN or all configured VLANs. + + The Find() function is used to find the configuration information for matching + VLAN and allocate a buffer into which those entries are copied. + + @param[in] This Points to the EFI_VLAN_CONFIG_PROTOCOL. + @param[in] VlanId Pointer to VLAN identifier. Set to NULL to find all + configured VLANs. + @param[out] NumberOfVlan The number of VLANs which is found by the specified criteria. + @param[out] Entries The buffer which receive the VLAN configuration. + + @retval EFI_SUCCESS The VLAN is successfully found. + @retval EFI_INVALID_PARAMETER One or more of following conditions is TRUE: + - This is NULL. + - Specified VlanId is invalid. + @retval EFI_NOT_FOUND No matching VLAN is found. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_VLAN_CONFIG_FIND)( + IN EFI_VLAN_CONFIG_PROTOCOL *This, + IN UINT16 *VlanId OPTIONAL, + OUT UINT16 *NumberOfVlan, + OUT EFI_VLAN_FIND_DATA **Entries + ); + +/** + Remove the configured VLAN device. + + The Remove() function is used to remove the specified VLAN device. + If the VlanId is out of the scope of (0-4094), EFI_INVALID_PARAMETER is returned. + If specified VLAN hasn't been previously configured, EFI_NOT_FOUND is returned. 
+ + @param[in] This Points to the EFI_VLAN_CONFIG_PROTOCOL. + @param[in] VlanId Identifier (0-4094) of the VLAN to be removed. + + @retval EFI_SUCCESS The VLAN is successfully removed. + @retval EFI_INVALID_PARAMETER One or more of following conditions is TRUE: + - This is NULL. + - VlanId is an invalid parameter. + @retval EFI_NOT_FOUND The to-be-removed VLAN does not exist. + +**/ +typedef +EFI_STATUS +(EFIAPI *EFI_VLAN_CONFIG_REMOVE)( + IN EFI_VLAN_CONFIG_PROTOCOL *This, + IN UINT16 VlanId + ); + +/// +/// EFI_VLAN_CONFIG_PROTOCOL +/// provide manageability interface for VLAN setting. The intended +/// VLAN tagging implementation is IEEE802.1Q. +/// +struct _EFI_VLAN_CONFIG_PROTOCOL { + EFI_VLAN_CONFIG_SET Set; + EFI_VLAN_CONFIG_FIND Find; + EFI_VLAN_CONFIG_REMOVE Remove; +}; + +extern EFI_GUID gEfiVlanConfigProtocolGuid; + +#endif diff --git a/src/include/ipxe/efi/efi_acpi.h b/src/include/ipxe/efi/efi_acpi.h new file mode 100644 index 00000000..01456f13 --- /dev/null +++ b/src/include/ipxe/efi/efi_acpi.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_EFI_ACPI_H +#define _IPXE_EFI_ACPI_H + +/** @file + * + * iPXE ACPI API for EFI + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef ACPI_EFI +#define ACPI_PREFIX_efi +#else +#define ACPI_PREFIX_efi __efi_ +#endif + +#endif /* _IPXE_EFI_ACPI_H */ diff --git a/src/include/ipxe/efi/efi_autoboot.h b/src/include/ipxe/efi/efi_autoboot.h new file mode 100644 index 00000000..1d5ddc8c --- /dev/null +++ b/src/include/ipxe/efi/efi_autoboot.h @@ -0,0 +1,14 @@ +#ifndef _IPXE_EFI_AUTOBOOT_H +#define _IPXE_EFI_AUTOBOOT_H + +/** @file + * + * EFI autoboot device + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern void efi_set_autoboot ( void ); + +#endif /* _IPXE_EFI_AUTOBOOT_H */ diff --git a/src/include/ipxe/efi/efi_blacklist.h b/src/include/ipxe/efi/efi_blacklist.h new file mode 100644 index 00000000..c5a5a61d --- /dev/null +++ b/src/include/ipxe/efi/efi_blacklist.h @@ -0,0 +1,13 @@ +#ifndef _IPXE_EFI_BLACKLIST_H +#define 
_IPXE_EFI_BLACKLIST_H
+
+/** @file
+ *
+ * EFI driver blacklist
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+extern void efi_unload_blacklist ( void );
+
+#endif /* _IPXE_EFI_BLACKLIST_H */ diff --git a/src/include/ipxe/efi/efi_block.h b/src/include/ipxe/efi/efi_block.h new file mode 100644 index 00000000..f8cf7fc1 --- /dev/null +++ b/src/include/ipxe/efi/efi_block.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_EFI_BLOCK_H
+#define _IPXE_EFI_BLOCK_H
+
+/** @file
+ *
+ * EFI block device protocols
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#ifdef SANBOOT_EFI
+#define SANBOOT_PREFIX_efi
+#else
+#define SANBOOT_PREFIX_efi __efi_
+#endif
+
+#endif /* _IPXE_EFI_BLOCK_H */ diff --git a/src/include/ipxe/efi/efi_null.h b/src/include/ipxe/efi/efi_null.h new file mode 100644 index 00000000..cc91e09b --- /dev/null +++ b/src/include/ipxe/efi/efi_null.h @@ -0,0 +1,31 @@ +#ifndef _IPXE_EFI_NULL_H
+#define _IPXE_EFI_NULL_H
+
+/** @file
+ *
+ * EFI null interfaces
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+extern void efi_nullify_snp ( EFI_SIMPLE_NETWORK_PROTOCOL *snp );
+extern void efi_nullify_nii ( EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL *nii );
+extern void efi_nullify_name2 ( EFI_COMPONENT_NAME2_PROTOCOL *name2 );
+extern void efi_nullify_load_file ( EFI_LOAD_FILE_PROTOCOL *load_file );
+extern void efi_nullify_hii ( EFI_HII_CONFIG_ACCESS_PROTOCOL *hii );
+extern void efi_nullify_block ( EFI_BLOCK_IO_PROTOCOL *block );
+extern void efi_nullify_pxe ( EFI_PXE_BASE_CODE_PROTOCOL *pxe );
+extern void efi_nullify_apple ( EFI_APPLE_NET_BOOT_PROTOCOL *apple );
+
+#endif /* _IPXE_EFI_NULL_H */ diff --git a/src/include/ipxe/efi/efi_path.h b/src/include/ipxe/efi/efi_path.h new file mode 100644 index 00000000..76ded728 --- /dev/null +++ b/src/include/ipxe/efi/efi_path.h @@ -0,0 +1,43 @@ +#ifndef _IPXE_EFI_PATH_H
+#define _IPXE_EFI_PATH_H
+
+/** @file
+ *
+ * EFI device paths
+ 
* + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +struct net_device; +struct uri; +struct iscsi_session; +struct aoe_device; +struct fcp_description; +struct ib_srp_device; +struct usb_function; + +extern EFI_DEVICE_PATH_PROTOCOL * +efi_path_end ( EFI_DEVICE_PATH_PROTOCOL *path ); +extern size_t efi_path_len ( EFI_DEVICE_PATH_PROTOCOL *path ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_paths ( EFI_DEVICE_PATH_PROTOCOL *first, + ... ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_netdev_path ( struct net_device *netdev ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_uri_path ( struct uri *uri ); +extern EFI_DEVICE_PATH_PROTOCOL * +efi_iscsi_path ( struct iscsi_session *iscsi ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_aoe_path ( struct aoe_device *aoedev ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_fcp_path ( struct fcp_description *desc ); +extern EFI_DEVICE_PATH_PROTOCOL * +efi_ib_srp_path ( struct ib_srp_device *ib_srp ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_usb_path ( struct usb_function *func ); + +extern EFI_DEVICE_PATH_PROTOCOL * efi_describe ( struct interface *interface ); +#define efi_describe_TYPE( object_type ) \ + typeof ( EFI_DEVICE_PATH_PROTOCOL * ( object_type ) ) + +#endif /* _IPXE_EFI_PATH_H */ diff --git a/src/include/ipxe/efi/efi_pxe.h b/src/include/ipxe/efi/efi_pxe.h new file mode 100644 index 00000000..b356f378 --- /dev/null +++ b/src/include/ipxe/efi/efi_pxe.h @@ -0,0 +1,17 @@ +#ifndef _IPXE_EFI_PXE_H +#define _IPXE_EFI_PXE_H + +/** @file + * + * EFI PXE base code protocol + */ + +#include +#include + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern int efi_pxe_install ( EFI_HANDLE handle, struct net_device *netdev ); +extern void efi_pxe_uninstall ( EFI_HANDLE handle ); + +#endif /* _IPXE_EFI_PXE_H */ diff --git a/src/include/ipxe/efi/efi_time.h b/src/include/ipxe/efi/efi_time.h new file mode 100644 index 00000000..099994b5 --- /dev/null +++ b/src/include/ipxe/efi/efi_time.h @@ -0,0 +1,20 @@ +#ifndef _IPXE_EFI_TIME_H 
+#define _IPXE_EFI_TIME_H + +/** @file + * + * EFI time source + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +#ifdef TIME_EFI +#define TIME_PREFIX_efi +#else +#define TIME_PREFIX_efi __efi_ +#endif + +#endif /* _IPXE_EFI_TIME_H */ diff --git a/src/include/ipxe/efi/efi_usb.h b/src/include/ipxe/efi/efi_usb.h new file mode 100644 index 00000000..06baff52 --- /dev/null +++ b/src/include/ipxe/efi/efi_usb.h @@ -0,0 +1,80 @@ +#ifndef _IPXE_EFI_USB_H +#define _IPXE_EFI_USB_H + +/** @file + * + * USB I/O protocol + * + */ + +#include +#include +#include +#include +#include + +/** An EFI USB device */ +struct efi_usb_device { + /** Name */ + const char *name; + /** The underlying USB function */ + struct usb_function *func; + /** Configuration descriptor */ + struct usb_configuration_descriptor *config; + /** Supported languages */ + uint16_t *lang; + /** Length of supported languages */ + size_t lang_len; + /** List of interfaces */ + struct list_head interfaces; +}; + +/** An EFI USB device interface */ +struct efi_usb_interface { + /** Name */ + char name[32]; + /** Containing USB device */ + struct efi_usb_device *usbdev; + /** List of interfaces */ + struct list_head list; + + /** Interface number */ + unsigned int interface; + /** Alternate setting */ + unsigned int alternate; + /** EFI handle */ + EFI_HANDLE handle; + /** USB I/O protocol */ + EFI_USB_IO_PROTOCOL usbio; + /** Device path */ + EFI_DEVICE_PATH_PROTOCOL *path; + + /** Opened endpoints */ + struct efi_usb_endpoint *endpoint[32]; +}; + +/** An EFI USB device endpoint */ +struct efi_usb_endpoint { + /** EFI USB device interface */ + struct efi_usb_interface *usbintf; + /** USB endpoint */ + struct usb_endpoint ep; + + /** Most recent synchronous completion status */ + int rc; + + /** Asynchronous timer event */ + EFI_EVENT event; + /** Asynchronous callback handler */ + EFI_ASYNC_USB_TRANSFER_CALLBACK callback; + /** Asynchronous callback context */ + void *context; +}; + +/** 
Asynchronous transfer fill level + * + * This is a policy decision. + */ +#define EFI_USB_ASYNC_FILL 2 + +#endif /* _IPXE_EFI_USB_H */ diff --git a/src/include/ipxe/efi/efi_utils.h b/src/include/ipxe/efi/efi_utils.h new file mode 100644 index 00000000..270d38dc --- /dev/null +++ b/src/include/ipxe/efi/efi_utils.h @@ -0,0 +1,22 @@ +#ifndef _IPXE_EFI_UTILS_H +#define _IPXE_EFI_UTILS_H + +/** @file + * + * EFI utilities + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +struct device; + +extern int efi_locate_device ( EFI_HANDLE device, EFI_GUID *protocol, + EFI_HANDLE *parent ); +extern int efi_child_add ( EFI_HANDLE parent, EFI_HANDLE child ); +extern void efi_child_del ( EFI_HANDLE parent, EFI_HANDLE child ); +extern void efi_device_info ( EFI_HANDLE device, const char *prefix, + struct device *dev ); + +#endif /* _IPXE_EFI_UTILS_H */ diff --git a/src/include/ipxe/efi/efi_watchdog.h b/src/include/ipxe/efi/efi_watchdog.h new file mode 100644 index 00000000..4a56b9a2 --- /dev/null +++ b/src/include/ipxe/efi/efi_watchdog.h @@ -0,0 +1,31 @@ +#ifndef _IPXE_EFI_WATCHDOG_H +#define _IPXE_EFI_WATCHDOG_H + +/** @file + * + * EFI watchdog holdoff timer + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern struct retry_timer efi_watchdog; + +/** + * Start EFI watchdog holdoff timer + * + */ +static inline void efi_watchdog_start ( void ) { + + start_timer_nodelay ( &efi_watchdog ); +} + +/** + * Stop EFI watchdog holdoff timer + * + */ +static inline void efi_watchdog_stop ( void ) { + + stop_timer ( &efi_watchdog ); +} + +#endif /* _IPXE_EFI_WATCHDOG_H */ diff --git a/src/include/ipxe/efi/efi_wrap.h b/src/include/ipxe/efi/efi_wrap.h new file mode 100644 index 00000000..6c7ccf2e --- /dev/null +++ b/src/include/ipxe/efi/efi_wrap.h @@ -0,0 +1,16 @@ +#ifndef _IPXE_EFI_WRAP_H +#define _IPXE_EFI_WRAP_H + +/** @file + * + * EFI driver interface + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern EFI_SYSTEM_TABLE * efi_wrap_systab ( void ); +extern 
void efi_wrap ( EFI_HANDLE handle ); + +#endif /* _IPXE_EFI_WRAP_H */ diff --git a/src/include/ipxe/eoib.h b/src/include/ipxe/eoib.h new file mode 100644 index 00000000..93f496c3 --- /dev/null +++ b/src/include/ipxe/eoib.h @@ -0,0 +1,103 @@ +#ifndef _IPXE_EOIB_H +#define _IPXE_EOIB_H + +/** @file + * + * Ethernet over Infiniband + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** An EoIB header */ +struct eoib_header { + /** Signature */ + uint16_t magic; + /** Reserved */ + uint16_t reserved; +} __attribute__ (( packed )); + +/** EoIB magic signature */ +#define EOIB_MAGIC 0x8919 + +/** An EoIB device */ +struct eoib_device { + /** Name */ + const char *name; + /** Network device */ + struct net_device *netdev; + /** Underlying Infiniband device */ + struct ib_device *ibdev; + /** List of EoIB devices */ + struct list_head list; + /** Broadcast address */ + struct ib_address_vector broadcast; + + /** Completion queue */ + struct ib_completion_queue *cq; + /** Queue pair */ + struct ib_queue_pair *qp; + /** Broadcast group membership */ + struct ib_mc_membership membership; + + /** Peer cache */ + struct list_head peers; + + /** Send duplicate packet to gateway (or NULL) + * + * @v eoib EoIB device + * @v original Original I/O buffer + */ + void ( * duplicate ) ( struct eoib_device *eoib, + struct io_buffer *original ); + /** Gateway (if any) */ + struct ib_address_vector gateway; + /** Multicast group additional component mask */ + unsigned int mask; +}; + +/** + * Check if EoIB device uses a gateway + * + * @v eoib EoIB device + * @v has_gw EoIB device uses a gateway + */ +static inline int eoib_has_gateway ( struct eoib_device *eoib ) { + + return ( eoib->duplicate != NULL ); +} + +/** + * Force creation of multicast group + * + * @v eoib EoIB device + */ +static inline void eoib_force_group_creation ( struct eoib_device *eoib ) { + + /* Some dubious EoIB implementations require each endpoint to + * 
force the creation of the multicast group. Yes, this makes
+	 * it impossible for the group parameters (e.g. SL) to ever be
+	 * modified without breaking backwards compatibility with every
+	 * existing driver.
+	 */
+	eoib->mask = ( IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_QKEY |
+		       IB_SA_MCMEMBER_REC_SL | IB_SA_MCMEMBER_REC_FLOW_LABEL |
+		       IB_SA_MCMEMBER_REC_TRAFFIC_CLASS );
+}
+
+extern int eoib_create ( struct ib_device *ibdev, const uint8_t *hw_addr,
+			 struct ib_address_vector *broadcast,
+			 const char *name );
+extern struct eoib_device * eoib_find ( struct ib_device *ibdev,
+					const uint8_t *hw_addr );
+extern void eoib_destroy ( struct eoib_device *eoib );
+extern void eoib_set_gateway ( struct eoib_device *eoib,
+			       struct ib_address_vector *av );
+
+#endif /* _IPXE_EOIB_H */ diff --git a/src/include/ipxe/fault.h b/src/include/ipxe/fault.h new file mode 100644 index 00000000..356296c3 --- /dev/null +++ b/src/include/ipxe/fault.h @@ -0,0 +1,53 @@ +#ifndef _IPXE_FAULT_H
+#define _IPXE_FAULT_H
+
+/** @file
+ *
+ * Fault injection
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#include 
+#include 
+
+extern int inject_fault_nonzero ( unsigned int rate );
+extern void inject_corruption_nonzero ( unsigned int rate, const void *data,
+					size_t len );
+
+/**
+ * Inject fault with a specified probability
+ *
+ * @v rate		Reciprocal of fault probability (zero for no faults)
+ * @ret rc		Return status code
+ */
+static inline __attribute__ (( always_inline )) int
+inject_fault ( unsigned int rate ) {
+
+	/* Force dead code elimination in non-fault-injecting builds */
+	if ( rate == 0 )
+		return 0;
+
+	return inject_fault_nonzero ( rate );
+}
+
+/**
+ * Corrupt data with a specified probability
+ *
+ * @v rate		Reciprocal of fault probability (zero for no faults)
+ * @v data		Data
+ * @v len		Length of data
+ * @ret rc		Return status code
+ */
+static inline __attribute__ (( always_inline )) void
+inject_corruption ( unsigned int rate, const void *data, size_t len ) {
+
+	/* 
Force dead code elimination in non-fault-injecting builds */ + if ( rate == 0 ) + return; + + return inject_corruption_nonzero ( rate, data, len ); +} + +#endif /* _IPXE_FAULT_H */ diff --git a/src/include/ipxe/fdt.h b/src/include/ipxe/fdt.h new file mode 100644 index 00000000..97efa100 --- /dev/null +++ b/src/include/ipxe/fdt.h @@ -0,0 +1,102 @@ +#ifndef _IPXE_FDT_H +#define _IPXE_FDT_H + +/** @file + * + * Flattened Device Tree + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +struct net_device; + +/** Device tree header */ +struct fdt_header { + /** Magic signature */ + uint32_t magic; + /** Total size of device tree */ + uint32_t totalsize; + /** Offset to structure block */ + uint32_t off_dt_struct; + /** Offset to strings block */ + uint32_t off_dt_strings; + /** Offset to memory reservation block */ + uint32_t off_mem_rsvmap; + /** Version of this data structure */ + uint32_t version; + /** Lowest version to which this structure is compatible */ + uint32_t last_comp_version; + /** Physical ID of the boot CPU */ + uint32_t boot_cpuid_phys; + /** Length of string block */ + uint32_t size_dt_strings; + /** Length of structure block */ + uint32_t size_dt_struct; +} __attribute__ (( packed )); + +/** Magic signature */ +#define FDT_MAGIC 0xd00dfeed + +/** Expected device tree version */ +#define FDT_VERSION 16 + +/** Device tree token */ +typedef uint32_t fdt_token_t; + +/** Begin node token */ +#define FDT_BEGIN_NODE 0x00000001 + +/** End node token */ +#define FDT_END_NODE 0x00000002 + +/** Property token */ +#define FDT_PROP 0x00000003 + +/** Property fragment */ +struct fdt_prop { + /** Data length */ + uint32_t len; + /** Name offset */ + uint32_t name_off; +} __attribute__ (( packed )); + +/** NOP token */ +#define FDT_NOP 0x00000004 + +/** End of structure block */ +#define FDT_END 0x00000009 + +/** Alignment of structure block */ +#define FDT_STRUCTURE_ALIGN ( sizeof ( fdt_token_t ) ) + +/** A device tree */ +struct fdt { + /** Tree data 
*/ + union { + /** Tree header */ + const struct fdt_header *hdr; + /** Raw data */ + const void *raw; + }; + /** Length of tree */ + size_t len; + /** Offset to structure block */ + unsigned int structure; + /** Length of structure block */ + size_t structure_len; + /** Offset to strings block */ + unsigned int strings; + /** Length of strings block */ + size_t strings_len; +}; + +extern int fdt_path ( const char *path, unsigned int *offset ); +extern int fdt_alias ( const char *name, unsigned int *offset ); +extern const char * fdt_string ( unsigned int offset, const char *name ); +extern int fdt_mac ( unsigned int offset, struct net_device *netdev ); +extern int register_fdt ( const struct fdt_header *hdr ); + +#endif /* _IPXE_FDT_H */ diff --git a/src/include/ipxe/hyperv.h b/src/include/ipxe/hyperv.h new file mode 100644 index 00000000..9b7e54a5 --- /dev/null +++ b/src/include/ipxe/hyperv.h @@ -0,0 +1,242 @@ +#ifndef _IPXE_HYPERV_H +#define _IPXE_HYPERV_H + +/** @file + * + * Hyper-V interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** Hyper-V interface identification */ +#define HV_INTERFACE_ID 0x31237648 /* "Hv#1" */ + +/** Guest OS identity for iPXE + * + * This field comprises: + * + * Bit 63 : set to 1 to indicate an open source OS + * Bits 62:56 : OS Type + * Bits 55:48 : OS ID + * Bits 47:16 : Version + * Bits 15:0 : Build number + * + * There appears to be no central registry for the "OS Type". The + * specification states that "Linux is 0x100", and the FreeBSD source + * states that "FreeBSD is 0x200". Both of these statements are + * actually referring to the combined "OS Type" and "OS ID" field. + * + * We choose to use 0x98ae: this is generated by setting bit 63 (to + * indicate an open source OS) and setting the OS Type+ID equal to the + * PnP vendor ID used in romprefix.S. No version information or build + * number is included. 
+ */ +#define HV_GUEST_OS_ID_IPXE ( ( 1ULL << 63 ) | ( 0x18aeULL << 48 ) ) + +/** Guest OS identity for Gen 2 UEFI firmware + * + * This does not conform to the documented structure for guest OS + * identities. + */ +#define HV_GUEST_OS_ID_UEFI ( 1ULL << 40 ) + +/** Enable hypercall page */ +#define HV_HYPERCALL_ENABLE 0x00000001UL + +/** Enable SynIC */ +#define HV_SCONTROL_ENABLE 0x00000001UL + +/** Enable SynIC event flags */ +#define HV_SIEFP_ENABLE 0x00000001UL + +/** Enable SynIC messages */ +#define HV_SIMP_ENABLE 0x00000001UL + +/** Perform implicit EOI upon synthetic interrupt delivery */ +#define HV_SINT_AUTO_EOI 0x00020000UL + +/** Mask synthetic interrupt */ +#define HV_SINT_MASKED 0x00010000UL + +/** Synthetic interrupt vector */ +#define HV_SINT_VECTOR(x) ( (x) << 0 ) + +/** Synthetic interrupt vector mask */ +#define HV_SINT_VECTOR_MASK HV_SINT_VECTOR ( 0xff ) + +/** Maximum synthetic interrupt number */ +#define HV_SINT_MAX 15 + +/** Post message */ +#define HV_POST_MESSAGE 0x005c + +/** A posted message + * + * This is the input parameter list for the HvPostMessage hypercall. + */ +struct hv_post_message { + /** Connection ID */ + uint32_t id; + /** Padding */ + uint32_t reserved; + /** Type */ + uint32_t type; + /** Length of message */ + uint32_t len; + /** Message */ + uint8_t data[240]; +} __attribute__ (( packed )); + +/** A received message + * + * This is the HV_MESSAGE structure from the Hypervisor Top-Level + * Functional Specification. The field order given in the + * documentation is incorrect. 
+ */ +struct hv_message { + /** Type */ + uint32_t type; + /** Length of message */ + uint8_t len; + /** Flags */ + uint8_t flags; + /** Padding */ + uint16_t reserved; + /** Origin */ + uint64_t origin; + /** Message */ + uint8_t data[240]; +} __attribute__ (( packed )); + +/** Signal event */ +#define HV_SIGNAL_EVENT 0x005d + +/** A signalled event */ +struct hv_signal_event { + /** Connection ID */ + uint32_t id; + /** Flag number */ + uint16_t flag; + /** Reserved */ + uint16_t reserved; +} __attribute__ (( packed )); + +/** A received event */ +struct hv_event { + /** Event flags */ + uint8_t flags[256]; +} __attribute__ (( packed )); + +/** A monitor trigger group + * + * This is the HV_MONITOR_TRIGGER_GROUP structure from the Hypervisor + * Top-Level Functional Specification. + */ +struct hv_monitor_trigger { + /** Pending events */ + uint32_t pending; + /** Armed events */ + uint32_t armed; +} __attribute__ (( packed )); + +/** A monitor parameter set + * + * This is the HV_MONITOR_PARAMETER structure from the Hypervisor + * Top-Level Functional Specification. + */ +struct hv_monitor_parameter { + /** Connection ID */ + uint32_t id; + /** Flag number */ + uint16_t flag; + /** Reserved */ + uint16_t reserved; +} __attribute__ (( packed )); + +/** A monitor page + * + * This is the HV_MONITOR_PAGE structure from the Hypervisor Top-Level + * Functional Specification. 
+ */ +struct hv_monitor { + /** Flags */ + uint32_t flags; + /** Reserved */ + uint8_t reserved_a[4]; + /** Trigger groups */ + struct hv_monitor_trigger trigger[4]; + /** Reserved */ + uint8_t reserved_b[536]; + /** Latencies */ + uint16_t latency[4][32]; + /** Reserved */ + uint8_t reserved_c[256]; + /** Parameters */ + struct hv_monitor_parameter param[4][32]; + /** Reserved */ + uint8_t reserved_d[1984]; +} __attribute__ (( packed )); + +/** A synthetic interrupt controller */ +struct hv_synic { + /** Message page */ + struct hv_message *message; + /** Event flag page */ + struct hv_event *event; +}; + +/** A message buffer */ +union hv_message_buffer { + /** Posted message */ + struct hv_post_message posted; + /** Received message */ + struct hv_message received; + /** Signalled event */ + struct hv_signal_event signalled; +}; + +/** A Hyper-V hypervisor */ +struct hv_hypervisor { + /** Hypercall page */ + void *hypercall; + /** Synthetic interrupt controller (SynIC) */ + struct hv_synic synic; + /** Message buffer */ + union hv_message_buffer *message; + /** Virtual machine bus */ + struct vmbus *vmbus; +}; + +#include <bits/hyperv.h> + +/** + * Calculate the number of pages covering an address range + * + * @v data Start of data + * @v len Length of data (must be non-zero) + * @ret pfn_count Number of pages covered + */ +static inline unsigned int hv_pfn_count ( physaddr_t data, size_t len ) { + unsigned int first_pfn = ( data / PAGE_SIZE ); + unsigned int last_pfn = ( ( data + len - 1 ) / PAGE_SIZE ); + + return ( last_pfn - first_pfn + 1 ); +} + +extern __attribute__ (( sentinel )) int +hv_alloc_pages ( struct hv_hypervisor *hv, ... ); +extern __attribute__ (( sentinel )) void +hv_free_pages ( struct hv_hypervisor *hv, ... 
); +extern void hv_enable_sint ( struct hv_hypervisor *hv, unsigned int sintx ); +extern void hv_disable_sint ( struct hv_hypervisor *hv, unsigned int sintx ); +extern int hv_post_message ( struct hv_hypervisor *hv, unsigned int id, + unsigned int type, const void *data, size_t len ); +extern int hv_wait_for_message ( struct hv_hypervisor *hv, unsigned int sintx ); +extern int hv_signal_event ( struct hv_hypervisor *hv, unsigned int id, + unsigned int flag ); + +#endif /* _IPXE_HYPERV_H */ diff --git a/src/include/ipxe/ib_service.h b/src/include/ipxe/ib_service.h new file mode 100644 index 00000000..88afe4e6 --- /dev/null +++ b/src/include/ipxe/ib_service.h @@ -0,0 +1,20 @@ +#ifndef _IPXE_IB_SERVICE_H +#define _IPXE_IB_SERVICE_H + +/** @file + * + * Infiniband service records + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +extern struct ib_mad_transaction * +ib_create_service_madx ( struct ib_device *ibdev, + struct ib_mad_interface *mi, const char *name, + struct ib_mad_transaction_operations *op ); + +#endif /* _IPXE_IB_SERVICE_H */ diff --git a/src/include/ipxe/iomap.h b/src/include/ipxe/iomap.h new file mode 100644 index 00000000..b8ded38e --- /dev/null +++ b/src/include/ipxe/iomap.h @@ -0,0 +1,78 @@ +#ifndef _IPXE_IOMAP_H +#define _IPXE_IOMAP_H + +/** @file + * + * iPXE I/O mapping API + * + * The I/O mapping API provides methods for mapping and unmapping I/O + * devices. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** + * Calculate static inline I/O mapping API function name + * + * @v _prefix Subsystem prefix + * @v _api_func API function + * @ret _subsys_func Subsystem API function + */ +#define IOMAP_INLINE( _subsys, _api_func ) \ + SINGLE_API_INLINE ( IOMAP_PREFIX_ ## _subsys, _api_func ) + +/** + * Provide an I/O mapping API implementation + * + * @v _prefix Subsystem prefix + * @v _api_func API function + * @v _func Implementing function + */ +#define PROVIDE_IOMAP( _subsys, _api_func, _func ) \ + PROVIDE_SINGLE_API ( IOMAP_PREFIX_ ## _subsys, _api_func, _func ) + +/** + * Provide a static inline I/O mapping API implementation + * + * @v _prefix Subsystem prefix + * @v _api_func API function + */ +#define PROVIDE_IOMAP_INLINE( _subsys, _api_func ) \ + PROVIDE_SINGLE_API_INLINE ( IOMAP_PREFIX_ ## _subsys, _api_func ) + +/* Include all architecture-independent I/O API headers */ +#include + +/* Include all architecture-dependent I/O API headers */ +#include + +/** + * Map bus address as an I/O address + * + * @v bus_addr Bus address + * @v len Length of region + * @ret io_addr I/O address + */ +void * ioremap ( unsigned long bus_addr, size_t len ); + +/** + * Unmap I/O address + * + * @v io_addr I/O address + */ +void iounmap ( volatile const void *io_addr ); + +/** + * Convert I/O address to bus address (for debug only) + * + * @v io_addr I/O address + * @ret bus_addr Bus address + */ +unsigned long io_to_bus ( volatile const void *io_addr ); + +#endif /* _IPXE_IOMAP_H */ diff --git a/src/include/ipxe/iomap_virt.h b/src/include/ipxe/iomap_virt.h new file mode 100644 index 00000000..4962b7c3 --- /dev/null +++ b/src/include/ipxe/iomap_virt.h @@ -0,0 +1,33 @@ +#ifndef _IPXE_IOMAP_VIRT_H +#define _IPXE_IOMAP_VIRT_H + +/** @file + * + * iPXE I/O mapping API using phys_to_virt() + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef IOMAP_VIRT +#define IOMAP_PREFIX_virt +#else 
+#define IOMAP_PREFIX_virt __virt_ +#endif + +static inline __always_inline void * +IOMAP_INLINE ( virt, ioremap ) ( unsigned long bus_addr, size_t len __unused ) { + return ( bus_addr ? phys_to_virt ( bus_addr ) : NULL ); +} + +static inline __always_inline void +IOMAP_INLINE ( virt, iounmap ) ( volatile const void *io_addr __unused ) { + /* Nothing to do */ +} + +static inline __always_inline unsigned long +IOMAP_INLINE ( virt, io_to_bus ) ( volatile const void *io_addr ) { + return virt_to_phys ( io_addr ); +} + +#endif /* _IPXE_IOMAP_VIRT_H */ diff --git a/src/include/ipxe/jumpscroll.h b/src/include/ipxe/jumpscroll.h new file mode 100644 index 00000000..7a5b111c --- /dev/null +++ b/src/include/ipxe/jumpscroll.h @@ -0,0 +1,50 @@ +#ifndef _IPXE_JUMPSCROLL_H +#define _IPXE_JUMPSCROLL_H + +/** @file + * + * Jump scrolling + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** A jump scroller */ +struct jump_scroller { + /** Maximum number of visible rows */ + unsigned int rows; + /** Total number of items */ + unsigned int count; + /** Currently selected item */ + unsigned int current; + /** First visible item */ + unsigned int first; +}; + +/** + * Check if jump scroller is currently on first page + * + * @v scroll Jump scroller + * @ret is_first Scroller is currently on first page + */ +static inline int jump_scroll_is_first ( struct jump_scroller *scroll ) { + + return ( scroll->first == 0 ); +} + +/** + * Check if jump scroller is currently on last page + * + * @v scroll Jump scroller + * @ret is_last Scroller is currently on last page + */ +static inline int jump_scroll_is_last ( struct jump_scroller *scroll ) { + + return ( ( scroll->first + scroll->rows ) >= scroll->count ); +} + +extern int jump_scroll_key ( struct jump_scroller *scroll, int key ); +extern int jump_scroll_move ( struct jump_scroller *scroll, int move ); +extern int jump_scroll ( struct jump_scroller *scroll ); + +#endif /* _IPXE_JUMPSCROLL_H */ diff --git a/src/include/ipxe/md4.h 
b/src/include/ipxe/md4.h new file mode 100644 index 00000000..8f172e62 --- /dev/null +++ b/src/include/ipxe/md4.h @@ -0,0 +1,73 @@ +#ifndef _IPXE_MD4_H +#define _IPXE_MD4_H + +/** @file + * + * MD4 algorithm + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** An MD4 digest */ +struct md4_digest { + /** Hash output */ + uint32_t h[4]; +}; + +/** An MD4 data block */ +union md4_block { + /** Raw bytes */ + uint8_t byte[64]; + /** Raw dwords */ + uint32_t dword[16]; + /** Final block structure */ + struct { + /** Padding */ + uint8_t pad[56]; + /** Length in bits */ + uint64_t len; + } final; +}; + +/** MD4 digest and data block + * + * The order of fields within this structure is designed to minimise + * code size. + */ +struct md4_digest_data { + /** Digest of data already processed */ + struct md4_digest digest; + /** Accumulated data */ + union md4_block data; +} __attribute__ (( packed )); + +/** MD4 digest and data block */ +union md4_digest_data_dwords { + /** Digest and data block */ + struct md4_digest_data dd; + /** Raw dwords */ + uint32_t dword[ sizeof ( struct md4_digest_data ) / + sizeof ( uint32_t ) ]; +}; + +/** An MD4 context */ +struct md4_context { + /** Amount of accumulated data */ + size_t len; + /** Digest and accumulated data */ + union md4_digest_data_dwords ddd; +} __attribute__ (( packed )); + +/** MD4 context size */ +#define MD4_CTX_SIZE sizeof ( struct md4_context ) + +/** MD4 digest size */ +#define MD4_DIGEST_SIZE sizeof ( struct md4_digest ) + +extern struct digest_algorithm md4_algorithm; + +#endif /* _IPXE_MD4_H */ diff --git a/src/include/ipxe/mii_bit.h b/src/include/ipxe/mii_bit.h new file mode 100644 index 00000000..0f797e91 --- /dev/null +++ b/src/include/ipxe/mii_bit.h @@ -0,0 +1,55 @@ +#ifndef _IPXE_MII_BIT_H +#define _IPXE_MII_BIT_H + +/** @file + * + * MII bit-bashing interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +#define MII_BIT_START 0xffffffff /**< Start 
*/ +#define MII_BIT_START_MASK 0x80000000 /**< Start mask */ + +#define MII_BIT_CMD_MASK 0x00000008 /**< Command mask */ +#define MII_BIT_CMD_READ 0x00000006 /**< Command read */ +#define MII_BIT_CMD_WRITE 0x00000005 /**< Command write */ +#define MII_BIT_CMD_RW 0x00000001 /**< Command read or write */ + +#define MII_BIT_PHY_MASK 0x00000010 /**< PHY mask */ + +#define MII_BIT_REG_MASK 0x00000010 /**< Register mask */ + +#define MII_BIT_SWITCH 0x00000002 /**< Switch */ +#define MII_BIT_SWITCH_MASK 0x00000002 /**< Switch mask */ + +#define MII_BIT_DATA_MASK 0x00008000 /**< Data mask */ + +/** A bit-bashing MII interface */ +struct mii_bit_basher { + /** MII interface */ + struct mii_interface mdio; + /** Bit-bashing interface */ + struct bit_basher basher; +}; + +/** Bit indices used for MII bit-bashing interface */ +enum { + /** MII clock */ + MII_BIT_MDC = 0, + /** MII data */ + MII_BIT_MDIO, + /** MII data direction */ + MII_BIT_DRIVE, +}; + +/** Delay between MDC transitions */ +#define MII_BIT_UDELAY 1 + +extern void init_mii_bit_basher ( struct mii_bit_basher *miibit ); + +#endif /* _IPXE_MII_BIT_H */ diff --git a/src/include/ipxe/netbios.h b/src/include/ipxe/netbios.h new file mode 100644 index 00000000..c1155255 --- /dev/null +++ b/src/include/ipxe/netbios.h @@ -0,0 +1,30 @@ +#ifndef _IPXE_NETBIOS_H +#define _IPXE_NETBIOS_H + +/** @file + * + * NetBIOS user names + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern const char * netbios_domain ( char **username ); + +/** + * Restore NetBIOS [domain\]username + * + * @v domain NetBIOS domain name + * @v username NetBIOS user name + * + * Restore the separator in a NetBIOS [domain\]username as split by + * netbios_domain(). 
+ */ +static inline void netbios_domain_undo ( const char *domain, char *username ) { + + /* Restore separator, if applicable */ + if ( domain ) + username[-1] = '\\'; +} + +#endif /* _IPXE_NETBIOS_H */ diff --git a/src/include/ipxe/ntlm.h b/src/include/ipxe/ntlm.h new file mode 100644 index 00000000..b0436c9a --- /dev/null +++ b/src/include/ipxe/ntlm.h @@ -0,0 +1,199 @@ +#ifndef _IPXE_NTLM_H +#define _IPXE_NTLM_H + +/** @file + * + * NT LAN Manager (NTLM) authentication + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** A message header */ +struct ntlm_header { + /** Magic signature */ + uint8_t magic[8]; + /** Message type */ + uint32_t type; +} __attribute__ (( packed )); + +/** Magic signature */ +#define NTLM_MAGIC { 'N', 'T', 'L', 'M', 'S', 'S', 'P', '\0' } + +/** Message types */ +enum ntlm_type { + /** Negotiate message type */ + NTLM_NEGOTIATE = 0x00000001UL, + /** Challenge message type */ + NTLM_CHALLENGE = 0x00000002UL, + /** Authenticate message */ + NTLM_AUTHENTICATE = 0x00000003UL, +}; + +/** Negotiation flags */ +enum ntlm_flags { + /** Negotiate key exchange */ + NTLM_NEGOTIATE_KEY_EXCH = 0x20000000UL, + /** Negotiate extended security */ + NTLM_NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000UL, + /** Negotiate always sign */ + NTLM_NEGOTIATE_ALWAYS_SIGN = 0x00008000UL, + /** Negotiate NTLM key */ + NTLM_NEGOTIATE_NTLM = 0x00000200UL, + /** Request target name and information */ + NTLM_REQUEST_TARGET = 0x00000004UL, + /** Negotiate Unicode character encoding */ + NTLM_NEGOTIATE_UNICODE = 0x00000001UL, +}; + +/** A version descriptor */ +struct ntlm_version { + /** Product major version */ + uint8_t major; + /** Product minor version */ + uint8_t minor; + /** Product build number */ + uint16_t build; + /** Reserved */ + uint8_t reserved[3]; + /** NTLMSSP revision */ + uint8_t revision; +} __attribute__ (( packed )); + +/** A nonce */ +struct ntlm_nonce { + /** Raw bytes */ + uint8_t raw[8]; +} __attribute__ (( 
packed )); + +/** A variable-length data descriptor */ +struct ntlm_data { + /** Length (in bytes) */ + uint16_t len; + /** Maximum length (in bytes) + * + * Should always be set equal to the length; this field is + * entirely superfluous. + */ + uint16_t max_len; + /** Offset from start of message header */ + uint32_t offset; +} __attribute__ (( packed )); + +/** A Negotiate message */ +struct ntlm_negotiate { + /** Message header */ + struct ntlm_header header; + /** Negotiation flags */ + uint32_t flags; + /** Domain name */ + struct ntlm_data domain; + /** Workstation name */ + struct ntlm_data workstation; +} __attribute__ (( packed )); + +/** A Challenge message */ +struct ntlm_challenge { + /** Message header */ + struct ntlm_header header; + /** Target name */ + struct ntlm_data name; + /** Negotiation flags */ + uint32_t flags; + /** Server nonce */ + struct ntlm_nonce nonce; + /** Reserved */ + uint8_t reserved[8]; + /** Target information */ + struct ntlm_data info; +} __attribute__ (( packed )); + +/** An Authenticate message */ +struct ntlm_authenticate { + /** Message header */ + struct ntlm_header header; + /** LAN Manager response */ + struct ntlm_data lm; + /** NT response */ + struct ntlm_data nt; + /** Domain name */ + struct ntlm_data domain; + /** User name */ + struct ntlm_data user; + /** Workstation name */ + struct ntlm_data workstation; + /** Session key */ + struct ntlm_data session; + /** Negotiation flags */ + uint32_t flags; +} __attribute__ (( packed )); + +/** A LAN Manager response */ +struct ntlm_lm_response { + /** HMAC-MD5 digest */ + uint8_t digest[MD5_DIGEST_SIZE]; + /** Client nonce */ + struct ntlm_nonce nonce; +} __attribute__ (( packed )); + +/** An NT response */ +struct ntlm_nt_response { + /** HMAC-MD5 digest */ + uint8_t digest[MD5_DIGEST_SIZE]; + /** Response version */ + uint8_t version; + /** Highest response version */ + uint8_t high; + /** Reserved */ + uint8_t reserved_a[6]; + /** Current time */ + uint64_t time; 
+ /** Client nonce */ + struct ntlm_nonce nonce; + /** Must be zero */ + uint32_t zero; +} __attribute__ (( packed )); + +/** NTLM version */ +#define NTLM_VERSION_NTLMV2 0x01 + +/** NTLM challenge information */ +struct ntlm_challenge_info { + /** Server nonce */ + struct ntlm_nonce *nonce; + /** Target information */ + void *target; + /** Length of target information */ + size_t len; +}; + +/** An NTLM verification key */ +struct ntlm_key { + /** Raw bytes */ + uint8_t raw[MD5_DIGEST_SIZE]; +}; + +extern const struct ntlm_negotiate ntlm_negotiate; +extern int ntlm_challenge ( struct ntlm_challenge *challenge, size_t len, + struct ntlm_challenge_info *info ); +extern void ntlm_key ( const char *domain, const char *username, + const char *password, struct ntlm_key *key ); +extern void ntlm_response ( struct ntlm_challenge_info *info, + struct ntlm_key *key, struct ntlm_nonce *nonce, + struct ntlm_lm_response *lm, + struct ntlm_nt_response *nt ); +extern size_t ntlm_authenticate ( struct ntlm_challenge_info *info, + const char *domain, const char *username, + const char *workstation, + struct ntlm_lm_response *lm, + struct ntlm_nt_response *nt, + struct ntlm_authenticate *auth ); +extern size_t ntlm_authenticate_len ( struct ntlm_challenge_info *info, + const char *domain, const char *username, + const char *workstation ); + +#endif /* _IPXE_NTLM_H */ diff --git a/src/include/ipxe/ntp.h b/src/include/ipxe/ntp.h new file mode 100644 index 00000000..f5b3d232 --- /dev/null +++ b/src/include/ipxe/ntp.h @@ -0,0 +1,109 @@ +#ifndef _IPXE_NTP_H +#define _IPXE_NTP_H + +/** @file + * + * Network Time Protocol + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** NTP port */ +#define NTP_PORT 123 + +/** An NTP short-format timestamp */ +struct ntp_short { + /** Seconds */ + uint16_t seconds; + /** Fraction of a second */ + uint16_t fraction; +} __attribute__ (( packed )); + +/** An NTP timestamp */ +struct ntp_timestamp { + /** Seconds */ + 
uint32_t seconds; + /** Fraction of a second */ + uint32_t fraction; +} __attribute__ (( packed )); + +/** An NTP reference identifier */ +union ntp_id { + /** Textual identifier */ + char text[4]; + /** IPv4 address */ + struct in_addr in; + /** Opaque integer */ + uint32_t opaque; +}; + +/** An NTP header */ +struct ntp_header { + /** Flags */ + uint8_t flags; + /** Stratum */ + uint8_t stratum; + /** Polling rate */ + int8_t poll; + /** Precision */ + int8_t precision; + /** Root delay */ + struct ntp_short delay; + /** Root dispersion */ + struct ntp_short dispersion; + /** Reference clock identifier */ + union ntp_id id; + /** Reference timestamp */ + struct ntp_timestamp reference; + /** Originate timestamp */ + struct ntp_timestamp originate; + /** Receive timestamp */ + struct ntp_timestamp receive; + /** Transmit timestamp */ + struct ntp_timestamp transmit; +} __attribute__ (( packed )); + +/** Leap second indicator: unknown */ +#define NTP_FL_LI_UNKNOWN 0xc0 + +/** NTP version: 1 */ +#define NTP_FL_VN_1 0x20 + +/** NTP mode: client */ +#define NTP_FL_MODE_CLIENT 0x03 + +/** NTP mode: server */ +#define NTP_FL_MODE_SERVER 0x04 + +/** NTP mode mask */ +#define NTP_FL_MODE_MASK 0x07 + +/** NTP timestamp for start of Unix epoch */ +#define NTP_EPOCH 2208988800UL + +/** NTP fraction of a second magic value + * + * This is a policy decision. + */ +#define NTP_FRACTION_MAGIC 0x69505845UL + +/** NTP minimum retransmission timeout + * + * This is a policy decision. + */ +#define NTP_MIN_TIMEOUT ( 1 * TICKS_PER_SEC ) + +/** NTP maximum retransmission timeout + * + * This is a policy decision. 
+ */ +#define NTP_MAX_TIMEOUT ( 10 * TICKS_PER_SEC ) + +extern int start_ntp ( struct interface *job, const char *hostname ); + +#endif /* _IPXE_NTP_H */ diff --git a/src/include/ipxe/null_acpi.h b/src/include/ipxe/null_acpi.h new file mode 100644 index 00000000..1e469e33 --- /dev/null +++ b/src/include/ipxe/null_acpi.h @@ -0,0 +1,23 @@ +#ifndef _IPXE_NULL_ACPI_H +#define _IPXE_NULL_ACPI_H + +/** @file + * + * Standard do-nothing ACPI interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#ifdef ACPI_NULL +#define ACPI_PREFIX_null +#else +#define ACPI_PREFIX_null __null_ +#endif + +static inline __always_inline userptr_t +ACPI_INLINE ( null, acpi_find_rsdt ) ( void ) { + return UNULL; +} + +#endif /* _IPXE_NULL_ACPI_H */ diff --git a/src/include/ipxe/pccrc.h b/src/include/ipxe/pccrc.h new file mode 100644 index 00000000..7f096342 --- /dev/null +++ b/src/include/ipxe/pccrc.h @@ -0,0 +1,447 @@ +#ifndef _IPXE_PCCRC_H +#define _IPXE_PCCRC_H + +/** @file + * + * Peer Content Caching and Retrieval: Content Identification [MS-PCCRC] + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/****************************************************************************** + * + * Content Information versioning + * + ****************************************************************************** + * + * Note that version 1 data structures are little-endian, but version + * 2 data structures are big-endian. + */ + +/** Content Information version number */ +union peerdist_info_version { + /** Raw version number + * + * Always little-endian, regardless of whether the + * encompassing structure is version 1 (little-endian) or + * version 2 (big-endian). 
+ */ + uint16_t raw; + /** Major:minor version number */ + struct { + /** Minor version number */ + uint8_t minor; + /** Major version number */ + uint8_t major; + } __attribute__ (( packed )); +} __attribute__ (( packed )); + +/** Content Information version 1 */ +#define PEERDIST_INFO_V1 0x0100 + +/** Content Information version 2 */ +#define PEERDIST_INFO_V2 0x0200 + +/****************************************************************************** + * + * Content Information version 1 + * + ****************************************************************************** + */ + +/** Content Information version 1 data structure header + * + * All fields are little-endian. + */ +struct peerdist_info_v1 { + /** Version number */ + union peerdist_info_version version; + /** Hash algorithm + * + * This is a @c PEERDIST_INFO_V1_HASH_XXX constant. + */ + uint32_t hash; + /** Length to skip in first segment + * + * Length at the start of the first segment which is not + * included within the content range. + */ + uint32_t first; + /** Length to read in last segment, or zero + * + * Length within the last segment which is included within the + * content range. A zero value indicates that the whole of + * the last segment is included within the content range. + */ + uint32_t last; + /** Number of segments within the content information */ + uint32_t segments; + /* Followed by a variable-length array of segment descriptions + * and a list of variable-length block descriptions: + * + * peerdist_info_v1_segment_t(digestsize) segment[segments]; + * peerdist_info_v1_block_t(digestsize, block0.blocks) block0; + * peerdist_info_v1_block_t(digestsize, block1.blocks) block1; + * ... 
+ * peerdist_info_v1_block_t(digestsize, blockN.blocks) blockN; + */ +} __attribute__ (( packed )); + +/** SHA-256 hash algorithm */ +#define PEERDIST_INFO_V1_HASH_SHA256 0x0000800cUL + +/** SHA-384 hash algorithm */ +#define PEERDIST_INFO_V1_HASH_SHA384 0x0000800dUL + +/** SHA-512 hash algorithm */ +#define PEERDIST_INFO_V1_HASH_SHA512 0x0000800eUL + +/** Content Information version 1 segment description header + * + * All fields are little-endian. + */ +struct peerdist_info_v1_segment { + /** Offset of this segment within the content */ + uint64_t offset; + /** Length of this segment + * + * Should always be 32MB, except for the last segment within + * the content. + */ + uint32_t len; + /** Block size for this segment + * + * Should always be 64kB. Note that the last block within the + * last segment may actually be less than 64kB. + */ + uint32_t blksize; + /* Followed by two variable-length hashes: + * + * uint8_t hash[digestsize]; + * uint8_t secret[digestsize]; + * + * where digestsize is the digest size for the selected hash + * algorithm. + * + * Note that the hash is taken over (the hashes of all blocks + * within) the entire segment, even if the blocks do not + * intersect the content range (and so do not appear within + * the block list). It therefore functions only as a segment + * identifier; it cannot be used to verify the content of the + * segment (since we may not download all blocks within the + * segment). + */ +} __attribute__ (( packed )); + +/** Content Information version 1 segment description + * + * @v digestsize Digest size + */ +#define peerdist_info_v1_segment_t( digestsize ) \ + struct { \ + struct peerdist_info_v1_segment segment; \ + uint8_t hash[digestsize]; \ + uint8_t secret[digestsize]; \ + } __attribute__ (( packed )) + +/** Content Information version 1 block description header + * + * All fields are little-endian. 
+ */ +struct peerdist_info_v1_block { + /** Number of blocks within the block description + * + * This is the number of blocks within the segment which + * overlap the content range. It may therefore be less than + * the number of blocks within the segment. + */ + uint32_t blocks; + /* Followed by an array of variable-length hashes: + * + * uint8_t hash[blocks][digestsize]; + * + * where digestsize is the digest size for the selected hash + * algorithm. + */ + } __attribute__ (( packed )); + +/** Content Information version 1 block description + * + * @v digestsize Digest size + * @v blocks Number of blocks + */ +#define peerdist_info_v1_block_t( digestsize, blocks ) \ + struct { \ + struct peerdist_info_v1_block block; \ + uint8_t hash[blocks][digestsize]; \ + } __attribute__ (( packed )) + +/****************************************************************************** + * + * Content Information version 2 + * + ****************************************************************************** + */ + +/** Content Information version 2 data structure header + * + * All fields are big-endian. + */ +struct peerdist_info_v2 { + /** Version number */ + union peerdist_info_version version; + /** Hash algorithm + * + * This is a @c PEERDIST_INFO_V2_HASH_XXX constant. + */ + uint8_t hash; + /** Offset of the first segment within the content */ + uint64_t offset; + /** Index of the first segment within the content */ + uint64_t index; + /** Length to skip in first segment + * + * Length at the start of the first segment which is not + * included within the content range. + */ + uint32_t first; + /** Length of content range, or zero + * + * Length of the content range. A zero indicates that + * everything up to the end of the last segment is included in + * the content range. 
+ */ + uint64_t len; + /* Followed by a list of chunk descriptions */ +} __attribute__ (( packed )); + +/** SHA-512 hash algorithm with output truncated to first 256 bits */ +#define PEERDIST_INFO_V2_HASH_SHA512_TRUNC 0x04 + +/** Content Information version 2 chunk description header + * + * All fields are big-endian. + */ +struct peerdist_info_v2_chunk { + /** Chunk type */ + uint8_t type; + /** Chunk data length */ + uint32_t len; + /* Followed by an array of segment descriptions: + * + * peerdist_info_v2_segment_t(digestsize) segment[segments] + * + * where digestsize is the digest size for the selected hash + * algorithm, and segments is equal to @c len divided by the + * size of each segment array entry. + */ +} __attribute__ (( packed )); + +/** Content Information version 2 chunk description + * + * @v digestsize Digest size + */ +#define peerdist_info_v2_chunk_t( digestsize ) \ + struct { \ + struct peerdist_info_v2_chunk chunk; \ + peerdist_info_v2_segment_t ( digestsize ) segment[0]; \ + } __attribute__ (( packed )) + +/** Chunk type */ +#define PEERDIST_INFO_V2_CHUNK_TYPE 0x00 + +/** Content Information version 2 segment description header + * + * All fields are big-endian. + */ +struct peerdist_info_v2_segment { + /** Segment length */ + uint32_t len; + /* Followed by two variable-length hashes: + * + * uint8_t hash[digestsize]; + * uint8_t secret[digestsize]; + * + * where digestsize is the digest size for the selected hash + * algorithm. 
+ */ +} __attribute__ (( packed )); + +/** Content Information version 2 segment description + * + * @v digestsize Digest size + */ +#define peerdist_info_v2_segment_t( digestsize ) \ + struct { \ + struct peerdist_info_v2_segment segment; \ + uint8_t hash[digestsize]; \ + uint8_t secret[digestsize]; \ + } __attribute__ (( packed )) + +/****************************************************************************** + * + * Content Information + * + ****************************************************************************** + */ + +/** Maximum digest size for any supported algorithm + * + * The largest digest size that we support is for SHA-512 at 64 bytes + */ +#define PEERDIST_DIGEST_MAX_SIZE 64 + +/** Raw content information */ +struct peerdist_raw { + /** Data buffer */ + userptr_t data; + /** Length of data buffer */ + size_t len; +}; + +/** A content range */ +struct peerdist_range { + /** Start offset */ + size_t start; + /** End offset */ + size_t end; +}; + +/** Content information */ +struct peerdist_info { + /** Raw content information */ + struct peerdist_raw raw; + + /** Content information operations */ + struct peerdist_info_operations *op; + /** Digest algorithm */ + struct digest_algorithm *digest; + /** Digest size + * + * Note that this may be shorter than the digest size of the + * digest algorithm. The truncation does not always take + * place as soon as a digest is calculated. For example, + * version 2 content information uses SHA-512 with a truncated + * digest size of 32 (256 bits), but the segment identifier + * ("HoHoDk") is calculated by using HMAC with the full + * SHA-512 digest and then truncating the HMAC output, rather + * than by simply using HMAC with the truncated SHA-512 + * digest. This is, of course, totally undocumented. 
+ */ + size_t digestsize; + /** Content range */ + struct peerdist_range range; + /** Trimmed content range */ + struct peerdist_range trim; + /** Number of segments within the content information */ + unsigned int segments; +}; + +/** A content information segment */ +struct peerdist_info_segment { + /** Content information */ + const struct peerdist_info *info; + /** Segment index */ + unsigned int index; + + /** Content range + * + * Note that this range may exceed the overall content range. + */ + struct peerdist_range range; + /** Number of blocks within this segment */ + unsigned int blocks; + /** Block size */ + size_t blksize; + /** Segment hash of data + * + * This is MS-PCCRC's "HoD". + */ + uint8_t hash[PEERDIST_DIGEST_MAX_SIZE]; + /** Segment secret + * + * This is MS-PCCRC's "Ke = Kp". + */ + uint8_t secret[PEERDIST_DIGEST_MAX_SIZE]; + /** Segment identifier + * + * This is MS-PCCRC's "HoHoDk". + */ + uint8_t id[PEERDIST_DIGEST_MAX_SIZE]; +}; + +/** Magic string constant used to calculate segment identifier + * + * Note that the MS-PCCRC specification states that this constant is + * + * "the null-terminated ASCII string constant "MS_P2P_CACHING"; + * string literals are all ASCII strings with NULL terminators + * unless otherwise noted." + * + * The specification lies. This constant is a UTF-16LE string, not an + * ASCII string. The terminating wNUL *is* included within the + * constant. + */ +#define PEERDIST_SEGMENT_ID_MAGIC L"MS_P2P_CACHING" + +/** A content information block */ +struct peerdist_info_block { + /** Content information segment */ + const struct peerdist_info_segment *segment; + /** Block index */ + unsigned int index; + + /** Content range + * + * Note that this range may exceed the overall content range. 
+ */ + struct peerdist_range range; + /** Trimmed content range */ + struct peerdist_range trim; + /** Block hash */ + uint8_t hash[PEERDIST_DIGEST_MAX_SIZE]; +}; + +/** Content information operations */ +struct peerdist_info_operations { + /** + * Populate content information + * + * @v info Content information to fill in + * @ret rc Return status code + */ + int ( * info ) ( struct peerdist_info *info ); + /** + * Populate content information segment + * + * @v segment Content information segment to fill in + * @ret rc Return status code + */ + int ( * segment ) ( struct peerdist_info_segment *segment ); + /** + * Populate content information block + * + * @v block Content information block to fill in + * @ret rc Return status code + */ + int ( * block ) ( struct peerdist_info_block *block ); +}; + +extern struct digest_algorithm sha512_trunc_algorithm; + +extern int peerdist_info ( userptr_t data, size_t len, + struct peerdist_info *info ); +extern int peerdist_info_segment ( const struct peerdist_info *info, + struct peerdist_info_segment *segment, + unsigned int index ); +extern int peerdist_info_block ( const struct peerdist_info_segment *segment, + struct peerdist_info_block *block, + unsigned int index ); + +#endif /* _IPXE_PCCRC_H */ diff --git a/src/include/ipxe/pccrd.h b/src/include/ipxe/pccrd.h new file mode 100644 index 00000000..3daa92f2 --- /dev/null +++ b/src/include/ipxe/pccrd.h @@ -0,0 +1,47 @@ +#ifndef _IPXE_PCCRD_H +#define _IPXE_PCCRD_H + +/** @file + * + * Peer Content Caching and Retrieval: Discovery Protocol [MS-PCCRD] + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** PeerDist discovery port */ +#define PEERDIST_DISCOVERY_PORT 3702 + +/** PeerDist discovery IPv4 address (239.255.255.250) */ +#define PEERDIST_DISCOVERY_IPV4 \ + ( ( 239 << 24 ) | ( 255 << 16 ) | ( 255 << 8 ) | ( 250 << 0 ) ) + +/** PeerDist discovery IPv6 address (ff02::c) */ +#define PEERDIST_DISCOVERY_IPV6 \ + { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xc 
} + +/** A PeerDist discovery reply block count */ +struct peerdist_discovery_block_count { + /** Count (as an eight-digit hex value) */ + char hex[8]; +} __attribute__ (( packed )); + +/** A PeerDist discovery reply */ +struct peerdist_discovery_reply { + /** List of segment ID strings + * + * The list is terminated with a zero-length string. + */ + char *ids; + /** List of peer locations + * + * The list is terminated with a zero-length string. + */ + char *locations; +}; + +extern char * peerdist_discovery_request ( const char *uuid, const char *id ); +extern int peerdist_discovery_reply ( char *data, size_t len, + struct peerdist_discovery_reply *reply ); + +#endif /* _IPXE_PCCRD_H */ diff --git a/src/include/ipxe/pccrr.h b/src/include/ipxe/pccrr.h new file mode 100644 index 00000000..1ea86c40 --- /dev/null +++ b/src/include/ipxe/pccrr.h @@ -0,0 +1,376 @@ +#ifndef _IPXE_PCCRR_H +#define _IPXE_PCCRR_H + +/** @file + * + * Peer Content Caching and Retrieval: Retrieval Protocol [MS-PCCRR] + * + * All fields are in network byte order. 
+ * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** Magic retrieval URI path */ +#define PEERDIST_MAGIC_PATH "/116B50EB-ECE2-41ac-8429-9F9E963361B7/" + +/** Retrieval protocol version */ +union peerdist_msg_version { + /** Raw version number */ + uint32_t raw; + /** Major:minor version number */ + struct { + /** Minor version number */ + uint16_t minor; + /** Major version number */ + uint16_t major; + } __attribute__ (( packed )); +} __attribute__ (( packed )); + +/** Retrieval protocol version 1.0 */ +#define PEERDIST_MSG_VERSION_1_0 0x00000001UL + +/** Retrieval protocol version 2.0 */ +#define PEERDIST_MSG_VERSION_2_0 0x00000002UL + +/** Retrieval protocol supported versions */ +struct peerdist_msg_versions { + /** Minimum supported protocol version */ + union peerdist_msg_version min; + /** Maximum supported protocol version */ + union peerdist_msg_version max; +} __attribute__ (( packed )); + +/** Retrieval protocol block range */ +struct peerdist_msg_range { + /** First block in range */ + uint32_t first; + /** Number of blocks in range */ + uint32_t count; +} __attribute__ (( packed )); + +/** Retrieval protocol segment ID header */ +struct peerdist_msg_segment { + /** Digest size (i.e. 
length of segment ID) */ + uint32_t digestsize; + /* Followed by a single variable-length ID and padding: + * + * uint8_t id[digestsize]; + * uint8_t pad[ (-digestsize) & 0x3 ]; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol segment ID + * + * @v digestsize Digest size + */ +#define peerdist_msg_segment_t( digestsize ) \ + struct { \ + struct peerdist_msg_segment segment; \ + uint8_t id[digestsize]; \ + uint8_t pad[ ( -(digestsize) ) & 0x3 ]; \ + } __attribute__ (( packed )) + +/** Retrieval protocol block range list header */ +struct peerdist_msg_ranges { + /** Number of ranges */ + uint32_t count; + /* Followed by an array of block ranges: + * + * struct peerdist_msg_range range[count]; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol block range list + * + * @v count Number of ranges + */ +#define peerdist_msg_ranges_t( count ) \ + struct { \ + struct peerdist_msg_ranges ranges; \ + struct peerdist_msg_range range[count]; \ + } __attribute__ (( packed )) + +/** Retrieval protocol data block header */ +struct peerdist_msg_block { + /** Length of data block */ + uint32_t len; + /* Followed by the (encrypted) data block: + * + * uint8_t data[len]; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol data block */ +#define peerdist_msg_block_t( len ) \ + struct { \ + struct peerdist_msg_block block; \ + uint8_t data[len]; \ + } __attribute__ (( packed )) + +/** Retrieval protocol initialisation vector header */ +struct peerdist_msg_iv { + /** Cipher block size */ + uint32_t blksize; + /* Followed by the initialisation vector: + * + * uint8_t data[blksize]; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol initialisation vector */ +#define peerdist_msg_iv_t( blksize ) \ + struct { \ + struct peerdist_msg_iv iv; \ + uint8_t data[blksize]; \ + } __attribute__ (( packed )) + +/** Retrieval protocol useless VRF data header */ +struct peerdist_msg_useless_vrf { + /** Length of useless VRF data */ + uint32_t len; + /* 
Followed by a variable-length useless VRF data block and + * padding: + * + * uint8_t data[len]; + * uint8_t pad[ (-len) & 0x3 ]; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol useless VRF data */ +#define peerdist_msg_useless_vrf_t( vrf_len ) \ + struct { \ + struct peerdist_msg_useless_vrf vrf; \ + uint8_t data[vrf_len]; \ + uint8_t pad[ ( -(vrf_len) ) & 0x3 ]; \ + } __attribute__ (( packed )) + +/** Retrieval protocol message header */ +struct peerdist_msg_header { + /** Protocol version + * + * This is the protocol version in which the message type was + * first defined. + */ + union peerdist_msg_version version; + /** Message type */ + uint32_t type; + /** Message size (including this header) */ + uint32_t len; + /** Cryptographic algorithm ID */ + uint32_t algorithm; +} __attribute__ (( packed )); + +/** Retrieval protocol cryptographic algorithm IDs */ +enum peerdist_msg_algorithm { + /** No encryption */ + PEERDIST_MSG_PLAINTEXT = 0x00000000UL, + /** AES-128 in CBC mode */ + PEERDIST_MSG_AES_128_CBC = 0x00000001UL, + /** AES-192 in CBC mode */ + PEERDIST_MSG_AES_192_CBC = 0x00000002UL, + /** AES-256 in CBC mode */ + PEERDIST_MSG_AES_256_CBC = 0x00000003UL, +}; + +/** Retrieval protocol transport response header */ +struct peerdist_msg_transport_header { + /** Length (excluding this header) + * + * This seems to be identical in both purpose and value to the + * length found within the message header, and therefore + * serves no useful purpose. 
+ */ + uint32_t len; +} __attribute__ (( packed )); + +/** Retrieval protocol negotiation request */ +struct peerdist_msg_nego_req { + /** Message header */ + struct peerdist_msg_header hdr; + /** Supported versions */ + struct peerdist_msg_versions versions; +} __attribute__ (( packed )); + +/** Retrieval protocol negotiation request version */ +#define PEERDIST_MSG_NEGO_REQ_VERSION PEERDIST_MSG_VERSION_1_0 + +/** Retrieval protocol negotiation request type */ +#define PEERDIST_MSG_NEGO_REQ_TYPE 0x00000000UL + +/** Retrieval protocol negotiation response */ +struct peerdist_msg_nego_resp { + /** Message header */ + struct peerdist_msg_header hdr; + /** Supported versions */ + struct peerdist_msg_versions versions; +} __attribute__ (( packed )); + +/** Retrieval protocol negotiation response version */ +#define PEERDIST_MSG_NEGO_RESP_VERSION PEERDIST_MSG_VERSION_1_0 + +/** Retrieval protocol negotiation response type */ +#define PEERDIST_MSG_NEGO_RESP_TYPE 0x00000001UL + +/** Retrieval protocol block list request header */ +struct peerdist_msg_getblklist { + /** Message header */ + struct peerdist_msg_header hdr; + /* Followed by a segment ID and a block range list: + * + * peerdist_msg_segment_t(digestsize) segment; + * peerdist_msg_ranges_t(count) ranges; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol block list request + * + * @v digestsize Digest size + * @v count Block range count + */ +#define peerdist_msg_getblklist_t( digestsize, count ) \ + struct { \ + struct peerdist_msg_getblklist getblklist; \ + peerdist_msg_segment_t ( digestsize ) segment; \ + peerdist_msg_ranges_t ( count ) ranges; \ + } __attribute__ (( packed )) + +/** Retrieval protocol block list request version */ +#define PEERDIST_MSG_GETBLKLIST_VERSION PEERDIST_MSG_VERSION_1_0 + +/** Retrieval protocol block list request type */ +#define PEERDIST_MSG_GETBLKLIST_TYPE 0x00000002UL + +/** Retrieval protocol block fetch request header */ +struct peerdist_msg_getblks { + /** Message 
header */ + struct peerdist_msg_header hdr; + /* Followed by a segment ID, a block range list, and a useless + * VRF block: + * + * peerdist_msg_segment_t(digestsize) segment; + * peerdist_msg_ranges_t(count) ranges; + * peerdist_msg_vrf_t(vrf_len) vrf; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol block fetch request + * + * @v digestsize Digest size + * @v count Block range count + * @v vrf_len Length of uselessness + */ +#define peerdist_msg_getblks_t( digestsize, count, vrf_len ) \ + struct { \ + struct peerdist_msg_getblks getblks; \ + peerdist_msg_segment_t ( digestsize ) segment; \ + peerdist_msg_ranges_t ( count ) ranges; \ + peerdist_msg_useless_vrf_t ( vrf_len ); \ + } __attribute__ (( packed )) + +/** Retrieval protocol block fetch request version */ +#define PEERDIST_MSG_GETBLKS_VERSION PEERDIST_MSG_VERSION_1_0 + +/** Retrieval protocol block fetch request type */ +#define PEERDIST_MSG_GETBLKS_TYPE 0x00000003UL + +/** Retrieval protocol block list response header */ +struct peerdist_msg_blklist { + /** Message header */ + struct peerdist_msg_header hdr; + /* Followed by a segment ID, a block range list, and a next + * block index: + * + * peerdist_msg_segment_t(digestsize) segment; + * peerdist_msg_ranges_t(count) ranges; + * uint32_t next; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol block list response + * + * @v digestsize Digest size + * @v count Block range count + */ +#define peerdist_msg_blklist_t( digestsize, count ) \ + struct { \ + struct peerdist_msg_blklist blklist; \ + peerdist_msg_segment_t ( digestsize ) segment; \ + peerdist_msg_ranges_t ( count ) ranges; \ + uint32_t next; \ + } __attribute__ (( packed )) + +/** Retrieval protocol block list response version */ +#define PEERDIST_MSG_BLKLIST_VERSION PEERDIST_MSG_VERSION_1_0 + +/** Retrieval protocol block list response type */ +#define PEERDIST_MSG_BLKLIST_TYPE 0x00000004UL + +/** Retrieval protocol block fetch response header */ +struct peerdist_msg_blk 
{ + /** Message header */ + struct peerdist_msg_header hdr; + /* Followed by a segment ID, a block index, a next block + * index, a data block, a useless VRF block, and an + * initialisation vector: + * + * peerdist_msg_segment_t(digestsize) segment; + * uint32_t index; + * uint32_t next; + * peerdist_msg_block_t(len) data; + * peerdist_msg_useless_vrf_t(vrf_len) vrf; + * peerdist_msg_iv_t(blksize) iv; + */ +} __attribute__ (( packed )); + +/** Retrieval protocol block fetch response + * + * @v digestsize Digest size + * @v len Data block length + * @v vrf_len Length of uselessness + * @v blksize Cipher block size + */ +#define peerdist_msg_blk_t( digestsize, len, vrf_len, blksize ) \ + struct { \ + struct peerdist_msg_blk blk; \ + peerdist_msg_segment_t ( digestsize ) segment; \ + uint32_t index; \ + uint32_t next; \ + peerdist_msg_block_t ( len ) block; \ + peerdist_msg_useless_vrf_t ( vrf_len ) vrf; \ + peerdist_msg_iv_t ( blksize ) iv; \ + } __attribute__ (( packed )) + +/** Retrieval protocol block fetch response version */ +#define PEERDIST_MSG_BLK_VERSION PEERDIST_MSG_VERSION_1_0 + +/** Retrieval protocol block fetch response type */ +#define PEERDIST_MSG_BLK_TYPE 0x00000005UL + +/** + * Parse retrieval protocol block fetch response + * + * @v raw Raw data + * @v raw_len Length of raw data + * @v digestsize Digest size + * @v blksize Cipher block size + * @v blk Structure to fill in + * @ret rc Return status code + */ +#define peerdist_msg_blk( raw, raw_len, digestsize, blksize, blk ) ( { \ + assert ( sizeof ( (blk)->segment.id ) == (digestsize) ); \ + assert ( sizeof ( (blk)->block.data ) == 0 ); \ + assert ( sizeof ( (blk)->vrf.data ) == 0 ); \ + assert ( sizeof ( (blk)->iv.data ) == blksize ); \ + peerdist_msg_blk_untyped ( (raw), (raw_len), (digestsize), \ + (blksize), blk ); \ + } ) + +extern int peerdist_msg_blk_untyped ( userptr_t raw, size_t raw_len, + size_t digestsize, size_t blksize, + void *out ); + +#endif /* _IPXE_PCCRR_H */ diff --git 
a/src/include/ipxe/pciea.h b/src/include/ipxe/pciea.h new file mode 100644 index 00000000..941c94ed --- /dev/null +++ b/src/include/ipxe/pciea.h @@ -0,0 +1,70 @@ +#ifndef _IPXE_PCIEA_H +#define _IPXE_PCIEA_H + +/** @file + * + * PCI Enhanced Allocation + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** Number of entries */ +#define PCIEA_ENTRIES 2 +#define PCIEA_ENTRIES_MASK 0x3f + +/** First entry */ +#define PCIEA_FIRST 4 + +/** Entry descriptor */ +#define PCIEA_DESC 0 + +/** Entry size */ +#define PCIEA_DESC_SIZE(desc) ( ( (desc) >> 0 ) & 0x7 ) + +/** BAR equivalent indicator */ +#define PCIEA_DESC_BEI(desc) ( ( (desc) >> 4 ) & 0xf ) + +/** BAR equivalent indicators */ +enum pciea_bei { + PCIEA_BEI_BAR_0 = 0, /**< Standard BAR 0 */ + PCIEA_BEI_BAR_1 = 1, /**< Standard BAR 1 */ + PCIEA_BEI_BAR_2 = 2, /**< Standard BAR 2 */ + PCIEA_BEI_BAR_3 = 3, /**< Standard BAR 3 */ + PCIEA_BEI_BAR_4 = 4, /**< Standard BAR 4 */ + PCIEA_BEI_BAR_5 = 5, /**< Standard BAR 5 */ + PCIEA_BEI_ROM = 8, /**< Expansion ROM BAR */ + PCIEA_BEI_VF_BAR_0 = 9, /**< Virtual function BAR 0 */ + PCIEA_BEI_VF_BAR_1 = 10, /**< Virtual function BAR 1 */ + PCIEA_BEI_VF_BAR_2 = 11, /**< Virtual function BAR 2 */ + PCIEA_BEI_VF_BAR_3 = 12, /**< Virtual function BAR 3 */ + PCIEA_BEI_VF_BAR_4 = 13, /**< Virtual function BAR 4 */ + PCIEA_BEI_VF_BAR_5 = 14, /**< Virtual function BAR 5 */ +}; + +/** Entry is enabled */ +#define PCIEA_DESC_ENABLED 0x80000000UL + +/** Base address low dword */ +#define PCIEA_LOW_BASE 4 + +/** Limit low dword */ +#define PCIEA_LOW_LIMIT 8 + +/** BAR is 64-bit */ +#define PCIEA_LOW_ATTR_64BIT 0x00000002UL + +/** Low dword attribute bit mask */ +#define PCIEA_LOW_ATTR_MASK 0x00000003UL + +/** Offset to high dwords */ +#define PCIEA_LOW_HIGH 8 + +extern unsigned long pciea_bar_start ( struct pci_device *pci, + unsigned int bei ); +extern unsigned long pciea_bar_size ( struct pci_device *pci, + unsigned int bei ); + +#endif /* _IPXE_PCIEA_H */ diff --git 
a/src/include/ipxe/pcimsix.h b/src/include/ipxe/pcimsix.h new file mode 100644 index 00000000..aa2aaf01 --- /dev/null +++ b/src/include/ipxe/pcimsix.h @@ -0,0 +1,77 @@ +#ifndef _IPXE_PCIMSIX_H +#define _IPXE_PCIMSIX_H + +/** @file + * + * PCI MSI-X interrupts + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** MSI-X BAR mapped length */ +#define PCI_MSIX_LEN 0x1000 + +/** MSI-X vector offset */ +#define PCI_MSIX_VECTOR(n) ( (n) * 0x10 ) + +/** MSI-X vector address low 32 bits */ +#define PCI_MSIX_ADDRESS_LO 0x0 + +/** MSI-X vector address high 32 bits */ +#define PCI_MSIX_ADDRESS_HI 0x4 + +/** MSI-X vector data */ +#define PCI_MSIX_DATA 0x8 + +/** MSI-X vector control */ +#define PCI_MSIX_CONTROL 0xc +#define PCI_MSIX_CONTROL_MASK 0x00000001 /**< Vector is masked */ + +/** PCI MSI-X capability */ +struct pci_msix { + /** Capability offset */ + unsigned int cap; + /** Number of vectors */ + unsigned int count; + /** MSI-X table */ + void *table; + /** Pending bit array */ + void *pba; +}; + +extern int pci_msix_enable ( struct pci_device *pci, struct pci_msix *msix ); +extern void pci_msix_disable ( struct pci_device *pci, struct pci_msix *msix ); +extern void pci_msix_map ( struct pci_msix *msix, unsigned int vector, + physaddr_t address, uint32_t data ); +extern void pci_msix_control ( struct pci_msix *msix, unsigned int vector, + uint32_t mask ); +extern void pci_msix_dump ( struct pci_msix *msix, unsigned int vector ); + +/** + * Mask MSI-X interrupt vector + * + * @v msix MSI-X capability + * @v vector MSI-X vector + */ +static inline __attribute__ (( always_inline )) void +pci_msix_mask ( struct pci_msix *msix, unsigned int vector ) { + + pci_msix_control ( msix, vector, PCI_MSIX_CONTROL_MASK ); +} + +/** + * Unmask MSI-X interrupt vector + * + * @v msix MSI-X capability + * @v vector MSI-X vector + */ +static inline __attribute__ (( always_inline )) void +pci_msix_unmask ( struct pci_msix *msix, unsigned int vector ) { + + pci_msix_control 
( msix, vector, 0 ); +} + +#endif /* _IPXE_PCIMSIX_H */ diff --git a/src/include/ipxe/peerblk.h b/src/include/ipxe/peerblk.h new file mode 100644 index 00000000..f16f207b --- /dev/null +++ b/src/include/ipxe/peerblk.h @@ -0,0 +1,168 @@ +#ifndef _IPXE_PEERBLK_H +#define _IPXE_PEERBLK_H + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol block downloads + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** A PeerDist retrieval protocol decryption buffer descriptor */ +struct peerdist_block_decrypt { + /** Data transfer buffer */ + struct xfer_buffer *xferbuf; + /** Offset within data transfer buffer */ + size_t offset; + /** Length to use from data transfer buffer */ + size_t len; +}; + +/** PeerDist retrieval protocol decryption data transfer buffer indices */ +enum peerdist_block_decrypt_index { + /** Data before the trimmed content */ + PEERBLK_BEFORE = 0, + /** Data within the trimmed content */ + PEERBLK_DURING, + /** Data after the trimmed content */ + PEERBLK_AFTER, + /** Number of decryption buffers */ + PEERBLK_NUM_BUFFERS +}; + +/** A PeerDist block download */ +struct peerdist_block { + /** Reference count */ + struct refcnt refcnt; + /** Data transfer interface */ + struct interface xfer; + /** Raw data interface */ + struct interface raw; + /** Retrieval protocol interface */ + struct interface retrieval; + + /** Original URI */ + struct uri *uri; + /** Content range of this block */ + struct peerdist_range range; + /** Trimmed range of this block */ + struct peerdist_range trim; + /** Offset of first byte in trimmed range within overall download */ + size_t offset; + + /** Digest algorithm */ + struct digest_algorithm *digest; + /** Digest size + * + * Note that this may be shorter than the digest size of the + * digest algorithm. 
+ */ + size_t digestsize; + /** Digest context (statically allocated at instantiation time) */ + void *digestctx; + + /** Cipher algorithm */ + struct cipher_algorithm *cipher; + /** Cipher context (dynamically allocated as needed) */ + void *cipherctx; + + /** Segment index */ + unsigned int segment; + /** Segment identifier */ + uint8_t id[PEERDIST_DIGEST_MAX_SIZE]; + /** Segment secret */ + uint8_t secret[PEERDIST_DIGEST_MAX_SIZE]; + /** Block index */ + unsigned int block; + /** Block hash */ + uint8_t hash[PEERDIST_DIGEST_MAX_SIZE]; + + /** Current position (relative to incoming data stream) */ + size_t pos; + /** Start of trimmed content (relative to incoming data stream) */ + size_t start; + /** End of trimmed content (relative to incoming data stream) */ + size_t end; + /** Data buffer */ + struct xfer_buffer buffer; + + /** Decryption process */ + struct process process; + /** Decryption data buffer descriptors */ + struct peerdist_block_decrypt decrypt[PEERBLK_NUM_BUFFERS]; + /** Remaining decryption length */ + size_t cipher_remaining; + /** Remaining digest length (excluding AES padding bytes) */ + size_t digest_remaining; + + /** Discovery client */ + struct peerdisc_client discovery; + /** Current position in discovered peer list */ + struct peerdisc_peer *peer; + /** Block download queue */ + struct peerdist_block_queue *queue; + /** List of queued block downloads */ + struct list_head queued; + /** Retry timer */ + struct retry_timer timer; + /** Number of full attempt cycles completed */ + unsigned int cycles; + /** Most recent attempt failure */ + int rc; + + /** Time at which block download was started */ + unsigned long started; + /** Time at which most recent attempt was started */ + unsigned long attempted; +}; + +/** PeerDist block download queue */ +struct peerdist_block_queue { + /** Download opening process */ + struct process process; + /** List of queued downloads */ + struct list_head list; + + /** Number of open downloads */ + unsigned 
int count; + /** Maximum number of open downloads */ + unsigned int max; + + /** Open block download + * + * @v peerblk PeerDist block download + * @ret rc Return status code + */ + int ( * open ) ( struct peerdist_block *peerblk ); +}; + +/** Retrieval protocol block fetch response (including transport header) + * + * @v digestsize Digest size + * @v len Data block length + * @v vrf_len Length of uselessness + * @v blksize Cipher block size + */ +#define peerblk_msg_blk_t( digestsize, len, vrf_len, blksize ) \ + struct { \ + struct peerdist_msg_transport_header hdr; \ + peerdist_msg_blk_t ( digestsize, len, vrf_len, \ + blksize ) msg; \ + } __attribute__ (( packed )) + +extern int peerblk_open ( struct interface *xfer, struct uri *uri, + struct peerdist_info_block *block ); + +#endif /* _IPXE_PEERBLK_H */ diff --git a/src/include/ipxe/peerdisc.h b/src/include/ipxe/peerdisc.h new file mode 100644 index 00000000..45d592e7 --- /dev/null +++ b/src/include/ipxe/peerdisc.h @@ -0,0 +1,122 @@ +#ifndef _IPXE_PEERDISC_H +#define _IPXE_PEERDISC_H + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol peer discovery + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** A PeerDist discovery socket */ +struct peerdisc_socket { + /** Name */ + const char *name; + /** Data transfer interface */ + struct interface xfer; + /** Socket address */ + union { + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + } address; +}; + +/** PeerDist discovery socket table */ +#define PEERDISC_SOCKETS __table ( struct peerdisc_socket, "peerdisc_sockets" ) + +/** Declare a PeerDist discovery socket */ +#define __peerdisc_socket __table_entry ( PEERDISC_SOCKETS, 01 ) + +/** A PeerDist discovery segment */ +struct peerdisc_segment { + /** Reference count */ + struct refcnt refcnt; + /** List of segments */ + struct list_head list; + /** Segment identifier string + * + 
* This is MS-PCCRC's "HoHoDk", transcribed as an upper-case + * Base16-encoded string. + */ + const char *id; + /** Message UUID string */ + const char *uuid; + /** List of discovered peers + * + * The list of peers may be appended to during the lifetime of + * the discovery segment. Discovered peers will not be + * removed from the list until the last discovery has been + * closed; this allows users to safely maintain a pointer to a + * current position within the list. + */ + struct list_head peers; + /** List of active clients */ + struct list_head clients; + /** Transmission timer */ + struct retry_timer timer; +}; + +/** A PeerDist discovery peer */ +struct peerdisc_peer { + /** List of peers */ + struct list_head list; + /** Peer location */ + char location[0]; +}; + +/** A PeerDist discovery client */ +struct peerdisc_client { + /** Discovery segment */ + struct peerdisc_segment *segment; + /** List of clients */ + struct list_head list; + /** Operations */ + struct peerdisc_client_operations *op; +}; + +/** PeerDist discovery client operations */ +struct peerdisc_client_operations { + /** New peers have been discovered + * + * @v peerdisc PeerDist discovery client + */ + void ( * discovered ) ( struct peerdisc_client *peerdisc ); +}; + +/** + * Initialise PeerDist discovery + * + * @v peerdisc PeerDist discovery client + * @v op Discovery operations + */ +static inline __attribute__ (( always_inline )) void +peerdisc_init ( struct peerdisc_client *peerdisc, + struct peerdisc_client_operations *op ) { + + peerdisc->op = op; +} + +extern unsigned int peerdisc_timeout_secs; + +extern void peerdisc_stat ( struct interface *intf, struct peerdisc_peer *peer, + struct list_head *peers ); +#define peerdisc_stat_TYPE( object_type ) \ + typeof ( void ( object_type, struct peerdisc_peer *peer, \ + struct list_head *peers ) ) + +extern int peerdisc_open ( struct peerdisc_client *peerdisc, const void *id, + size_t len ); +extern void peerdisc_close ( struct 
peerdisc_client *peerdisc ); + +#endif /* _IPXE_PEERDISC_H */ diff --git a/src/include/ipxe/peermux.h b/src/include/ipxe/peermux.h new file mode 100644 index 00000000..54acbfec --- /dev/null +++ b/src/include/ipxe/peermux.h @@ -0,0 +1,86 @@ +#ifndef _IPXE_PEERMUX_H +#define _IPXE_PEERMUX_H + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol multiplexer + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** Maximum number of concurrent block downloads */ +#define PEERMUX_MAX_BLOCKS 32 + +/** PeerDist download content information cache */ +struct peerdist_info_cache { + /** Content information */ + struct peerdist_info info; + /** Content information segment */ + struct peerdist_info_segment segment; + /** Content information block */ + struct peerdist_info_block block; +}; + +/** A PeerDist multiplexed block download */ +struct peerdist_multiplexed_block { + /** PeerDist download multiplexer */ + struct peerdist_multiplexer *peermux; + /** List of multiplexed blocks */ + struct list_head list; + /** Data transfer interface */ + struct interface xfer; +}; + +/** PeerDist statistics */ +struct peerdist_statistics { + /** Maximum observed number of peers */ + unsigned int peers; + /** Number of blocks downloaded in total */ + unsigned int total; + /** Number of blocks downloaded from peers */ + unsigned int local; +}; + +/** A PeerDist download multiplexer */ +struct peerdist_multiplexer { + /** Reference count */ + struct refcnt refcnt; + /** Data transfer interface */ + struct interface xfer; + /** Content information interface */ + struct interface info; + /** Original URI */ + struct uri *uri; + + /** Content information data transfer buffer */ + struct xfer_buffer buffer; + /** Content information cache */ + struct peerdist_info_cache cache; + + /** Block download initiation process */ + struct process process; + /** List of busy block downloads */ + 
struct list_head busy; + /** List of idle block downloads */ + struct list_head idle; + /** Block downloads */ + struct peerdist_multiplexed_block block[PEERMUX_MAX_BLOCKS]; + + /** Statistics */ + struct peerdist_statistics stats; +}; + +extern int peermux_filter ( struct interface *xfer, struct interface *info, + struct uri *uri ); + +#endif /* _IPXE_PEERMUX_H */ diff --git a/src/include/ipxe/pem.h b/src/include/ipxe/pem.h new file mode 100644 index 00000000..d88ec5b6 --- /dev/null +++ b/src/include/ipxe/pem.h @@ -0,0 +1,28 @@ +#ifndef _IPXE_PEM_H +#define _IPXE_PEM_H + +/** @file + * + * PEM-encoded ASN.1 data + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** Pre-encapsulation boundary marker */ +#define PEM_BEGIN "-----BEGIN" + +/** Post-encapsulation boundary marker */ +#define PEM_END "-----END" + +extern int pem_asn1 ( userptr_t data, size_t len, size_t offset, + struct asn1_cursor **cursor ); + +extern struct image_type pem_image_type __image_type ( PROBE_NORMAL ); + +#endif /* _IPXE_PEM_H */ diff --git a/src/include/ipxe/pool.h b/src/include/ipxe/pool.h new file mode 100644 index 00000000..81ff57d7 --- /dev/null +++ b/src/include/ipxe/pool.h @@ -0,0 +1,127 @@ +#ifndef _IPXE_POOL_H +#define _IPXE_POOL_H + +/** @file + * + * Pooled connections + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** A pooled connection */ +struct pooled_connection { + /** List of pooled connections + * + * Note that each connecton in the pool has a running expiry + * timer which holds a reference to the connection. We + * therefore do not require the connection pool list to hold a + * reference for each pooled connection. 
+ */ + struct list_head list; + /** Expiry timer */ + struct retry_timer timer; + /** Close expired pooled connection + * + * @v pool Pooled connection + */ + void ( * expired ) ( struct pooled_connection *pool ); + /** Flags */ + unsigned int flags; +}; + +/** Pooled connection flags */ +enum pooled_connection_flags { + /** Connection should be recycled after closing */ + POOL_RECYCLABLE = 0x0001, + /** Connection has been recycled */ + POOL_RECYCLED = 0x0002, + /** Connection is known to be alive */ + POOL_ALIVE = 0x0004, +}; + +extern void pool_add ( struct pooled_connection *pool, struct list_head *list, + unsigned long expiry ); +extern void pool_del ( struct pooled_connection *pool ); +extern void pool_expired ( struct retry_timer *timer, int over ); + +/** + * Initialise a pooled connection + * + * @v pool Pooled connection + * @v expired Close expired pooled connection method + * @v refcnt Containing object reference counter + */ +static inline __attribute__ (( always_inline )) void +pool_init ( struct pooled_connection *pool, + void ( * expired ) ( struct pooled_connection *pool ), + struct refcnt *refcnt ) { + + INIT_LIST_HEAD ( &pool->list ); + timer_init ( &pool->timer, pool_expired, refcnt ); + pool->expired = expired; +} + +/** + * Mark pooled connection as recyclable + * + * @v pool Pooled connection + */ +static inline __attribute__ (( always_inline )) void +pool_recyclable ( struct pooled_connection *pool ) { + + pool->flags |= POOL_RECYCLABLE; +} + +/** + * Mark pooled connection as alive + * + * @v pool Pooled connection + */ +static inline __attribute__ (( always_inline )) void +pool_alive ( struct pooled_connection *pool ) { + + pool->flags |= POOL_ALIVE; +} + +/** + * Check if pooled connection is recyclable + * + * @v pool Pooled connection + * @ret recyclable Pooled connection is recyclable + */ +static inline __attribute__ (( always_inline )) int +pool_is_recyclable ( struct pooled_connection *pool ) { + + return ( pool->flags & 
POOL_RECYCLABLE ); +} + +/** + * Check if pooled connection is reopenable + * + * @v pool Pooled connection + * @ret reopenable Pooled connection is reopenable + */ +static inline __attribute__ (( always_inline )) int +pool_is_reopenable ( struct pooled_connection *pool ) { + + /* A connection is reopenable if it has been recycled but is + * not yet known to be alive. + */ + return ( ( pool->flags & POOL_RECYCLED ) && + ( ! ( pool->flags & POOL_ALIVE ) ) ); +} + +extern void pool_recycle ( struct interface *intf ); +#define pool_recycle_TYPE( object_type ) \ + typeof ( void ( object_type ) ) + +extern void pool_reopen ( struct interface *intf ); +#define pool_reopen_TYPE( object_type ) \ + typeof ( void ( object_type ) ) + +#endif /* _IPXE_POOL_H */ diff --git a/src/include/ipxe/pseudobit.h b/src/include/ipxe/pseudobit.h new file mode 100644 index 00000000..431b106f --- /dev/null +++ b/src/include/ipxe/pseudobit.h @@ -0,0 +1,249 @@ +#ifndef _IPXE_PSEUDOBIT_H +#define _IPXE_PSEUDOBIT_H + +/* + * Copyright (C) 2008 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Pseudo-bit structures + * + */ + +#include +#include + +/* Endianness selection. + * + * This is a property of the device, not a property of the host CPU. + */ +#ifdef PSEUDOBIT_LITTLE_ENDIAN +#define cpu_to_BIT64 cpu_to_le64 +#define cpu_to_BIT32 cpu_to_le32 +#define BIT64_to_cpu le64_to_cpu +#define BIT32_to_cpu le32_to_cpu +#define QWORD_SHIFT( offset, width ) (offset) +#endif +#ifdef PSEUDOBIT_BIG_ENDIAN +#define cpu_to_BIT64 cpu_to_be64 +#define cpu_to_BIT32 cpu_to_be32 +#define BIT64_to_cpu be64_to_cpu +#define BIT32_to_cpu be32_to_cpu +#define QWORD_SHIFT( offset, width ) ( 64 - (offset) - (width) ) +#endif + +/** Datatype used to represent a bit in the pseudo-structures */ +typedef unsigned char pseudo_bit_t; + +/** + * Wrapper structure for pseudo_bit_t structures + * + * This structure provides a wrapper around pseudo_bit_t structures. + * It has the correct size, and also encapsulates type information + * about the underlying pseudo_bit_t-based structure, which allows the + * BIT_FILL() etc. macros to work without requiring explicit type + * information. 
+ */ +#define PSEUDO_BIT_STRUCT( _structure ) \ + union { \ + uint8_t bytes[ sizeof ( _structure ) / 8 ]; \ + uint32_t dwords[ sizeof ( _structure ) / 32 ]; \ + uint64_t qwords[ sizeof ( _structure ) / 64 ]; \ + _structure *dummy[0]; \ + } __attribute__ (( packed )) u + +/** Get pseudo_bit_t structure type from wrapper structure pointer */ +#define PSEUDO_BIT_STRUCT_TYPE( _ptr ) \ + typeof ( *((_ptr)->u.dummy[0]) ) + +/** Bit offset of a field within a pseudo_bit_t structure */ +#define BIT_OFFSET( _ptr, _field ) \ + offsetof ( PSEUDO_BIT_STRUCT_TYPE ( _ptr ), _field ) + +/** Bit width of a field within a pseudo_bit_t structure */ +#define BIT_WIDTH( _ptr, _field ) \ + sizeof ( ( ( PSEUDO_BIT_STRUCT_TYPE ( _ptr ) * ) NULL )->_field ) + +/** Qword offset of a field within a pseudo_bit_t structure */ +#define QWORD_OFFSET( _ptr, _field ) \ + ( BIT_OFFSET ( _ptr, _field ) / 64 ) + +/** Qword bit offset of a field within a pseudo_bit_t structure */ +#define QWORD_BIT_OFFSET( _ptr, _index, _field ) \ + ( BIT_OFFSET ( _ptr, _field ) - ( 64 * (_index) ) ) + +/** Qword bit shift for a field within a pseudo_bit_t structure */ +#define QWORD_BIT_SHIFT( _ptr, _index, _field ) \ + QWORD_SHIFT ( QWORD_BIT_OFFSET ( _ptr, _index, _field ), \ + BIT_WIDTH ( _ptr, _field ) ) + +/** Bit mask for a field within a pseudo_bit_t structure */ +#define BIT_MASK( _ptr, _field ) \ + ( ( ~( ( uint64_t ) 0 ) ) >> \ + ( 64 - BIT_WIDTH ( _ptr, _field ) ) ) + +/* + * Assemble native-endian qword from named fields and values + * + */ + +#define BIT_ASSEMBLE_1( _ptr, _index, _field, _value ) \ + ( ( ( uint64_t) (_value) ) << \ + QWORD_BIT_SHIFT ( _ptr, _index, _field ) ) + +#define BIT_ASSEMBLE_2( _ptr, _index, _field, _value, ... ) \ + ( BIT_ASSEMBLE_1 ( _ptr, _index, _field, _value ) | \ + BIT_ASSEMBLE_1 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_ASSEMBLE_3( _ptr, _index, _field, _value, ... 
) \ + ( BIT_ASSEMBLE_1 ( _ptr, _index, _field, _value ) | \ + BIT_ASSEMBLE_2 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_ASSEMBLE_4( _ptr, _index, _field, _value, ... ) \ + ( BIT_ASSEMBLE_1 ( _ptr, _index, _field, _value ) | \ + BIT_ASSEMBLE_3 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_ASSEMBLE_5( _ptr, _index, _field, _value, ... ) \ + ( BIT_ASSEMBLE_1 ( _ptr, _index, _field, _value ) | \ + BIT_ASSEMBLE_4 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_ASSEMBLE_6( _ptr, _index, _field, _value, ... ) \ + ( BIT_ASSEMBLE_1 ( _ptr, _index, _field, _value ) | \ + BIT_ASSEMBLE_5 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_ASSEMBLE_7( _ptr, _index, _field, _value, ... ) \ + ( BIT_ASSEMBLE_1 ( _ptr, _index, _field, _value ) | \ + BIT_ASSEMBLE_6 ( _ptr, _index, __VA_ARGS__ ) ) + +/* + * Build native-endian (positive) qword bitmasks from named fields + * + */ + +#define BIT_MASK_1( _ptr, _index, _field ) \ + ( BIT_MASK ( _ptr, _field ) << \ + QWORD_BIT_SHIFT ( _ptr, _index, _field ) ) + +#define BIT_MASK_2( _ptr, _index, _field, ... ) \ + ( BIT_MASK_1 ( _ptr, _index, _field ) | \ + BIT_MASK_1 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_MASK_3( _ptr, _index, _field, ... ) \ + ( BIT_MASK_1 ( _ptr, _index, _field ) | \ + BIT_MASK_2 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_MASK_4( _ptr, _index, _field, ... ) \ + ( BIT_MASK_1 ( _ptr, _index, _field ) | \ + BIT_MASK_3 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_MASK_5( _ptr, _index, _field, ... ) \ + ( BIT_MASK_1 ( _ptr, _index, _field ) | \ + BIT_MASK_4 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_MASK_6( _ptr, _index, _field, ... ) \ + ( BIT_MASK_1 ( _ptr, _index, _field ) | \ + BIT_MASK_5 ( _ptr, _index, __VA_ARGS__ ) ) + +#define BIT_MASK_7( _ptr, _index, _field, ... 
) \ + ( BIT_MASK_1 ( _ptr, _index, _field ) | \ + BIT_MASK_6 ( _ptr, _index, __VA_ARGS__ ) ) + +/* + * Populate device-endian qwords from named fields and values + * + */ + +#define BIT_FILL( _ptr, _index, _assembled ) do { \ + uint64_t *__ptr = &(_ptr)->u.qwords[(_index)]; \ + uint64_t __assembled = (_assembled); \ + *__ptr = cpu_to_BIT64 ( __assembled ); \ + } while ( 0 ) + +#define BIT_FILL_1( _ptr, _field1, ... ) \ + BIT_FILL ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + BIT_ASSEMBLE_1 ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + _field1, __VA_ARGS__ ) ) + +#define BIT_FILL_2( _ptr, _field1, ... ) \ + BIT_FILL ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + BIT_ASSEMBLE_2 ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + _field1, __VA_ARGS__ ) ) + +#define BIT_FILL_3( _ptr, _field1, ... ) \ + BIT_FILL ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + BIT_ASSEMBLE_3 ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + _field1, __VA_ARGS__ ) ) + +#define BIT_FILL_4( _ptr, _field1, ... ) \ + BIT_FILL ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + BIT_ASSEMBLE_4 ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + _field1, __VA_ARGS__ ) ) + +#define BIT_FILL_5( _ptr, _field1, ... ) \ + BIT_FILL ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + BIT_ASSEMBLE_5 ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + _field1, __VA_ARGS__ ) ) + +#define BIT_FILL_6( _ptr, _field1, ... 
) \ + BIT_FILL ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + BIT_ASSEMBLE_6 ( _ptr, QWORD_OFFSET ( _ptr, _field1 ), \ + _field1, __VA_ARGS__ ) ) + +#define BIT_QWORD_PTR( _ptr, _field ) \ + ( { \ + unsigned int __index = QWORD_OFFSET ( _ptr, _field ); \ + uint64_t *__ptr = &(_ptr)->u.qwords[__index]; \ + __ptr; \ + } ) + +/** Extract value of named field */ +#define BIT_GET64( _ptr, _field ) \ + ( { \ + unsigned int __index = QWORD_OFFSET ( _ptr, _field ); \ + uint64_t *__ptr = &(_ptr)->u.qwords[__index]; \ + uint64_t __value = BIT64_to_cpu ( *__ptr ); \ + __value >>= \ + QWORD_BIT_SHIFT ( _ptr, __index, _field ); \ + __value &= BIT_MASK ( _ptr, _field ); \ + __value; \ + } ) + +/** Extract value of named field (for fields up to the size of a long) */ +#define BIT_GET( _ptr, _field ) \ + ( ( unsigned long ) BIT_GET64 ( _ptr, _field ) ) + +#define BIT_SET( _ptr, _field, _value ) do { \ + unsigned int __index = QWORD_OFFSET ( _ptr, _field ); \ + uint64_t *__ptr = &(_ptr)->u.qwords[__index]; \ + unsigned int __shift = \ + QWORD_BIT_SHIFT ( _ptr, __index, _field ); \ + uint64_t __value = (_value); \ + *__ptr &= cpu_to_BIT64 ( ~( BIT_MASK ( _ptr, _field ) << \ + __shift ) ); \ + *__ptr |= cpu_to_BIT64 ( __value << __shift ); \ + } while ( 0 ) + +#endif /* _IPXE_PSEUDOBIT_H */ diff --git a/src/include/ipxe/quiesce.h b/src/include/ipxe/quiesce.h new file mode 100644 index 00000000..00b530b8 --- /dev/null +++ b/src/include/ipxe/quiesce.h @@ -0,0 +1,31 @@ +#ifndef _IPXE_QUIESCE_H +#define _IPXE_QUIESCE_H + +/** @file + * + * Quiesce system + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** A quiescer */ +struct quiescer { + /** Quiesce system */ + void ( * quiesce ) ( void ); + /** Unquiesce system */ + void ( * unquiesce ) ( void ); +}; + +/** Quiescer table */ +#define QUIESCERS __table ( struct quiescer, "quiescers" ) + +/** Declare a quiescer */ +#define __quiescer __table_entry ( QUIESCERS, 01 ) + +extern void quiesce ( void ); +extern void unquiesce 
( void ); + +#endif /* _IPXE_QUIESCE_H */ diff --git a/src/include/ipxe/rndis.h b/src/include/ipxe/rndis.h new file mode 100644 index 00000000..bcb6d8e6 --- /dev/null +++ b/src/include/ipxe/rndis.h @@ -0,0 +1,370 @@ +#ifndef _IPXE_RNDIS_H +#define _IPXE_RNDIS_H + +/** @file + * + * Remote Network Driver Interface Specification + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Maximum time to wait for a transaction to complete + * + * This is a policy decision. + */ +#define RNDIS_MAX_WAIT_MS 1000 + +/** RNDIS message header */ +struct rndis_header { + /** Message type */ + uint32_t type; + /** Message length */ + uint32_t len; +} __attribute__ (( packed )); + +/** RNDIS initialise message */ +#define RNDIS_INITIALISE_MSG 0x00000002UL + +/** RNDIS initialise message */ +struct rndis_initialise_message { + /** Request ID */ + uint32_t id; + /** Major version */ + uint32_t major; + /** Minor version */ + uint32_t minor; + /** Maximum transfer size */ + uint32_t mtu; +} __attribute__ (( packed )); + +/** Request ID used for initialisation + * + * This is a policy decision. + */ +#define RNDIS_INIT_ID 0xe110e110UL + +/** RNDIS major version */ +#define RNDIS_VERSION_MAJOR 1 + +/** RNDIS minor version */ +#define RNDIS_VERSION_MINOR 0 + +/** RNDIS maximum transfer size + * + * This is a policy decision. 
+ */ +#define RNDIS_MTU 2048 + +/** RNDIS initialise completion */ +#define RNDIS_INITIALISE_CMPLT 0x80000002UL + +/** RNDIS initialise completion */ +struct rndis_initialise_completion { + /** Request ID */ + uint32_t id; + /** Status */ + uint32_t status; + /** Major version */ + uint32_t major; + /** Minor version */ + uint32_t minor; + /** Device flags */ + uint32_t flags; + /** Medium */ + uint32_t medium; + /** Maximum packets per transfer */ + uint32_t max_pkts; + /** Maximum transfer size */ + uint32_t mtu; + /** Packet alignment factor */ + uint32_t align; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** RNDIS halt message */ +#define RNDIS_HALT_MSG 0x00000003UL + +/** RNDIS halt message */ +struct rndis_halt_message { + /** Request ID */ + uint32_t id; +} __attribute__ (( packed )); + +/** RNDIS query OID message */ +#define RNDIS_QUERY_MSG 0x00000004UL + +/** RNDIS set OID message */ +#define RNDIS_SET_MSG 0x00000005UL + +/** RNDIS query or set OID message */ +struct rndis_oid_message { + /** Request ID */ + uint32_t id; + /** Object ID */ + uint32_t oid; + /** Information buffer length */ + uint32_t len; + /** Information buffer offset */ + uint32_t offset; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** RNDIS query OID completion */ +#define RNDIS_QUERY_CMPLT 0x80000004UL + +/** RNDIS query OID completion */ +struct rndis_query_completion { + /** Request ID */ + uint32_t id; + /** Status */ + uint32_t status; + /** Information buffer length */ + uint32_t len; + /** Information buffer offset */ + uint32_t offset; +} __attribute__ (( packed )); + +/** RNDIS set OID completion */ +#define RNDIS_SET_CMPLT 0x80000005UL + +/** RNDIS set OID completion */ +struct rndis_set_completion { + /** Request ID */ + uint32_t id; + /** Status */ + uint32_t status; +} __attribute__ (( packed )); + +/** RNDIS reset message */ +#define RNDIS_RESET_MSG 0x00000006UL + +/** RNDIS reset message */ +struct 
rndis_reset_message { + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** RNDIS reset completion */ +#define RNDIS_RESET_CMPLT 0x80000006UL + +/** RNDIS reset completion */ +struct rndis_reset_completion { + /** Status */ + uint32_t status; + /** Addressing reset */ + uint32_t addr; +} __attribute__ (( packed )); + +/** RNDIS indicate status message */ +#define RNDIS_INDICATE_STATUS_MSG 0x00000007UL + +/** RNDIS diagnostic information */ +struct rndis_diagnostic_info { + /** Status */ + uint32_t status; + /** Error offset */ + uint32_t offset; +} __attribute__ (( packed )); + +/** RNDIS indicate status message */ +struct rndis_indicate_status_message { + /** Status */ + uint32_t status; + /** Status buffer length */ + uint32_t len; + /** Status buffer offset */ + uint32_t offset; + /** Diagnostic information (optional) */ + struct rndis_diagnostic_info diag[0]; +} __attribute__ (( packed )); + +/** RNDIS status codes */ +enum rndis_status { + /** Device is connected to a network medium */ + RNDIS_STATUS_MEDIA_CONNECT = 0x4001000bUL, + /** Device is disconnected from the medium */ + RNDIS_STATUS_MEDIA_DISCONNECT = 0x4001000cUL, + /** Unknown start-of-day status code */ + RNDIS_STATUS_WTF_WORLD = 0x40020006UL, +}; + +/** RNDIS keepalive message */ +#define RNDIS_KEEPALIVE_MSG 0x00000008UL + +/** RNDIS keepalive message */ +struct rndis_keepalive_message { + /** Request ID */ + uint32_t id; +} __attribute__ (( packed )); + +/** RNDIS keepalive completion */ +#define RNDIS_KEEPALIVE_CMPLT 0x80000008UL + +/** RNDIS keepalive completion */ +struct rndis_keepalive_completion { + /** Request ID */ + uint32_t id; + /** Status */ + uint32_t status; +} __attribute__ (( packed )); + +/** RNDIS packet message */ +#define RNDIS_PACKET_MSG 0x00000001UL + +/** RNDIS packet field */ +struct rndis_packet_field { + /** Offset */ + uint32_t offset; + /** Length */ + uint32_t len; +} __attribute__ (( packed )); + +/** RNDIS packet message */ +struct 
rndis_packet_message { + /** Data */ + struct rndis_packet_field data; + /** Out-of-band data records */ + struct rndis_packet_field oob; + /** Number of out-of-band data records */ + uint32_t oob_count; + /** Per-packet information record */ + struct rndis_packet_field ppi; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** RNDIS packet record */ +struct rndis_packet_record { + /** Length */ + uint32_t len; + /** Type */ + uint32_t type; + /** Offset */ + uint32_t offset; +} __attribute__ (( packed )); + +/** OID for packet filter */ +#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010eUL + +/** Packet filter bits */ +enum rndis_packet_filter { + /** Unicast packets */ + RNDIS_FILTER_UNICAST = 0x00000001UL, + /** Multicast packets */ + RNDIS_FILTER_MULTICAST = 0x00000002UL, + /** All multicast packets */ + RNDIS_FILTER_ALL_MULTICAST = 0x00000004UL, + /** Broadcast packets */ + RNDIS_FILTER_BROADCAST = 0x00000008UL, + /** All packets */ + RNDIS_FILTER_PROMISCUOUS = 0x00000020UL +}; + +/** OID for media status */ +#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114UL + +/** OID for permanent MAC address */ +#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101UL + +/** OID for current MAC address */ +#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102UL + +struct rndis_device; + +/** RNDIS device operations */ +struct rndis_operations { + /** + * Open RNDIS device + * + * @v rndis RNDIS device + * @ret rc Return status code + */ + int ( * open ) ( struct rndis_device *rndis ); + /** + * Close RNDIS device + * + * @v rndis RNDIS device + */ + void ( * close ) ( struct rndis_device *rndis ); + /** + * Transmit packet + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + * + * If this method returns success then the RNDIS device must + * eventually report completion via rndis_tx_complete(). 
+ */ + int ( * transmit ) ( struct rndis_device *rndis, + struct io_buffer *iobuf ); + /** + * Poll for completed and received packets + * + * @v rndis RNDIS device + */ + void ( * poll ) ( struct rndis_device *rndis ); +}; + +/** An RNDIS device */ +struct rndis_device { + /** Network device */ + struct net_device *netdev; + /** Device name */ + const char *name; + /** RNDIS operations */ + struct rndis_operations *op; + /** Driver private data */ + void *priv; + + /** Request ID for current blocking request */ + unsigned int wait_id; + /** Return status code for current blocking request */ + int wait_rc; +}; + +/** + * Initialise an RNDIS device + * + * @v rndis RNDIS device + * @v op RNDIS device operations + */ +static inline void rndis_init ( struct rndis_device *rndis, + struct rndis_operations *op ) { + + rndis->op = op; +} + +extern void rndis_tx_complete_err ( struct rndis_device *rndis, + struct io_buffer *iobuf, int rc ); +extern int rndis_tx_defer ( struct rndis_device *rndis, + struct io_buffer *iobuf ); +extern void rndis_rx ( struct rndis_device *rndis, struct io_buffer *iobuf ); +extern void rndis_rx_err ( struct rndis_device *rndis, struct io_buffer *iobuf, + int rc ); + +extern struct rndis_device * alloc_rndis ( size_t priv_len ); +extern int register_rndis ( struct rndis_device *rndis ); +extern void unregister_rndis ( struct rndis_device *rndis ); +extern void free_rndis ( struct rndis_device *rndis ); + +/** + * Complete message transmission + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + */ +static inline void rndis_tx_complete ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + + rndis_tx_complete_err ( rndis, iobuf, 0 ); +} + +#endif /* _IPXE_RNDIS_H */ diff --git a/src/include/ipxe/sha512.h b/src/include/ipxe/sha512.h new file mode 100644 index 00000000..8e22d835 --- /dev/null +++ b/src/include/ipxe/sha512.h @@ -0,0 +1,98 @@ +#ifndef _IPXE_SHA512_H +#define _IPXE_SHA512_H + +/** @file + * + * SHA-512 algorithm + * + */ + 
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** SHA-512 number of rounds */ +#define SHA512_ROUNDS 80 + +/** An SHA-512 digest */ +struct sha512_digest { + /** Hash output */ + uint64_t h[8]; +}; + +/** An SHA-512 data block */ +union sha512_block { + /** Raw bytes */ + uint8_t byte[128]; + /** Raw qwords */ + uint64_t qword[16]; + /** Final block structure */ + struct { + /** Padding */ + uint8_t pad[112]; + /** High 64 bits of length in bits */ + uint64_t len_hi; + /** Low 64 bits of length in bits */ + uint64_t len_lo; + } final; +}; + +/** SHA-512 digest and data block + * + * The order of fields within this structure is designed to minimise + * code size. + */ +struct sha512_digest_data { + /** Digest of data already processed */ + struct sha512_digest digest; + /** Accumulated data */ + union sha512_block data; +} __attribute__ (( packed )); + +/** SHA-512 digest and data block */ +union sha512_digest_data_qwords { + /** Digest and data block */ + struct sha512_digest_data dd; + /** Raw qwords */ + uint64_t qword[ sizeof ( struct sha512_digest_data ) / + sizeof ( uint64_t ) ]; +}; + +/** An SHA-512 context */ +struct sha512_context { + /** Amount of accumulated data */ + size_t len; + /** Digest size */ + size_t digestsize; + /** Digest and accumulated data */ + union sha512_digest_data_qwords ddq; +} __attribute__ (( packed )); + +/** SHA-512 context size */ +#define SHA512_CTX_SIZE sizeof ( struct sha512_context ) + +/** SHA-512 digest size */ +#define SHA512_DIGEST_SIZE sizeof ( struct sha512_digest ) + +/** SHA-384 digest size */ +#define SHA384_DIGEST_SIZE ( SHA512_DIGEST_SIZE * 384 / 512 ) + +/** SHA-512/256 digest size */ +#define SHA512_256_DIGEST_SIZE ( SHA512_DIGEST_SIZE * 256 / 512 ) + +/** SHA-512/224 digest size */ +#define SHA512_224_DIGEST_SIZE ( SHA512_DIGEST_SIZE * 224 / 512 ) + +extern void sha512_family_init ( struct sha512_context *context, + const struct sha512_digest *init, + size_t digestsize ); +extern void 
sha512_update ( void *ctx, const void *data, size_t len ); +extern void sha512_final ( void *ctx, void *out ); + +extern struct digest_algorithm sha512_algorithm; +extern struct digest_algorithm sha384_algorithm; +extern struct digest_algorithm sha512_256_algorithm; +extern struct digest_algorithm sha512_224_algorithm; + +#endif /* IPXE_SHA512_H */ diff --git a/src/include/ipxe/stp.h b/src/include/ipxe/stp.h new file mode 100644 index 00000000..3d85e5ba --- /dev/null +++ b/src/include/ipxe/stp.h @@ -0,0 +1,76 @@ +#ifndef _IPXE_STP_H +#define _IPXE_STP_H + +/** @file + * + * Spanning Tree Protocol (STP) + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "Protocol" value for STP + * + * This is the concatenated {DSAP,SSAP} value used internally by iPXE + * as the network-layer protocol for LLC frames. + */ +#define ETH_P_STP 0x4242 + +/** A switch identifier */ +struct stp_switch { + /** Priotity */ + uint16_t priority; + /** MAC address */ + uint8_t mac[ETH_ALEN]; +} __attribute__ (( packed )); + +/** A Spanning Tree bridge protocol data unit */ +struct stp_bpdu { + /** LLC DSAP */ + uint8_t dsap; + /** LLC SSAP */ + uint8_t ssap; + /** LLC control field */ + uint8_t control; + /** Protocol ID */ + uint16_t protocol; + /** Protocol version */ + uint8_t version; + /** Message type */ + uint8_t type; + /** Flags */ + uint8_t flags; + /** Root switch */ + struct stp_switch root; + /** Root path cost */ + uint32_t cost; + /** Sender switch */ + struct stp_switch sender; + /** Port */ + uint16_t port; + /** Message age */ + uint16_t age; + /** Maximum age */ + uint16_t max; + /** Hello time */ + uint16_t hello; + /** Forward delay */ + uint16_t delay; +} __attribute__ (( packed )); + +/** Spanning Tree protocol ID */ +#define STP_PROTOCOL 0x0000 + +/** Rapid Spanning Tree protocol version */ +#define STP_VERSION_RSTP 0x02 + +/** Rapid Spanning Tree bridge PDU type */ +#define STP_TYPE_RSTP 0x02 + +/** Port is forwarding */ +#define 
STP_FL_FORWARDING 0x20 + +#endif /* _IPXE_STP_H */ diff --git a/src/include/ipxe/string.h b/src/include/ipxe/string.h new file mode 100644 index 00000000..a8cbe8fa --- /dev/null +++ b/src/include/ipxe/string.h @@ -0,0 +1,14 @@ +#ifndef _IPXE_STRING_H +#define _IPXE_STRING_H + +/** @file + * + * String functions + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern unsigned int digit_value ( unsigned int digit ); + +#endif /* _IPXE_STRING_H */ diff --git a/src/include/ipxe/uart.h b/src/include/ipxe/uart.h new file mode 100644 index 00000000..c63eae61 --- /dev/null +++ b/src/include/ipxe/uart.h @@ -0,0 +1,132 @@ +#ifndef _IPXE_UART_H +#define _IPXE_UART_H + +/** @file + * + * 16550-compatible UART + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** Transmitter holding register */ +#define UART_THR 0x00 + +/** Receiver buffer register */ +#define UART_RBR 0x00 + +/** Interrupt enable register */ +#define UART_IER 0x01 + +/** FIFO control register */ +#define UART_FCR 0x02 +#define UART_FCR_FE 0x01 /**< FIFO enable */ + +/** Line control register */ +#define UART_LCR 0x03 +#define UART_LCR_WLS0 0x01 /**< Word length select bit 0 */ +#define UART_LCR_WLS1 0x02 /**< Word length select bit 1 */ +#define UART_LCR_STB 0x04 /**< Number of stop bits */ +#define UART_LCR_PEN 0x08 /**< Parity enable */ +#define UART_LCR_EPS 0x10 /**< Even parity select */ +#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */ + +#define UART_LCR_WORD_LEN(x) ( ( (x) - 5 ) << 0 ) /**< Word length */ +#define UART_LCR_STOP_BITS(x) ( ( (x) - 1 ) << 2 ) /**< Stop bits */ +#define UART_LCR_PARITY(x) ( ( (x) - 0 ) << 3 ) /**< Parity */ + +/** + * Calculate line control register value + * + * @v word_len Word length (5-8) + * @v parity Parity (0=none, 1=odd, 3=even) + * @v stop_bits Stop bits (1-2) + * @ret lcr Line control register value + */ +#define UART_LCR_WPS( word_len, parity, stop_bits ) \ + ( UART_LCR_WORD_LEN ( (word_len) ) | \ + UART_LCR_PARITY ( (parity) ) | 
\ + UART_LCR_STOP_BITS ( (stop_bits) ) ) + +/** Default LCR value: 8 data bits, no parity, one stop bit */ +#define UART_LCR_8N1 UART_LCR_WPS ( 8, 0, 1 ) + +/** Modem control register */ +#define UART_MCR 0x04 +#define UART_MCR_DTR 0x01 /**< Data terminal ready */ +#define UART_MCR_RTS 0x02 /**< Request to send */ + +/** Line status register */ +#define UART_LSR 0x05 +#define UART_LSR_DR 0x01 /**< Data ready */ +#define UART_LSR_THRE 0x20 /**< Transmitter holding register empty */ +#define UART_LSR_TEMT 0x40 /**< Transmitter empty */ + +/** Scratch register */ +#define UART_SCR 0x07 + +/** Divisor latch (least significant byte) */ +#define UART_DLL 0x00 + +/** Divisor latch (most significant byte) */ +#define UART_DLM 0x01 + +/** Maximum baud rate */ +#define UART_MAX_BAUD 115200 + +/** A 16550-compatible UART */ +struct uart { + /** I/O port base address */ + void *base; + /** Baud rate divisor */ + uint16_t divisor; + /** Line control register */ + uint8_t lcr; +}; + +/** Symbolic names for port indexes */ +enum uart_port { + COM1 = 1, + COM2 = 2, + COM3 = 3, + COM4 = 4, +}; + +#include + +void uart_write ( struct uart *uart, unsigned int addr, uint8_t data ); +uint8_t uart_read ( struct uart *uart, unsigned int addr ); +int uart_select ( struct uart *uart, unsigned int port ); + +/** + * Check if received data is ready + * + * @v uart UART + * @ret ready Data is ready + */ +static inline int uart_data_ready ( struct uart *uart ) { + uint8_t lsr; + + lsr = uart_read ( uart, UART_LSR ); + return ( lsr & UART_LSR_DR ); +} + +/** + * Receive data + * + * @v uart UART + * @ret data Data + */ +static inline uint8_t uart_receive ( struct uart *uart ) { + + return uart_read ( uart, UART_RBR ); +} + +extern void uart_transmit ( struct uart *uart, uint8_t data ); +extern void uart_flush ( struct uart *uart ); +extern int uart_exists ( struct uart *uart ); +extern int uart_init ( struct uart *uart, unsigned int baud, uint8_t lcr ); + +#endif /* _IPXE_UART_H */ diff --git 
a/src/include/ipxe/usb.h b/src/include/ipxe/usb.h new file mode 100644 index 00000000..f41f4c35 --- /dev/null +++ b/src/include/ipxe/usb.h @@ -0,0 +1,1418 @@ +#ifndef _IPXE_USB_H +#define _IPXE_USB_H + +/** @file + * + * Universal Serial Bus (USB) + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** USB protocols */ +enum usb_protocol { + /** USB 2.0 */ + USB_PROTO_2_0 = 0x0200, + /** USB 3.0 */ + USB_PROTO_3_0 = 0x0300, + /** USB 3.1 */ + USB_PROTO_3_1 = 0x0301, +}; + +/** Define a USB speed + * + * @v mantissa Mantissa + * @v exponent Exponent (in engineering terms: 1=k, 2=M, 3=G) + * @ret speed USB speed + */ +#define USB_SPEED( mantissa, exponent ) ( (exponent << 16) | (mantissa) ) + +/** Extract USB speed mantissa */ +#define USB_SPEED_MANTISSA(speed) ( (speed) & 0xffff ) + +/** Extract USB speed exponent */ +#define USB_SPEED_EXPONENT(speed) ( ( (speed) >> 16 ) & 0x3 ) + +/** USB device speeds */ +enum usb_speed { + /** Not connected */ + USB_SPEED_NONE = 0, + /** Low speed (1.5Mbps) */ + USB_SPEED_LOW = USB_SPEED ( 1500, 1 ), + /** Full speed (12Mbps) */ + USB_SPEED_FULL = USB_SPEED ( 12, 2 ), + /** High speed (480Mbps) */ + USB_SPEED_HIGH = USB_SPEED ( 480, 2 ), + /** Super speed (5Gbps) */ + USB_SPEED_SUPER = USB_SPEED ( 5, 3 ), +}; + +/** USB packet IDs */ +enum usb_pid { + /** IN PID */ + USB_PID_IN = 0x69, + /** OUT PID */ + USB_PID_OUT = 0xe1, + /** SETUP PID */ + USB_PID_SETUP = 0x2d, +}; + +/** A USB setup data packet */ +struct usb_setup_packet { + /** Request */ + uint16_t request; + /** Value parameter */ + uint16_t value; + /** Index parameter */ + uint16_t index; + /** Length of data stage */ + uint16_t len; +} __attribute__ (( packed )); + +/** Data transfer is from host to device */ +#define USB_DIR_OUT ( 0 << 7 ) + +/** Data transfer is from device to host */ +#define USB_DIR_IN ( 1 << 7 ) + +/** Standard request type */ +#define USB_TYPE_STANDARD ( 0 << 5 ) + +/** 
Class-specific request type */ +#define USB_TYPE_CLASS ( 1 << 5 ) + +/** Vendor-specific request type */ +#define USB_TYPE_VENDOR ( 2 << 5 ) + +/** Request recipient mask */ +#define USB_RECIP_MASK ( 0x1f << 0 ) + +/** Request recipient is the device */ +#define USB_RECIP_DEVICE ( 0 << 0 ) + +/** Request recipient is an interface */ +#define USB_RECIP_INTERFACE ( 1 << 0 ) + +/** Request recipient is an endpoint */ +#define USB_RECIP_ENDPOINT ( 2 << 0 ) + +/** Construct USB request type */ +#define USB_REQUEST_TYPE(type) ( (type) << 8 ) + +/** Get status */ +#define USB_GET_STATUS ( USB_DIR_IN | USB_REQUEST_TYPE ( 0 ) ) + +/** Clear feature */ +#define USB_CLEAR_FEATURE ( USB_DIR_OUT | USB_REQUEST_TYPE ( 1 ) ) + +/** Set feature */ +#define USB_SET_FEATURE ( USB_DIR_OUT | USB_REQUEST_TYPE ( 3 ) ) + +/** Set address */ +#define USB_SET_ADDRESS ( USB_DIR_OUT | USB_REQUEST_TYPE ( 5 ) ) + +/** Get descriptor */ +#define USB_GET_DESCRIPTOR ( USB_DIR_IN | USB_REQUEST_TYPE ( 6 ) ) + +/** Set descriptor */ +#define USB_SET_DESCRIPTOR ( USB_DIR_OUT | USB_REQUEST_TYPE ( 7 ) ) + +/** Get configuration */ +#define USB_GET_CONFIGURATION ( USB_DIR_IN | USB_REQUEST_TYPE ( 8 ) ) + +/** Set configuration */ +#define USB_SET_CONFIGURATION ( USB_DIR_OUT | USB_REQUEST_TYPE ( 9 ) ) + +/** Get interface */ +#define USB_GET_INTERFACE \ + ( USB_DIR_IN | USB_RECIP_INTERFACE | USB_REQUEST_TYPE ( 10 ) ) + +/** Set interface */ +#define USB_SET_INTERFACE \ + ( USB_DIR_OUT | USB_RECIP_INTERFACE | USB_REQUEST_TYPE ( 11 ) ) + +/** Endpoint halt feature */ +#define USB_ENDPOINT_HALT 0 + +/** A USB class code tuple */ +struct usb_class { + /** Class code */ + uint8_t class; + /** Subclass code */ + uint8_t subclass; + /** Protocol code */ + uint8_t protocol; +} __attribute__ (( packed )); + +/** Class code for USB hubs */ +#define USB_CLASS_HUB 9 + +/** A USB descriptor header */ +struct usb_descriptor_header { + /** Length of descriptor */ + uint8_t len; + /** Descriptor type */ + uint8_t type; +} 
__attribute__ (( packed )); + +/** A USB device descriptor */ +struct usb_device_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** USB specification release number in BCD */ + uint16_t protocol; + /** Device class */ + struct usb_class class; + /** Maximum packet size for endpoint zero */ + uint8_t mtu; + /** Vendor ID */ + uint16_t vendor; + /** Product ID */ + uint16_t product; + /** Device release number in BCD */ + uint16_t release; + /** Manufacturer string */ + uint8_t manufacturer; + /** Product string */ + uint8_t name; + /** Serial number string */ + uint8_t serial; + /** Number of possible configurations */ + uint8_t configurations; +} __attribute__ (( packed )); + +/** A USB device descriptor */ +#define USB_DEVICE_DESCRIPTOR 1 + +/** A USB configuration descriptor */ +struct usb_configuration_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Total length */ + uint16_t len; + /** Number of interfaces */ + uint8_t interfaces; + /** Configuration value */ + uint8_t config; + /** Configuration string */ + uint8_t name; + /** Attributes */ + uint8_t attributes; + /** Maximum power consumption */ + uint8_t power; +} __attribute__ (( packed )); + +/** A USB configuration descriptor */ +#define USB_CONFIGURATION_DESCRIPTOR 2 + +/** A USB string descriptor */ +struct usb_string_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** String */ + char string[0]; +} __attribute__ (( packed )); + +/** A USB string descriptor */ +#define USB_STRING_DESCRIPTOR 3 + +/** Language ID for English */ +#define USB_LANG_ENGLISH 0x0409 + +/** A USB interface descriptor */ +struct usb_interface_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Interface number */ + uint8_t interface; + /** Alternate setting */ + uint8_t alternate; + /** Number of endpoints */ + uint8_t endpoints; + /** Interface class */ + struct usb_class class; + /** Interface 
name */ + uint8_t name; +} __attribute__ (( packed )); + +/** A USB interface descriptor */ +#define USB_INTERFACE_DESCRIPTOR 4 + +/** A USB endpoint descriptor */ +struct usb_endpoint_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Endpoint address */ + uint8_t endpoint; + /** Attributes */ + uint8_t attributes; + /** Maximum packet size and burst size */ + uint16_t sizes; + /** Polling interval */ + uint8_t interval; +} __attribute__ (( packed )); + +/** A USB endpoint descriptor */ +#define USB_ENDPOINT_DESCRIPTOR 5 + +/** Endpoint attribute transfer type mask */ +#define USB_ENDPOINT_ATTR_TYPE_MASK 0x03 + +/** Endpoint periodic type */ +#define USB_ENDPOINT_ATTR_PERIODIC 0x01 + +/** Control endpoint transfer type */ +#define USB_ENDPOINT_ATTR_CONTROL 0x00 + +/** Bulk endpoint transfer type */ +#define USB_ENDPOINT_ATTR_BULK 0x02 + +/** Interrupt endpoint transfer type */ +#define USB_ENDPOINT_ATTR_INTERRUPT 0x03 + +/** Bulk OUT endpoint (internal) type */ +#define USB_BULK_OUT ( USB_ENDPOINT_ATTR_BULK | USB_DIR_OUT ) + +/** Bulk IN endpoint (internal) type */ +#define USB_BULK_IN ( USB_ENDPOINT_ATTR_BULK | USB_DIR_IN ) + +/** Interrupt IN endpoint (internal) type */ +#define USB_INTERRUPT_IN ( USB_ENDPOINT_ATTR_INTERRUPT | USB_DIR_IN ) + +/** Interrupt OUT endpoint (internal) type */ +#define USB_INTERRUPT_OUT ( USB_ENDPOINT_ATTR_INTERRUPT | USB_DIR_OUT ) + +/** USB endpoint MTU */ +#define USB_ENDPOINT_MTU(sizes) ( ( (sizes) >> 0 ) & 0x07ff ) + +/** USB endpoint maximum burst size */ +#define USB_ENDPOINT_BURST(sizes) ( ( (sizes) >> 11 ) & 0x0003 ) + +/** A USB endpoint companion descriptor */ +struct usb_endpoint_companion_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** Maximum burst size */ + uint8_t burst; + /** Extended attributes */ + uint8_t extended; + /** Number of bytes per service interval */ + uint16_t periodic; +} __attribute__ (( packed )); + +/** A USB endpoint companion 
descriptor */ +#define USB_ENDPOINT_COMPANION_DESCRIPTOR 48 + +/** A USB interface association descriptor */ +struct usb_interface_association_descriptor { + /** Descriptor header */ + struct usb_descriptor_header header; + /** First interface number */ + uint8_t first; + /** Interface count */ + uint8_t count; + /** Association class */ + struct usb_class class; + /** Association name */ + uint8_t name; +} __attribute__ (( packed )); + +/** A USB interface association descriptor */ +#define USB_INTERFACE_ASSOCIATION_DESCRIPTOR 11 + +/** A class-specific interface descriptor */ +#define USB_CS_INTERFACE_DESCRIPTOR 36 + +/** A class-specific endpoint descriptor */ +#define USB_CS_ENDPOINT_DESCRIPTOR 37 + +/** + * Get next USB descriptor + * + * @v desc USB descriptor header + * @ret next Next USB descriptor header + */ +static inline __attribute__ (( always_inline )) struct usb_descriptor_header * +usb_next_descriptor ( struct usb_descriptor_header *desc ) { + + return ( ( ( void * ) desc ) + desc->len ); +} + +/** + * Check that descriptor lies within a configuration descriptor + * + * @v config Configuration descriptor + * @v desc Descriptor header + * @v is_within Descriptor is within the configuration descriptor + */ +static inline __attribute__ (( always_inline )) int +usb_is_within_config ( struct usb_configuration_descriptor *config, + struct usb_descriptor_header *desc ) { + struct usb_descriptor_header *end = + ( ( ( void * ) config ) + le16_to_cpu ( config->len ) ); + + /* Check that descriptor starts within the configuration + * descriptor, and that the length does not exceed the + * configuration descriptor. This relies on the fact that + * usb_next_descriptor() needs to access only the first byte + * of the descriptor in order to determine the length. 
+ */ + return ( ( desc < end ) && ( usb_next_descriptor ( desc ) <= end ) ); +} + +/** Iterate over all configuration descriptors */ +#define for_each_config_descriptor( desc, config ) \ + for ( desc = container_of ( &(config)->header, \ + typeof ( *desc ), header ) ; \ + usb_is_within_config ( (config), &desc->header ) ; \ + desc = container_of ( usb_next_descriptor ( &desc->header ), \ + typeof ( *desc ), header ) ) + +/** Iterate over all configuration descriptors within an interface descriptor */ +#define for_each_interface_descriptor( desc, config, interface ) \ + for ( desc = container_of ( usb_next_descriptor ( &(interface)-> \ + header ), \ + typeof ( *desc ), header ) ; \ + ( usb_is_within_config ( (config), &desc->header ) && \ + ( desc->header.type != USB_INTERFACE_DESCRIPTOR ) ) ; \ + desc = container_of ( usb_next_descriptor ( &desc->header ), \ + typeof ( *desc ), header ) ) + +/** A USB endpoint */ +struct usb_endpoint { + /** USB device */ + struct usb_device *usb; + /** Endpoint address */ + unsigned int address; + /** Attributes */ + unsigned int attributes; + /** Maximum transfer size */ + size_t mtu; + /** Maximum burst size */ + unsigned int burst; + /** Interval (in microframes) */ + unsigned int interval; + + /** Endpoint is open */ + int open; + /** Buffer fill level */ + unsigned int fill; + + /** List of halted endpoints */ + struct list_head halted; + + /** Host controller operations */ + struct usb_endpoint_host_operations *host; + /** Host controller private data */ + void *priv; + /** Driver operations */ + struct usb_endpoint_driver_operations *driver; + + /** Recycled I/O buffer list */ + struct list_head recycled; + /** Refill buffer reserved header length */ + size_t reserve; + /** Refill buffer payload length */ + size_t len; + /** Maximum fill level */ + unsigned int max; +}; + +/** USB endpoint host controller operations */ +struct usb_endpoint_host_operations { + /** Open endpoint + * + * @v ep USB endpoint + * @ret rc Return 
status code + */ + int ( * open ) ( struct usb_endpoint *ep ); + /** Close endpoint + * + * @v ep USB endpoint + */ + void ( * close ) ( struct usb_endpoint *ep ); + /** + * Reset endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ + int ( * reset ) ( struct usb_endpoint *ep ); + /** Update MTU + * + * @v ep USB endpoint + * @ret rc Return status code + */ + int ( * mtu ) ( struct usb_endpoint *ep ); + /** Enqueue message transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @ret rc Return status code + */ + int ( * message ) ( struct usb_endpoint *ep, + struct io_buffer *iobuf ); + /** Enqueue stream transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v zlp Append a zero-length packet + * @ret rc Return status code + */ + int ( * stream ) ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int zlp ); +}; + +/** USB endpoint driver operations */ +struct usb_endpoint_driver_operations { + /** Complete transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ + void ( * complete ) ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ); +}; + +/** Control endpoint address */ +#define USB_EP0_ADDRESS 0x00 + +/** Control endpoint attributes */ +#define USB_EP0_ATTRIBUTES 0x00 + +/** Calculate default MTU based on device speed + * + * @v speed Device speed + * @ret mtu Default MTU + */ +#define USB_EP0_DEFAULT_MTU(speed) \ + ( ( (speed) >= USB_SPEED_SUPER ) ? 512 : \ + ( ( (speed) >= USB_SPEED_FULL ) ? 
64 : 8 ) ) + +/** Control endpoint maximum burst size */ +#define USB_EP0_BURST 0 + +/** Control endpoint interval */ +#define USB_EP0_INTERVAL 0 + +/** Maximum endpoint number */ +#define USB_ENDPOINT_MAX 0x0f + +/** Endpoint direction is in */ +#define USB_ENDPOINT_IN 0x80 + +/** Construct endpoint index from endpoint address */ +#define USB_ENDPOINT_IDX(address) \ + ( ( (address) & USB_ENDPOINT_MAX ) | \ + ( ( (address) & USB_ENDPOINT_IN ) >> 3 ) ) + +/** + * Initialise USB endpoint + * + * @v ep USB endpoint + * @v usb USB device + * @v driver Driver operations + */ +static inline __attribute__ (( always_inline )) void +usb_endpoint_init ( struct usb_endpoint *ep, struct usb_device *usb, + struct usb_endpoint_driver_operations *driver ) { + + ep->usb = usb; + ep->driver = driver; +} + +/** + * Describe USB endpoint + * + * @v ep USB endpoint + * @v address Endpoint address + * @v attributes Attributes + * @v mtu Maximum packet size + * @v burst Maximum burst size + * @v interval Interval (in microframes) + */ +static inline __attribute__ (( always_inline )) void +usb_endpoint_describe ( struct usb_endpoint *ep, unsigned int address, + unsigned int attributes, size_t mtu, + unsigned int burst, unsigned int interval ) { + + ep->address = address; + ep->attributes = attributes; + ep->mtu = mtu; + ep->burst = burst; + ep->interval = interval; +} + +/** + * Set USB endpoint host controller private data + * + * @v ep USB endpoint + * @v priv Host controller private data + */ +static inline __attribute__ (( always_inline )) void +usb_endpoint_set_hostdata ( struct usb_endpoint *ep, void *priv ) { + ep->priv = priv; +} + +/** + * Get USB endpoint host controller private data + * + * @v ep USB endpoint + * @ret priv Host controller private data + */ +static inline __attribute__ (( always_inline )) void * +usb_endpoint_get_hostdata ( struct usb_endpoint *ep ) { + return ep->priv; +} + +extern const char * usb_endpoint_name ( struct usb_endpoint *ep ); +extern int 
+usb_endpoint_described ( struct usb_endpoint *ep, + struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface, + unsigned int type, unsigned int index ); +extern int usb_endpoint_open ( struct usb_endpoint *ep ); +extern int usb_endpoint_clear_halt ( struct usb_endpoint *ep ); +extern void usb_endpoint_close ( struct usb_endpoint *ep ); +extern int usb_message ( struct usb_endpoint *ep, unsigned int request, + unsigned int value, unsigned int index, + struct io_buffer *iobuf ); +extern int usb_stream ( struct usb_endpoint *ep, struct io_buffer *iobuf, + int terminate ); +extern void usb_complete_err ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ); + +/** + * Initialise USB endpoint refill + * + * @v ep USB endpoint + * @v reserve Refill buffer reserved header length + * @v len Refill buffer payload length (zero for endpoint's MTU) + * @v max Maximum fill level + */ +static inline __attribute__ (( always_inline )) void +usb_refill_init ( struct usb_endpoint *ep, size_t reserve, size_t len, + unsigned int max ) { + + INIT_LIST_HEAD ( &ep->recycled ); + ep->reserve = reserve; + ep->len = len; + ep->max = max; +} + +/** + * Recycle I/O buffer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + */ +static inline __attribute__ (( always_inline )) void +usb_recycle ( struct usb_endpoint *ep, struct io_buffer *iobuf ) { + + list_add_tail ( &iobuf->list, &ep->recycled ); +} + +extern int usb_prefill ( struct usb_endpoint *ep ); +extern int usb_refill_limit ( struct usb_endpoint *ep, unsigned int max ); +extern int usb_refill ( struct usb_endpoint *ep ); +extern void usb_flush ( struct usb_endpoint *ep ); + +/** A USB class descriptor */ +union usb_class_descriptor { + /** Class */ + struct usb_class class; + /** Scalar value */ + uint32_t scalar; +}; + +/** + * A USB function descriptor + * + * This is an internal descriptor used to represent an association of + * interfaces within a USB device. 
+ */ +struct usb_function_descriptor { + /** Vendor ID */ + uint16_t vendor; + /** Product ID */ + uint16_t product; + /** Class */ + union usb_class_descriptor class; + /** Number of interfaces */ + unsigned int count; +}; + +/** + * A USB function + * + * A USB function represents an association of interfaces within a USB + * device. + */ +struct usb_function { + /** Name */ + const char *name; + /** USB device */ + struct usb_device *usb; + /** Function descriptor */ + struct usb_function_descriptor desc; + /** Generic device */ + struct device dev; + /** List of functions within this USB device */ + struct list_head list; + + /** Driver */ + struct usb_driver *driver; + /** Driver private data */ + void *priv; + /** Driver device ID */ + struct usb_device_id *id; + + /** List of interface numbers + * + * This must be the last field within the structure. + */ + uint8_t interface[0]; +}; + +/** + * Set USB function driver private data + * + * @v func USB function + * @v priv Driver private data + */ +static inline __attribute__ (( always_inline )) void +usb_func_set_drvdata ( struct usb_function *func, void *priv ) { + func->priv = priv; +} + +/** + * Get USB function driver private data + * + * @v function USB function + * @ret priv Driver private data + */ +static inline __attribute__ (( always_inline )) void * +usb_func_get_drvdata ( struct usb_function *func ) { + return func->priv; +} + +/** A USB device */ +struct usb_device { + /** Name */ + char name[32]; + /** USB port */ + struct usb_port *port; + /** Device speed */ + unsigned int speed; + /** List of devices on this bus */ + struct list_head list; + /** Device address, if assigned */ + unsigned int address; + /** Device descriptor */ + struct usb_device_descriptor device; + /** List of functions */ + struct list_head functions; + + /** Host controller operations */ + struct usb_device_host_operations *host; + /** Host controller private data */ + void *priv; + + /** Endpoint list */ + struct 
usb_endpoint *ep[32]; + + /** Control endpoint */ + struct usb_endpoint control; + /** Completed control transfers */ + struct list_head complete; + + /** Default language ID (if known) */ + unsigned int language; +}; + +/** USB device host controller operations */ +struct usb_device_host_operations { + /** Open device + * + * @v usb USB device + * @ret rc Return status code + */ + int ( * open ) ( struct usb_device *usb ); + /** Close device + * + * @v usb USB device + */ + void ( * close ) ( struct usb_device *usb ); + /** Assign device address + * + * @v usb USB device + * @ret rc Return status code + */ + int ( * address ) ( struct usb_device *usb ); +}; + +/** + * Set USB device host controller private data + * + * @v usb USB device + * @v priv Host controller private data + */ +static inline __attribute__ (( always_inline )) void +usb_set_hostdata ( struct usb_device *usb, void *priv ) { + usb->priv = priv; +} + +/** + * Get USB device host controller private data + * + * @v usb USB device + * @ret priv Host controller private data + */ +static inline __attribute__ (( always_inline )) void * +usb_get_hostdata ( struct usb_device *usb ) { + return usb->priv; +} + +/** + * Get USB endpoint + * + * @v usb USB device + * @v address Endpoint address + * @ret ep USB endpoint, or NULL if not opened + */ +static inline struct usb_endpoint * usb_endpoint ( struct usb_device *usb, + unsigned int address ) { + + return usb->ep[ USB_ENDPOINT_IDX ( address ) ]; +} + +/** A USB port */ +struct usb_port { + /** USB hub */ + struct usb_hub *hub; + /** Port address */ + unsigned int address; + /** Port protocol */ + unsigned int protocol; + /** Port speed */ + unsigned int speed; + /** Port disconnection has been detected + * + * This should be set whenever the underlying hardware reports + * a connection status change. 
+ */ + int disconnected; + /** Port has an attached device */ + int attached; + /** Currently attached device (if in use) + * + * Note that this field will be NULL if the attached device + * has been freed (e.g. because there were no drivers found). + */ + struct usb_device *usb; + /** List of changed ports */ + struct list_head changed; +}; + +/** A USB hub */ +struct usb_hub { + /** Name */ + const char *name; + /** USB bus */ + struct usb_bus *bus; + /** Underlying USB device, if any */ + struct usb_device *usb; + /** Hub protocol */ + unsigned int protocol; + /** Number of ports */ + unsigned int ports; + + /** List of hubs */ + struct list_head list; + + /** Host controller operations */ + struct usb_hub_host_operations *host; + /** Driver operations */ + struct usb_hub_driver_operations *driver; + /** Driver private data */ + void *priv; + + /** Port list + * + * This must be the last field within the structure. + */ + struct usb_port port[0]; +}; + +/** USB hub host controller operations */ +struct usb_hub_host_operations { + /** Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ + int ( * open ) ( struct usb_hub *hub ); + /** Close hub + * + * @v hub USB hub + */ + void ( * close ) ( struct usb_hub *hub ); +}; + +/** USB hub driver operations */ +struct usb_hub_driver_operations { + /** Open hub + * + * @v hub USB hub + * @ret rc Return status code + */ + int ( * open ) ( struct usb_hub *hub ); + /** Close hub + * + * @v hub USB hub + */ + void ( * close ) ( struct usb_hub *hub ); + /** Enable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ + int ( * enable ) ( struct usb_hub *hub, struct usb_port *port ); + /** Disable port + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ + int ( * disable ) ( struct usb_hub *hub, struct usb_port *port ); + /** Update port speed + * + * @v hub USB hub + * @v port USB port + * @ret rc Return status code + */ + int ( * speed ) ( struct 
usb_hub *hub, struct usb_port *port ); + /** Clear transaction translator buffer + * + * @v hub USB hub + * @v port USB port + * @v ep USB endpoint + * @ret rc Return status code + */ + int ( * clear_tt ) ( struct usb_hub *hub, struct usb_port *port, + struct usb_endpoint *ep ); +}; + +/** + * Set USB hub driver private data + * + * @v hub USB hub + * @v priv Driver private data + */ +static inline __attribute__ (( always_inline )) void +usb_hub_set_drvdata ( struct usb_hub *hub, void *priv ) { + hub->priv = priv; +} + +/** + * Get USB hub driver private data + * + * @v hub USB hub + * @ret priv Driver private data + */ +static inline __attribute__ (( always_inline )) void * +usb_hub_get_drvdata ( struct usb_hub *hub ) { + return hub->priv; +} + +/** + * Get USB port + * + * @v hub USB hub + * @v address Port address + * @ret port USB port + */ +static inline __attribute__ (( always_inline )) struct usb_port * +usb_port ( struct usb_hub *hub, unsigned int address ) { + + return &hub->port[ address - 1 ]; +} + +/** A USB bus */ +struct usb_bus { + /** Name */ + const char *name; + /** Underlying hardware device */ + struct device *dev; + /** Host controller operations set */ + struct usb_host_operations *op; + + /** Largest transfer allowed on the bus */ + size_t mtu; + /** Address in-use mask + * + * This is used only by buses which perform manual address + * assignment. USB allows for addresses in the range [1,127]. + * We use a simple bitmask which restricts us to the range + * [1,64]; this is unlikely to be a problem in practice. For + * comparison: controllers which perform autonomous address + * assignment (such as xHCI) typically allow for only 32 + * devices per bus anyway. 
+ */ + unsigned long long addresses; + + /** Root hub */ + struct usb_hub *hub; + + /** List of USB buses */ + struct list_head list; + /** List of devices */ + struct list_head devices; + /** List of hubs */ + struct list_head hubs; + + /** Host controller operations */ + struct usb_bus_host_operations *host; + /** Host controller private data */ + void *priv; +}; + +/** USB bus host controller operations */ +struct usb_bus_host_operations { + /** Open bus + * + * @v bus USB bus + * @ret rc Return status code + */ + int ( * open ) ( struct usb_bus *bus ); + /** Close bus + * + * @v bus USB bus + */ + void ( * close ) ( struct usb_bus *bus ); + /** Poll bus + * + * @v bus USB bus + */ + void ( * poll ) ( struct usb_bus *bus ); +}; + +/** USB host controller operations */ +struct usb_host_operations { + /** Endpoint operations */ + struct usb_endpoint_host_operations endpoint; + /** Device operations */ + struct usb_device_host_operations device; + /** Bus operations */ + struct usb_bus_host_operations bus; + /** Hub operations */ + struct usb_hub_host_operations hub; + /** Root hub operations */ + struct usb_hub_driver_operations root; +}; + +/** + * Set USB bus host controller private data + * + * @v bus USB bus + * @v priv Host controller private data + */ +static inline __attribute__ (( always_inline )) void +usb_bus_set_hostdata ( struct usb_bus *bus, void *priv ) { + bus->priv = priv; +} + +/** + * Get USB bus host controller private data + * + * @v bus USB bus + * @ret priv Host controller private data + */ +static inline __attribute__ (( always_inline )) void * +usb_bus_get_hostdata ( struct usb_bus *bus ) { + return bus->priv; +} + +/** + * Poll USB bus + * + * @v bus USB bus + */ +static inline __attribute__ (( always_inline )) void +usb_poll ( struct usb_bus *bus ) { + bus->host->poll ( bus ); +} + +/** Iterate over all USB buses */ +#define for_each_usb_bus( bus ) \ + list_for_each_entry ( (bus), &usb_buses, list ) + +/** + * Complete transfer (without 
error) + * + * @v ep USB endpoint + * @v iobuf I/O buffer + */ +static inline __attribute__ (( always_inline )) void +usb_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf ) { + usb_complete_err ( ep, iobuf, 0 ); +} + +extern int usb_control ( struct usb_device *usb, unsigned int request, + unsigned int value, unsigned int index, void *data, + size_t len ); +extern int usb_get_string_descriptor ( struct usb_device *usb, + unsigned int index, + unsigned int language, + char *buf, size_t len ); + +/** + * Get status + * + * @v usb USB device + * @v type Request type + * @v index Target index + * @v data Status to fill in + * @v len Length of status descriptor + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_get_status ( struct usb_device *usb, unsigned int type, unsigned int index, + void *data, size_t len ) { + + return usb_control ( usb, ( USB_GET_STATUS | type ), 0, index, + data, len ); +} + +/** + * Clear feature + * + * @v usb USB device + * @v type Request type + * @v feature Feature selector + * @v index Target index + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_clear_feature ( struct usb_device *usb, unsigned int type, + unsigned int feature, unsigned int index ) { + + return usb_control ( usb, ( USB_CLEAR_FEATURE | type ), + feature, index, NULL, 0 ); +} + +/** + * Set feature + * + * @v usb USB device + * @v type Request type + * @v feature Feature selector + * @v index Target index + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_set_feature ( struct usb_device *usb, unsigned int type, + unsigned int feature, unsigned int index ) { + + return usb_control ( usb, ( USB_SET_FEATURE | type ), + feature, index, NULL, 0 ); +} + +/** + * Set address + * + * @v usb USB device + * @v address Device address + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_set_address ( struct 
usb_device *usb, unsigned int address ) { + + return usb_control ( usb, USB_SET_ADDRESS, address, 0, NULL, 0 ); +} + +/** + * Get USB descriptor + * + * @v usb USB device + * @v type Request type + * @v desc Descriptor type + * @v index Descriptor index + * @v language Language ID (for string descriptors) + * @v data Descriptor to fill in + * @v len Maximum length of descriptor + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_get_descriptor ( struct usb_device *usb, unsigned int type, + unsigned int desc, unsigned int index, + unsigned int language, struct usb_descriptor_header *data, + size_t len ) { + + return usb_control ( usb, ( USB_GET_DESCRIPTOR | type ), + ( ( desc << 8 ) | index ), language, data, len ); +} + +/** + * Get first part of USB device descriptor (up to and including MTU) + * + * @v usb USB device + * @v data Device descriptor to (partially) fill in + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_get_mtu ( struct usb_device *usb, struct usb_device_descriptor *data ) { + + return usb_get_descriptor ( usb, 0, USB_DEVICE_DESCRIPTOR, 0, 0, + &data->header, + ( offsetof ( typeof ( *data ), mtu ) + + sizeof ( data->mtu ) ) ); +} + +/** + * Get USB device descriptor + * + * @v usb USB device + * @v data Device descriptor to fill in + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_get_device_descriptor ( struct usb_device *usb, + struct usb_device_descriptor *data ) { + + return usb_get_descriptor ( usb, 0, USB_DEVICE_DESCRIPTOR, 0, 0, + &data->header, sizeof ( *data ) ); +} + +/** + * Get USB configuration descriptor + * + * @v usb USB device + * @v index Configuration index + * @v data Configuration descriptor to fill in + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_get_config_descriptor ( struct usb_device *usb, unsigned int index, + struct usb_configuration_descriptor 
*data, + size_t len ) { + + return usb_get_descriptor ( usb, 0, USB_CONFIGURATION_DESCRIPTOR, index, + 0, &data->header, len ); +} + +/** + * Set USB configuration + * + * @v usb USB device + * @v index Configuration index + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_set_configuration ( struct usb_device *usb, unsigned int index ) { + + return usb_control ( usb, USB_SET_CONFIGURATION, index, 0, NULL, 0 ); +} + +/** + * Set USB interface alternate setting + * + * @v usb USB device + * @v interface Interface number + * @v alternate Alternate setting + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usb_set_interface ( struct usb_device *usb, unsigned int interface, + unsigned int alternate ) { + + return usb_control ( usb, USB_SET_INTERFACE, alternate, interface, + NULL, 0 ); +} + +extern struct list_head usb_buses; + +extern struct usb_interface_descriptor * +usb_interface_descriptor ( struct usb_configuration_descriptor *config, + unsigned int interface, unsigned int alternate ); +extern struct usb_endpoint_descriptor * +usb_endpoint_descriptor ( struct usb_configuration_descriptor *config, + struct usb_interface_descriptor *interface, + unsigned int type, unsigned int index ); +extern struct usb_endpoint_companion_descriptor * +usb_endpoint_companion_descriptor ( struct usb_configuration_descriptor *config, + struct usb_endpoint_descriptor *desc ); + +extern struct usb_hub * alloc_usb_hub ( struct usb_bus *bus, + struct usb_device *usb, + unsigned int ports, + struct usb_hub_driver_operations *op ); +extern int register_usb_hub ( struct usb_hub *hub ); +extern void unregister_usb_hub ( struct usb_hub *hub ); +extern void free_usb_hub ( struct usb_hub *hub ); + +extern void usb_port_changed ( struct usb_port *port ); + +extern struct usb_bus * alloc_usb_bus ( struct device *dev, + unsigned int ports, size_t mtu, + struct usb_host_operations *op ); +extern int register_usb_bus ( 
struct usb_bus *bus ); +extern void unregister_usb_bus ( struct usb_bus *bus ); +extern void free_usb_bus ( struct usb_bus *bus ); +extern struct usb_bus * find_usb_bus_by_location ( unsigned int bus_type, + unsigned int location ); + +extern int usb_alloc_address ( struct usb_bus *bus ); +extern void usb_free_address ( struct usb_bus *bus, unsigned int address ); +extern unsigned int usb_route_string ( struct usb_device *usb ); +extern unsigned int usb_depth ( struct usb_device *usb ); +extern struct usb_port * usb_root_hub_port ( struct usb_device *usb ); +extern struct usb_port * usb_transaction_translator ( struct usb_device *usb ); + +/** Minimum reset time + * + * Section 7.1.7.5 of the USB2 specification states that root hub + * ports should assert reset signalling for at least 50ms. + */ +#define USB_RESET_DELAY_MS 50 + +/** Reset recovery time + * + * Section 9.2.6.2 of the USB2 specification states that the + * "recovery" interval after a port reset is 10ms. + */ +#define USB_RESET_RECOVER_DELAY_MS 10 + +/** Maximum time to wait for a control transaction to complete + * + * Section 9.2.6.1 of the USB2 specification states that the upper + * limit for commands to be processed is 5 seconds. + */ +#define USB_CONTROL_MAX_WAIT_MS 5000 + +/** Set address recovery time + * + * Section 9.2.6.3 of the USB2 specification states that devices are + * allowed a 2ms recovery interval after receiving a new address. + */ +#define USB_SET_ADDRESS_RECOVER_DELAY_MS 2 + +/** Time to wait for ports to stabilise + * + * Section 7.1.7.3 of the USB specification states that we must allow + * 100ms for devices to signal attachment, and an additional 100ms for + * connection debouncing. (This delay is parallelised across all + * ports on a hub; we do not delay separately for each port.) 
+ */ +#define USB_PORT_DELAY_MS 200 + +/** A USB device ID */ +struct usb_device_id { + /** Name */ + const char *name; + /** Vendor ID */ + uint16_t vendor; + /** Product ID */ + uint16_t product; + /** Arbitrary driver data */ + unsigned long driver_data; +}; + +/** Match-anything ID */ +#define USB_ANY_ID 0xffff + +/** A USB class ID */ +struct usb_class_id { + /** Class */ + union usb_class_descriptor class; + /** Class mask */ + union usb_class_descriptor mask; +}; + +/** Construct USB class ID + * + * @v base Base class code (or USB_ANY_ID) + * @v subclass Subclass code (or USB_ANY_ID) + * @v protocol Protocol code (or USB_ANY_ID) + */ +#define USB_CLASS_ID( base, subclass, protocol ) { \ + .class = { \ + .class = { \ + ( (base) & 0xff ), \ + ( (subclass) & 0xff ), \ + ( (protocol) & 0xff ), \ + }, \ + }, \ + .mask = { \ + .class = { \ + ( ( (base) == USB_ANY_ID ) ? 0x00 : 0xff ), \ + ( ( (subclass) == USB_ANY_ID ) ? 0x00 : 0xff ), \ + ( ( (protocol) == USB_ANY_ID ) ? 0x00 : 0xff ), \ + }, \ + }, \ + } + +/** A USB driver */ +struct usb_driver { + /** USB ID table */ + struct usb_device_id *ids; + /** Number of entries in ID table */ + unsigned int id_count; + /** Class ID */ + struct usb_class_id class; + /** Driver score + * + * This is used to determine the preferred configuration for a + * USB device. 
+ */ + unsigned int score; + /** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ + int ( * probe ) ( struct usb_function *func, + struct usb_configuration_descriptor *config ); + /** + * Remove device + * + * @v func USB function + */ + void ( * remove ) ( struct usb_function *func ); +}; + +/** USB driver table */ +#define USB_DRIVERS __table ( struct usb_driver, "usb_drivers" ) + +/** Declare a USB driver */ +#define __usb_driver __table_entry ( USB_DRIVERS, 01 ) + +/** Declare a USB fallback driver */ +#define __usb_fallback_driver __table_entry ( USB_DRIVERS, 02 ) + +/** USB driver scores */ +enum usb_driver_score { + /** Fallback driver (has no effect on overall score) */ + USB_SCORE_FALLBACK = 0, + /** Deprecated driver */ + USB_SCORE_DEPRECATED = 1, + /** Normal driver */ + USB_SCORE_NORMAL = 2, +}; + +extern struct usb_driver * +usb_find_driver ( struct usb_function_descriptor *desc, + struct usb_device_id **id ); + +#endif /* _IPXE_USB_H */ diff --git a/src/include/ipxe/usbhid.h b/src/include/ipxe/usbhid.h new file mode 100644 index 00000000..233534e0 --- /dev/null +++ b/src/include/ipxe/usbhid.h @@ -0,0 +1,140 @@ +#ifndef _IPXE_USBHID_H +#define _IPXE_USBHID_H + +/** @file + * + * USB human interface devices (HID) + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** Class code for human interface devices */ +#define USB_CLASS_HID 3 + +/** Subclass code for boot devices */ +#define USB_SUBCLASS_HID_BOOT 1 + +/** Set protocol */ +#define USBHID_SET_PROTOCOL \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x0b ) ) + +/** Boot protocol */ +#define USBHID_PROTOCOL_BOOT 0 + +/** Report protocol */ +#define USBHID_PROTOCOL_REPORT 1 + +/** Set idle time */ +#define USBHID_SET_IDLE \ + ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x0a ) ) + +/** Set report */ +#define USBHID_SET_REPORT \ + ( USB_DIR_OUT | 
USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 0x09 ) ) + +/** Input report type */ +#define USBHID_REPORT_INPUT 0x01 + +/** Output report type */ +#define USBHID_REPORT_OUTPUT 0x02 + +/** Feature report type */ +#define USBHID_REPORT_FEATURE 0x03 + +/** A USB human interface device */ +struct usb_hid { + /** USB function */ + struct usb_function *func; + /** Interrupt IN endpoint */ + struct usb_endpoint in; + /** Interrupt OUT endpoint (optional) */ + struct usb_endpoint out; +}; + +/** + * Initialise USB human interface device + * + * @v hid USB human interface device + * @v func USB function + * @v in Interrupt IN endpoint operations + * @v out Interrupt OUT endpoint operations (or NULL) + */ +static inline __attribute__ (( always_inline )) void +usbhid_init ( struct usb_hid *hid, struct usb_function *func, + struct usb_endpoint_driver_operations *in, + struct usb_endpoint_driver_operations *out ) { + struct usb_device *usb = func->usb; + + hid->func = func; + usb_endpoint_init ( &hid->in, usb, in ); + if ( out ) + usb_endpoint_init ( &hid->out, usb, out ); +} + +/** + * Set protocol + * + * @v usb USB device + * @v interface Interface number + * @v protocol HID protocol + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usbhid_set_protocol ( struct usb_device *usb, unsigned int interface, + unsigned int protocol ) { + + return usb_control ( usb, USBHID_SET_PROTOCOL, protocol, interface, + NULL, 0 ); +} + +/** + * Set idle time + * + * @v usb USB device + * @v interface Interface number + * @v report Report ID + * @v duration Duration (in 4ms units) + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usbhid_set_idle ( struct usb_device *usb, unsigned int interface, + unsigned int report, unsigned int duration ) { + + return usb_control ( usb, USBHID_SET_IDLE, + ( ( duration << 8 ) | report ), + interface, NULL, 0 ); +} + +/** + * Set report + * + * @v usb USB device + * 
@v interface Interface number + * @v type Report type + * @v report Report ID + * @v data Report data + * @v len Length of report data + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +usbhid_set_report ( struct usb_device *usb, unsigned int interface, + unsigned int type, unsigned int report, void *data, + size_t len ) { + + return usb_control ( usb, USBHID_SET_REPORT, ( ( type << 8 ) | report ), + interface, data, len ); +} + +extern int usbhid_open ( struct usb_hid *hid ); +extern void usbhid_close ( struct usb_hid *hid ); +extern int usbhid_refill ( struct usb_hid *hid ); +extern int usbhid_describe ( struct usb_hid *hid, + struct usb_configuration_descriptor *config ); + +#endif /* _IPXE_USBHID_H */ diff --git a/src/include/ipxe/usbnet.h b/src/include/ipxe/usbnet.h new file mode 100644 index 00000000..a7276eba --- /dev/null +++ b/src/include/ipxe/usbnet.h @@ -0,0 +1,74 @@ +#ifndef _IPXE_USBNET_H +#define _IPXE_USBNET_H + +/** @file + * + * USB network devices + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** A USB network device */ +struct usbnet_device { + /** USB function */ + struct usb_function *func; + + /** Communications interface */ + unsigned int comms; + /** Data interface */ + unsigned int data; + /** Alternate setting for data interface */ + unsigned int alternate; + + /** Interrupt endpoint */ + struct usb_endpoint intr; + /** Bulk IN endpoint */ + struct usb_endpoint in; + /** Bulk OUT endpoint */ + struct usb_endpoint out; +}; + +/** + * Initialise USB network device + * + * @v usbnet USB network device + * @v func USB function + * @v intr Interrupt endpoint operations, or NULL + * @v in Bulk IN endpoint operations + * @v out Bulk OUT endpoint operations + */ +static inline __attribute__ (( always_inline )) void +usbnet_init ( struct usbnet_device *usbnet, struct usb_function *func, + struct usb_endpoint_driver_operations *intr, + struct usb_endpoint_driver_operations *in, + struct 
usb_endpoint_driver_operations *out ) { + struct usb_device *usb = func->usb; + + usbnet->func = func; + usb_endpoint_init ( &usbnet->intr, usb, intr ); + usb_endpoint_init ( &usbnet->in, usb, in ); + usb_endpoint_init ( &usbnet->out, usb, out ); +} + +/** + * Check if USB network device has an interrupt endpoint + * + * @v usbnet USB network device + * @ret has_intr Device has an interrupt endpoint + */ +static inline __attribute__ (( always_inline )) int +usbnet_has_intr ( struct usbnet_device *usbnet ) { + + return ( usbnet->intr.driver != NULL ); +} + +extern int usbnet_open ( struct usbnet_device *usbnet ); +extern void usbnet_close ( struct usbnet_device *usbnet ); +extern int usbnet_refill ( struct usbnet_device *usbnet ); +extern int usbnet_describe ( struct usbnet_device *usbnet, + struct usb_configuration_descriptor *config ); + +#endif /* _IPXE_USBNET_H */ diff --git a/src/include/ipxe/vmbus.h b/src/include/ipxe/vmbus.h new file mode 100644 index 00000000..68244185 --- /dev/null +++ b/src/include/ipxe/vmbus.h @@ -0,0 +1,660 @@ +#ifndef _IPXE_VMBUS_H +#define _IPXE_VMBUS_H + +/** @file + * + * Hyper-V virtual machine bus + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** VMBus message connection ID */ +#define VMBUS_MESSAGE_ID 1 + +/** VMBus event connection ID */ +#define VMBUS_EVENT_ID 2 + +/** VMBus message type */ +#define VMBUS_MESSAGE_TYPE 1 + +/** VMBus message synthetic interrupt */ +#define VMBUS_MESSAGE_SINT 2 + +/** VMBus version number */ +union vmbus_version { + /** Raw version */ + uint32_t raw; + /** Major/minor version */ + struct { + /** Minor version */ + uint16_t minor; + /** Major version */ + uint16_t major; + }; +} __attribute__ (( packed )); + +/** Known VMBus protocol versions */ +enum vmbus_raw_version { + /** Windows Server 2008 */ + VMBUS_VERSION_WS2008 = ( ( 0 << 16 ) | ( 13 << 0 ) ), + /** Windows 7 */ + VMBUS_VERSION_WIN7 = ( ( 1 << 16 ) | ( 1 << 
0 ) ), + /** Windows 8 */ + VMBUS_VERSION_WIN8 = ( ( 2 << 16 ) | ( 4 << 0 ) ), + /** Windows 8.1 */ + VMBUS_VERSION_WIN8_1 = ( ( 3 << 16 ) | ( 0 << 0 ) ), +}; + +/** Guest physical address range descriptor */ +struct vmbus_gpa_range { + /** Byte count */ + uint32_t len; + /** Starting byte offset */ + uint32_t offset; + /** Page frame numbers + * + * The length of this array is implied by the byte count and + * starting offset. + */ + uint64_t pfn[0]; +} __attribute__ (( packed )); + +/** VMBus message header */ +struct vmbus_message_header { + /** Message type */ + uint32_t type; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** VMBus message types */ +enum vmbus_message_type { + VMBUS_OFFER_CHANNEL = 1, + VMBUS_REQUEST_OFFERS = 3, + VMBUS_ALL_OFFERS_DELIVERED = 4, + VMBUS_OPEN_CHANNEL = 5, + VMBUS_OPEN_CHANNEL_RESULT = 6, + VMBUS_CLOSE_CHANNEL = 7, + VMBUS_GPADL_HEADER = 8, + VMBUS_GPADL_CREATED = 10, + VMBUS_GPADL_TEARDOWN = 11, + VMBUS_GPADL_TORNDOWN = 12, + VMBUS_INITIATE_CONTACT = 14, + VMBUS_VERSION_RESPONSE = 15, + VMBUS_UNLOAD = 16, + VMBUS_UNLOAD_RESPONSE = 17, +}; + +/** VMBus "offer channel" message */ +struct vmbus_offer_channel { + /** Message header */ + struct vmbus_message_header header; + /** Channel type */ + union uuid type; + /** Channel instance */ + union uuid instance; + /** Reserved */ + uint8_t reserved_a[16]; + /** Flags */ + uint16_t flags; + /** Reserved */ + uint8_t reserved_b[2]; + /** User data */ + uint8_t data[120]; + /** Reserved */ + uint8_t reserved_c[4]; + /** Channel ID */ + uint32_t channel; + /** Monitor ID */ + uint8_t monitor; + /** Monitor exists */ + uint8_t monitored; + /** Reserved */ + uint8_t reserved[2]; + /** Connection ID */ + uint32_t connection; +} __attribute__ (( packed )); + +/** VMBus "open channel" message */ +struct vmbus_open_channel { + /** Message header */ + struct vmbus_message_header header; + /** Channel ID */ + uint32_t channel; + /** Open ID */ + uint32_t id; + /** Ring 
buffer GPADL ID */ + uint32_t gpadl; + /** Reserved */ + uint32_t reserved; + /** Outbound ring buffer size (in pages) */ + uint32_t out_pages; + /** User-specific data */ + uint8_t data[120]; +} __attribute__ (( packed )); + +/** VMBus "open channel result" message */ +struct vmbus_open_channel_result { + /** Message header */ + struct vmbus_message_header header; + /** Channel ID */ + uint32_t channel; + /** Open ID */ + uint32_t id; + /** Status */ + uint32_t status; +} __attribute__ (( packed )); + +/** VMBus "close channel" message */ +struct vmbus_close_channel { + /** Message header */ + struct vmbus_message_header header; + /** Channel ID */ + uint32_t channel; +} __attribute__ (( packed )); + +/** VMBus "GPADL header" message */ +struct vmbus_gpadl_header { + /** Message header */ + struct vmbus_message_header header; + /** Channel ID */ + uint32_t channel; + /** GPADL ID */ + uint32_t gpadl; + /** Length of range descriptors */ + uint16_t range_len; + /** Number of range descriptors */ + uint16_t range_count; + /** Range descriptors */ + struct vmbus_gpa_range range[0]; +} __attribute__ (( packed )); + +/** VMBus "GPADL created" message */ +struct vmbus_gpadl_created { + /** Message header */ + struct vmbus_message_header header; + /** Channel ID */ + uint32_t channel; + /** GPADL ID */ + uint32_t gpadl; + /** Creation status */ + uint32_t status; +} __attribute__ (( packed )); + +/** VMBus "GPADL teardown" message */ +struct vmbus_gpadl_teardown { + /** Message header */ + struct vmbus_message_header header; + /** Channel ID */ + uint32_t channel; + /** GPADL ID */ + uint32_t gpadl; +} __attribute__ (( packed )); + +/** VMBus "GPADL torndown" message */ +struct vmbus_gpadl_torndown { + /** Message header */ + struct vmbus_message_header header; + /** GPADL ID */ + uint32_t gpadl; +} __attribute__ (( packed )); + +/** VMBus "initiate contact" message */ +struct vmbus_initiate_contact { + /** Message header */ + struct vmbus_message_header header; + /** 
Requested version */ + union vmbus_version version; + /** Target virtual CPU */ + uint32_t vcpu; + /** Interrupt page base address */ + uint64_t intr; + /** Parent to child monitor page base address */ + uint64_t monitor_in; + /** Child to parent monitor page base address */ + uint64_t monitor_out; +} __attribute__ (( packed )); + +/** VMBus "version response" message */ +struct vmbus_version_response { + /** Message header */ + struct vmbus_message_header header; + /** Version is supported */ + uint8_t supported; + /** Reserved */ + uint8_t reserved[3]; + /** Version */ + union vmbus_version version; +} __attribute__ (( packed )); + +/** VMBus message */ +union vmbus_message { + /** Common message header */ + struct vmbus_message_header header; + /** "Offer channel" message */ + struct vmbus_offer_channel offer; + /** "Open channel" message */ + struct vmbus_open_channel open; + /** "Open channel result" message */ + struct vmbus_open_channel_result opened; + /** "Close channel" message */ + struct vmbus_close_channel close; + /** "GPADL header" message */ + struct vmbus_gpadl_header gpadlhdr; + /** "GPADL created" message */ + struct vmbus_gpadl_created created; + /** "GPADL teardown" message */ + struct vmbus_gpadl_teardown teardown; + /** "GPADL torndown" message */ + struct vmbus_gpadl_torndown torndown; + /** "Initiate contact" message */ + struct vmbus_initiate_contact initiate; + /** "Version response" message */ + struct vmbus_version_response version; +}; + +/** VMBus packet header */ +struct vmbus_packet_header { + /** Type */ + uint16_t type; + /** Length of packet header (in quadwords) */ + uint16_t hdr_qlen; + /** Length of packet (in quadwords) */ + uint16_t qlen; + /** Flags */ + uint16_t flags; + /** Transaction ID + * + * This is an opaque token: we therefore treat it as + * native-endian and don't worry about byte-swapping. 
+ */ + uint64_t xid; +} __attribute__ (( packed )); + +/** VMBus packet types */ +enum vmbus_packet_type { + VMBUS_DATA_INBAND = 6, + VMBUS_DATA_XFER_PAGES = 7, + VMBUS_DATA_GPA_DIRECT = 9, + VMBUS_CANCELLATION = 10, + VMBUS_COMPLETION = 11, +}; + +/** VMBus packet flags */ +enum vmbus_packet_flags { + VMBUS_COMPLETION_REQUESTED = 0x0001, +}; + +/** VMBus GPA direct header */ +struct vmbus_gpa_direct_header { + /** Packet header */ + struct vmbus_packet_header header; + /** Reserved */ + uint32_t reserved; + /** Number of range descriptors */ + uint32_t range_count; + /** Range descriptors */ + struct vmbus_gpa_range range[0]; +} __attribute__ (( packed )); + +/** VMBus transfer page range */ +struct vmbus_xfer_page_range { + /** Length */ + uint32_t len; + /** Offset */ + uint32_t offset; +} __attribute__ (( packed )); + +/** VMBus transfer page header */ +struct vmbus_xfer_page_header { + /** Packet header */ + struct vmbus_packet_header header; + /** Page set ID */ + uint16_t pageset; + /** Sender owns page set */ + uint8_t owner; + /** Reserved */ + uint8_t reserved; + /** Number of range descriptors */ + uint32_t range_count; + /** Range descriptors */ + struct vmbus_xfer_page_range range[0]; +} __attribute__ (( packed )); + +/** Maximum expected size of VMBus packet header */ +#define VMBUS_PACKET_MAX_HEADER_LEN 64 + +/** VMBus maximum-sized packet header */ +union vmbus_packet_header_max { + /** Common header */ + struct vmbus_packet_header header; + /** GPA direct header */ + struct vmbus_gpa_direct_header gpa; + /** Transfer page header */ + struct vmbus_xfer_page_header xfer; + /** Padding to maximum supported size */ + uint8_t padding[VMBUS_PACKET_MAX_HEADER_LEN]; +} __attribute__ (( packed )); + +/** VMBus packet footer */ +struct vmbus_packet_footer { + /** Reserved */ + uint32_t reserved; + /** Producer index of the first byte of the packet */ + uint32_t prod; +} __attribute__ (( packed )); + +/** VMBus ring buffer + * + * This is the structure of the 
each of the ring buffers created when + * a VMBus channel is opened. + */ +struct vmbus_ring { + /** Producer index (modulo ring length) */ + uint32_t prod; + /** Consumer index (modulo ring length) */ + uint32_t cons; + /** Interrupt mask */ + uint32_t intr_mask; + /** Reserved */ + uint8_t reserved[4084]; + /** Ring buffer contents */ + uint8_t data[0]; +} __attribute__ (( packed )); + +/** VMBus interrupt page */ +struct vmbus_interrupt { + /** Inbound interrupts */ + uint8_t in[ PAGE_SIZE / 2 ]; + /** Outbound interrupts */ + uint8_t out[ PAGE_SIZE / 2 ]; +} __attribute__ (( packed )); + +/** A virtual machine bus */ +struct vmbus { + /** Interrupt page */ + struct vmbus_interrupt *intr; + /** Inbound notifications */ + struct hv_monitor *monitor_in; + /** Outbound notifications */ + struct hv_monitor *monitor_out; + /** Received message buffer */ + const union vmbus_message *message; +}; + +struct vmbus_device; + +/** VMBus channel operations */ +struct vmbus_channel_operations { + /** + * Handle received control packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ + int ( * recv_control ) ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ); + /** + * Handle received data packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @v list List of I/O buffers + * @ret rc Return status code + * + * This function takes ownership of the I/O buffer. It should + * eventually call vmbus_send_completion() to indicate to the + * host that the buffer can be reused. 
+ */ + int ( * recv_data ) ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len, + struct list_head *list ); + /** + * Handle received completion packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @ret rc Return status code + */ + int ( * recv_completion ) ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ); + /** + * Handle received cancellation packet + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @ret rc Return status code + */ + int ( * recv_cancellation ) ( struct vmbus_device *vmdev, + uint64_t xid ); +}; + +struct vmbus_xfer_pages; + +/** VMBus transfer page set operations */ +struct vmbus_xfer_pages_operations { + /** + * Copy data from transfer page + * + * @v pages Transfer page set + * @v data Data buffer + * @v offset Offset within page set + * @v len Length within page set + * @ret rc Return status code + */ + int ( * copy ) ( struct vmbus_xfer_pages *pages, void *data, + size_t offset, size_t len ); +}; + +/** VMBus transfer page set */ +struct vmbus_xfer_pages { + /** List of all transfer page sets */ + struct list_head list; + /** Page set ID (in protocol byte order) */ + uint16_t pageset; + /** Page set operations */ + struct vmbus_xfer_pages_operations *op; +}; + +/** A VMBus device */ +struct vmbus_device { + /** Generic iPXE device */ + struct device dev; + /** Hyper-V hypervisor */ + struct hv_hypervisor *hv; + + /** Channel instance */ + union uuid instance; + /** Channel ID */ + unsigned int channel; + /** Monitor ID */ + unsigned int monitor; + /** Signal channel + * + * @v vmdev VMBus device + */ + void ( * signal ) ( struct vmbus_device *vmdev ); + + /** Outbound ring buffer length */ + uint32_t out_len; + /** Inbound ring buffer length */ + uint32_t in_len; + /** Outbound ring buffer */ + struct vmbus_ring *out; + /** Inbound ring buffer */ + struct vmbus_ring *in; + /** Ring buffer GPADL ID */ + unsigned int gpadl; + + 
/** Channel operations */ + struct vmbus_channel_operations *op; + /** Maximum expected data packet length */ + size_t mtu; + /** Packet buffer */ + void *packet; + /** List of transfer page sets */ + struct list_head pages; + + /** Driver */ + struct vmbus_driver *driver; + /** Driver-private data */ + void *priv; +}; + +/** A VMBus device driver */ +struct vmbus_driver { + /** Name */ + const char *name; + /** Device type */ + union uuid type; + /** Probe device + * + * @v vmdev VMBus device + * @ret rc Return status code + */ + int ( * probe ) ( struct vmbus_device *vmdev ); + /** Reset device + * + * @v vmdev VMBus device + * @ret rc Return status code + */ + int ( * reset ) ( struct vmbus_device *vmdev ); + /** Remove device + * + * @v vmdev VMBus device + */ + void ( * remove ) ( struct vmbus_device *vmdev ); +}; + +/** VMBus device driver table */ +#define VMBUS_DRIVERS __table ( struct vmbus_driver, "vmbus_drivers" ) + +/** Declare a VMBus device driver */ +#define __vmbus_driver __table_entry ( VMBUS_DRIVERS, 01 ) + +/** + * Set VMBus device driver-private data + * + * @v vmdev VMBus device + * @v priv Private data + */ +static inline void vmbus_set_drvdata ( struct vmbus_device *vmdev, void *priv ){ + vmdev->priv = priv; +} + +/** + * Get VMBus device driver-private data + * + * @v vmdev VMBus device + * @ret priv Private data + */ +static inline void * vmbus_get_drvdata ( struct vmbus_device *vmdev ) { + return vmdev->priv; +} + +/** Construct VMBus type */ +#define VMBUS_TYPE( a, b, c, d, e0, e1, e2, e3, e4, e5 ) { \ + .canonical = { \ + cpu_to_le32 ( a ), cpu_to_le16 ( b ), \ + cpu_to_le16 ( c ), cpu_to_be16 ( d ), \ + { e0, e1, e2, e3, e4, e5 } \ + } } + +/** + * Check if data is present in ring buffer + * + * @v vmdev VMBus device + * @v has_data Data is present + */ +static inline __attribute__ (( always_inline )) int +vmbus_has_data ( struct vmbus_device *vmdev ) { + + return ( vmdev->in->prod != vmdev->in->cons ); +} + +/** + * Register transfer 
page set + * + * @v vmdev VMBus device + * @v pages Transfer page set + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +vmbus_register_pages ( struct vmbus_device *vmdev, + struct vmbus_xfer_pages *pages ) { + + list_add ( &pages->list, &vmdev->pages ); + return 0; +} + +/** + * Unregister transfer page set + * + * @v vmdev VMBus device + * @v pages Transfer page set + */ +static inline __attribute__ (( always_inline )) void +vmbus_unregister_pages ( struct vmbus_device *vmdev, + struct vmbus_xfer_pages *pages ) { + + list_check_contains_entry ( pages, &vmdev->pages, list ); + list_del ( &pages->list ); +} + +extern unsigned int vmbus_obsolete_gpadl; + +/** + * Check if GPADL is obsolete + * + * @v gpadl GPADL ID + * @v is_obsolete GPADL ID is obsolete + * + * Check if GPADL is obsolete (i.e. was created before the most recent + * Hyper-V reset). + */ +static inline __attribute__ (( always_inline )) int +vmbus_gpadl_is_obsolete ( unsigned int gpadl ) { + + return ( gpadl <= vmbus_obsolete_gpadl ); +} + +extern int vmbus_establish_gpadl ( struct vmbus_device *vmdev, userptr_t data, + size_t len ); +extern int vmbus_gpadl_teardown ( struct vmbus_device *vmdev, + unsigned int gpadl ); +extern int vmbus_open ( struct vmbus_device *vmdev, + struct vmbus_channel_operations *op, + size_t out_len, size_t in_len, size_t mtu ); +extern void vmbus_close ( struct vmbus_device *vmdev ); +extern int vmbus_send_control ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ); +extern int vmbus_send_data ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len, + struct io_buffer *iobuf ); +extern int vmbus_send_completion ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ); +extern int vmbus_send_cancellation ( struct vmbus_device *vmdev, uint64_t xid ); +extern int vmbus_poll ( struct vmbus_device *vmdev ); +extern void vmbus_dump_channel ( struct vmbus_device *vmdev ); + 
+extern int vmbus_probe ( struct hv_hypervisor *hv, struct device *parent ); +extern int vmbus_reset ( struct hv_hypervisor *hv, struct device *parent ); +extern void vmbus_remove ( struct hv_hypervisor *hv, struct device *parent ); + +#endif /* _IPXE_VMBUS_H */ diff --git a/src/include/ipxe/xen.h b/src/include/ipxe/xen.h new file mode 100644 index 00000000..0fb8b762 --- /dev/null +++ b/src/include/ipxe/xen.h @@ -0,0 +1,89 @@ +#ifndef _IPXE_XEN_H +#define _IPXE_XEN_H + +/** @file + * + * Xen interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/* Define Xen interface version before including any Xen header files */ +#define __XEN_INTERFACE_VERSION__ 0x00040400 + +#include +#include +#include +#include +#include + +/* Memory barrier macros used by ring.h */ +#define xen_mb() mb() +#define xen_rmb() rmb() +#define xen_wmb() wmb() + +struct xen_hypercall; + +/** A Xen grant table */ +struct xen_grant { + /** Grant table entries */ + struct grant_entry_v1 *table; + /** Total grant table length */ + size_t len; + /** Entry size shift (for later version tables) */ + unsigned int shift; + /** Number of grant table entries in use */ + unsigned int used; + /** Most recently used grant reference */ + unsigned int ref; +}; + +/** A XenStore */ +struct xen_store { + /** XenStore domain interface */ + struct xenstore_domain_interface *intf; + /** Event channel */ + evtchn_port_t port; +}; + +/** A Xen hypervisor */ +struct xen_hypervisor { + /** Hypercall table */ + struct xen_hypercall *hypercall; + /** Shared info page */ + struct shared_info *shared; + /** Grant table */ + struct xen_grant grant; + /** XenStore */ + struct xen_store store; +}; + +/** + * Test and clear pending event + * + * @v xen Xen hypervisor + * @v port Event channel port + * @ret pending Event was pending + */ +static inline __attribute__ (( always_inline )) int +xenevent_pending ( struct xen_hypervisor *xen, evtchn_port_t port ) { + + return test_and_clear_bit ( port, 
xen->shared->evtchn_pending ); +} + +#include + +/** + * Convert a Xen status code to an iPXE status code + * + * @v xenrc Xen status code (negated) + * @ret rc iPXE status code (before negation) + * + * Xen status codes are defined in the file include/xen/errno.h in the + * Xen repository. They happen to match the Linux error codes, some + * of which can be found in our include/ipxe/errno/linux.h. + */ +#define EXEN( xenrc ) EPLATFORM ( EINFO_EPLATFORM, -(xenrc) ) + +#endif /* _IPXE_XEN_H */ diff --git a/src/include/ipxe/xenbus.h b/src/include/ipxe/xenbus.h new file mode 100644 index 00000000..ec5782ee --- /dev/null +++ b/src/include/ipxe/xenbus.h @@ -0,0 +1,86 @@ +#ifndef _IPXE_XENBUS_H +#define _IPXE_XENBUS_H + +/** @file + * + * Xen device bus + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** A Xen device */ +struct xen_device { + /** Generic iPXE device */ + struct device dev; + /** Xen hypervisor */ + struct xen_hypervisor *xen; + /** XenStore key */ + char *key; + /** Backend XenStore key */ + char *backend; + /** Backend domain ID */ + unsigned long backend_id; + /** Driver */ + struct xen_driver *driver; + /** Driver-private data */ + void *priv; +}; + +/** A Xen device driver */ +struct xen_driver { + /** Name */ + const char *name; + /** Device type */ + const char *type; + /** Probe device + * + * @v xendev Xen device + * @ret rc Return status code + */ + int ( * probe ) ( struct xen_device *xendev ); + /** Remove device + * + * @v xendev Xen device + */ + void ( * remove ) ( struct xen_device *xendev ); +}; + +/** Xen device driver table */ +#define XEN_DRIVERS __table ( struct xen_driver, "xen_drivers" ) + +/** Declare a Xen device driver */ +#define __xen_driver __table_entry ( XEN_DRIVERS, 01 ) + +/** + * Set Xen device driver-private data + * + * @v xendev Xen device + * @v priv Private data + */ +static inline void xen_set_drvdata ( struct xen_device *xendev, void *priv ) { + xendev->priv = priv; 
+} + +/** + * Get Xen device driver-private data + * + * @v xendev Xen device + * @ret priv Private data + */ +static inline void * xen_get_drvdata ( struct xen_device *xendev ) { + return xendev->priv; +} + +extern int xenbus_set_state ( struct xen_device *xendev, int state ); +extern int xenbus_backend_state ( struct xen_device *xendev ); +extern int xenbus_backend_wait ( struct xen_device *xendev, int state ); +extern int xenbus_probe ( struct xen_hypervisor *xen, struct device *parent ); +extern void xenbus_remove ( struct xen_hypervisor *xen, struct device *parent ); + +#endif /* _IPXE_XENBUS_H */ diff --git a/src/include/ipxe/xenevent.h b/src/include/ipxe/xenevent.h new file mode 100644 index 00000000..f0bd3465 --- /dev/null +++ b/src/include/ipxe/xenevent.h @@ -0,0 +1,59 @@ +#ifndef _IPXE_XENEVENT_H +#define _IPXE_XENEVENT_H + +/** @file + * + * Xen events + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** + * Close event channel + * + * @v xen Xen hypervisor + * @v close Event descriptor + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xenevent_close ( struct xen_hypervisor *xen, struct evtchn_close *close ) { + + return xen_hypercall_2 ( xen, __HYPERVISOR_event_channel_op, + EVTCHNOP_close, virt_to_phys ( close ) ); +} + +/** + * Send event + * + * @v xen Xen hypervisor + * @v send Event descriptor + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xenevent_send ( struct xen_hypervisor *xen, struct evtchn_send *send ) { + + return xen_hypercall_2 ( xen, __HYPERVISOR_event_channel_op, + EVTCHNOP_send, virt_to_phys ( send ) ); +} + +/** + * Allocate an unbound event channel + * + * @v xen Xen hypervisor + * @v alloc_unbound Event descriptor + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xenevent_alloc_unbound ( struct xen_hypervisor *xen, + struct evtchn_alloc_unbound *alloc_unbound ) { + + return 
xen_hypercall_2 ( xen, __HYPERVISOR_event_channel_op, + EVTCHNOP_alloc_unbound, + virt_to_phys ( alloc_unbound ) ); +} + +#endif /* _IPXE_XENEVENT_H */ diff --git a/src/include/ipxe/xengrant.h b/src/include/ipxe/xengrant.h new file mode 100644 index 00000000..451a3cee --- /dev/null +++ b/src/include/ipxe/xengrant.h @@ -0,0 +1,232 @@ +#ifndef _IPXE_XENGRANT_H +#define _IPXE_XENGRANT_H + +/** @file + * + * Xen grant tables + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** Induced failure rate (for testing) */ +#define XENGRANT_FAIL_RATE 0 + +/** + * Query grant table size + * + * @v xen Xen hypervisor + * @v size Table size + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xengrant_query_size ( struct xen_hypervisor *xen, + struct gnttab_query_size *size ) { + + return xen_hypercall_3 ( xen, __HYPERVISOR_grant_table_op, + GNTTABOP_query_size, + virt_to_phys ( size ), 1 ); +} + +/** + * Set grant table version + * + * @v xen Xen hypervisor + * @v version Version + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xengrant_set_version ( struct xen_hypervisor *xen, + struct gnttab_set_version *version ) { + + return xen_hypercall_3 ( xen, __HYPERVISOR_grant_table_op, + GNTTABOP_set_version, + virt_to_phys ( version ), 1 ); +} + +/** + * Get grant table version + * + * @v xen Xen hypervisor + * @v version Version + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xengrant_get_version ( struct xen_hypervisor *xen, + struct gnttab_get_version *version ) { + + return xen_hypercall_3 ( xen, __HYPERVISOR_grant_table_op, + GNTTABOP_get_version, + virt_to_phys ( version ), 1 ); +} + +/** + * Get number of grant table entries + * + * @v xen Xen hypervisor + * @ret entries Number of grant table entries + */ +static inline __attribute__ (( always_inline )) unsigned int +xengrant_entries ( struct 
xen_hypervisor *xen ) { + + return ( ( xen->grant.len / sizeof ( xen->grant.table[0] ) ) + >> xen->grant.shift ); +} + +/** + * Get grant table entry header + * + * @v xen Xen hypervisor + * @v ref Grant reference + * @ret hdr Grant table entry header + */ +static inline __attribute__ (( always_inline )) struct grant_entry_header * +xengrant_header ( struct xen_hypervisor *xen, grant_ref_t ref ) { + struct grant_entry_v1 *v1; + + v1 = &xen->grant.table[ ref << xen->grant.shift ]; + return ( container_of ( &v1->flags, struct grant_entry_header, flags )); +} + +/** + * Get version 1 grant table entry + * + * @v hdr Grant table entry header + * @ret v1 Version 1 grant table entry + */ +static inline __attribute__ (( always_inline )) struct grant_entry_v1 * +xengrant_v1 ( struct grant_entry_header *hdr ) { + + return ( container_of ( &hdr->flags, struct grant_entry_v1, flags ) ); +} + +/** + * Get version 2 grant table entry + * + * @v hdr Grant table entry header + * @ret v2 Version 2 grant table entry + */ +static inline __attribute__ (( always_inline )) union grant_entry_v2 * +xengrant_v2 ( struct grant_entry_header *hdr ) { + + return ( container_of ( &hdr->flags, union grant_entry_v2, hdr.flags )); +} + +/** + * Zero grant table entry + * + * @v xen Xen hypervisor + * @v hdr Grant table entry header + */ +static inline void xengrant_zero ( struct xen_hypervisor *xen, + struct grant_entry_header *hdr ) { + uint32_t *dword = ( ( uint32_t * ) hdr ); + unsigned int i = ( ( sizeof ( xen->grant.table[0] ) / sizeof ( *dword )) + << xen->grant.shift ); + + while ( i-- ) + writel ( 0, dword++ ); +} + +/** + * Invalidate access to a page + * + * @v xen Xen hypervisor + * @v ref Grant reference + */ +static inline __attribute__ (( always_inline )) void +xengrant_invalidate ( struct xen_hypervisor *xen, grant_ref_t ref ) { + struct grant_entry_header *hdr = xengrant_header ( xen, ref ); + + /* Sanity check */ + assert ( ( readw ( &hdr->flags ) & + ( GTF_reading | GTF_writing 
) ) == 0 ); + + /* This should apparently be done using a cmpxchg instruction. + * We omit this: partly in the interests of simplicity, but + * mainly since our control flow generally does not permit + * failure paths to themselves fail. + */ + writew ( 0, &hdr->flags ); + + /* Leave reference marked as in-use (see xengrant_alloc()) */ + writew ( DOMID_SELF, &hdr->domid ); +} + +/** + * Permit access to a page + * + * @v xen Xen hypervisor + * @v ref Grant reference + * @v domid Domain ID + * @v subflags Additional flags + * @v page Page start + * @ret rc Return status code + */ +static inline __attribute__ (( always_inline )) int +xengrant_permit_access ( struct xen_hypervisor *xen, grant_ref_t ref, + domid_t domid, unsigned int subflags, void *page ) { + struct grant_entry_header *hdr = xengrant_header ( xen, ref ); + struct grant_entry_v1 *v1 = xengrant_v1 ( hdr ); + union grant_entry_v2 *v2 = xengrant_v2 ( hdr ); + unsigned long frame = ( virt_to_phys ( page ) / PAGE_SIZE ); + + /* Fail (for test purposes) if applicable */ + if ( ( XENGRANT_FAIL_RATE > 0 ) && + ( random() % XENGRANT_FAIL_RATE ) == 0 ) { + return -EAGAIN; + } + + /* Record frame number. This may fail on a 64-bit system if + * we are using v1 grant tables. On a 32-bit system, there is + * no way for this code path to fail (with either v1 or v2 + * grant tables); we allow the compiler to optimise the + * failure paths away to save space. 
+ */ + if ( sizeof ( physaddr_t ) == sizeof ( uint64_t ) ) { + + /* 64-bit system */ + if ( xen->grant.shift ) { + /* Version 2 table: no possible failure */ + writeq ( frame, &v2->full_page.frame ); + } else { + /* Version 1 table: may fail if address above 16TB */ + if ( frame > 0xffffffffUL ) + return -ERANGE; + writel ( frame, &v1->frame ); + } + + } else { + + /* 32-bit system */ + if ( xen->grant.shift ) { + /* Version 2 table: no possible failure */ + writel ( frame, &v2->full_page.frame ); + } else { + /* Version 1 table: no possible failure */ + writel ( frame, &v1->frame ); + } + } + + /* Record domain ID and flags */ + writew ( domid, &hdr->domid ); + wmb(); + writew ( ( GTF_permit_access | subflags ), &hdr->flags ); + wmb(); + + return 0; +} + +extern int xengrant_init ( struct xen_hypervisor *xen ); +extern int xengrant_alloc ( struct xen_hypervisor *xen, grant_ref_t *refs, + unsigned int count ); +extern void xengrant_free ( struct xen_hypervisor *xen, grant_ref_t *refs, + unsigned int count ); + +#endif /* _IPXE_XENGRANT_H */ diff --git a/src/include/ipxe/xenmem.h b/src/include/ipxe/xenmem.h new file mode 100644 index 00000000..dcc38d46 --- /dev/null +++ b/src/include/ipxe/xenmem.h @@ -0,0 +1,46 @@ +#ifndef _IPXE_XENMEM_H +#define _IPXE_XENMEM_H + +/** @file + * + * Xen memory operations + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** + * Add page to physical address space + * + * @v xen Xen hypervisor + * @v add Page mapping descriptor + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xenmem_add_to_physmap ( struct xen_hypervisor *xen, + struct xen_add_to_physmap *add ) { + + return xen_hypercall_2 ( xen, __HYPERVISOR_memory_op, + XENMEM_add_to_physmap, virt_to_phys ( add ) ); +} + +/** + * Remove page from physical address space + * + * @v xen Xen hypervisor + * @v remove Page mapping descriptor + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline 
)) int +xenmem_remove_from_physmap ( struct xen_hypervisor *xen, + struct xen_remove_from_physmap *remove ) { + + return xen_hypercall_2 ( xen, __HYPERVISOR_memory_op, + XENMEM_remove_from_physmap, + virt_to_phys ( remove ) ); +} + +#endif /* _IPXE_XENMEM_H */ diff --git a/src/include/ipxe/xenstore.h b/src/include/ipxe/xenstore.h new file mode 100644 index 00000000..89264075 --- /dev/null +++ b/src/include/ipxe/xenstore.h @@ -0,0 +1,29 @@ +#ifndef _IPXE_XENSTORE_H +#define _IPXE_XENSTORE_H + +/** @file + * + * XenStore interface + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern __attribute__ (( sentinel )) int +xenstore_read ( struct xen_hypervisor *xen, char **value, ... ); +extern __attribute__ (( sentinel )) int +xenstore_read_num ( struct xen_hypervisor *xen, unsigned long *num, ... ); +extern __attribute__ (( sentinel )) int +xenstore_write ( struct xen_hypervisor *xen, const char *value, ... ); +extern __attribute__ (( sentinel )) int +xenstore_write_num ( struct xen_hypervisor *xen, unsigned long num, ... ); +extern __attribute__ (( sentinel )) int +xenstore_rm ( struct xen_hypervisor *xen, ... ); +extern __attribute__ (( sentinel )) int +xenstore_directory ( struct xen_hypervisor *xen, char **children, size_t *len, + ... 
); +extern void xenstore_dump ( struct xen_hypervisor *xen, const char *key ); + +#endif /* _IPXE_XENSTORE_H */ diff --git a/src/include/ipxe/xenver.h b/src/include/ipxe/xenver.h new file mode 100644 index 00000000..b29dfb32 --- /dev/null +++ b/src/include/ipxe/xenver.h @@ -0,0 +1,44 @@ +#ifndef _IPXE_XENVER_H +#define _IPXE_XENVER_H + +/** @file + * + * Xen version + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** + * Get Xen version + * + * @v xen Xen hypervisor + * @ret version Version (major.minor: 16 bits each) + */ +static inline __attribute__ (( always_inline )) uint32 +xenver_version ( struct xen_hypervisor *xen ) { + + return xen_hypercall_2 ( xen, __HYPERVISOR_xen_version, + XENVER_version, 0 ); +} + +/** + * Get Xen extra version string + * + * @v xen Xen hypervisor + * @v extraversion Extra version string to fill in + * @ret xenrc Xen status code + */ +static inline __attribute__ (( always_inline )) int +xenver_extraversion ( struct xen_hypervisor *xen, + xen_extraversion_t *extraversion ) { + + return xen_hypercall_2 ( xen, __HYPERVISOR_xen_version, + XENVER_extraversion, + virt_to_phys ( extraversion ) ); +} + +#endif /* _IPXE_XENVER_H */ diff --git a/src/include/ipxe/xsigo.h b/src/include/ipxe/xsigo.h new file mode 100644 index 00000000..f4f14c48 --- /dev/null +++ b/src/include/ipxe/xsigo.h @@ -0,0 +1,406 @@ +#ifndef _IPXE_XSIGO_H +#define _IPXE_XSIGO_H + +/** @file + * + * Xsigo virtual Ethernet devices + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** Xsigo directory service record name */ +#define XDS_SERVICE_NAME "XSIGOXDS" + +/** Xsigo configuration manager service ID */ +#define XCM_SERVICE_ID { 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0x97, 0x01 } + +/** Xsigo management class */ +#define XSIGO_MGMT_CLASS 0x0b + +/** Xsigo management class version */ +#define XSIGO_MGMT_CLASS_VERSION 2 + +/** Xsigo configuration manager request MAD */ +#define XSIGO_ATTR_XCM_REQUEST 0xb002 + 
+/** Generic operating system type */ +#define XSIGO_OS_TYPE_GENERIC 0x40 + +/** Xsigo virtual Ethernet broadcast GID prefix */ +#define XVE_PREFIX 0xff15101cUL + +/** Xsigo resource types */ +enum xsigo_resource_type { + /** Virtual Ethernet resource type */ + XSIGO_RESOURCE_XVE = ( 1 << 6 ), + /** Absence-of-high-availability "resource" type */ + XSIGO_RESOURCE_NO_HA = ( 1 << 4 ), +}; + +/** A Xsigo server identifier */ +struct xsigo_server_id { + /** Virtual machine ID */ + uint32_t vm; + /** Port GUID */ + union ib_guid guid; +} __attribute__ (( packed )); + +/** A Xsigo configuration manager identifier */ +struct xsigo_manager_id { + /** Port GUID */ + union ib_guid guid; + /** LID */ + uint16_t lid; + /** Reserved */ + uint8_t reserved[10]; +} __attribute__ (( packed )); + +/** A Xsigo configuration manager request MAD */ +struct xsigo_managers_request { + /** MAD header */ + struct ib_mad_hdr mad_hdr; + /** Reserved */ + uint8_t reserved0[32]; + /** Server ID */ + struct xsigo_server_id server; + /** Hostname */ + char hostname[ 65 /* Seriously, guys? 
*/ ]; + /** OS version */ + char os_version[32]; + /** CPU architecture */ + char arch[16]; + /** OS type */ + uint8_t os_type; + /** Reserved */ + uint8_t reserved1[3]; + /** Firmware version */ + uint64_t firmware_version; + /** Hardware version */ + uint32_t hardware_version; + /** Driver version */ + uint32_t driver_version; + /** System ID */ + union ib_gid system_id; + /** Resource types */ + uint16_t resources; + /** Reserved */ + uint8_t reserved2[2]; + /** Build version */ + char build[16]; + /** Reserved */ + uint8_t reserved3[19]; +} __attribute__ (( packed )); + +/** Resource types are present */ +#define XSIGO_RESOURCES_PRESENT 0x8000 + +/** A Xsigo configuration manager reply MAD */ +struct xsigo_managers_reply { + /** MAD header */ + struct ib_mad_hdr mad_hdr; + /** Reserved */ + uint8_t reserved0[32]; + /** Server ID */ + struct xsigo_server_id server; + /** Number of XCM records */ + uint8_t count; + /** Version */ + uint8_t version; + /** Reserved */ + uint8_t reserved1[2]; + /** Managers */ + struct xsigo_manager_id manager[8]; + /** Reserved */ + uint8_t reserved2[24]; +} __attribute__ (( packed )); + +/** A Xsigo MAD */ +union xsigo_mad { + /** Generic MAD */ + union ib_mad mad; + /** Configuration manager request */ + struct xsigo_managers_request request; + /** Configuration manager reply */ + struct xsigo_managers_reply reply; +} __attribute__ (( packed )); + +/** An XSMP node identifier */ +struct xsmp_node_id { + /** Auxiliary ID (never used) */ + uint32_t aux; + /** Port GUID */ + union ib_guid guid; +} __attribute__ (( packed )); + +/** An XSMP message header */ +struct xsmp_message_header { + /** Message type */ + uint8_t type; + /** Reason code */ + uint8_t code; + /** Length */ + uint16_t len; + /** Sequence number */ + uint32_t seq; + /** Source node ID */ + struct xsmp_node_id src; + /** Destination node ID */ + struct xsmp_node_id dst; +} __attribute__ (( packed )); + +/** XSMP message types */ +enum xsmp_message_type { + /** 
Session message type */ + XSMP_TYPE_SESSION = 1, + /** Virtual Ethernet message type */ + XSMP_TYPE_XVE = 6, +}; + +/** An XSMP session message */ +struct xsmp_session_message { + /** Message header */ + struct xsmp_message_header hdr; + /** Message type */ + uint8_t type; + /** Reason code */ + uint8_t code; + /** Length (excluding message header) */ + uint16_t len; + /** Operating system type */ + uint8_t os_type; + /** Reserved */ + uint8_t reserved0; + /** Resource types */ + uint16_t resources; + /** Driver version */ + uint32_t driver_version; + /** Required chassis version */ + uint32_t chassis_version; + /** Boot flags */ + uint32_t boot; + /** Firmware version */ + uint64_t firmware_version; + /** Hardware version */ + uint32_t hardware_version; + /** Vendor part ID */ + uint32_t vendor; + /** Protocol version */ + uint32_t xsmp_version; + /** Chassis name */ + char chassis[32]; + /** Session name */ + char session[32]; + /** Reserved */ + uint8_t reserved1[120]; +} __attribute__ (( packed )); + +/** XSMP session message types */ +enum xsmp_session_type { + /** Keepalive message */ + XSMP_SESSION_TYPE_HELLO = 1, + /** Initial registration message */ + XSMP_SESSION_TYPE_REGISTER = 2, + /** Registration confirmation message */ + XSMP_SESSION_TYPE_CONFIRM = 3, + /** Registration rejection message */ + XSMP_SESSION_TYPE_REJECT = 4, + /** Shutdown message */ + XSMP_SESSION_TYPE_SHUTDOWN = 5, +}; + +/** XSMP boot flags */ +enum xsmp_session_boot { + /** PXE boot */ + XSMP_BOOT_PXE = ( 1 << 0 ), +}; + +/** XSMP virtual Ethernet channel adapter parameters */ +struct xsmp_xve_ca { + /** Subnet prefix (little-endian) */ + union ib_guid prefix_le; + /** Control queue pair number */ + uint32_t ctrl; + /** Data queue pair number */ + uint32_t data; + /** Partition key */ + uint16_t pkey; + /** Queue key */ + uint16_t qkey; +} __attribute__ (( packed )); + +/** XSMP virtual Ethernet MAC address */ +struct xsmp_xve_mac { + /** High 16 bits */ + uint16_t high; + /** Low 
32 bits */ + uint32_t low; +} __attribute__ (( packed )); + +/** An XSMP virtual Ethernet message */ +struct xsmp_xve_message { + /** Message header */ + struct xsmp_message_header hdr; + /** Message type */ + uint8_t type; + /** Reason code */ + uint8_t code; + /** Length (excluding message header) */ + uint16_t len; + /** Update bitmask */ + uint32_t update; + /** Resource identifier */ + union ib_guid resource; + /** TCA GUID (little-endian) */ + union ib_guid guid_le; + /** TCA LID */ + uint16_t lid; + /** MAC address (little-endian) */ + struct xsmp_xve_mac mac_le; + /** Rate */ + uint16_t rate; + /** Administrative state (non-zero = "up") */ + uint16_t state; + /** Encapsulation (apparently obsolete and unused) */ + uint16_t encap; + /** MTU */ + uint16_t mtu; + /** Installation flags (apparently obsolete and unused) */ + uint32_t install; + /** Interface name */ + char name[16]; + /** Service level */ + uint16_t sl; + /** Flow control enabled (apparently obsolete and unused) */ + uint16_t flow; + /** Committed rate (in Mbps) */ + uint16_t committed_mbps; + /** Peak rate (in Mbps) */ + uint16_t peak_mbps; + /** Committed burst size (in bytes) */ + uint32_t committed_burst; + /** Peak burst size (in bytes) */ + uint32_t peak_burst; + /** VMware index */ + uint8_t vmware; + /** Reserved */ + uint8_t reserved0; + /** Multipath flags */ + uint16_t multipath; + /** Multipath group name */ + char group[48]; + /** Link aggregation flag */ + uint8_t agg; + /** Link aggregation policy */ + uint8_t policy; + /** Network ID */ + uint32_t network; + /** Mode */ + uint8_t mode; + /** Uplink type */ + uint8_t uplink; + /** Target channel adapter parameters */ + struct xsmp_xve_ca tca; + /** Host channel adapter parameters */ + struct xsmp_xve_ca hca; + /** Reserved */ + uint8_t reserved1[336]; +} __attribute__ (( packed )); + +/** XSMP virtual Ethernet message types */ +enum xsmp_xve_type { + /** Install virtual NIC */ + XSMP_XVE_TYPE_INSTALL = 1, + /** Delete virtual NIC 
*/ + XSMP_XVE_TYPE_DELETE = 2, + /** Update virtual NIC */ + XSMP_XVE_TYPE_UPDATE = 3, + /** Set operational state up */ + XSMP_XVE_TYPE_OPER_UP = 6, + /** Set operational state down */ + XSMP_XVE_TYPE_OPER_DOWN = 7, + /** Get operational state */ + XSMP_XVE_TYPE_OPER_REQ = 15, + /** Virtual NIC is ready */ + XSMP_XVE_TYPE_READY = 20, +}; + +/** XSMP virtual Ethernet message codes */ +enum xsmp_xve_code { + /* Something went wrong */ + XSMP_XVE_CODE_ERROR = 0x84, +}; + +/** XSMP virtual Ethernet update bitmask */ +enum xsmp_xve_update { + /** Update MTU */ + XSMP_XVE_UPDATE_MTU = ( 1 << 2 ), + /** Update administrative state */ + XSMP_XVE_UPDATE_STATE = ( 1 << 6 ), + /** Update gateway to mark as down */ + XSMP_XVE_UPDATE_GW_DOWN = ( 1 << 30 ), + /** Update gateway information */ + XSMP_XVE_UPDATE_GW_CHANGE = ( 1 << 31 ), +}; + +/** XSMP virtual Ethernet modes */ +enum xsmp_xve_mode { + /** Reliable Connected */ + XSMP_XVE_MODE_RC = 1, + /** Unreliable Datagram */ + XSMP_XVE_MODE_UD = 2, +}; + +/** XSMP virtual Ethernet uplink types */ +enum xsmp_xve_uplink { + /** No uplink */ + XSMP_XVE_NO_UPLINK = 1, + /** Has uplink */ + XSMP_XVE_UPLINK = 2, +}; + +/** An XSMP message */ +union xsmp_message { + /** Message header */ + struct xsmp_message_header hdr; + /** Session message */ + struct xsmp_session_message sess; + /** Virtual Ethernet message */ + struct xsmp_xve_message xve; +}; + +/** Delay between attempts to open the Infiniband device + * + * This is a policy decision. + */ +#define XSIGO_OPEN_RETRY_DELAY ( 2 * TICKS_PER_SEC ) + +/** Delay between unsuccessful discovery attempts + * + * This is a policy decision. + */ +#define XSIGO_DISCOVERY_FAILURE_DELAY ( 10 * TICKS_PER_SEC ) + +/** Delay between successful discovery attempts + * + * This is a policy decision. + */ +#define XSIGO_DISCOVERY_SUCCESS_DELAY ( 20 * TICKS_PER_SEC ) + +/** Delay between keepalive requests + * + * This is a policy decision. 
+ */ +#define XSIGO_KEEPALIVE_INTERVAL ( 10 * TICKS_PER_SEC ) + +/** Maximum time to wait for a keepalive response + * + * This is a policy decision. + */ +#define XSIGO_KEEPALIVE_MAX_WAIT ( 2 * TICKS_PER_SEC ) + +#endif /* _IPXE_XSIGO_H */ diff --git a/src/include/stdbool.h b/src/include/stdbool.h new file mode 100644 index 00000000..c49a7f19 --- /dev/null +++ b/src/include/stdbool.h @@ -0,0 +1,10 @@ +#ifndef _STDBOOL_H +#define _STDBOOL_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#define bool _Bool +#define true 1 +#define false 0 + +#endif /* _STDBOOL_H */ diff --git a/src/include/usr/certmgmt.h b/src/include/usr/certmgmt.h new file mode 100644 index 00000000..4363b03e --- /dev/null +++ b/src/include/usr/certmgmt.h @@ -0,0 +1,16 @@ +#ifndef _USR_CERTMGMT_H +#define _USR_CERTMGMT_H + +/** @file + * + * Certificate management + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern void certstat ( struct x509_certificate *cert ); + +#endif /* _USR_CERTMGMT_H */ diff --git a/src/include/usr/ibmgmt.h b/src/include/usr/ibmgmt.h new file mode 100644 index 00000000..16a09913 --- /dev/null +++ b/src/include/usr/ibmgmt.h @@ -0,0 +1,16 @@ +#ifndef _USR_IBMGMT_H +#define _USR_IBMGMT_H + +/** @file + * + * Infiniband device management + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +struct ib_device; + +extern void ibstat ( struct ib_device *ibdev ); + +#endif /* _USR_IBMGMT_H */ diff --git a/src/include/usr/ntpmgmt.h b/src/include/usr/ntpmgmt.h new file mode 100644 index 00000000..284e668e --- /dev/null +++ b/src/include/usr/ntpmgmt.h @@ -0,0 +1,14 @@ +#ifndef _USR_NTPMGMT_H +#define _USR_NTPMGMT_H + +/** @file + * + * NTP management + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern int ntp ( const char *hostname ); + +#endif /* _USR_NTPMGMT_H */ diff --git a/src/include/valgrind/memcheck.h b/src/include/valgrind/memcheck.h new file mode 100644 index 00000000..7d4b56d3 --- /dev/null +++ b/src/include/valgrind/memcheck.h @@ -0,0 +1,311 
@@ + +/* + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (memcheck.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of MemCheck, a heavyweight Valgrind tool for + detecting memory errors. + + Copyright (C) 2000-2010 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (memcheck.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +#ifndef __MEMCHECK_H +#define __MEMCHECK_H + +FILE_LICENCE ( BSD3 ); + + +/* This file is for inclusion into client (your!) code. + + You can use these macros to manipulate and query memory permissions + inside your own programs. + + See comment near the top of valgrind.h on how to use them. +*/ + +#include "valgrind.h" + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. */ +typedef + enum { + VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'), + VG_USERREQ__MAKE_MEM_UNDEFINED, + VG_USERREQ__MAKE_MEM_DEFINED, + VG_USERREQ__DISCARD, + VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, + VG_USERREQ__CHECK_MEM_IS_DEFINED, + VG_USERREQ__DO_LEAK_CHECK, + VG_USERREQ__COUNT_LEAKS, + + VG_USERREQ__GET_VBITS, + VG_USERREQ__SET_VBITS, + + VG_USERREQ__CREATE_BLOCK, + + VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, + + /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. 
*/ + VG_USERREQ__COUNT_LEAK_BLOCKS, + + /* This is just for memcheck's internal use - don't use it */ + _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR + = VG_USERREQ_TOOL_BASE('M','C') + 256 + } Vg_MemCheckClientRequest; + + + +/* Client-code macros to manipulate the state of memory. */ + +/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */ +#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_MEM_NOACCESS, \ + _qzz_addr, _qzz_len, 0, 0, 0); \ + _qzz_res; \ + })) + +/* Similarly, mark memory at _qzz_addr as addressable but undefined + for _qzz_len bytes. */ +#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_MEM_UNDEFINED, \ + _qzz_addr, _qzz_len, 0, 0, 0); \ + _qzz_res; \ + })) + +/* Similarly, mark memory at _qzz_addr as addressable and defined + for _qzz_len bytes. */ +#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_MEM_DEFINED, \ + _qzz_addr, _qzz_len, 0, 0, 0); \ + _qzz_res; \ + })) + +/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is + not altered: bytes which are addressable are marked as defined, + but those which are not addressable are left unchanged. */ +#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \ + _qzz_addr, _qzz_len, 0, 0, 0); \ + _qzz_res; \ + })) + +/* Create a block-description handle. The description is an ascii + string which is included in any messages pertaining to addresses + within the specified memory range. 
Has no other effect on the + properties of the memory range. */ +#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \ + VG_USERREQ__CREATE_BLOCK, \ + _qzz_addr, _qzz_len, _qzz_desc, \ + 0, 0); \ + _qzz_res; \ + })) + +/* Discard a block-description-handle. Returns 1 for an + invalid handle, 0 for a valid handle. */ +#define VALGRIND_DISCARD(_qzz_blkindex) \ + (__extension__ ({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \ + VG_USERREQ__DISCARD, \ + 0, _qzz_blkindex, 0, 0, 0); \ + _qzz_res; \ + })) + + +/* Client-code macros to check the state of memory. */ + +/* Check that memory at _qzz_addr is addressable for _qzz_len bytes. + If suitable addressibility is not established, Valgrind prints an + error message and returns the address of the first offending byte. + Otherwise it returns zero. */ +#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,\ + _qzz_addr, _qzz_len, 0, 0, 0); \ + _qzz_res; \ + })) + +/* Check that memory at _qzz_addr is addressable and defined for + _qzz_len bytes. If suitable addressibility and definedness are not + established, Valgrind prints an error message and returns the + address of the first offending byte. Otherwise it returns zero. */ +#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \ + (__extension__({unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__CHECK_MEM_IS_DEFINED, \ + _qzz_addr, _qzz_len, 0, 0, 0); \ + _qzz_res; \ + })) + +/* Use this macro to force the definedness and addressibility of an + lvalue to be checked. If suitable addressibility and definedness + are not established, Valgrind prints an error message and returns + the address of the first offending byte. 
Otherwise it returns + zero. */ +#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \ + VALGRIND_CHECK_MEM_IS_DEFINED( \ + (volatile unsigned char *)&(__lvalue), \ + (unsigned long)(sizeof (__lvalue))) + + +/* Do a full memory leak check (like --leak-check=full) mid-execution. */ +#define VALGRIND_DO_LEAK_CHECK \ + {unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__DO_LEAK_CHECK, \ + 0, 0, 0, 0, 0); \ + } + +/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */ +#define VALGRIND_DO_QUICK_LEAK_CHECK \ + {unsigned long _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__DO_LEAK_CHECK, \ + 1, 0, 0, 0, 0); \ + } + +/* Return number of leaked, dubious, reachable and suppressed bytes found by + all previous leak checks. They must be lvalues. */ +#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \ + /* For safety on 64-bit platforms we assign the results to private + unsigned long variables, then assign these to the lvalues the user + specified, which works no matter what type 'leaked', 'dubious', etc + are. We also initialise '_qzz_leaked', etc because + VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as + defined. */ \ + {unsigned long _qzz_res; \ + unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ + unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__COUNT_LEAKS, \ + &_qzz_leaked, &_qzz_dubious, \ + &_qzz_reachable, &_qzz_suppressed, 0); \ + leaked = _qzz_leaked; \ + dubious = _qzz_dubious; \ + reachable = _qzz_reachable; \ + suppressed = _qzz_suppressed; \ + } + +/* Return number of leaked, dubious, reachable and suppressed bytes found by + all previous leak checks. They must be lvalues. 
*/ +#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \ + /* For safety on 64-bit platforms we assign the results to private + unsigned long variables, then assign these to the lvalues the user + specified, which works no matter what type 'leaked', 'dubious', etc + are. We also initialise '_qzz_leaked', etc because + VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as + defined. */ \ + {unsigned long _qzz_res; \ + unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ + unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__COUNT_LEAK_BLOCKS, \ + &_qzz_leaked, &_qzz_dubious, \ + &_qzz_reachable, &_qzz_suppressed, 0); \ + leaked = _qzz_leaked; \ + dubious = _qzz_dubious; \ + reachable = _qzz_reachable; \ + suppressed = _qzz_suppressed; \ + } + + +/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it + into the provided zzvbits array. Return values: + 0 if not running on valgrind + 1 success + 2 [previously indicated unaligned arrays; these are now allowed] + 3 if any parts of zzsrc/zzvbits are not addressable. + The metadata is not copied in cases 0, 2 or 3 so it should be + impossible to segfault your system by using this call. +*/ +#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \ + (__extension__({unsigned long _qzz_res; \ + char* czza = (char*)zza; \ + char* czzvbits = (char*)zzvbits; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__GET_VBITS, \ + czza, czzvbits, zznbytes, 0, 0 ); \ + _qzz_res; \ + })) + +/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it + from the provided zzvbits array. Return values: + 0 if not running on valgrind + 1 success + 2 [previously indicated unaligned arrays; these are now allowed] + 3 if any parts of zza/zzvbits are not addressable. + The metadata is not copied in cases 0, 2 or 3 so it should be + impossible to segfault your system by using this call. 
+*/ +#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \ + (__extension__({unsigned int _qzz_res; \ + char* czza = (char*)zza; \ + char* czzvbits = (char*)zzvbits; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__SET_VBITS, \ + czza, czzvbits, zznbytes, 0, 0 ); \ + _qzz_res; \ + })) + +#endif + diff --git a/src/include/valgrind/valgrind.h b/src/include/valgrind/valgrind.h new file mode 100644 index 00000000..d48bbcca --- /dev/null +++ b/src/include/valgrind/valgrind.h @@ -0,0 +1,4538 @@ +/* -*- c -*- + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (valgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2000-2010 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (valgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +/* This file is for inclusion into client (your!) code. + + You can use these macros to manipulate and query Valgrind's + execution inside your own programs. + + The resulting executables will still run without Valgrind, just a + little bit more slowly than they otherwise would, but otherwise + unchanged. When not running on valgrind, each client request + consumes very few (eg. 7) instructions, so the resulting performance + loss is negligible unless you plan to execute client requests + millions of times per second. Nevertheless, if that is still a + problem, you can compile with the NVALGRIND symbol defined (gcc + -DNVALGRIND) so that client requests are not even compiled in. 
*/ + +#ifndef __VALGRIND_H +#define __VALGRIND_H + +FILE_LICENCE ( BSD3 ); + + +/* ------------------------------------------------------------------ */ +/* VERSION NUMBER OF VALGRIND */ +/* ------------------------------------------------------------------ */ + +/* Specify Valgrind's version number, so that user code can + conditionally compile based on our version number. Note that these + were introduced at version 3.6 and so do not exist in version 3.5 + or earlier. The recommended way to use them to check for "version + X.Y or later" is (eg) + +#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ + && (__VALGRIND_MAJOR__ > 3 \ + || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) +*/ +#define __VALGRIND_MAJOR__ 3 +#define __VALGRIND_MINOR__ 6 + + +#include + +/* Nb: this file might be included in a file compiled with -ansi. So + we can't use C++ style "//" comments nor the "asm" keyword (instead + use "__asm__"). */ + +/* Derive some tags indicating what the target platform is. Note + that in this file we're using the compiler's CPP symbols for + identifying architectures, which are different to the ones we use + within the rest of Valgrind. Note, __powerpc__ is active for both + 32 and 64-bit PPC, whereas __powerpc64__ is only active for the + latter (on Linux, that is). 
+ + Misc note: how to find out what's predefined in gcc by default: + gcc -Wp,-dM somefile.c +*/ +#undef PLAT_ppc64_aix5 +#undef PLAT_ppc32_aix5 +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64_linux +#undef PLAT_arm_linux + +#if defined(_AIX) && defined(__64BIT__) +# define PLAT_ppc64_aix5 1 +#elif defined(_AIX) && !defined(__64BIT__) +# define PLAT_ppc32_aix5 1 +#elif defined(__APPLE__) && defined(__i386__) +# define PLAT_x86_darwin 1 +#elif defined(__APPLE__) && defined(__x86_64__) +# define PLAT_amd64_darwin 1 +#elif defined(__linux__) && defined(__i386__) +# define PLAT_x86_linux 1 +#elif defined(__linux__) && defined(__x86_64__) +# define PLAT_amd64_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) +# define PLAT_ppc32_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) +# define PLAT_ppc64_linux 1 +#elif defined(__linux__) && defined(__arm__) +# define PLAT_arm_linux 1 +#else +/* If we're not compiling for our target platform, don't generate + any inline asms. */ +# if !defined(NVALGRIND) +# define NVALGRIND 1 +# endif +#endif + + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ +/* in here of use to end-users -- skip to the next section. */ +/* ------------------------------------------------------------------ */ + +#if defined(NVALGRIND) + +/* Define NVALGRIND to completely remove the Valgrind magic sequence + from the compiled code (analogous to NDEBUG's effects on + assert()) */ +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + { \ + (_zzq_rlval) = (_zzq_default); \ + } + +#else /* ! NVALGRIND */ + +/* The following defines the magic code sequences which the JITter + spots and handles magically. 
Don't look too closely at them as + they will rot your brain. + + The assembly code sequences for all architectures is in this one + file. This is because this file must be stand-alone, and we don't + want to have multiple files. + + For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default + value gets put in the return slot, so that everything works when + this is executed not under Valgrind. Args are passed in a memory + block, and so there's no intrinsic limit to the number that could + be passed, but it's currently five. + + The macro args are: + _zzq_rlval result lvalue + _zzq_default default value (result returned when running on real CPU) + _zzq_request request code + _zzq_arg1..5 request params + + The other two macros are used to support function wrapping, and are + a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the + guest's NRADDR pseudo-register and whatever other information is + needed to safely run the call original from the wrapper: on + ppc64-linux, the R2 value at the divert point is also needed. This + information is abstracted into a user-visible type, OrigFn. + + VALGRIND_CALL_NOREDIR_* behaves the same as the following on the + guest, but guarantees that the branch instruction will not be + redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: + branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a + complete inline asm, since it needs to be combined with more magic + inline asm stuff to be useful. +*/ + +/* ------------------------- x86-{linux,darwin} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "roll $3, %%edi ; roll $13, %%edi\n\t" \ + "roll $29, %%edi ; roll $19, %%edi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + { volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EDX = client_request ( %EAX ) */ \ + "xchgl %%ebx,%%ebx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + "xchgl %%ecx,%%ecx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%EAX */ \ + "xchgl %%edx,%%edx\n\t" +#endif /* PLAT_x86_linux || PLAT_x86_darwin */ + +/* ------------------------ amd64-{linux,darwin} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) + +typedef + struct { + unsigned long long int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ + "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + { volatile unsigned long long int _zzq_args[6]; \ + volatile unsigned long long int _zzq_result; \ + _zzq_args[0] = (unsigned long long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RAX = guest_NRADDR */ \ + "xchgq %%rcx,%%rcx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_RAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%RAX */ \ + "xchgq %%rdx,%%rdx\n\t" +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \ + "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + { unsigned int _zzq_args[6]; \ + unsigned int _zzq_result; \ + unsigned int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64_linux) + +typedef + struct { + unsigned long long int nraddr; /* where's the code? */ + unsigned long long int r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + { unsigned long long int _zzq_args[6]; \ + register unsigned long long int _zzq_result __asm__("r3"); \ + register unsigned long long int* _zzq_ptr __asm__("r4"); \ + _zzq_args[0] = (unsigned long long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1" \ + : "=r" (_zzq_result) \ + : "0" (_zzq_default), "r" (_zzq_ptr) \ + : "cc", "memory"); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + register unsigned long long int __addr __asm__("r3"); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2" \ + : "=r" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4" \ + : "=r" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#endif /* PLAT_ppc64_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ + "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + { volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("mov r3, %1\n\t" /*default*/ \ + "mov r4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = client_request ( R4 ) */ \ + "orr r10, r10, r10\n\t" \ + "mov %0, r3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "cc","memory", "r3", "r4"); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = guest_NRADDR */ \ + "orr r11, r11, r11\n\t" \ + "mov %0, r3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R4 */ \ + "orr r12, r12, r12\n\t" + +#endif /* PLAT_arm_linux */ + +/* ------------------------ ppc32-aix5 ------------------------- */ + +#if defined(PLAT_ppc32_aix5) + +typedef + struct { + unsigned int nraddr; /* where's the code? */ + unsigned int r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \ + "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + { unsigned int _zzq_args[7]; \ + register unsigned int _zzq_result; \ + register unsigned int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + _zzq_args[6] = (unsigned int)(_zzq_default); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 4,%1\n\t" \ + "lwz 3, 24(4)\n\t" \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" \ + : "=b" (_zzq_result) \ + : "b" (_zzq_ptr) \ + : "r3", "r4", "cc", "memory"); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + register unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "r3", "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "r3", "cc", "memory" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#endif /* PLAT_ppc32_aix5 */ + +/* ------------------------ ppc64-aix5 ------------------------- */ + +#if defined(PLAT_ppc64_aix5) + +typedef + struct { + unsigned long long int nraddr; /* where's the code? 
*/ + unsigned long long int r2; /* what tocptr do we need? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST( \ + _zzq_rlval, _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + { unsigned long long int _zzq_args[7]; \ + register unsigned long long int _zzq_result; \ + register unsigned long long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int long long)(_zzq_request); \ + _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \ + _zzq_args[6] = (unsigned int long long)(_zzq_default); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 4,%1\n\t" \ + "ld 3, 48(4)\n\t" \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" \ + : "=b" (_zzq_result) \ + : "b" (_zzq_ptr) \ + : "r3", "r4", "cc", "memory"); \ + _zzq_rlval = _zzq_result; \ + } + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + register unsigned long long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "r3", "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "r3", "cc", "memory" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#endif /* PLAT_ppc64_aix5 */ + +/* Insert assembly code for other platforms here... 
*/ + +#endif /* NVALGRIND */ + + +/* ------------------------------------------------------------------ */ +/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ +/* ugly. It's the least-worst tradeoff I can think of. */ +/* ------------------------------------------------------------------ */ + +/* This section defines magic (a.k.a appalling-hack) macros for doing + guaranteed-no-redirection macros, so as to get from function + wrappers to the functions they are wrapping. The whole point is to + construct standard call sequences, but to do the call itself with a + special no-redirect call pseudo-instruction that the JIT + understands and handles specially. This section is long and + repetitious, and I can't see a way to make it shorter. + + The naming scheme is as follows: + + CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} + + 'W' stands for "word" and 'v' for "void". Hence there are + different macros for calling arity 0, 1, 2, 3, 4, etc, functions, + and for each, the possibility of returning a word-typed result, or + no result. +*/ + +/* Use these to write the name of your wrapper. NOTE: duplicates + VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */ + +/* Use an extra level of macroisation so as to ensure the soname/fnname + args are fully macro-expanded before pasting them together. */ +#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd + +#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgwZU_,soname,_,fnname) + +#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgwZZ_,soname,_,fnname) + +/* Use this macro from within a wrapper function to collect the + context (address and possibly other info) of the original function. + Once you have that you can then use it in one of the CALL_FN_ + macros. The type of the argument _lval is OrigFn. */ +#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) + +/* Derivatives of the main macros below, for calling functions + returning void. 
*/ + +#define CALL_FN_v_v(fnptr) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_v(_junk,fnptr); } while (0) + +#define CALL_FN_v_W(fnptr, arg1) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_W(_junk,fnptr,arg1); } while (0) + +#define CALL_FN_v_WW(fnptr, arg1,arg2) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) + +#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) + +#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) + +#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) + +#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) + +#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) + +/* ------------------------- x86-{linux,darwin} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) + +/* These regs are trashed by the hidden call. No need to mention eax + as gcc can already see that, plus causes gcc to bomb. */ +#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" + +/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $4, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $8, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); 
\ + __asm__ volatile( \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $12, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $16, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $20, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + 
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $24, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $28, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { 
\ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $32, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $36, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ 
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $40, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + 
_argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $44, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "pushl 48(%%eax)\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + "addl $48, %%esp\n" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* 
PLAT_x86_linux || PLAT_x86_darwin */ + +/* ------------------------ amd64-{linux,darwin} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) + +/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ + "rdi", "r8", "r9", "r10", "r11" + +/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned + long) == 8. */ + +/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ + macros. In order not to trash the stack redzone, we need to drop + %rsp by 128 before the hidden call, and restore afterwards. The + nastyness is that it is only by luck that the stack still appears + to be unwindable during the hidden call - since then the behaviour + of any routine using this macro does not match what the CFI data + says. Sigh. + + Why is this important? Imagine that a wrapper has a stack + allocated local, and passes to the hidden call, a pointer to it. + Because gcc does not know about the hidden call, it may allocate + that local in the redzone. Unfortunately the hidden call may then + trash it before it comes to use it. So we must step clear of the + redzone, for the duration of the hidden call, to make it safe. + + Probably the same problem afflicts the other redzone-style ABIs too + (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is + self describing (none of this CFI nonsense) so at least messing + with the stack pointer doesn't give a danger of non-unwindable + stack. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned 
long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 
8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + 
"movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $8, %%rsp\n" \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $16, %%rsp\n" \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ 
+ _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $24, %%rsp\n" \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $32, %%rsp\n" \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $40, %%rsp\n" \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned 
long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "subq $128,%%rsp\n\t" \ + "pushq 96(%%rax)\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + "addq $48, %%rsp\n" \ + "addq $128,%%rsp\n\t" \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +/* This is useful for finding out about the on-stack stuff: + + extern int f9 ( int,int,int,int,int,int,int,int,int ); + extern int f10 ( int,int,int,int,int,int,int,int,int,int ); + extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); + extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); + + int g9 ( void ) { + return f9(11,22,33,44,55,66,77,88,99); + } + int g10 ( void ) { + return f10(11,22,33,44,55,66,77,88,99,110); + } + int g11 ( void ) { + return f11(11,22,33,44,55,66,77,88,99,110,121); + } + int g12 ( void ) { + return f12(11,22,33,44,55,66,77,88,99,110,121,132); + } +*/ + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. 
*/ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* These CALL_FN_ macros assume that on ppc32-linux, + sizeof(unsigned long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( 
\ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 
6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned 
long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "addi 1,1,16\n\t" \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "addi 1,1,16\n\t" \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "addi 1,1,32\n\t" \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned 
long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg12 */ \ + "lwz 3,48(11)\n\t" \ + "stw 3,20(1)\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "addi 1,1,32\n\t" \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ 
volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = 
(unsigned long)arg4; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* 
_argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 
*/ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)" /* restore tocptr */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long 
_argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + "addi 1,1,128" /* restore frame */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned 
long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + "addi 1,1,128" /* restore frame */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = 
(unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + "addi 1,1,144" /* restore frame */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ 
+ "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(11)\n\t" \ + "std 3,136(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + "addi 1,1,144" /* restore frame */ \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14" + +/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + 
_argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "ldr r0, [%1, #20] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #4 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + 
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #8 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #12 \n\t" \ + "mov %0, r0" \ + : 
/*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "push {r0, r1, r2, r3} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #16 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" 
\ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #20 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "ldr r0, [%1, #40] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #24 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = 
(orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #28 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + 
_argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "ldr r2, [%1, #48] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + "add sp, sp, #32 \n\t" \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ ppc32-aix5 ------------------------- */ + +#if defined(PLAT_ppc32_aix5) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Expand the stack frame, copying enough info that unwinding + still works. Trashes r3. */ + +#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \ + "addi 1,1,-" #_n_fr "\n\t" \ + "lwz 3," #_n_fr "(1)\n\t" \ + "stw 3,0(1)\n\t" + +#define VG_CONTRACT_FRAME_BY(_n_fr) \ + "addi 1,1," #_n_fr "\n\t" + +/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned 
long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile 
unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 
11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + 
_argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 9, 28(11)\n\t" /* arg7->r9 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 
6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 9, 28(11)\n\t" /* arg7->r9 */ \ + "lwz 10, 32(11)\n\t" /* arg8->r10 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(64) \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,56(1)\n\t" \ + /* args1-8 */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 9, 28(11)\n\t" /* arg7->r9 */ \ + "lwz 10, 32(11)\n\t" /* arg8->r10 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 
2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(64) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(64) \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,60(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,56(1)\n\t" \ + /* args1-8 */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 9, 28(11)\n\t" /* arg7->r9 */ \ + "lwz 10, 32(11)\n\t" /* arg8->r10 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(64) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(72) \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,64(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,60(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,56(1)\n\t" \ + /* args1-8 */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 9, 28(11)\n\t" /* arg7->r9 */ \ + "lwz 10, 32(11)\n\t" /* arg8->r10 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(72) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) 
+ +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "stw 2,-8(11)\n\t" /* save tocptr */ \ + "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(72) \ + /* arg12 */ \ + "lwz 3,48(11)\n\t" \ + "stw 3,68(1)\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,64(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,60(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,56(1)\n\t" \ + /* args1-8 */ \ + "lwz 3, 4(11)\n\t" /* arg1->r3 */ \ + "lwz 4, 8(11)\n\t" /* arg2->r4 */ \ + "lwz 5, 12(11)\n\t" /* arg3->r5 */ \ + "lwz 6, 16(11)\n\t" /* arg4->r6 */ \ + "lwz 7, 20(11)\n\t" /* arg5->r7 */ \ + "lwz 8, 24(11)\n\t" /* arg6->r8 */ \ + "lwz 9, 28(11)\n\t" /* arg7->r9 */ \ + "lwz 10, 32(11)\n\t" /* arg8->r10 */ \ + "lwz 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "lwz 2,-8(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(72) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc32_aix5 */ + +/* ------------------------ ppc64-aix5 ------------------------- */ + +#if defined(PLAT_ppc64_aix5) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Expand the stack frame, copying enough info that unwinding + still works. Trashes r3. */ + +#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \ + "addi 1,1,-" #_n_fr "\n\t" \ + "ld 3," #_n_fr "(1)\n\t" \ + "std 3,0(1)\n\t" + +#define VG_CONTRACT_FRAME_BY(_n_fr) \ + "addi 1,1," #_n_fr "\n\t" + +/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned + long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned 
long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = 
(unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile 
unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" 
/* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); 
\ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned 
long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(128) \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(128) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's 
tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(128) \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(128) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(144) \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 
3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(144) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + "mr 11,%1\n\t" \ + VG_EXPAND_FRAME_BY_trashes_r3(512) \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + VG_EXPAND_FRAME_BY_trashes_r3(144) \ + /* arg12 */ \ + "ld 3,96(11)\n\t" \ + "std 3,136(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" 
\ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VG_CONTRACT_FRAME_BY(144) \ + VG_CONTRACT_FRAME_BY(512) \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64_aix5 */ + + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ +/* */ +/* ------------------------------------------------------------------ */ + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. + + Core ones are in the range 0x00000000--0x0000ffff. The non-public + ones start at 0x2000. +*/ + +/* These macros are used by tools -- they must be public, but don't + embed them into other programs. */ +#define VG_USERREQ_TOOL_BASE(a,b) \ + ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) +#define VG_IS_TOOL_USERREQ(a, b, v) \ + (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. 
*/ +typedef + enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, + VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, + + /* These allow any function to be called from the simulated + CPU but run on the real CPU. Nb: the first arg passed to + the function is always the ThreadId of the running + thread! So CLIENT_CALL0 actually requires a 1 arg + function, etc. */ + VG_USERREQ__CLIENT_CALL0 = 0x1101, + VG_USERREQ__CLIENT_CALL1 = 0x1102, + VG_USERREQ__CLIENT_CALL2 = 0x1103, + VG_USERREQ__CLIENT_CALL3 = 0x1104, + + /* Can be useful in regression testing suites -- eg. can + send Valgrind's output to /dev/null and still count + errors. */ + VG_USERREQ__COUNT_ERRORS = 0x1201, + + /* These are useful and can be interpreted by any tool that + tracks malloc() et al, by using vg_replace_malloc.c. */ + VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, + VG_USERREQ__FREELIKE_BLOCK = 0x1302, + /* Memory pool support. */ + VG_USERREQ__CREATE_MEMPOOL = 0x1303, + VG_USERREQ__DESTROY_MEMPOOL = 0x1304, + VG_USERREQ__MEMPOOL_ALLOC = 0x1305, + VG_USERREQ__MEMPOOL_FREE = 0x1306, + VG_USERREQ__MEMPOOL_TRIM = 0x1307, + VG_USERREQ__MOVE_MEMPOOL = 0x1308, + VG_USERREQ__MEMPOOL_CHANGE = 0x1309, + VG_USERREQ__MEMPOOL_EXISTS = 0x130a, + + /* Allow printfs to valgrind log. */ + /* The first two pass the va_list argument by value, which + assumes it is the same size as or smaller than a UWord, + which generally isn't the case. Hence are deprecated. + The second two pass the vargs by reference and so are + immune to this problem. */ + /* both :: char* fmt, va_list vargs (DEPRECATED) */ + VG_USERREQ__PRINTF = 0x1401, + VG_USERREQ__PRINTF_BACKTRACE = 0x1402, + /* both :: char* fmt, va_list* vargs */ + VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, + + /* Stack support. 
*/ + VG_USERREQ__STACK_REGISTER = 0x1501, + VG_USERREQ__STACK_DEREGISTER = 0x1502, + VG_USERREQ__STACK_CHANGE = 0x1503, + + /* Wine support */ + VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601 + } Vg_ClientRequest; + +#if !defined(__GNUC__) +# define __extension__ /* */ +#endif + +/* Returns the number of Valgrinds this code is running under. That + is, 0 if running natively, 1 if running under Valgrind, 2 if + running under Valgrind which is running under another Valgrind, + etc. */ +#define RUNNING_ON_VALGRIND __extension__ \ + ({unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0, 0); \ + _qzz_res; \ + }) + + +/* Discard translation of code in the range [_qzz_addr .. _qzz_addr + + _qzz_len - 1]. Useful if you are debugging a JITter or some such, + since it provides a way to make sure valgrind will retranslate the + invalidated area. Returns no value. */ +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__DISCARD_TRANSLATIONS, \ + _qzz_addr, _qzz_len, 0, 0, 0); \ + } + + +/* These requests are for getting Valgrind itself to print something. + Possibly with a backtrace. This is a really ugly hack. The return value + is the number of characters printed, excluding the "**** " part at the + start and the backtrace (if present). */ + +#if defined(NVALGRIND) + +# define VALGRIND_PRINTF(...) +# define VALGRIND_PRINTF_BACKTRACE(...) + +#else /* NVALGRIND */ + +/* Modern GCC will optimize the static routine out if unused, + and unused attribute will shut down warnings about it. */ +static int VALGRIND_PRINTF(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +static int +VALGRIND_PRINTF(const char *format, ...) 
+{ + unsigned long _qzz_res; + va_list vargs; + va_start(vargs, format); + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); + va_end(vargs); + return (int)_qzz_res; +} + +static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +static int +VALGRIND_PRINTF_BACKTRACE(const char *format, ...) +{ + unsigned long _qzz_res; + va_list vargs; + va_start(vargs, format); + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); + va_end(vargs); + return (int)_qzz_res; +} + +#endif /* NVALGRIND */ + + +/* These requests allow control to move from the simulated CPU to the + real CPU, calling an arbitary function. + + Note that the current ThreadId is inserted as the first argument. + So this call: + + VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) + + requires f to have this signature: + + Word f(Word tid, Word arg1, Word arg2) + + where "Word" is a word-sized type. + + Note that these client requests are not entirely reliable. For example, + if you call a function with them that subsequently calls printf(), + there's a high chance Valgrind will crash. Generally, your prospects of + these working are made higher if the called function does not refer to + any global variables, and does not refer to any libc or other functions + (printf et al). Any kind of entanglement with libc or dynamic linking is + likely to have a bad outcome, for tricky reasons which we've grappled + with a lot in the past. 
+*/ +#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ + __extension__ \ + ({unsigned long _qyy_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \ + VG_USERREQ__CLIENT_CALL0, \ + _qyy_fn, \ + 0, 0, 0, 0); \ + _qyy_res; \ + }) + +#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ + __extension__ \ + ({unsigned long _qyy_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \ + VG_USERREQ__CLIENT_CALL1, \ + _qyy_fn, \ + _qyy_arg1, 0, 0, 0); \ + _qyy_res; \ + }) + +#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ + __extension__ \ + ({unsigned long _qyy_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \ + VG_USERREQ__CLIENT_CALL2, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, 0, 0); \ + _qyy_res; \ + }) + +#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ + __extension__ \ + ({unsigned long _qyy_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \ + VG_USERREQ__CLIENT_CALL3, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, \ + _qyy_arg3, 0); \ + _qyy_res; \ + }) + + +/* Counts the number of errors that have been recorded by a tool. Nb: + the tool must record the errors with VG_(maybe_record_error)() or + VG_(unique_error)() for them to be counted. */ +#define VALGRIND_COUNT_ERRORS \ + __extension__ \ + ({unsigned int _qyy_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \ + VG_USERREQ__COUNT_ERRORS, \ + 0, 0, 0, 0, 0); \ + _qyy_res; \ + }) + +/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing + when heap blocks are allocated in order to give accurate results. This + happens automatically for the standard allocator functions such as + malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, + delete[], etc. + + But if your program uses a custom allocator, this doesn't automatically + happen, and Valgrind will not do as well. 
For example, if you allocate + superblocks with mmap() and then allocate chunks of the superblocks, all + Valgrind's observations will be at the mmap() level and it won't know that + the chunks should be considered separate entities. In Memcheck's case, + that means you probably won't get heap block overrun detection (because + there won't be redzones marked as unaddressable) and you definitely won't + get any leak detection. + + The following client requests allow a custom allocator to be annotated so + that it can be handled accurately by Valgrind. + + VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated + by a malloc()-like function. For Memcheck (an illustrative case), this + does two things: + + - It records that the block has been allocated. This means any addresses + within the block mentioned in error messages will be + identified as belonging to the block. It also means that if the block + isn't freed it will be detected by the leak checker. + + - It marks the block as being addressable and undefined (if 'is_zeroed' is + not set), or addressable and defined (if 'is_zeroed' is set). This + controls how accesses to the block by the program are handled. + + 'addr' is the start of the usable block (ie. after any + redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator + can apply redzones -- these are blocks of padding at the start and end of + each block. Adding redzones is recommended as it makes it much more likely + Valgrind will spot block overruns. `is_zeroed' indicates if the memory is + zeroed (or filled with another predictable value), as is the case for + calloc(). + + VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a + heap block -- that will be used by the client program -- is allocated. 
+ It's best to put it at the outermost level of the allocator if possible; + for example, if you have a function my_alloc() which calls + internal_alloc(), and the client request is put inside internal_alloc(), + stack traces relating to the heap block will contain entries for both + my_alloc() and internal_alloc(), which is probably not what you want. + + For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out + custom blocks from within a heap block, B, that has been allocated with + malloc/calloc/new/etc, then block B will be *ignored* during leak-checking + -- the custom blocks will take precedence. + + VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For + Memcheck, it does two things: + + - It records that the block has been deallocated. This assumes that the + block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - It marks the block as being unaddressable. + + VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a + heap block is deallocated. + + In many cases, these two client requests will not be enough to get your + allocator working well with Memcheck. More specifically, if your allocator + writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call + will be necessary to mark the memory as addressable just before the zeroing + occurs, otherwise you'll get a lot of invalid write errors. For example, + you'll need to do this if your allocator recycles freed blocks, but it + zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). + Alternatively, if your allocator reuses freed blocks for allocator-internal + data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. + + Really, what's happening is a blurring of the lines between the client + program and the allocator... 
after VALGRIND_FREELIKE_BLOCK is called, the + memory should be considered unaddressable to the client program, but the + allocator knows more than the rest of the client program and so may be able + to safely access it. Extra client requests are necessary for Valgrind to + understand the distinction between the allocator and the rest of the + program. + + Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it + has to be emulated with MALLOCLIKE/FREELIKE and memory copying. + + Ignored if addr == 0. +*/ +#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ + {unsigned int __unused _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MALLOCLIKE_BLOCK, \ + addr, sizeB, rzB, is_zeroed, 0); \ + } + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ + {unsigned int __unused _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__FREELIKE_BLOCK, \ + addr, rzB, 0, 0, 0); \ + } + +/* Create a memory pool. */ +#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, 0, 0); \ + } + +/* Destroy a memory pool. */ +#define VALGRIND_DESTROY_MEMPOOL(pool) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__DESTROY_MEMPOOL, \ + pool, 0, 0, 0, 0); \ + } + +/* Associate a piece of memory with a memory pool. */ +#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MEMPOOL_ALLOC, \ + pool, addr, size, 0, 0); \ + } + +/* Disassociate a piece of memory from a memory pool. 
*/ +#define VALGRIND_MEMPOOL_FREE(pool, addr) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MEMPOOL_FREE, \ + pool, addr, 0, 0, 0); \ + } + +/* Disassociate any pieces outside a particular range. */ +#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MEMPOOL_TRIM, \ + pool, addr, size, 0, 0); \ + } + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MOVE_MEMPOOL, \ + poolA, poolB, 0, 0, 0); \ + } + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MEMPOOL_CHANGE, \ + pool, addrA, addrB, size, 0); \ + } + +/* Return 1 if a mempool exists, else 0. */ +#define VALGRIND_MEMPOOL_EXISTS(pool) \ + __extension__ \ + ({unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__MEMPOOL_EXISTS, \ + pool, 0, 0, 0, 0); \ + _qzz_res; \ + }) + +/* Mark a piece of memory as being a stack. Returns a stack id. */ +#define VALGRIND_STACK_REGISTER(start, end) \ + __extension__ \ + ({unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__STACK_REGISTER, \ + start, end, 0, 0, 0); \ + _qzz_res; \ + }) + +/* Unmark the piece of memory associated with a stack id as being a + stack. */ +#define VALGRIND_STACK_DEREGISTER(id) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__STACK_DEREGISTER, \ + id, 0, 0, 0, 0); \ + } + +/* Change the start and end address of the stack id. 
*/ +#define VALGRIND_STACK_CHANGE(id, start, end) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__STACK_CHANGE, \ + id, start, end, 0, 0); \ + } + +/* Load PDB debug info for Wine PE image_map. */ +#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ + {unsigned int _qzz_res; \ + VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \ + VG_USERREQ__LOAD_PDB_DEBUGINFO, \ + fd, ptr, total_size, delta, 0); \ + } + + +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64_linux +#undef PLAT_arm_linux +#undef PLAT_ppc32_aix5 +#undef PLAT_ppc64_aix5 + +#endif /* __VALGRIND_H */ diff --git a/src/include/xen/arch-arm.h b/src/include/xen/arch-arm.h new file mode 100644 index 00000000..ebc3aa2f --- /dev/null +++ b/src/include/xen/arch-arm.h @@ -0,0 +1,422 @@ +/****************************************************************************** + * arch-arm.h + * + * Guest OS interface to ARM Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright 2011 (C) Citrix Systems + */ + +#ifndef __XEN_PUBLIC_ARCH_ARM_H__ +#define __XEN_PUBLIC_ARCH_ARM_H__ + +FILE_LICENCE ( MIT ); + +/* + * `incontents 50 arm_abi Hypercall Calling Convention + * + * A hypercall is issued using the ARM HVC instruction. + * + * A hypercall can take up to 5 arguments. These are passed in + * registers, the first argument in x0/r0 (for arm64/arm32 guests + * respectively irrespective of whether the underlying hypervisor is + * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2, + * the forth in x3/r3 and the fifth in x4/r4. + * + * The hypercall number is passed in r12 (arm) or x16 (arm64). In both + * cases the relevant ARM procedure calling convention specifies this + * is an inter-procedure-call scratch register (e.g. for use in linker + * stubs). This use does not conflict with use during a hypercall. + * + * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG. + * + * The return value is in x0/r0. + * + * The hypercall will clobber x16/r12 and the argument registers used + * by that hypercall (except r0 which is the return value) i.e. in + * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a + * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3. + * + * Parameter structs passed to hypercalls are laid out according to + * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA + * EABI) and Procedure Call Standard for the ARM 64-bit Architecture + * (AAPCS64). Where there is a conflict the 64-bit standard should be + * used regardless of guest type. Structures which are passed as + * hypercall arguments are always little endian. 
+ * + * All memory which is shared with other entities in the system + * (including the hypervisor and other guests) must reside in memory + * which is mapped as Normal Inner-cacheable. This applies to: + * - hypercall arguments passed via a pointer to guest memory. + * - memory shared via the grant table mechanism (including PV I/O + * rings etc). + * - memory shared with the hypervisor (struct shared_info, struct + * vcpu_info, the grant table, etc). + * + * Any Inner cache allocation strategy (Write-Back, Write-Through etc) + * is acceptable. There is no restriction on the Outer-cacheability. + */ + +/* + * `incontents 55 arm_hcall Supported Hypercalls + * + * Xen on ARM makes extensive use of hardware facilities and therefore + * only a subset of the potential hypercalls are required. + * + * Since ARM uses second stage paging any machine/physical addresses + * passed to hypercalls are Guest Physical Addresses (Intermediate + * Physical Addresses) unless otherwise noted. + * + * The following hypercalls (and sub operations) are supported on the + * ARM platform. Other hypercalls should be considered + * unavailable/unsupported. + * + * HYPERVISOR_memory_op + * All generic sub-operations. 
+ * + * In addition the following arch specific sub-ops: + * * XENMEM_add_to_physmap + * * XENMEM_add_to_physmap_batch + * + * HYPERVISOR_domctl + * All generic sub-operations, with the exception of: + * * XEN_DOMCTL_iomem_permission (not yet implemented) + * * XEN_DOMCTL_irq_permission (not yet implemented) + * + * HYPERVISOR_sched_op + * All generic sub-operations, with the exception of: + * * SCHEDOP_block -- prefer wfi hardware instruction + * + * HYPERVISOR_console_io + * All generic sub-operations + * + * HYPERVISOR_xen_version + * All generic sub-operations + * + * HYPERVISOR_event_channel_op + * All generic sub-operations + * + * HYPERVISOR_physdev_op + * No sub-operations are currently supported + * + * HYPERVISOR_sysctl + * All generic sub-operations, with the exception of: + * * XEN_SYSCTL_page_offline_op + * * XEN_SYSCTL_get_pmstat + * * XEN_SYSCTL_pm_op + * + * HYPERVISOR_hvm_op + * Exactly these sub-operations are supported: + * * HVMOP_set_param + * * HVMOP_get_param + * + * HYPERVISOR_grant_table_op + * All generic sub-operations + * + * HYPERVISOR_vcpu_op + * Exactly these sub-operations are supported: + * * VCPUOP_register_vcpu_info + * * VCPUOP_register_runstate_memory_area + * + * + * Other notes on the ARM ABI: + * + * - struct start_info is not exported to ARM guests. + * + * - struct shared_info is mapped by ARM guests using the + * HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing + * XENMAPSPACE_shared_info as space parameter. + * + * - All the per-cpu struct vcpu_info are mapped by ARM guests using the + * HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0 + * struct vcpu_info. + * + * - The grant table is mapped using the HYPERVISOR_memory_op sub-op + * XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space + * parameter. The memory range specified under the Xen compatible + * hypervisor node on device tree can be used as target gpfn for the + * mapping. 
+ * + * - Xenstore is initialized by using the two hvm_params + * HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read + * with the HYPERVISOR_hvm_op sub-op HVMOP_get_param. + * + * - The paravirtualized console is initialized by using the two + * hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They + * can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param. + * + * - Event channel notifications are delivered using the percpu GIC + * interrupt specified under the Xen compatible hypervisor node on + * device tree. + * + * - The device tree Xen compatible node is fully described under Linux + * at Documentation/devicetree/bindings/arm/xen.txt. + */ + +#define XEN_HYPERCALL_TAG 0XEA1 + +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) + +#ifndef __ASSEMBLY__ +#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ + typedef union { type *p; unsigned long q; } \ + __guest_handle_ ## name; \ + typedef union { type *p; uint64_aligned_t q; } \ + __guest_handle_64_ ## name; + +/* + * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field + * in a struct in memory. On ARM is always 8 bytes sizes and 8 bytes + * aligned. + * XEN_GUEST_HANDLE_PARAM represent a guest pointer, when passed as an + * hypercall argument. It is 4 bytes on aarch and 8 bytes on aarch64. 
+ */ +#define __DEFINE_XEN_GUEST_HANDLE(name, type) \ + ___DEFINE_XEN_GUEST_HANDLE(name, type); \ + ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) +#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) +#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name +#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) +/* this is going to be changed on 64 bit */ +#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name +#define set_xen_guest_handle_raw(hnd, val) \ + do { \ + typeof(&(hnd)) _sxghr_tmp = &(hnd); \ + _sxghr_tmp->q = 0; \ + _sxghr_tmp->p = val; \ + } while ( 0 ) +#ifdef __XEN_TOOLS__ +#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) +#endif +#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */ +# define __DECL_REG(n64, n32) union { \ + uint64_t n64; \ + uint32_t n32; \ + } +#else +/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). 
*/ +#define __DECL_REG(n64, n32) uint64_t n64 +#endif + +struct vcpu_guest_core_regs +{ + /* Aarch64 Aarch32 */ + __DECL_REG(x0, r0_usr); + __DECL_REG(x1, r1_usr); + __DECL_REG(x2, r2_usr); + __DECL_REG(x3, r3_usr); + __DECL_REG(x4, r4_usr); + __DECL_REG(x5, r5_usr); + __DECL_REG(x6, r6_usr); + __DECL_REG(x7, r7_usr); + __DECL_REG(x8, r8_usr); + __DECL_REG(x9, r9_usr); + __DECL_REG(x10, r10_usr); + __DECL_REG(x11, r11_usr); + __DECL_REG(x12, r12_usr); + + __DECL_REG(x13, sp_usr); + __DECL_REG(x14, lr_usr); + + __DECL_REG(x15, __unused_sp_hyp); + + __DECL_REG(x16, lr_irq); + __DECL_REG(x17, sp_irq); + + __DECL_REG(x18, lr_svc); + __DECL_REG(x19, sp_svc); + + __DECL_REG(x20, lr_abt); + __DECL_REG(x21, sp_abt); + + __DECL_REG(x22, lr_und); + __DECL_REG(x23, sp_und); + + __DECL_REG(x24, r8_fiq); + __DECL_REG(x25, r9_fiq); + __DECL_REG(x26, r10_fiq); + __DECL_REG(x27, r11_fiq); + __DECL_REG(x28, r12_fiq); + + __DECL_REG(x29, sp_fiq); + __DECL_REG(x30, lr_fiq); + + /* Return address and mode */ + __DECL_REG(pc64, pc32); /* ELR_EL2 */ + uint32_t cpsr; /* SPSR_EL2 */ + + union { + uint32_t spsr_el1; /* AArch64 */ + uint32_t spsr_svc; /* AArch32 */ + }; + + /* AArch32 guests only */ + uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; + + /* AArch64 guests only */ + uint64_t sp_el0; + uint64_t sp_el1, elr_el1; +}; +typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t); + +#undef __DECL_REG + +typedef uint64_t xen_pfn_t; +#define PRI_xen_pfn PRIx64 + +/* Maximum number of virtual CPUs in legacy multi-processor guests. */ +/* Only one. 
All other VCPUS must use VCPUOP_register_vcpu_info */ +#define XEN_LEGACY_MAX_VCPUS 1 + +typedef uint64_t xen_ulong_t; +#define PRI_xen_ulong PRIx64 + +#if defined(__XEN__) || defined(__XEN_TOOLS__) +struct vcpu_guest_context { +#define _VGCF_online 0 +#define VGCF_online (1<<_VGCF_online) + uint32_t flags; /* VGCF_* */ + + struct vcpu_guest_core_regs user_regs; /* Core CPU registers */ + + uint32_t sctlr; + uint64_t ttbcr, ttbr0, ttbr1; +}; +typedef struct vcpu_guest_context vcpu_guest_context_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); +#endif + +struct arch_vcpu_info { +}; +typedef struct arch_vcpu_info arch_vcpu_info_t; + +struct arch_shared_info { +}; +typedef struct arch_shared_info arch_shared_info_t; +typedef uint64_t xen_callback_t; + +#endif + +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +/* PSR bits (CPSR, SPSR)*/ + +#define PSR_THUMB (1<<5) /* Thumb Mode enable */ +#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */ +#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */ +#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */ +#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */ +#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */ +#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */ +#define PSR_JAZELLE (1<<24) /* Jazelle Mode */ + +/* 32 bit modes */ +#define PSR_MODE_USR 0x10 +#define PSR_MODE_FIQ 0x11 +#define PSR_MODE_IRQ 0x12 +#define PSR_MODE_SVC 0x13 +#define PSR_MODE_MON 0x16 +#define PSR_MODE_ABT 0x17 +#define PSR_MODE_HYP 0x1a +#define PSR_MODE_UND 0x1b +#define PSR_MODE_SYS 0x1f + +/* 64 bit modes */ +#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */ +#define PSR_MODE_EL3h 0x0d +#define PSR_MODE_EL3t 0x0c +#define PSR_MODE_EL2h 0x09 +#define PSR_MODE_EL2t 0x08 +#define PSR_MODE_EL1h 0x05 +#define PSR_MODE_EL1t 0x04 +#define PSR_MODE_EL0t 0x00 + +#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC) +#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h) + 
+#define SCTLR_GUEST_INIT 0x00c50078 + +/* + * Virtual machine platform (memory layout, interrupts) + * + * These are defined for consistency between the tools and the + * hypervisor. Guests must not rely on these hardcoded values but + * should instead use the FDT. + */ + +/* Physical Address Space */ +#define GUEST_GICD_BASE 0x03001000ULL +#define GUEST_GICD_SIZE 0x00001000ULL +#define GUEST_GICC_BASE 0x03002000ULL +#define GUEST_GICC_SIZE 0x00000100ULL + +/* 16MB == 4096 pages reserved for guest to use as a region to map its + * grant table in. + */ +#define GUEST_GNTTAB_BASE 0x38000000ULL +#define GUEST_GNTTAB_SIZE 0x01000000ULL + +#define GUEST_MAGIC_BASE 0x39000000ULL +#define GUEST_MAGIC_SIZE 0x01000000ULL + +#define GUEST_RAM_BANKS 2 + +#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */ +#define GUEST_RAM0_SIZE 0xc0000000ULL + +#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */ +#define GUEST_RAM1_SIZE 0xfe00000000ULL + +#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */ +/* Largest amount of actual RAM, not including holes */ +#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE) +/* Suitable for e.g. 
const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */ +#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE } +#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE } + +/* Interrupts */ +#define GUEST_TIMER_VIRT_PPI 27 +#define GUEST_TIMER_PHYS_S_PPI 29 +#define GUEST_TIMER_PHYS_NS_PPI 30 +#define GUEST_EVTCHN_PPI 31 + +/* PSCI functions */ +#define PSCI_cpu_suspend 0 +#define PSCI_cpu_off 1 +#define PSCI_cpu_on 2 +#define PSCI_migrate 3 + +#endif + +#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/arch-x86/xen-x86_32.h b/src/include/xen/arch-x86/xen-x86_32.h new file mode 100644 index 00000000..96c8f489 --- /dev/null +++ b/src/include/xen/arch-x86/xen-x86_32.h @@ -0,0 +1,173 @@ +/****************************************************************************** + * xen-x86_32.h + * + * Guest OS interface to x86 32-bit Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004-2007, K A Fraser + */ + +#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ +#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ + +FILE_LICENCE ( MIT ); + +/* + * Hypercall interface: + * Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6) + * Output: %eax + * Access is via hypercall page (set up by guest loader or via a Xen MSR): + * call hypercall_page + hypercall-number * 32 + * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) + */ + +/* + * These flat segments are in the Xen-private section of every GDT. Since these + * are also present in the initial GDT, many OSes will be able to avoid + * installing their own GDT. + */ +#define FLAT_RING1_CS 0xe019 /* GDT index 259 */ +#define FLAT_RING1_DS 0xe021 /* GDT index 260 */ +#define FLAT_RING1_SS 0xe021 /* GDT index 260 */ +#define FLAT_RING3_CS 0xe02b /* GDT index 261 */ +#define FLAT_RING3_DS 0xe033 /* GDT index 262 */ +#define FLAT_RING3_SS 0xe033 /* GDT index 262 */ + +#define FLAT_KERNEL_CS FLAT_RING1_CS +#define FLAT_KERNEL_DS FLAT_RING1_DS +#define FLAT_KERNEL_SS FLAT_RING1_SS +#define FLAT_USER_CS FLAT_RING3_CS +#define FLAT_USER_DS FLAT_RING3_DS +#define FLAT_USER_SS FLAT_RING3_SS + +#define __HYPERVISOR_VIRT_START_PAE 0xF5800000 +#define __MACH2PHYS_VIRT_START_PAE 0xF5800000 +#define __MACH2PHYS_VIRT_END_PAE 0xF6800000 +#define HYPERVISOR_VIRT_START_PAE \ + mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) +#define MACH2PHYS_VIRT_START_PAE \ + mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) +#define MACH2PHYS_VIRT_END_PAE \ + mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) + +/* Non-PAE bounds are obsolete. 
*/ +#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 +#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 +#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 +#define HYPERVISOR_VIRT_START_NONPAE \ + mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) +#define MACH2PHYS_VIRT_START_NONPAE \ + mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) +#define MACH2PHYS_VIRT_END_NONPAE \ + mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) + +#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE +#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE +#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE + +#ifndef HYPERVISOR_VIRT_START +#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) +#endif + +#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) +#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) +#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) +#ifndef machine_to_phys_mapping +#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) +#endif + +/* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
*/ +#if defined(__XEN__) || defined(__XEN_TOOLS__) +#undef ___DEFINE_XEN_GUEST_HANDLE +#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ + typedef struct { type *p; } \ + __guest_handle_ ## name; \ + typedef struct { union { type *p; uint64_aligned_t q; }; } \ + __guest_handle_64_ ## name +#undef set_xen_guest_handle_raw +#define set_xen_guest_handle_raw(hnd, val) \ + do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ + (hnd).p = val; \ + } while ( 0 ) +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) +#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name +#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) +#endif + +#ifndef __ASSEMBLY__ + +struct cpu_user_regs { + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + uint32_t esi; + uint32_t edi; + uint32_t ebp; + uint32_t eax; + uint16_t error_code; /* private */ + uint16_t entry_vector; /* private */ + uint32_t eip; + uint16_t cs; + uint8_t saved_upcall_mask; + uint8_t _pad0; + uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ + uint32_t esp; + uint16_t ss, _pad1; + uint16_t es, _pad2; + uint16_t ds, _pad3; + uint16_t fs, _pad4; + uint16_t gs, _pad5; +}; +typedef struct cpu_user_regs cpu_user_regs_t; +DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); + +/* + * Page-directory addresses above 4GB do not fit into architectural %cr3. + * When accessing %cr3, or equivalent field in vcpu_guest_context, guests + * must use the following accessor macros to pack/unpack valid MFNs. 
+ */ +#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) +#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) + +struct arch_vcpu_info { + unsigned long cr2; + unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ +}; +typedef struct arch_vcpu_info arch_vcpu_info_t; + +struct xen_callback { + unsigned long cs; + unsigned long eip; +}; +typedef struct xen_callback xen_callback_t; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/arch-x86/xen-x86_64.h b/src/include/xen/arch-x86/xen-x86_64.h new file mode 100644 index 00000000..0e927022 --- /dev/null +++ b/src/include/xen/arch-x86/xen-x86_64.h @@ -0,0 +1,204 @@ +/****************************************************************************** + * xen-x86_64.h + * + * Guest OS interface to x86 64-bit Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004-2006, K A Fraser + */ + +#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ +#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ + +FILE_LICENCE ( MIT ); + +/* + * Hypercall interface: + * Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6) + * Output: %rax + * Access is via hypercall page (set up by guest loader or via a Xen MSR): + * call hypercall_page + hypercall-number * 32 + * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) + */ + +/* + * 64-bit segment selectors + * These flat segments are in the Xen-private section of every GDT. Since these + * are also present in the initial GDT, many OSes will be able to avoid + * installing their own GDT. + */ + +#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ +#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ +#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ +#define FLAT_RING3_DS64 0x0000 /* NULL selector */ +#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ +#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ + +#define FLAT_KERNEL_DS64 FLAT_RING3_DS64 +#define FLAT_KERNEL_DS32 FLAT_RING3_DS32 +#define FLAT_KERNEL_DS FLAT_KERNEL_DS64 +#define FLAT_KERNEL_CS64 FLAT_RING3_CS64 +#define FLAT_KERNEL_CS32 FLAT_RING3_CS32 +#define FLAT_KERNEL_CS FLAT_KERNEL_CS64 +#define FLAT_KERNEL_SS64 FLAT_RING3_SS64 +#define FLAT_KERNEL_SS32 FLAT_RING3_SS32 +#define FLAT_KERNEL_SS FLAT_KERNEL_SS64 + +#define FLAT_USER_DS64 FLAT_RING3_DS64 +#define FLAT_USER_DS32 FLAT_RING3_DS32 +#define FLAT_USER_DS FLAT_USER_DS64 +#define FLAT_USER_CS64 FLAT_RING3_CS64 +#define FLAT_USER_CS32 FLAT_RING3_CS32 +#define FLAT_USER_CS FLAT_USER_CS64 +#define FLAT_USER_SS64 FLAT_RING3_SS64 +#define FLAT_USER_SS32 FLAT_RING3_SS32 +#define 
FLAT_USER_SS FLAT_USER_SS64 + +#define __HYPERVISOR_VIRT_START 0xFFFF800000000000 +#define __HYPERVISOR_VIRT_END 0xFFFF880000000000 +#define __MACH2PHYS_VIRT_START 0xFFFF800000000000 +#define __MACH2PHYS_VIRT_END 0xFFFF804000000000 + +#ifndef HYPERVISOR_VIRT_START +#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) +#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) +#endif + +#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) +#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) +#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) +#ifndef machine_to_phys_mapping +#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) +#endif + +/* + * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) + * @which == SEGBASE_* ; @base == 64-bit base address + * Returns 0 on success. + */ +#define SEGBASE_FS 0 +#define SEGBASE_GS_USER 1 +#define SEGBASE_GS_KERNEL 2 +#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ + +/* + * int HYPERVISOR_iret(void) + * All arguments are on the kernel stack, in the following format. + * Never returns if successful. Current kernel context is lost. + * The saved CS is mapped as follows: + * RING0 -> RING3 kernel mode. + * RING1 -> RING3 kernel mode. + * RING2 -> RING3 kernel mode. + * RING3 -> RING3 user mode. + * However RING0 indicates that the guest kernel should return to iteself + * directly with + * orb $3,1*8(%rsp) + * iretq + * If flags contains VGCF_in_syscall: + * Restore RAX, RIP, RFLAGS, RSP. + * Discard R11, RCX, CS, SS. + * Otherwise: + * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. + * All other registers are saved on hypercall entry and restored to user. + */ +/* Guest exited in SYSCALL context? Return to guest with SYSRET? 
*/ +#define _VGCF_in_syscall 8 +#define VGCF_in_syscall (1<<_VGCF_in_syscall) +#define VGCF_IN_SYSCALL VGCF_in_syscall + +#ifndef __ASSEMBLY__ + +struct iret_context { + /* Top of stack (%rsp at point of hypercall). */ + uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; + /* Bottom of iret stack frame. */ +}; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ +#define __DECL_REG(name) union { \ + uint64_t r ## name, e ## name; \ + uint32_t _e ## name; \ +} +#else +/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ +#define __DECL_REG(name) uint64_t r ## name +#endif + +struct cpu_user_regs { + uint64_t r15; + uint64_t r14; + uint64_t r13; + uint64_t r12; + __DECL_REG(bp); + __DECL_REG(bx); + uint64_t r11; + uint64_t r10; + uint64_t r9; + uint64_t r8; + __DECL_REG(ax); + __DECL_REG(cx); + __DECL_REG(dx); + __DECL_REG(si); + __DECL_REG(di); + uint32_t error_code; /* private */ + uint32_t entry_vector; /* private */ + __DECL_REG(ip); + uint16_t cs, _pad0[1]; + uint8_t saved_upcall_mask; + uint8_t _pad1[3]; + __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ + __DECL_REG(sp); + uint16_t ss, _pad2[3]; + uint16_t es, _pad3[3]; + uint16_t ds, _pad4[3]; + uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ + uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. 
*/ +}; +typedef struct cpu_user_regs cpu_user_regs_t; +DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); + +#undef __DECL_REG + +#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) +#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) + +struct arch_vcpu_info { + unsigned long cr2; + unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ +}; +typedef struct arch_vcpu_info arch_vcpu_info_t; + +typedef unsigned long xen_callback_t; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/arch-x86/xen.h b/src/include/xen/arch-x86/xen.h new file mode 100644 index 00000000..d75528f0 --- /dev/null +++ b/src/include/xen/arch-x86/xen.h @@ -0,0 +1,275 @@ +/****************************************************************************** + * arch-x86/xen.h + * + * Guest OS interface to x86 Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004-2006, K A Fraser + */ + +#include "../xen.h" + +#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ +#define __XEN_PUBLIC_ARCH_X86_XEN_H__ + +FILE_LICENCE ( MIT ); + +/* Structural guest handles introduced in 0x00030201. */ +#if __XEN_INTERFACE_VERSION__ >= 0x00030201 +#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ + typedef struct { type *p; } __guest_handle_ ## name +#else +#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ + typedef type * __guest_handle_ ## name +#endif + +/* + * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field + * in a struct in memory. + * XEN_GUEST_HANDLE_PARAM represent a guest pointer, when passed as an + * hypercall argument. + * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but + * they might not be on other architectures. 
+ */ +#define __DEFINE_XEN_GUEST_HANDLE(name, type) \ + ___DEFINE_XEN_GUEST_HANDLE(name, type); \ + ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) +#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) +#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name +#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) +#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name) +#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0) +#ifdef __XEN_TOOLS__ +#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) +#endif +#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) + +#if defined(__i386__) +#include "xen-x86_32.h" +#elif defined(__x86_64__) +#include "xen-x86_64.h" +#endif + +#ifndef __ASSEMBLY__ +typedef unsigned long xen_pfn_t; +#define PRI_xen_pfn "lx" +#endif + +#define XEN_HAVE_PV_GUEST_ENTRY 1 + +#define XEN_HAVE_PV_UPCALL_MASK 1 + +/* + * `incontents 200 segdesc Segment Descriptor Tables + */ +/* + * ` enum neg_errnoval + * ` HYPERVISOR_set_gdt(const xen_pfn_t frames[], unsigned int entries); + * ` + */ +/* + * A number of GDT entries are reserved by Xen. These are not situated at the + * start of the GDT because some stupid OSes export hard-coded selector values + * in their ABI. These hard-coded values are always near the start of the GDT, + * so Xen places itself out of the way, at the far end of the GDT. + * + * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op + */ +#define FIRST_RESERVED_GDT_PAGE 14 +#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) +#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) + + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc); + * ` + * ` @pa The machine physical address of the descriptor to + * ` update. Must be either a descriptor page or writable. + * ` @desc The descriptor value to update, in the same format as a + * ` native descriptor table entry. 
+ */ + +/* Maximum number of virtual CPUs in legacy multi-processor guests. */ +#define XEN_LEGACY_MAX_VCPUS 32 + +#ifndef __ASSEMBLY__ + +typedef unsigned long xen_ulong_t; +#define PRI_xen_ulong "lx" + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp); + * ` + * Sets the stack segment and pointer for the current vcpu. + */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_set_trap_table(const struct trap_info traps[]); + * ` + */ +/* + * Send an array of these to HYPERVISOR_set_trap_table(). + * Terminate the array with a sentinel entry, with traps[].address==0. + * The privilege level specifies which modes may enter a trap via a software + * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate + * privilege levels as follows: + * Level == 0: Noone may enter + * Level == 1: Kernel may enter + * Level == 2: Kernel may enter + * Level == 3: Everyone may enter + */ +#define TI_GET_DPL(_ti) ((_ti)->flags & 3) +#define TI_GET_IF(_ti) ((_ti)->flags & 4) +#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) +#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) +struct trap_info { + uint8_t vector; /* exception vector */ + uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ + uint16_t cs; /* code selector */ + unsigned long address; /* code offset */ +}; +typedef struct trap_info trap_info_t; +DEFINE_XEN_GUEST_HANDLE(trap_info_t); + +typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ + +/* + * The following is all CPU context. Note that the fpu_ctxt block is filled + * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 
+ * + * Also note that when calling DOMCTL_setvcpucontext and VCPU_initialise + * for HVM and PVH guests, not all information in this structure is updated: + * + * - For HVM guests, the structures read include: fpu_ctxt (if + * VGCT_I387_VALID is set), flags, user_regs, debugreg[*] + * + * - PVH guests are the same as HVM guests, but additionally use ctrlreg[3] to + * set cr3. All other fields not used should be set to 0. + */ +struct vcpu_guest_context { + /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */ + struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ +#define VGCF_I387_VALID (1<<0) +#define VGCF_IN_KERNEL (1<<2) +#define _VGCF_i387_valid 0 +#define VGCF_i387_valid (1<<_VGCF_i387_valid) +#define _VGCF_in_kernel 2 +#define VGCF_in_kernel (1<<_VGCF_in_kernel) +#define _VGCF_failsafe_disables_events 3 +#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) +#define _VGCF_syscall_disables_events 4 +#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) +#define _VGCF_online 5 +#define VGCF_online (1<<_VGCF_online) + unsigned long flags; /* VGCF_* flags */ + struct cpu_user_regs user_regs; /* User-level CPU registers */ + struct trap_info trap_ctxt[256]; /* Virtual IDT */ + unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ + unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ + unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ + /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. 
*/ + unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ + unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ +#ifdef __i386__ + unsigned long event_callback_cs; /* CS:EIP of event callback */ + unsigned long event_callback_eip; + unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ + unsigned long failsafe_callback_eip; +#else + unsigned long event_callback_eip; + unsigned long failsafe_callback_eip; +#ifdef __XEN__ + union { + unsigned long syscall_callback_eip; + struct { + unsigned int event_callback_cs; /* compat CS of event cb */ + unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ + }; + }; +#else + unsigned long syscall_callback_eip; +#endif +#endif + unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ +#ifdef __x86_64__ + /* Segment base addresses. */ + uint64_t fs_base; + uint64_t gs_base_kernel; + uint64_t gs_base_user; +#endif +}; +typedef struct vcpu_guest_context vcpu_guest_context_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); + +struct arch_shared_info { + unsigned long max_pfn; /* max pfn that appears in table */ + /* Frame containing list of mfns containing list of mfns containing p2m. */ + xen_pfn_t pfn_to_mfn_frame_list_list; + unsigned long nmi_reason; + uint64_t pad[32]; +}; +typedef struct arch_shared_info arch_shared_info_t; + +#endif /* !__ASSEMBLY__ */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_fpu_taskswitch(int set); + * ` + * Sets (if set!=0) or clears (if set==0) CR0.TS. + */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_set_debugreg(int regno, unsigned long value); + * + * ` unsigned long + * ` HYPERVISOR_get_debugreg(int regno); + * For 0<=reg<=7, returns the debug register value. + * For other values of reg, returns ((unsigned long)-EINVAL). + * (Unfortunately, this interface is defective.) + */ + +/* + * Prefix forces emulation of some non-trapping instructions. + * Currently only CPUID. 
+ */ +#ifdef __ASSEMBLY__ +#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; +#define XEN_CPUID XEN_EMULATE_PREFIX cpuid +#else +#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " +#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" +#endif + +#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/event_channel.h b/src/include/xen/event_channel.h new file mode 100644 index 00000000..356e946d --- /dev/null +++ b/src/include/xen/event_channel.h @@ -0,0 +1,383 @@ +/****************************************************************************** + * event_channel.h + * + * Event channels between domains. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2003-2004, K A Fraser. 
+ */ + +#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ +#define __XEN_PUBLIC_EVENT_CHANNEL_H__ + +FILE_LICENCE ( MIT ); + +#include "xen.h" + +/* + * `incontents 150 evtchn Event Channels + * + * Event channels are the basic primitive provided by Xen for event + * notifications. An event is the Xen equivalent of a hardware + * interrupt. They essentially store one bit of information, the event + * of interest is signalled by transitioning this bit from 0 to 1. + * + * Notifications are received by a guest via an upcall from Xen, + * indicating when an event arrives (setting the bit). Further + * notifications are masked until the bit is cleared again (therefore, + * guests must check the value of the bit after re-enabling event + * delivery to ensure no missed notifications). + * + * Event notifications can be masked by setting a flag; this is + * equivalent to disabling interrupts and can be used to ensure + * atomicity of certain operations in the guest kernel. + * + * Event channels are represented by the evtchn_* fields in + * struct shared_info and struct vcpu_info. + */ + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args) + * ` + * @cmd == EVTCHNOP_* (event-channel operation). + * @args == struct evtchn_* Operation-specific extra arguments (NULL if none). 
+ */ + +/* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */ +#define EVTCHNOP_bind_interdomain 0 +#define EVTCHNOP_bind_virq 1 +#define EVTCHNOP_bind_pirq 2 +#define EVTCHNOP_close 3 +#define EVTCHNOP_send 4 +#define EVTCHNOP_status 5 +#define EVTCHNOP_alloc_unbound 6 +#define EVTCHNOP_bind_ipi 7 +#define EVTCHNOP_bind_vcpu 8 +#define EVTCHNOP_unmask 9 +#define EVTCHNOP_reset 10 +#define EVTCHNOP_init_control 11 +#define EVTCHNOP_expand_array 12 +#define EVTCHNOP_set_priority 13 +/* ` } */ + +typedef uint32_t evtchn_port_t; +DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); + +/* + * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as + * accepting interdomain bindings from domain . A fresh port + * is allocated in and returned as . + * NOTES: + * 1. If the caller is unprivileged then must be DOMID_SELF. + * 2. may be DOMID_SELF, allowing loopback connections. + */ +struct evtchn_alloc_unbound { + /* IN parameters */ + domid_t dom, remote_dom; + /* OUT parameters */ + evtchn_port_t port; +}; +typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; + +/* + * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between + * the calling domain and . must identify + * a port that is unbound and marked as accepting bindings from the calling + * domain. A fresh port is allocated in the calling domain and returned as + * . + * + * In case the peer domain has already tried to set our event channel + * pending, before it was bound, EVTCHNOP_bind_interdomain always sets + * the local event channel pending. + * + * The usual pattern of use, in the guest's upcall (or subsequent + * handler) is as follows: (Re-enable the event channel for subsequent + * signalling and then) check for the existence of whatever condition + * is being waited for by other means, and take whatever action is + * needed (if any). + * + * NOTES: + * 1. may be DOMID_SELF, allowing loopback connections. + */ +struct evtchn_bind_interdomain { + /* IN parameters. 
*/ + domid_t remote_dom; + evtchn_port_t remote_port; + /* OUT parameters. */ + evtchn_port_t local_port; +}; +typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; + +/* + * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified + * vcpu. + * NOTES: + * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list + * in xen.h for the classification of each VIRQ. + * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be + * re-bound via EVTCHNOP_bind_vcpu. + * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. + * The allocated event channel is bound to the specified vcpu and the + * binding cannot be changed. + */ +struct evtchn_bind_virq { + /* IN parameters. */ + uint32_t virq; /* enum virq */ + uint32_t vcpu; + /* OUT parameters. */ + evtchn_port_t port; +}; +typedef struct evtchn_bind_virq evtchn_bind_virq_t; + +/* + * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ ). + * NOTES: + * 1. A physical IRQ may be bound to at most one event channel per domain. + * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. + */ +struct evtchn_bind_pirq { + /* IN parameters. */ + uint32_t pirq; +#define BIND_PIRQ__WILL_SHARE 1 + uint32_t flags; /* BIND_PIRQ__* */ + /* OUT parameters. */ + evtchn_port_t port; +}; +typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; + +/* + * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. + * NOTES: + * 1. The allocated event channel is bound to the specified vcpu. The binding + * may not be changed. + */ +struct evtchn_bind_ipi { + uint32_t vcpu; + /* OUT parameters. */ + evtchn_port_t port; +}; +typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; + +/* + * EVTCHNOP_close: Close a local event channel . If the channel is + * interdomain then the remote end is placed in the unbound state + * (EVTCHNSTAT_unbound), awaiting a new connection. + */ +struct evtchn_close { + /* IN parameters. 
*/ + evtchn_port_t port; +}; +typedef struct evtchn_close evtchn_close_t; + +/* + * EVTCHNOP_send: Send an event to the remote end of the channel whose local + * endpoint is . + */ +struct evtchn_send { + /* IN parameters. */ + evtchn_port_t port; +}; +typedef struct evtchn_send evtchn_send_t; + +/* + * EVTCHNOP_status: Get the current status of the communication channel which + * has an endpoint at . + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may obtain the status of an event + * channel for which is not DOMID_SELF. + */ +struct evtchn_status { + /* IN parameters */ + domid_t dom; + evtchn_port_t port; + /* OUT parameters */ +#define EVTCHNSTAT_closed 0 /* Channel is not in use. */ +#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ +#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ +#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ +#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ +#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ + uint32_t status; + uint32_t vcpu; /* VCPU to which this channel is bound. */ + union { + struct { + domid_t dom; + } unbound; /* EVTCHNSTAT_unbound */ + struct { + domid_t dom; + evtchn_port_t port; + } interdomain; /* EVTCHNSTAT_interdomain */ + uint32_t pirq; /* EVTCHNSTAT_pirq */ + uint32_t virq; /* EVTCHNSTAT_virq */ + } u; +}; +typedef struct evtchn_status evtchn_status_t; + +/* + * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an + * event is pending. + * NOTES: + * 1. IPI-bound channels always notify the vcpu specified at bind time. + * This binding cannot be changed. + * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. + * This binding cannot be changed. + * 3. All other channels notify vcpu0 by default. 
This default is set when + * the channel is allocated (a port that is freed and subsequently reused + * has its binding reset to vcpu0). + */ +struct evtchn_bind_vcpu { + /* IN parameters. */ + evtchn_port_t port; + uint32_t vcpu; +}; +typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; + +/* + * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver + * a notification to the appropriate VCPU if an event is pending. + */ +struct evtchn_unmask { + /* IN parameters. */ + evtchn_port_t port; +}; +typedef struct evtchn_unmask evtchn_unmask_t; + +/* + * EVTCHNOP_reset: Close all event channels associated with specified domain. + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. + */ +struct evtchn_reset { + /* IN parameters. */ + domid_t dom; +}; +typedef struct evtchn_reset evtchn_reset_t; + +/* + * EVTCHNOP_init_control: initialize the control block for the FIFO ABI. + * + * Note: any events that are currently pending will not be resent and + * will be lost. Guests should call this before binding any event to + * avoid losing any events. + */ +struct evtchn_init_control { + /* IN parameters. */ + uint64_t control_gfn; + uint32_t offset; + uint32_t vcpu; + /* OUT parameters. */ + uint8_t link_bits; + uint8_t _pad[7]; +}; +typedef struct evtchn_init_control evtchn_init_control_t; + +/* + * EVTCHNOP_expand_array: add an additional page to the event array. + */ +struct evtchn_expand_array { + /* IN parameters. */ + uint64_t array_gfn; +}; +typedef struct evtchn_expand_array evtchn_expand_array_t; + +/* + * EVTCHNOP_set_priority: set the priority for an event channel. + */ +struct evtchn_set_priority { + /* IN parameters. 
*/ + uint32_t port; + uint32_t priority; +}; +typedef struct evtchn_set_priority evtchn_set_priority_t; + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op) + * ` + * Superceded by new event_channel_op() hypercall since 0x00030202. + */ +struct evtchn_op { + uint32_t cmd; /* enum event_channel_op */ + union { + struct evtchn_alloc_unbound alloc_unbound; + struct evtchn_bind_interdomain bind_interdomain; + struct evtchn_bind_virq bind_virq; + struct evtchn_bind_pirq bind_pirq; + struct evtchn_bind_ipi bind_ipi; + struct evtchn_close close; + struct evtchn_send send; + struct evtchn_status status; + struct evtchn_bind_vcpu bind_vcpu; + struct evtchn_unmask unmask; + } u; +}; +typedef struct evtchn_op evtchn_op_t; +DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); + +/* + * 2-level ABI + */ + +#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64) + +/* + * FIFO ABI + */ + +/* Events may have priorities from 0 (highest) to 15 (lowest). */ +#define EVTCHN_FIFO_PRIORITY_MAX 0 +#define EVTCHN_FIFO_PRIORITY_DEFAULT 7 +#define EVTCHN_FIFO_PRIORITY_MIN 15 + +#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1) + +typedef uint32_t event_word_t; + +#define EVTCHN_FIFO_PENDING 31 +#define EVTCHN_FIFO_MASKED 30 +#define EVTCHN_FIFO_LINKED 29 +#define EVTCHN_FIFO_BUSY 28 + +#define EVTCHN_FIFO_LINK_BITS 17 +#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1) + +#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS) + +struct evtchn_fifo_control_block { + uint32_t ready; + uint32_t _rsvd; + uint32_t head[EVTCHN_FIFO_MAX_QUEUES]; +}; +typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t; + +#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/features.h b/src/include/xen/features.h new file mode 100644 index 00000000..13026581 
--- /dev/null +++ b/src/include/xen/features.h @@ -0,0 +1,111 @@ +/****************************************************************************** + * features.h + * + * Feature flags, reported by XENVER_get_features. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2006, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_FEATURES_H__ +#define __XEN_PUBLIC_FEATURES_H__ + +FILE_LICENCE ( MIT ); + +/* + * `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES + * + * The list of all the features the guest supports. They are set by + * parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES + * string. The format is the feature names (as given here without the + * "XENFEAT_" prefix) separated by '|' characters. + * If a feature is required for the kernel to function then the feature name + * must be preceded by a '!' character. 
+ * + * Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then in the + * XENFEAT_dom0 MUST be set if the guest is to be booted as dom0, + */ + +/* + * If set, the guest does not need to write-protect its pagetables, and can + * update them via direct writes. + */ +#define XENFEAT_writable_page_tables 0 + +/* + * If set, the guest does not need to write-protect its segment descriptor + * tables, and can update them via direct writes. + */ +#define XENFEAT_writable_descriptor_tables 1 + +/* + * If set, translation between the guest's 'pseudo-physical' address space + * and the host's machine address space are handled by the hypervisor. In this + * mode the guest does not need to perform phys-to/from-machine translations + * when performing page table operations. + */ +#define XENFEAT_auto_translated_physmap 2 + +/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ +#define XENFEAT_supervisor_mode_kernel 3 + +/* + * If set, the guest does not need to allocate x86 PAE page directories + * below 4GB. This flag is usually implied by auto_translated_physmap. + */ +#define XENFEAT_pae_pgdir_above_4gb 4 + +/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ +#define XENFEAT_mmu_pt_update_preserve_ad 5 + +/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ +#define XENFEAT_highmem_assist 6 + +/* + * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel + * available pte bits. + */ +#define XENFEAT_gnttab_map_avail_bits 7 + +/* x86: Does this Xen host support the HVM callback vector type? 
*/ +#define XENFEAT_hvm_callback_vector 8 + +/* x86: pvclock algorithm is safe to use on HVM */ +#define XENFEAT_hvm_safe_pvclock 9 + +/* x86: pirq can be used by HVM guests */ +#define XENFEAT_hvm_pirqs 10 + +/* operation as Dom0 is supported */ +#define XENFEAT_dom0 11 + +#define XENFEAT_NR_SUBMAPS 1 + +#endif /* __XEN_PUBLIC_FEATURES_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/grant_table.h b/src/include/xen/grant_table.h new file mode 100644 index 00000000..137939e7 --- /dev/null +++ b/src/include/xen/grant_table.h @@ -0,0 +1,664 @@ +/****************************************************************************** + * grant_table.h + * + * Interface for granting foreign access to page frames, and receiving + * page-ownership transfers. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Copyright (c) 2004, K A Fraser + */ + +#ifndef __XEN_PUBLIC_GRANT_TABLE_H__ +#define __XEN_PUBLIC_GRANT_TABLE_H__ + +FILE_LICENCE ( MIT ); + +#include "xen.h" + +/* + * `incontents 150 gnttab Grant Tables + * + * Xen's grant tables provide a generic mechanism to memory sharing + * between domains. This shared memory interface underpins the split + * device drivers for block and network IO. + * + * Each domain has its own grant table. This is a data structure that + * is shared with Xen; it allows the domain to tell Xen what kind of + * permissions other domains have on its pages. Entries in the grant + * table are identified by grant references. A grant reference is an + * integer, which indexes into the grant table. It acts as a + * capability which the grantee can use to perform operations on the + * granter’s memory. + * + * This capability-based system allows shared-memory communications + * between unprivileged domains. A grant reference also encapsulates + * the details of a shared page, removing the need for a domain to + * know the real machine address of a page it is sharing. This makes + * it possible to share memory correctly with domains running in + * fully virtualised memory. + */ + +/*********************************** + * GRANT TABLE REPRESENTATION + */ + +/* Some rough guidelines on accessing and updating grant-table entries + * in a concurrency-safe manner. For more information, Linux contains a + * reference implementation for guest OSes (drivers/xen/grant_table.c, see + * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD + * + * NB. WMB is a no-op on current-generation x86 processors. However, a + * compiler barrier will still be required. + * + * Introducing a valid entry into the grant table: + * 1. Write ent->domid. + * 2. Write ent->frame: + * GTF_permit_access: Frame to which access is permitted. 
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new + * frame, or zero if none. + * 3. Write memory barrier (WMB). + * 4. Write ent->flags, inc. valid type. + * + * Invalidating an unused GTF_permit_access entry: + * 1. flags = ent->flags. + * 2. Observe that !(flags & (GTF_reading|GTF_writing)). + * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). + * NB. No need for WMB as reuse of entry is control-dependent on success of + * step 3, and all architectures guarantee ordering of ctrl-dep writes. + * + * Invalidating an in-use GTF_permit_access entry: + * This cannot be done directly. Request assistance from the domain controller + * which can set a timeout on the use of a grant entry and take necessary + * action. (NB. This is not yet implemented!). + * + * Invalidating an unused GTF_accept_transfer entry: + * 1. flags = ent->flags. + * 2. Observe that !(flags & GTF_transfer_committed). [*] + * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). + * NB. No need for WMB as reuse of entry is control-dependent on success of + * step 3, and all architectures guarantee ordering of ctrl-dep writes. + * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. + * The guest must /not/ modify the grant entry until the address of the + * transferred frame is written. It is safe for the guest to spin waiting + * for this to occur (detect by observing GTF_transfer_completed in + * ent->flags). + * + * Invalidating a committed GTF_accept_transfer entry: + * 1. Wait for (ent->flags & GTF_transfer_completed). + * + * Changing a GTF_permit_access from writable to read-only: + * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. + * + * Changing a GTF_permit_access from read-only to writable: + * Use SMP-safe bit-setting instruction. + */ + +/* + * Reference to a grant entry in a specified domain's grant table. 
+ */ +typedef uint32_t grant_ref_t; + +/* + * A grant table comprises a packed array of grant entries in one or more + * page frames shared between Xen and a guest. + * [XEN]: This field is written by Xen and read by the sharing guest. + * [GST]: This field is written by the guest and read by Xen. + */ + +/* + * Version 1 of the grant table entry structure is maintained purely + * for backwards compatibility. New guests should use version 2. + */ +#if __XEN_INTERFACE_VERSION__ < 0x0003020a +#define grant_entry_v1 grant_entry +#define grant_entry_v1_t grant_entry_t +#endif +struct grant_entry_v1 { + /* GTF_xxx: various type and flag information. [XEN,GST] */ + uint16_t flags; + /* The domain being granted foreign privileges. [GST] */ + domid_t domid; + /* + * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] + * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] + */ + uint32_t frame; +}; +typedef struct grant_entry_v1 grant_entry_v1_t; + +/* The first few grant table entries will be preserved across grant table + * version changes and may be pre-populated at domain creation by tools. + */ +#define GNTTAB_NR_RESERVED_ENTRIES 8 +#define GNTTAB_RESERVED_CONSOLE 0 +#define GNTTAB_RESERVED_XENSTORE 1 + +/* + * Type of grant entry. + * GTF_invalid: This grant entry grants no privileges. + * GTF_permit_access: Allow @domid to map/access @frame. + * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame + * to this guest. Xen writes the page number to @frame. + * GTF_transitive: Allow @domid to transitively access a subrange of + * @trans_grant in @trans_domid. No mappings are allowed. + */ +#define GTF_invalid (0U<<0) +#define GTF_permit_access (1U<<0) +#define GTF_accept_transfer (2U<<0) +#define GTF_transitive (3U<<0) +#define GTF_type_mask (3U<<0) + +/* + * Subflags for GTF_permit_access. + * GTF_readonly: Restrict @domid to read-only mappings and accesses. 
[GST] + * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] + * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] + * GTF_sub_page: Grant access to only a subrange of the page. @domid + * will only be allowed to copy from the grant, and not + * map it. [GST] + */ +#define _GTF_readonly (2) +#define GTF_readonly (1U<<_GTF_readonly) +#define _GTF_reading (3) +#define GTF_reading (1U<<_GTF_reading) +#define _GTF_writing (4) +#define GTF_writing (1U<<_GTF_writing) +#define _GTF_PWT (5) +#define GTF_PWT (1U<<_GTF_PWT) +#define _GTF_PCD (6) +#define GTF_PCD (1U<<_GTF_PCD) +#define _GTF_PAT (7) +#define GTF_PAT (1U<<_GTF_PAT) +#define _GTF_sub_page (8) +#define GTF_sub_page (1U<<_GTF_sub_page) + +/* + * Subflags for GTF_accept_transfer: + * GTF_transfer_committed: Xen sets this flag to indicate that it is committed + * to transferring ownership of a page frame. When a guest sees this flag + * it must /not/ modify the grant entry until GTF_transfer_completed is + * set by Xen. + * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag + * after reading GTF_transfer_committed. Xen will always write the frame + * address, followed by ORing this flag, in a timely manner. + */ +#define _GTF_transfer_committed (2) +#define GTF_transfer_committed (1U<<_GTF_transfer_committed) +#define _GTF_transfer_completed (3) +#define GTF_transfer_completed (1U<<_GTF_transfer_completed) + +/* + * Version 2 grant table entries. These fulfil the same role as + * version 1 entries, but can represent more complicated operations. + * Any given domain will have either a version 1 or a version 2 table, + * and every entry in the table will be the same version. + * + * The interface by which domains use grant references does not depend + * on the grant table version in use by the other domain. 
+ */ +#if __XEN_INTERFACE_VERSION__ >= 0x0003020a +/* + * Version 1 and version 2 grant entries share a common prefix. The + * fields of the prefix are documented as part of struct + * grant_entry_v1. + */ +struct grant_entry_header { + uint16_t flags; + domid_t domid; +}; +typedef struct grant_entry_header grant_entry_header_t; + +/* + * Version 2 of the grant entry structure. + */ +union grant_entry_v2 { + grant_entry_header_t hdr; + + /* + * This member is used for V1-style full page grants, where either: + * + * -- hdr.type is GTF_accept_transfer, or + * -- hdr.type is GTF_permit_access and GTF_sub_page is not set. + * + * In that case, the frame field has the same semantics as the + * field of the same name in the V1 entry structure. + */ + struct { + grant_entry_header_t hdr; + uint32_t pad0; + uint64_t frame; + } full_page; + + /* + * If the grant type is GTF_grant_access and GTF_sub_page is set, + * @domid is allowed to access bytes [@page_off,@page_off+@length) + * in frame @frame. + */ + struct { + grant_entry_header_t hdr; + uint16_t page_off; + uint16_t length; + uint64_t frame; + } sub_page; + + /* + * If the grant is GTF_transitive, @domid is allowed to use the + * grant @gref in domain @trans_domid, as if it was the local + * domain. Obviously, the transitive access must be compatible + * with the original grant. + * + * The current version of Xen does not allow transitive grants + * to be mapped. 
+ */ + struct { + grant_entry_header_t hdr; + domid_t trans_domid; + uint16_t pad0; + grant_ref_t gref; + } transitive; + + uint32_t __spacer[4]; /* Pad to a power of two */ +}; +typedef union grant_entry_v2 grant_entry_v2_t; + +typedef uint16_t grant_status_t; + +#endif /* __XEN_INTERFACE_VERSION__ */ + +/*********************************** + * GRANT TABLE QUERIES AND USES + */ + +/* ` enum neg_errnoval + * ` HYPERVISOR_grant_table_op(enum grant_table_op cmd, + * ` void *args, + * ` unsigned int count) + * ` + * + * @args points to an array of a per-command data structure. The array + * has @count members + */ + +/* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */ +#define GNTTABOP_map_grant_ref 0 +#define GNTTABOP_unmap_grant_ref 1 +#define GNTTABOP_setup_table 2 +#define GNTTABOP_dump_table 3 +#define GNTTABOP_transfer 4 +#define GNTTABOP_copy 5 +#define GNTTABOP_query_size 6 +#define GNTTABOP_unmap_and_replace 7 +#if __XEN_INTERFACE_VERSION__ >= 0x0003020a +#define GNTTABOP_set_version 8 +#define GNTTABOP_get_status_frames 9 +#define GNTTABOP_get_version 10 +#define GNTTABOP_swap_grant_ref 11 +#endif /* __XEN_INTERFACE_VERSION__ */ +/* ` } */ + +/* + * Handle to track a mapping created via a grant reference. + */ +typedef uint32_t grant_handle_t; + +/* + * GNTTABOP_map_grant_ref: Map the grant entry (,) for access + * by devices and/or host CPUs. If successful, is a tracking number + * that must be presented later to destroy the mapping(s). On error, + * is a negative status code. + * NOTES: + * 1. If GNTMAP_device_map is specified then is the address + * via which I/O devices may access the granted frame. + * 2. If GNTMAP_host_map is specified then a mapping will be added at + * either a host virtual address in the current address space, or at + * a PTE at the specified machine address. The type of mapping to + * perform is selected through the GNTMAP_contains_pte flag, and the + * address is specified in . + * 3. 
Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a + * host mapping is destroyed by other means then it is *NOT* guaranteed + * to be accounted to the correct grant reference! + */ +struct gnttab_map_grant_ref { + /* IN parameters. */ + uint64_t host_addr; + uint32_t flags; /* GNTMAP_* */ + grant_ref_t ref; + domid_t dom; + /* OUT parameters. */ + int16_t status; /* => enum grant_status */ + grant_handle_t handle; + uint64_t dev_bus_addr; +}; +typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); + +/* + * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings + * tracked by . If or is zero, that + * field is ignored. If non-zero, they must refer to a device/host mapping + * that is tracked by + * NOTES: + * 1. The call may fail in an undefined manner if either mapping is not + * tracked by . + * 3. After executing a batch of unmaps, it is guaranteed that no stale + * mappings will remain in the device or host TLBs. + */ +struct gnttab_unmap_grant_ref { + /* IN parameters. */ + uint64_t host_addr; + uint64_t dev_bus_addr; + grant_handle_t handle; + /* OUT parameters. */ + int16_t status; /* => enum grant_status */ +}; +typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); + +/* + * GNTTABOP_setup_table: Set up a grant table for comprising at least + * pages. The frame addresses are written to the . + * Only addresses are written, even if the table is larger. + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. + * 3. Xen may not support more than a single grant-table page per domain. + */ +struct gnttab_setup_table { + /* IN parameters. */ + domid_t dom; + uint32_t nr_frames; + /* OUT parameters. 
*/ + int16_t status; /* => enum grant_status */ +#if __XEN_INTERFACE_VERSION__ < 0x00040300 + XEN_GUEST_HANDLE(ulong) frame_list; +#else + XEN_GUEST_HANDLE(xen_pfn_t) frame_list; +#endif +}; +typedef struct gnttab_setup_table gnttab_setup_table_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); + +/* + * GNTTABOP_dump_table: Dump the contents of the grant table to the + * xen console. Debugging use only. + */ +struct gnttab_dump_table { + /* IN parameters. */ + domid_t dom; + /* OUT parameters. */ + int16_t status; /* => enum grant_status */ +}; +typedef struct gnttab_dump_table gnttab_dump_table_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); + +/* + * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The + * foreign domain has previously registered its interest in the transfer via + * . + * + * Note that, even if the transfer fails, the specified page no longer belongs + * to the calling domain *unless* the error is GNTST_bad_page. + */ +struct gnttab_transfer { + /* IN parameters. */ + xen_pfn_t mfn; + domid_t domid; + grant_ref_t ref; + /* OUT parameters. */ + int16_t status; +}; +typedef struct gnttab_transfer gnttab_transfer_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); + + +/* + * GNTTABOP_copy: Hypervisor based copy + * source and destinations can be eithers MFNs or, for foreign domains, + * grant references. the foreign domain has to grant read/write access + * in its grant table. + * + * The flags specify what type source and destinations are (either MFN + * or grant reference). + * + * Note that this can also be used to copy data between two domains + * via a third party if the source and destination domains had previously + * grant appropriate access to their pages to the third party. + * + * source_offset specifies an offset in the source frame, dest_offset + * the offset in the target frame and len specifies the number of + * bytes to be copied. 
+ */ + +#define _GNTCOPY_source_gref (0) +#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) +#define _GNTCOPY_dest_gref (1) +#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) + +struct gnttab_copy { + /* IN parameters. */ + struct { + union { + grant_ref_t ref; + xen_pfn_t gmfn; + } u; + domid_t domid; + uint16_t offset; + } source, dest; + uint16_t len; + uint16_t flags; /* GNTCOPY_* */ + /* OUT parameters. */ + int16_t status; +}; +typedef struct gnttab_copy gnttab_copy_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); + +/* + * GNTTABOP_query_size: Query the current and maximum sizes of the shared + * grant table. + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. + */ +struct gnttab_query_size { + /* IN parameters. */ + domid_t dom; + /* OUT parameters. */ + uint32_t nr_frames; + uint32_t max_nr_frames; + int16_t status; /* => enum grant_status */ +}; +typedef struct gnttab_query_size gnttab_query_size_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); + +/* + * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings + * tracked by but atomically replace the page table entry with one + * pointing to the machine address under . will be + * redirected to the null entry. + * NOTES: + * 1. The call may fail in an undefined manner if either mapping is not + * tracked by . + * 2. After executing a batch of unmaps, it is guaranteed that no stale + * mappings will remain in the device or host TLBs. + */ +struct gnttab_unmap_and_replace { + /* IN parameters. */ + uint64_t host_addr; + uint64_t new_addr; + grant_handle_t handle; + /* OUT parameters. */ + int16_t status; /* => enum grant_status */ +}; +typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); + +#if __XEN_INTERFACE_VERSION__ >= 0x0003020a +/* + * GNTTABOP_set_version: Request a particular version of the grant + * table shared table structure. 
This operation can only be performed + * once in any given domain. It must be performed before any grants + * are activated; otherwise, the domain will be stuck with version 1. + * The only defined versions are 1 and 2. + */ +struct gnttab_set_version { + /* IN/OUT parameters */ + uint32_t version; +}; +typedef struct gnttab_set_version gnttab_set_version_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t); + + +/* + * GNTTABOP_get_status_frames: Get the list of frames used to store grant + * status for . In grant format version 2, the status is separated + * from the other shared grant fields to allow more efficient synchronization + * using barriers instead of atomic cmpexch operations. + * specify the size of vector . + * The frame addresses are returned in the . + * Only addresses are returned, even if the table is larger. + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. + */ +struct gnttab_get_status_frames { + /* IN parameters. */ + uint32_t nr_frames; + domid_t dom; + /* OUT parameters. */ + int16_t status; /* => enum grant_status */ + XEN_GUEST_HANDLE(uint64_t) frame_list; +}; +typedef struct gnttab_get_status_frames gnttab_get_status_frames_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t); + +/* + * GNTTABOP_get_version: Get the grant table version which is in + * effect for domain . + */ +struct gnttab_get_version { + /* IN parameters */ + domid_t dom; + uint16_t pad; + /* OUT parameters */ + uint32_t version; +}; +typedef struct gnttab_get_version gnttab_get_version_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t); + +/* + * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries. 
+ */ +struct gnttab_swap_grant_ref { + /* IN parameters */ + grant_ref_t ref_a; + grant_ref_t ref_b; + /* OUT parameters */ + int16_t status; /* => enum grant_status */ +}; +typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t; +DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t); + +#endif /* __XEN_INTERFACE_VERSION__ */ + +/* + * Bitfield values for gnttab_map_grant_ref.flags. + */ + /* Map the grant entry for access by I/O devices. */ +#define _GNTMAP_device_map (0) +#define GNTMAP_device_map (1<<_GNTMAP_device_map) + /* Map the grant entry for access by host CPUs. */ +#define _GNTMAP_host_map (1) +#define GNTMAP_host_map (1<<_GNTMAP_host_map) + /* Accesses to the granted frame will be restricted to read-only access. */ +#define _GNTMAP_readonly (2) +#define GNTMAP_readonly (1<<_GNTMAP_readonly) + /* + * GNTMAP_host_map subflag: + * 0 => The host mapping is usable only by the guest OS. + * 1 => The host mapping is usable by guest OS + current application. + */ +#define _GNTMAP_application_map (3) +#define GNTMAP_application_map (1<<_GNTMAP_application_map) + + /* + * GNTMAP_contains_pte subflag: + * 0 => This map request contains a host virtual address. + * 1 => This map request contains the machine addess of the PTE to update. + */ +#define _GNTMAP_contains_pte (4) +#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) + +#define _GNTMAP_can_fail (5) +#define GNTMAP_can_fail (1<<_GNTMAP_can_fail) + +/* + * Bits to be placed in guest kernel available PTE bits (architecture + * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). + */ +#define _GNTMAP_guest_avail0 (16) +#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) + +/* + * Values for error status returns. All errors are -ve. + */ +/* ` enum grant_status { */ +#define GNTST_okay (0) /* Normal return. */ +#define GNTST_general_error (-1) /* General undefined error. */ +#define GNTST_bad_domain (-2) /* Unrecognsed domain id. 
*/ +#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ +#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ +#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ +#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ +#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ +#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ +#define GNTST_bad_page (-9) /* Specified page was invalid for op. */ +#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ +#define GNTST_address_too_big (-11) /* transfer page address too large. */ +#define GNTST_eagain (-12) /* Operation not done; try again. */ +/* ` } */ + +#define GNTTABOP_error_msgs { \ + "okay", \ + "undefined error", \ + "unrecognised domain id", \ + "invalid grant reference", \ + "invalid mapping handle", \ + "invalid virtual address", \ + "invalid device address", \ + "no spare translation slot in the I/O MMU", \ + "permission denied", \ + "bad page", \ + "copy arguments cross page boundary", \ + "page address size too large", \ + "operation not done; try again" \ +} + +#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/hvm/hvm_op.h b/src/include/xen/hvm/hvm_op.h new file mode 100644 index 00000000..469ad4fb --- /dev/null +++ b/src/include/xen/hvm/hvm_op.h @@ -0,0 +1,384 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the 
following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ +#define __XEN_PUBLIC_HVM_HVM_OP_H__ + +FILE_LICENCE ( MIT ); + +#include "../xen.h" +#include "../trace.h" +#include "../event_channel.h" + +/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ +#define HVMOP_set_param 0 +#define HVMOP_get_param 1 +struct xen_hvm_param { + domid_t domid; /* IN */ + uint32_t index; /* IN */ + uint64_t value; /* IN/OUT */ +}; +typedef struct xen_hvm_param xen_hvm_param_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); + +/* Set the logical level of one of a domain's PCI INTx wires. */ +#define HVMOP_set_pci_intx_level 2 +struct xen_hvm_set_pci_intx_level { + /* Domain to be updated. */ + domid_t domid; + /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ + uint8_t domain, bus, device, intx; + /* Assertion level (0 = unasserted, 1 = asserted). */ + uint8_t level; +}; +typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); + +/* Set the logical level of one of a domain's ISA IRQ wires. */ +#define HVMOP_set_isa_irq_level 3 +struct xen_hvm_set_isa_irq_level { + /* Domain to be updated. */ + domid_t domid; + /* ISA device identification, by ISA IRQ (0-15). */ + uint8_t isa_irq; + /* Assertion level (0 = unasserted, 1 = asserted). 
*/ + uint8_t level; +}; +typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); + +#define HVMOP_set_pci_link_route 4 +struct xen_hvm_set_pci_link_route { + /* Domain to be updated. */ + domid_t domid; + /* PCI link identifier (0-3). */ + uint8_t link; + /* ISA IRQ (1-15), or 0 (disable link). */ + uint8_t isa_irq; +}; +typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); + +/* Flushes all VCPU TLBs: @arg must be NULL. */ +#define HVMOP_flush_tlbs 5 + +typedef enum { + HVMMEM_ram_rw, /* Normal read/write guest RAM */ + HVMMEM_ram_ro, /* Read-only; writes are discarded */ + HVMMEM_mmio_dm, /* Reads and write go to the device model */ +} hvmmem_type_t; + +/* Following tools-only interfaces may change in future. */ +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +/* Track dirty VRAM. */ +#define HVMOP_track_dirty_vram 6 +struct xen_hvm_track_dirty_vram { + /* Domain to be tracked. */ + domid_t domid; + /* Number of pages to track. */ + uint32_t nr; + /* First pfn to track. */ + uint64_aligned_t first_pfn; + /* OUT variable. */ + /* Dirty bitmap buffer. */ + XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; +}; +typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); + +/* Notify that some pages got modified by the Device Model. */ +#define HVMOP_modified_memory 7 +struct xen_hvm_modified_memory { + /* Domain to be updated. */ + domid_t domid; + /* Number of pages. */ + uint32_t nr; + /* First pfn. */ + uint64_aligned_t first_pfn; +}; +typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); + +#define HVMOP_set_mem_type 8 +/* Notify that a region of memory is to be treated in a specific way. */ +struct xen_hvm_set_mem_type { + /* Domain to be updated. 
*/ + domid_t domid; + /* Memory type */ + uint16_t hvmmem_type; + /* Number of pages. */ + uint32_t nr; + /* First pfn. */ + uint64_aligned_t first_pfn; +}; +typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +/* Hint from PV drivers for pagetable destruction. */ +#define HVMOP_pagetable_dying 9 +struct xen_hvm_pagetable_dying { + /* Domain with a pagetable about to be destroyed. */ + domid_t domid; + uint16_t pad[3]; /* align next field on 8-byte boundary */ + /* guest physical address of the toplevel pagetable dying */ + uint64_t gpa; +}; +typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t); + +/* Get the current Xen time, in nanoseconds since system boot. */ +#define HVMOP_get_time 10 +struct xen_hvm_get_time { + uint64_t now; /* OUT */ +}; +typedef struct xen_hvm_get_time xen_hvm_get_time_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t); + +#define HVMOP_xentrace 11 +struct xen_hvm_xentrace { + uint16_t event, extra_bytes; + uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)]; +}; +typedef struct xen_hvm_xentrace xen_hvm_xentrace_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t); + +/* Following tools-only interfaces may change in future. */ +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +/* Deprecated by XENMEM_access_op_set_access */ +#define HVMOP_set_mem_access 12 + +/* Deprecated by XENMEM_access_op_get_access */ +#define HVMOP_get_mem_access 13 + +#define HVMOP_inject_trap 14 +/* Inject a trap into a VCPU, which will get taken up on the next + * scheduling of it. Note that the caller should know enough of the + * state of the CPU before injecting, to know what the effect of + * injecting the trap will be. + */ +struct xen_hvm_inject_trap { + /* Domain to be queried. 
*/ + domid_t domid; + /* VCPU */ + uint32_t vcpuid; + /* Vector number */ + uint32_t vector; + /* Trap type (HVMOP_TRAP_*) */ + uint32_t type; +/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */ +# define HVMOP_TRAP_ext_int 0 /* external interrupt */ +# define HVMOP_TRAP_nmi 2 /* nmi */ +# define HVMOP_TRAP_hw_exc 3 /* hardware exception */ +# define HVMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */ +# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */ +# define HVMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */ + /* Error code, or ~0u to skip */ + uint32_t error_code; + /* Intruction length */ + uint32_t insn_len; + /* CR2 for page faults */ + uint64_aligned_t cr2; +}; +typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t); + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +#define HVMOP_get_mem_type 15 +/* Return hvmmem_type_t for the specified pfn. */ +struct xen_hvm_get_mem_type { + /* Domain to be queried. */ + domid_t domid; + /* OUT variable. */ + uint16_t mem_type; + uint16_t pad[2]; /* align next field on 8-byte boundary */ + /* IN variable. */ + uint64_t pfn; +}; +typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t); + +/* Following tools-only interfaces may change in future. */ +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +/* MSI injection for emulated devices */ +#define HVMOP_inject_msi 16 +struct xen_hvm_inject_msi { + /* Domain to be injected */ + domid_t domid; + /* Data -- lower 32 bits */ + uint32_t data; + /* Address (0xfeexxxxx) */ + uint64_t addr; +}; +typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t); + +/* + * IOREQ Servers + * + * The interface between an I/O emulator an Xen is called an IOREQ Server. + * A domain supports a single 'legacy' IOREQ Server which is instantiated if + * parameter... 
+ * + * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous + * ioreq structures), or... + * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered + * ioreq ring), or... + * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses + * to request buffered I/O emulation). + * + * The following hypercalls facilitate the creation of IOREQ Servers for + * 'secondary' emulators which are invoked to implement port I/O, memory, or + * PCI config space ranges which they explicitly register. + */ + +typedef uint16_t ioservid_t; + +/* + * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary + * emulator servicing domain . + * + * The handed back is unique for . If is zero + * the buffered ioreq ring will not be allocated and hence all emulation + * requestes to this server will be synchronous. + */ +#define HVMOP_create_ioreq_server 17 +struct xen_hvm_create_ioreq_server { + domid_t domid; /* IN - domain to be serviced */ + uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */ + ioservid_t id; /* OUT - server id */ +}; +typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t); + +/* + * HVMOP_get_ioreq_server_info: Get all the information necessary to access + * IOREQ Server . + * + * The emulator needs to map the synchronous ioreq structures and buffered + * ioreq ring (if it exists) that Xen uses to request emulation. These are + * hosted in domain 's gmfns and + * respectively. In addition, if the IOREQ Server is handling buffered + * emulation requests, the emulator needs to bind to event channel + * to listen for them. (The event channels used for + * synchronous emulation requests are specified in the per-CPU ioreq + * structures in ). + * If the IOREQ Server is not handling buffered emulation requests then the + * values handed back in and will both be 0. 
+ */ +#define HVMOP_get_ioreq_server_info 18 +struct xen_hvm_get_ioreq_server_info { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ + evtchn_port_t bufioreq_port; /* OUT - buffered ioreq port */ + uint64_aligned_t ioreq_pfn; /* OUT - sync ioreq pfn */ + uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */ +}; +typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t); + +/* + * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain + * for emulation by the client of IOREQ + * Server + * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of + * for emulation by the client of IOREQ + * Server + * + * There are three types of I/O that can be emulated: port I/O, memory accesses + * and PCI config space accesses. The field denotes which type of range + * the and (inclusive) fields are specifying. + * PCI config space ranges are specified by segment/bus/device/function values + * which should be encoded using the HVMOP_PCI_SBDF helper macro below. + * + * NOTE: unless an emulation request falls entirely within a range mapped + * by a secondary emulator, it will not be passed to that emulator. 
+ */ +#define HVMOP_map_io_range_to_ioreq_server 19 +#define HVMOP_unmap_io_range_from_ioreq_server 20 +struct xen_hvm_io_range { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ + uint32_t type; /* IN - type of range */ +# define HVMOP_IO_RANGE_PORT 0 /* I/O port range */ +# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */ +# define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */ + uint64_aligned_t start, end; /* IN - inclusive start and end of range */ +}; +typedef struct xen_hvm_io_range xen_hvm_io_range_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t); + +#define HVMOP_PCI_SBDF(s,b,d,f) \ + ((((s) & 0xffff) << 16) | \ + (((b) & 0xff) << 8) | \ + (((d) & 0x1f) << 3) | \ + ((f) & 0x07)) + +/* + * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server servicing domain + * . + * + * Any registered I/O ranges will be automatically deregistered. + */ +#define HVMOP_destroy_ioreq_server 21 +struct xen_hvm_destroy_ioreq_server { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ +}; +typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t); + +/* + * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server servicing + * domain . + * + * The IOREQ Server will not be passed any emulation requests until it is in the + * enabled state. + * Note that the contents of the ioreq_pfn and bufioreq_fn (see + * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in + * the enabled state. + */ +#define HVMOP_set_ioreq_server_state 22 +struct xen_hvm_set_ioreq_server_state { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ + uint8_t enabled; /* IN - enabled? 
*/ +}; +typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t); + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/hvm/params.h b/src/include/xen/hvm/params.h new file mode 100644 index 00000000..49e06586 --- /dev/null +++ b/src/include/xen/hvm/params.h @@ -0,0 +1,158 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __XEN_PUBLIC_HVM_PARAMS_H__ +#define __XEN_PUBLIC_HVM_PARAMS_H__ + +FILE_LICENCE ( MIT ); + +#include "hvm_op.h" + +/* + * Parameter space for HVMOP_{set,get}_param. + */ + +/* + * How should CPU0 event-channel notifications be delivered? 
+ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). + * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: + * Domain = val[47:32], Bus = val[31:16], + * DevFn = val[15: 8], IntX = val[ 1: 0] + * val[63:56] == 2: val[7:0] is a vector number, check for + * XENFEAT_hvm_callback_vector to know if this delivery + * method is available. + * If val == 0 then CPU0 event-channel notifications are not delivered. + */ +#define HVM_PARAM_CALLBACK_IRQ 0 + +/* + * These are not used by Xen. They are here for convenience of HVM-guest + * xenbus implementations. + */ +#define HVM_PARAM_STORE_PFN 1 +#define HVM_PARAM_STORE_EVTCHN 2 + +#define HVM_PARAM_PAE_ENABLED 4 + +#define HVM_PARAM_IOREQ_PFN 5 + +#define HVM_PARAM_BUFIOREQ_PFN 6 +#define HVM_PARAM_BUFIOREQ_EVTCHN 26 + +#if defined(__i386__) || defined(__x86_64__) + +/* Expose Viridian interfaces to this HVM guest? */ +#define HVM_PARAM_VIRIDIAN 9 + +#endif + +/* + * Set mode for virtual timers (currently x86 only): + * delay_for_missed_ticks (default): + * Do not advance a vcpu's time beyond the correct delivery time for + * interrupts that have been missed due to preemption. Deliver missed + * interrupts when the vcpu is rescheduled and advance the vcpu's virtual + * time stepwise for each one. + * no_delay_for_missed_ticks: + * As above, missed interrupts are delivered, but guest time always tracks + * wallclock (i.e., real) time while doing so. + * no_missed_ticks_pending: + * No missed interrupts are held pending. Instead, to ensure ticks are + * delivered at some non-zero rate, if we detect missed ticks then the + * internal tick alarm is not disabled if the VCPU is preempted during the + * next tick period. + * one_missed_tick_pending: + * Missed interrupts are collapsed together and delivered as one 'late tick'. + * Guest time always tracks wallclock (i.e., real) time. 
+ */ +#define HVM_PARAM_TIMER_MODE 10 +#define HVMPTM_delay_for_missed_ticks 0 +#define HVMPTM_no_delay_for_missed_ticks 1 +#define HVMPTM_no_missed_ticks_pending 2 +#define HVMPTM_one_missed_tick_pending 3 + +/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ +#define HVM_PARAM_HPET_ENABLED 11 + +/* Identity-map page directory used by Intel EPT when CR0.PG=0. */ +#define HVM_PARAM_IDENT_PT 12 + +/* Device Model domain, defaults to 0. */ +#define HVM_PARAM_DM_DOMAIN 13 + +/* ACPI S state: currently support S0 and S3 on x86. */ +#define HVM_PARAM_ACPI_S_STATE 14 + +/* TSS used on Intel when CR0.PE=0. */ +#define HVM_PARAM_VM86_TSS 15 + +/* Boolean: Enable aligning all periodic vpts to reduce interrupts */ +#define HVM_PARAM_VPT_ALIGN 16 + +/* Console debug shared memory ring and event channel */ +#define HVM_PARAM_CONSOLE_PFN 17 +#define HVM_PARAM_CONSOLE_EVTCHN 18 + +/* + * Select location of ACPI PM1a and TMR control blocks. Currently two locations + * are supported, specified by version 0 or 1 in this parameter: + * - 0: default, use the old addresses + * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48 + * - 1: use the new default qemu addresses + * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008 + * You can find these address definitions in + */ +#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19 + +/* Enable blocking memory events, async or sync (pause vcpu until response) + * onchangeonly indicates messages only on a change of value */ +#define HVM_PARAM_MEMORY_EVENT_CR0 20 +#define HVM_PARAM_MEMORY_EVENT_CR3 21 +#define HVM_PARAM_MEMORY_EVENT_CR4 22 +#define HVM_PARAM_MEMORY_EVENT_INT3 23 +#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25 +#define HVM_PARAM_MEMORY_EVENT_MSR 30 + +#define HVMPME_MODE_MASK (3 << 0) +#define HVMPME_mode_disabled 0 +#define HVMPME_mode_async 1 +#define HVMPME_mode_sync 2 +#define HVMPME_onchangeonly (1 << 2) + +/* Boolean: Enable nestedhvm (hvm only) */ +#define HVM_PARAM_NESTEDHVM 24 + +/* Params 
for the mem event rings */ +#define HVM_PARAM_PAGING_RING_PFN 27 +#define HVM_PARAM_ACCESS_RING_PFN 28 +#define HVM_PARAM_SHARING_RING_PFN 29 + +/* SHUTDOWN_* action in case of a triple fault */ +#define HVM_PARAM_TRIPLE_FAULT_REASON 31 + +#define HVM_PARAM_IOREQ_SERVER_PFN 32 +#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33 + +/* Location of the VM Generation ID in guest physical address space. */ +#define HVM_PARAM_VM_GENERATION_ID_ADDR 34 + +#define HVM_NR_PARAMS 35 + +#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ diff --git a/src/include/xen/import.pl b/src/include/xen/import.pl new file mode 100755 index 00000000..9f09a77a --- /dev/null +++ b/src/include/xen/import.pl @@ -0,0 +1,116 @@ +#!/usr/bin/perl -w + +=head1 NAME + +import.pl + +=head1 SYNOPSIS + +import.pl [options] /path/to/xen + +Options: + + -h,--help Display brief help message + -v,--verbose Increase verbosity + -q,--quiet Decrease verbosity + +=cut + +use File::Spec::Functions qw ( :ALL ); +use File::Find; +use File::Path; +use Getopt::Long; +use Pod::Usage; +use FindBin; +use strict; +use warnings; + +my $verbosity = 0; + +sub try_import_file { + my $ipxedir = shift; + my $xendir = shift; + my $filename = shift; + + # Skip everything except headers + return unless $filename =~ /\.h$/; + + # Search for importable header + ( undef, my $subdir, undef ) = splitpath ( $filename ); + my $outfile = catfile ( $ipxedir, $filename ); + my $infile = catfile ( $xendir, "xen/include/public", $filename ); + die "$infile does not exist\n" unless -e $infile; + + # Import header file + print "$filename <- ".catfile ( $xendir, $filename )."\n" + if $verbosity >= 1; + open my $infh, "<", $infile or die "Could not open $infile: $!\n"; + mkpath ( catdir ( $ipxedir, $subdir ) ); + open my $outfh, ">", $outfile or die "Could not open $outfile: $!\n"; + my @dependencies = (); + my $maybe_guard; + my $guard; + while ( <$infh> ) { + # Strip CR and trailing whitespace + s/\r//g; + s/\s*$//g; + chomp; + # Update include lines, and 
record included files + if ( /^\#include\s+[<\"](\S+)[>\"]/ ) { + push @dependencies, catfile ( $subdir, $1 ); + } + # Write out line + print $outfh "$_\n"; + # Apply FILE_LICENCE() immediately after include guard + if ( defined $maybe_guard ) { + if ( /^\#define\s+_+${maybe_guard}_H_*$/ ) { + die "Duplicate header guard detected in $infile\n" if $guard; + $guard = $maybe_guard; + print $outfh "\nFILE_LICENCE ( MIT );\n"; + } + undef $maybe_guard; + } + if ( /^#ifndef\s+_+(\S+)_H_*$/ ) { + $maybe_guard = $1; + } + } + close $outfh; + close $infh; + # Warn if no header guard was detected + warn "Cannot detect header guard in $infile\n" unless $guard; + # Recurse to handle any included files that we don't already have + foreach my $dependency ( @dependencies ) { + if ( ! -e catfile ( $ipxedir, $dependency ) ) { + print "...following dependency on $dependency\n" if $verbosity >= 1; + try_import_file ( $ipxedir, $xendir, $dependency ); + } + } + return; +} + +# Parse command-line options +Getopt::Long::Configure ( 'bundling', 'auto_abbrev' ); +GetOptions ( + 'verbose|v+' => sub { $verbosity++; }, + 'quiet|q+' => sub { $verbosity--; }, + 'help|h' => sub { pod2usage ( 1 ); }, +) or die "Could not parse command-line options\n"; +pod2usage ( 1 ) unless @ARGV == 1; +my $xendir = shift; + +# Identify Xen import directory +die "Directory \"$xendir\" does not appear to contain the Xen source tree\n" + unless -e catfile ( $xendir, "xen/include/public/xen.h" ); + +# Identify iPXE Xen includes directory +my $ipxedir = $FindBin::Bin; +die "Directory \"$ipxedir\" does not appear to contain the iPXE Xen includes\n" + unless -e catfile ( $ipxedir, "../../include/ipxe" ); + +print "Importing Xen headers into $ipxedir\nfrom $xendir\n" + if $verbosity >= 1; + +# Import headers +find ( { wanted => sub { + try_import_file ( $ipxedir, $xendir, abs2rel ( $_, $ipxedir ) ); +}, no_chdir => 1 }, $ipxedir ); diff --git a/src/include/xen/io/netif.h b/src/include/xen/io/netif.h new file mode 
100644 index 00000000..ae12eab7 --- /dev/null +++ b/src/include/xen/io/netif.h @@ -0,0 +1,307 @@ +/****************************************************************************** + * netif.h + * + * Unified network-device I/O interface for Xen guest OSes. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2003-2004, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_IO_NETIF_H__ +#define __XEN_PUBLIC_IO_NETIF_H__ + +FILE_LICENCE ( MIT ); + +#include "ring.h" +#include "../grant_table.h" + +/* + * Older implementation of Xen network frontend / backend has an + * implicit dependency on the MAX_SKB_FRAGS as the maximum number of + * ring slots a skb can use. Netfront / netback may not work as + * expected when frontend and backend have different MAX_SKB_FRAGS. + * + * A better approach is to add mechanism for netfront / netback to + * negotiate this value. 
However we cannot fix all possible + * frontends, so we need to define a value which states the minimum + * slots backend must support. + * + * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS + * (18), which is proved to work with most frontends. Any new backend + * which doesn't negotiate with frontend should expect frontend to + * send a valid packet using slots up to this value. + */ +#define XEN_NETIF_NR_SLOTS_MIN 18 + +/* + * Notifications after enqueuing any type of message should be conditional on + * the appropriate req_event or rsp_event field in the shared ring. + * If the client sends notification for rx requests then it should specify + * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume + * that it cannot safely queue packets (as it may not be kicked to send them). + */ + +/* + * "feature-split-event-channels" is introduced to separate guest TX + * and RX notification. Backend either doesn't support this feature or + * advertises it via xenstore as 0 (disabled) or 1 (enabled). + * + * To make use of this feature, frontend should allocate two event + * channels for TX and RX, advertise them to backend as + * "event-channel-tx" and "event-channel-rx" respectively. If frontend + * doesn't want to use this feature, it just writes "event-channel" + * node as before. + */ + +/* + * Multiple transmit and receive queues: + * If supported, the backend will write the key "multi-queue-max-queues" to + * the directory for that vif, and set its value to the maximum supported + * number of queues. + * Frontends that are aware of this feature and wish to use it can write the + * key "multi-queue-num-queues", set to the number they wish to use, which + * must be greater than zero, and no more than the value reported by the backend + * in "multi-queue-max-queues". + * + * Queues replicate the shared rings and event channels. 
+ * "feature-split-event-channels" may optionally be used when using + * multiple queues, but is not mandatory. + * + * Each queue consists of one shared ring pair, i.e. there must be the same + * number of tx and rx rings. + * + * For frontends requesting just one queue, the usual event-channel and + * ring-ref keys are written as before, simplifying the backend processing + * to avoid distinguishing between a frontend that doesn't understand the + * multi-queue feature, and one that does, but requested only one queue. + * + * Frontends requesting two or more queues must not write the toplevel + * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys, + * instead writing those keys under sub-keys having the name "queue-N" where + * N is the integer ID of the queue for which those keys belong. Queues + * are indexed from zero. For example, a frontend with two queues and split + * event channels must write the following set of queue-related keys: + * + * /local/domain/1/device/vif/0/multi-queue-num-queues = "2" + * /local/domain/1/device/vif/0/queue-0 = "" + * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "" + * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "" + * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "" + * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "" + * /local/domain/1/device/vif/0/queue-1 = "" + * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "" + * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = ">2)<<2 : __RD2(_x)) +#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) +#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) +#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) + +/* + * Calculate size of a shared ring, given the total available space for the + * ring and indexes (_sz), and the name tag of the request/response structure. 
+ * A ring contains as many entries as will fit, rounded down to the nearest + * power of two (so we can mask with (size-1) to loop around). + */ +#define __CONST_RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ + sizeof(((struct _s##_sring *)0)->ring[0]))) +/* + * The same for passing in an actual pointer instead of a name tag. + */ +#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) + +/* + * Macros to make the correct C datatypes for a new kind of ring. + * + * To make a new ring datatype, you need to have two message structures, + * let's say request_t, and response_t already defined. + * + * In a header where you want the ring datatype declared, you then do: + * + * DEFINE_RING_TYPES(mytag, request_t, response_t); + * + * These expand out to give you a set of types, as you can see below. + * The most important of these are: + * + * mytag_sring_t - The shared ring. + * mytag_front_ring_t - The 'front' half of the ring. + * mytag_back_ring_t - The 'back' half of the ring. + * + * To initialize a ring in your code you need to know the location and size + * of the shared memory area (PAGE_SIZE, for instance). 
To initialise + * the front half: + * + * mytag_front_ring_t front_ring; + * SHARED_RING_INIT((mytag_sring_t *)shared_page); + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + * + * Initializing the back follows similarly (note that only the front + * initializes the shared ring): + * + * mytag_back_ring_t back_ring; + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + */ + +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ + \ +/* Shared ring entry */ \ +union __name##_sring_entry { \ + __req_t req; \ + __rsp_t rsp; \ +}; \ + \ +/* Shared ring page */ \ +struct __name##_sring { \ + RING_IDX req_prod, req_event; \ + RING_IDX rsp_prod, rsp_event; \ + union { \ + struct { \ + uint8_t smartpoll_active; \ + } netif; \ + struct { \ + uint8_t msg; \ + } tapif_user; \ + uint8_t pvt_pad[4]; \ + } private; \ + uint8_t __pad[44]; \ + union __name##_sring_entry ring[1]; /* variable-length */ \ +}; \ + \ +/* "Front" end's private variables */ \ +struct __name##_front_ring { \ + RING_IDX req_prod_pvt; \ + RING_IDX rsp_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* "Back" end's private variables */ \ +struct __name##_back_ring { \ + RING_IDX rsp_prod_pvt; \ + RING_IDX req_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* Syntactic sugar */ \ +typedef struct __name##_sring __name##_sring_t; \ +typedef struct __name##_front_ring __name##_front_ring_t; \ +typedef struct __name##_back_ring __name##_back_ring_t + +/* + * Macros for manipulating rings. + * + * FRONT_RING_whatever works on the "front end" of a ring: here + * requests are pushed on to the ring and responses taken off it. + * + * BACK_RING_whatever works on the "back end" of a ring: here + * requests are taken off the ring and responses put on. + * + * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. 
+ * This is OK in 1-for-1 request-response situations where the + * requestor (front end) never has more than RING_SIZE()-1 + * outstanding requests. + */ + +/* Initialising empty rings */ +#define SHARED_RING_INIT(_s) do { \ + (_s)->req_prod = (_s)->rsp_prod = 0; \ + (_s)->req_event = (_s)->rsp_event = 1; \ + (void)memset((_s)->private.pvt_pad, 0, sizeof((_s)->private.pvt_pad)); \ + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ +} while(0) + +#define FRONT_RING_INIT(_r, _s, __size) do { \ + (_r)->req_prod_pvt = 0; \ + (_r)->rsp_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) + +#define BACK_RING_INIT(_r, _s, __size) do { \ + (_r)->rsp_prod_pvt = 0; \ + (_r)->req_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) + +/* How big is this ring? */ +#define RING_SIZE(_r) \ + ((_r)->nr_ents) + +/* Number of free requests (for use on front side only). */ +#define RING_FREE_REQUESTS(_r) \ + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) + +/* Test if there is an empty slot available on the front ring. + * (This is only meaningful from the front. ) + */ +#define RING_FULL(_r) \ + (RING_FREE_REQUESTS(_r) == 0) + +/* Test if there are outstanding messages to be processed on a ring. */ +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ + ((_r)->sring->rsp_prod - (_r)->rsp_cons) + +#ifdef __GNUC__ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ + unsigned int rsp = RING_SIZE(_r) - \ + ((_r)->req_cons - (_r)->rsp_prod_pvt); \ + req < rsp ? req : rsp; \ +}) +#else +/* Same as above, but without the nice GCC ({ ... }) syntax. */ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ + ((((_r)->sring->req_prod - (_r)->req_cons) < \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? 
\ + ((_r)->sring->req_prod - (_r)->req_cons) : \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) +#endif + +/* Direct access to individual ring elements, by index. */ +#define RING_GET_REQUEST(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) + +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + +/* Loop termination condition: Would the specified index overflow the ring? */ +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) + +/* Ill-behaved frontend determination: Can there be this many requests? */ +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) + +#define RING_PUSH_REQUESTS(_r) do { \ + xen_wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = (_r)->req_prod_pvt; \ +} while (0) + +#define RING_PUSH_RESPONSES(_r) do { \ + xen_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ +} while (0) + +/* + * Notification hold-off (req_event and rsp_event): + * + * When queueing requests or responses on a shared ring, it may not always be + * necessary to notify the remote end. For example, if requests are in flight + * in a backend, the front may be able to queue further requests without + * notifying the back (if the back checks for new requests when it queues + * responses). + * + * When enqueuing requests or responses: + * + * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument + * is a boolean return value. True indicates that the receiver requires an + * asynchronous notification. + * + * After dequeuing requests or responses (before sleeping the connection): + * + * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). + * The second argument is a boolean return value. 
True indicates that there + * are pending messages on the ring (i.e., the connection should not be put + * to sleep). + * + * These macros will set the req_event/rsp_event field to trigger a + * notification on the very next message that is enqueued. If you want to + * create batches of work (i.e., only receive a notification after several + * messages have been enqueued) then you will need to create a customised + * version of the FINAL_CHECK macro in your own code, which sets the event + * field appropriately. + */ + +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->req_prod; \ + RING_IDX __new = (_r)->req_prod_pvt; \ + xen_wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = __new; \ + xen_mb(); /* back sees new requests /before/ we check req_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ + (RING_IDX)(__new - __old)); \ +} while (0) + +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->rsp_prod; \ + RING_IDX __new = (_r)->rsp_prod_pvt; \ + xen_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = __new; \ + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ + (RING_IDX)(__new - __old)); \ +} while (0) + +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ + if (_work_to_do) break; \ + (_r)->sring->req_event = (_r)->req_cons + 1; \ + xen_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ +} while (0) + +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ + if (_work_to_do) break; \ + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ + xen_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ +} while (0) + +#endif /* __XEN_PUBLIC_IO_RING_H__ */ + +/* + 
* Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/io/xenbus.h b/src/include/xen/io/xenbus.h new file mode 100644 index 00000000..182aeb9b --- /dev/null +++ b/src/include/xen/io/xenbus.h @@ -0,0 +1,82 @@ +/***************************************************************************** + * xenbus.h + * + * Xenbus protocol details. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (C) 2005 XenSource Ltd. + */ + +#ifndef _XEN_PUBLIC_IO_XENBUS_H +#define _XEN_PUBLIC_IO_XENBUS_H + +FILE_LICENCE ( MIT ); + +/* + * The state of either end of the Xenbus, i.e. the current communication + * status of initialisation across the bus. States here imply nothing about + * the state of the connection between the driver and the kernel's device + * layers. 
+ */ +enum xenbus_state { + XenbusStateUnknown = 0, + + XenbusStateInitialising = 1, + + /* + * InitWait: Finished early initialisation but waiting for information + * from the peer or hotplug scripts. + */ + XenbusStateInitWait = 2, + + /* + * Initialised: Waiting for a connection from the peer. + */ + XenbusStateInitialised = 3, + + XenbusStateConnected = 4, + + /* + * Closing: The device is being closed due to an error or an unplug event. + */ + XenbusStateClosing = 5, + + XenbusStateClosed = 6, + + /* + * Reconfiguring: The device is being reconfigured. + */ + XenbusStateReconfiguring = 7, + + XenbusStateReconfigured = 8 +}; +typedef enum xenbus_state XenbusState; + +#endif /* _XEN_PUBLIC_IO_XENBUS_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/io/xs_wire.h b/src/include/xen/io/xs_wire.h new file mode 100644 index 00000000..50415f02 --- /dev/null +++ b/src/include/xen/io/xs_wire.h @@ -0,0 +1,140 @@ +/* + * Details of the "wire" protocol between Xen Store Daemon and client + * library or guest kernel. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (C) 2005 Rusty Russell IBM Corporation + */ + +#ifndef _XS_WIRE_H +#define _XS_WIRE_H + +FILE_LICENCE ( MIT ); + +enum xsd_sockmsg_type +{ + XS_DEBUG, + XS_DIRECTORY, + XS_READ, + XS_GET_PERMS, + XS_WATCH, + XS_UNWATCH, + XS_TRANSACTION_START, + XS_TRANSACTION_END, + XS_INTRODUCE, + XS_RELEASE, + XS_GET_DOMAIN_PATH, + XS_WRITE, + XS_MKDIR, + XS_RM, + XS_SET_PERMS, + XS_WATCH_EVENT, + XS_ERROR, + XS_IS_DOMAIN_INTRODUCED, + XS_RESUME, + XS_SET_TARGET, + XS_RESTRICT, + XS_RESET_WATCHES +}; + +#define XS_WRITE_NONE "NONE" +#define XS_WRITE_CREATE "CREATE" +#define XS_WRITE_CREATE_EXCL "CREATE|EXCL" + +/* We hand errors as strings, for portability. */ +struct xsd_errors +{ + int errnum; + const char *errstring; +}; +#ifdef EINVAL +#define XSD_ERROR(x) { x, #x } +/* LINTED: static unused */ +static struct xsd_errors xsd_errors[] +#if defined(__GNUC__) +__attribute__((unused)) +#endif + = { + XSD_ERROR(EINVAL), + XSD_ERROR(EACCES), + XSD_ERROR(EEXIST), + XSD_ERROR(EISDIR), + XSD_ERROR(ENOENT), + XSD_ERROR(ENOMEM), + XSD_ERROR(ENOSPC), + XSD_ERROR(EIO), + XSD_ERROR(ENOTEMPTY), + XSD_ERROR(ENOSYS), + XSD_ERROR(EROFS), + XSD_ERROR(EBUSY), + XSD_ERROR(EAGAIN), + XSD_ERROR(EISCONN), + XSD_ERROR(E2BIG) +}; +#endif + +struct xsd_sockmsg +{ + uint32_t type; /* XS_??? */ + uint32_t req_id;/* Request identifier, echoed in daemon's response. */ + uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ + uint32_t len; /* Length of data following this. */ + + /* Generally followed by nul-terminated string(s). */ +}; + +enum xs_watch_type +{ + XS_WATCH_PATH = 0, + XS_WATCH_TOKEN +}; + +/* + * `incontents 150 xenstore_struct XenStore wire protocol. 
+ * + * Inter-domain shared memory communications. */ +#define XENSTORE_RING_SIZE 1024 +typedef uint32_t XENSTORE_RING_IDX; +#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) +struct xenstore_domain_interface { + char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ + char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ + XENSTORE_RING_IDX req_cons, req_prod; + XENSTORE_RING_IDX rsp_cons, rsp_prod; +}; + +/* Violating this is very bad. See docs/misc/xenstore.txt. */ +#define XENSTORE_PAYLOAD_MAX 4096 + +/* Violating these just gets you an error back */ +#define XENSTORE_ABS_PATH_MAX 3072 +#define XENSTORE_REL_PATH_MAX 2048 + +#endif /* _XS_WIRE_H */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/memory.h b/src/include/xen/memory.h new file mode 100644 index 00000000..0c76c0d6 --- /dev/null +++ b/src/include/xen/memory.h @@ -0,0 +1,540 @@ +/****************************************************************************** + * memory.h + * + * Memory reservation and information. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2005, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_MEMORY_H__ +#define __XEN_PUBLIC_MEMORY_H__ + +FILE_LICENCE ( MIT ); + +#include "xen.h" + +/* + * Increase or decrease the specified domain's memory reservation. Returns the + * number of extents successfully allocated or freed. + * arg == addr of struct xen_memory_reservation. + */ +#define XENMEM_increase_reservation 0 +#define XENMEM_decrease_reservation 1 +#define XENMEM_populate_physmap 6 + +#if __XEN_INTERFACE_VERSION__ >= 0x00030209 +/* + * Maximum # bits addressable by the user of the allocated region (e.g., I/O + * devices often have a 32-bit limitation even in 64-bit systems). If zero + * then the user has no addressing restriction. This field is not used by + * XENMEM_decrease_reservation. + */ +#define XENMEMF_address_bits(x) (x) +#define XENMEMF_get_address_bits(x) ((x) & 0xffu) +/* NUMA node to allocate from. */ +#define XENMEMF_node(x) (((x) + 1) << 8) +#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) +/* Flag to populate physmap with populate-on-demand entries */ +#define XENMEMF_populate_on_demand (1<<16) +/* Flag to request allocation only from the node specified */ +#define XENMEMF_exact_node_request (1<<17) +#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request) +#endif + +struct xen_memory_reservation { + + /* + * XENMEM_increase_reservation: + * OUT: MFN (*not* GMFN) bases of extents that were allocated + * XENMEM_decrease_reservation: + * IN: GMFN bases of extents to free + * XENMEM_populate_physmap: + * IN: GPFN bases of extents to populate with memory + * OUT: GMFN bases of extents that were allocated + * (NB. 
This command also updates the mach_to_phys translation table) + * XENMEM_claim_pages: + * IN: must be zero + */ + XEN_GUEST_HANDLE(xen_pfn_t) extent_start; + + /* Number of extents, and size/alignment of each (2^extent_order pages). */ + xen_ulong_t nr_extents; + unsigned int extent_order; + +#if __XEN_INTERFACE_VERSION__ >= 0x00030209 + /* XENMEMF flags. */ + unsigned int mem_flags; +#else + unsigned int address_bits; +#endif + + /* + * Domain whose reservation is being changed. + * Unprivileged domains can specify only DOMID_SELF. + */ + domid_t domid; +}; +typedef struct xen_memory_reservation xen_memory_reservation_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); + +/* + * An atomic exchange of memory pages. If return code is zero then + * @out.extent_list provides GMFNs of the newly-allocated memory. + * Returns zero on complete success, otherwise a negative error code. + * On complete success then always @nr_exchanged == @in.nr_extents. + * On partial success @nr_exchanged indicates how much work was done. + */ +#define XENMEM_exchange 11 +struct xen_memory_exchange { + /* + * [IN] Details of memory extents to be exchanged (GMFN bases). + * Note that @in.address_bits is ignored and unused. + */ + struct xen_memory_reservation in; + + /* + * [IN/OUT] Details of new memory extents. + * We require that: + * 1. @in.domid == @out.domid + * 2. @in.nr_extents << @in.extent_order == + * @out.nr_extents << @out.extent_order + * 3. @in.extent_start and @out.extent_start lists must not overlap + * 4. @out.extent_start lists GPFN bases to be populated + * 5. @out.extent_start is overwritten with allocated GMFN bases + */ + struct xen_memory_reservation out; + + /* + * [OUT] Number of input extents that were successfully exchanged: + * 1. The first @nr_exchanged input extents were successfully + * deallocated. + * 2. The corresponding first entries in the output extent list correctly + * indicate the GMFNs that were successfully exchanged. + * 3. 
All other input and output extents are untouched. + * 4. If not all input extents are exchanged then the return code of this + * command will be non-zero. + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! + */ + xen_ulong_t nr_exchanged; +}; +typedef struct xen_memory_exchange xen_memory_exchange_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); + +/* + * Returns the maximum machine frame number of mapped RAM in this system. + * This command always succeeds (it never returns an error code). + * arg == NULL. + */ +#define XENMEM_maximum_ram_page 2 + +/* + * Returns the current or maximum memory reservation, in pages, of the + * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. + * arg == addr of domid_t. + */ +#define XENMEM_current_reservation 3 +#define XENMEM_maximum_reservation 4 + +/* + * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. + */ +#define XENMEM_maximum_gpfn 14 + +/* + * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys + * mapping table. Architectures which do not have a m2p table do not implement + * this command. + * arg == addr of xen_machphys_mfn_list_t. + */ +#define XENMEM_machphys_mfn_list 5 +struct xen_machphys_mfn_list { + /* + * Size of the 'extent_start' array. Fewer entries will be filled if the + * machphys table is smaller than max_extents * 2MB. + */ + unsigned int max_extents; + + /* + * Pointer to buffer to fill with list of extent starts. If there are + * any large discontiguities in the machine address space, 2MB gaps in + * the machphys table will be represented by an MFN base of zero. + */ + XEN_GUEST_HANDLE(xen_pfn_t) extent_start; + + /* + * Number of extents written to the above array. This will be smaller + * than 'max_extents' if the machphys table is smaller than max_e * 2MB. 
+ */ + unsigned int nr_extents; +}; +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); + +/* + * For a compat caller, this is identical to XENMEM_machphys_mfn_list. + * + * For a non compat caller, this functions similarly to + * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility + * m2p table. + */ +#define XENMEM_machphys_compat_mfn_list 25 + +/* + * Returns the location in virtual address space of the machine_to_phys + * mapping table. Architectures which do not have a m2p table, or which do not + * map it by default into guest address space, do not implement this command. + * arg == addr of xen_machphys_mapping_t. + */ +#define XENMEM_machphys_mapping 12 +struct xen_machphys_mapping { + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ +}; +typedef struct xen_machphys_mapping xen_machphys_mapping_t; +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); + +/* Source mapping space. */ +/* ` enum phys_map_space { */ +#define XENMAPSPACE_shared_info 0 /* shared info page */ +#define XENMAPSPACE_grant_table 1 /* grant table page */ +#define XENMAPSPACE_gmfn 2 /* GMFN */ +#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */ +#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom, + * XENMEM_add_to_physmap_batch only. */ +/* ` } */ + +/* + * Sets the GPFN at which a particular page appears in the specified guest's + * pseudophysical address space. + * arg == addr of xen_add_to_physmap_t. + */ +#define XENMEM_add_to_physmap 7 +struct xen_add_to_physmap { + /* Which domain to change the mapping for. */ + domid_t domid; + + /* Number of pages to go through for gmfn_range */ + uint16_t size; + + unsigned int space; /* => enum phys_map_space */ + +#define XENMAPIDX_grant_table_status 0x80000000 + + /* Index into space being mapped. 
*/ + xen_ulong_t idx; + + /* GPFN in domid where the source mapping page should appear. */ + xen_pfn_t gpfn; +}; +typedef struct xen_add_to_physmap xen_add_to_physmap_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); + +/* A batched version of add_to_physmap. */ +#define XENMEM_add_to_physmap_batch 23 +struct xen_add_to_physmap_batch { + /* IN */ + /* Which domain to change the mapping for. */ + domid_t domid; + uint16_t space; /* => enum phys_map_space */ + + /* Number of pages to go through */ + uint16_t size; + domid_t foreign_domid; /* IFF gmfn_foreign */ + + /* Indexes into space being mapped. */ + XEN_GUEST_HANDLE(xen_ulong_t) idxs; + + /* GPFN in domid where the source mapping page should appear. */ + XEN_GUEST_HANDLE(xen_pfn_t) gpfns; + + /* OUT */ + + /* Per index error code. */ + XEN_GUEST_HANDLE(int) errs; +}; +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t); + +#if __XEN_INTERFACE_VERSION__ < 0x00040400 +#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch +#define xen_add_to_physmap_range xen_add_to_physmap_batch +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t); +#endif + +/* + * Unmaps the page appearing at a particular GPFN from the specified guest's + * pseudophysical address space. + * arg == addr of xen_remove_from_physmap_t. + */ +#define XENMEM_remove_from_physmap 15 +struct xen_remove_from_physmap { + /* Which domain to change the mapping for. */ + domid_t domid; + + /* GPFN of the current mapping of the page. */ + xen_pfn_t gpfn; +}; +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t; +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t); + +/*** REMOVED ***/ +/*#define XENMEM_translate_gpfn_list 8*/ + +/* + * Returns the pseudo-physical memory map as it was when the domain + * was started (specified by XENMEM_set_memory_map). + * arg == addr of xen_memory_map_t. 
+ */ +#define XENMEM_memory_map 9 +struct xen_memory_map { + /* + * On call the number of entries which can be stored in buffer. On + * return the number of entries which have been stored in + * buffer. + */ + unsigned int nr_entries; + + /* + * Entries in the buffer are in the same format as returned by the + * BIOS INT 0x15 EAX=0xE820 call. + */ + XEN_GUEST_HANDLE(void) buffer; +}; +typedef struct xen_memory_map xen_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); + +/* + * Returns the real physical memory map. Passes the same structure as + * XENMEM_memory_map. + * arg == addr of xen_memory_map_t. + */ +#define XENMEM_machine_memory_map 10 + +/* + * Set the pseudo-physical memory map of a domain, as returned by + * XENMEM_memory_map. + * arg == addr of xen_foreign_memory_map_t. + */ +#define XENMEM_set_memory_map 13 +struct xen_foreign_memory_map { + domid_t domid; + struct xen_memory_map map; +}; +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); + +#define XENMEM_set_pod_target 16 +#define XENMEM_get_pod_target 17 +struct xen_pod_target { + /* IN */ + uint64_t target_pages; + /* OUT */ + uint64_t tot_pages; + uint64_t pod_cache_pages; + uint64_t pod_entries; + /* IN */ + domid_t domid; +}; +typedef struct xen_pod_target xen_pod_target_t; + +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +#ifndef uint64_aligned_t +#define uint64_aligned_t uint64_t +#endif + +/* + * Get the number of MFNs saved through memory sharing. + * The call never fails. 
+ */ +#define XENMEM_get_sharing_freed_pages 18 +#define XENMEM_get_sharing_shared_pages 19 + +#define XENMEM_paging_op 20 +#define XENMEM_paging_op_nominate 0 +#define XENMEM_paging_op_evict 1 +#define XENMEM_paging_op_prep 2 + +struct xen_mem_event_op { + uint8_t op; /* XENMEM_*_op_* */ + domid_t domain; + + + /* PAGING_PREP IN: buffer to immediately fill page in */ + uint64_aligned_t buffer; + /* Other OPs */ + uint64_aligned_t gfn; /* IN: gfn of page being operated on */ +}; +typedef struct xen_mem_event_op xen_mem_event_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t); + +#define XENMEM_access_op 21 +#define XENMEM_access_op_resume 0 +#define XENMEM_access_op_set_access 1 +#define XENMEM_access_op_get_access 2 + +typedef enum { + XENMEM_access_n, + XENMEM_access_r, + XENMEM_access_w, + XENMEM_access_rw, + XENMEM_access_x, + XENMEM_access_rx, + XENMEM_access_wx, + XENMEM_access_rwx, + /* + * Page starts off as r-x, but automatically + * change to r-w on a write + */ + XENMEM_access_rx2rw, + /* + * Log access: starts off as n, automatically + * goes to rwx, generating an event without + * pausing the vcpu + */ + XENMEM_access_n2rwx, + /* Take the domain default */ + XENMEM_access_default +} xenmem_access_t; + +struct xen_mem_access_op { + /* XENMEM_access_op_* */ + uint8_t op; + /* xenmem_access_t */ + uint8_t access; + domid_t domid; + /* + * Number of pages for set op + * Ignored on setting default access and other ops + */ + uint32_t nr; + /* + * First pfn for set op + * pfn for get op + * ~0ull is used to set and get the default access for pages + */ + uint64_aligned_t pfn; +}; +typedef struct xen_mem_access_op xen_mem_access_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t); + +#define XENMEM_sharing_op 22 +#define XENMEM_sharing_op_nominate_gfn 0 +#define XENMEM_sharing_op_nominate_gref 1 +#define XENMEM_sharing_op_share 2 +#define XENMEM_sharing_op_resume 3 +#define XENMEM_sharing_op_debug_gfn 4 +#define XENMEM_sharing_op_debug_mfn 5 +#define 
XENMEM_sharing_op_debug_gref 6 +#define XENMEM_sharing_op_add_physmap 7 +#define XENMEM_sharing_op_audit 8 + +#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10) +#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9) + +/* The following allows sharing of grant refs. This is useful + * for sharing utilities sitting as "filters" in IO backends + * (e.g. memshr + blktap(2)). The IO backend is only exposed + * to grant references, and this allows sharing of the grefs */ +#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (1ULL << 62) + +#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \ + (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val) +#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \ + ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG) +#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \ + ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)) + +struct xen_mem_sharing_op { + uint8_t op; /* XENMEM_sharing_op_* */ + domid_t domain; + + union { + struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */ + union { + uint64_aligned_t gfn; /* IN: gfn to nominate */ + uint32_t grant_ref; /* IN: grant ref to nominate */ + } u; + uint64_aligned_t handle; /* OUT: the handle */ + } nominate; + struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */ + uint64_aligned_t source_gfn; /* IN: the gfn of the source page */ + uint64_aligned_t source_handle; /* IN: handle to the source page */ + uint64_aligned_t client_gfn; /* IN: the client gfn */ + uint64_aligned_t client_handle; /* IN: handle to the client page */ + domid_t client_domain; /* IN: the client domain id */ + } share; + struct mem_sharing_op_debug { /* OP_DEBUG_xxx */ + union { + uint64_aligned_t gfn; /* IN: gfn to debug */ + uint64_aligned_t mfn; /* IN: mfn to debug */ + uint32_t gref; /* IN: gref to debug */ + } u; + } debug; + } u; +}; +typedef struct xen_mem_sharing_op xen_mem_sharing_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t); + +/* + * Attempt to stake a claim for a domain on a quantity of pages + * of system RAM, but 
_not_ assign specific pageframes. Only + * arithmetic is performed so the hypercall is very fast and need + * not be preemptible, thus sidestepping time-of-check-time-of-use + * races for memory allocation. Returns 0 if the hypervisor page + * allocator has atomically and successfully claimed the requested + * number of pages, else non-zero. + * + * Any domain may have only one active claim. When sufficient memory + * has been allocated to resolve the claim, the claim silently expires. + * Claiming zero pages effectively resets any outstanding claim and + * is always successful. + * + * Note that a valid claim may be staked even after memory has been + * allocated for a domain. In this case, the claim is not incremental, + * i.e. if the domain's tot_pages is 3, and a claim is staked for 10, + * only 7 additional pages are claimed. + * + * Caller must be privileged or the hypercall fails. + */ +#define XENMEM_claim_pages 24 + +/* + * XENMEM_claim_pages flags - there are no flags at this time. + * The zero value is appropriate. 
+ */ + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +/* Next available subop number is 26 */ + +#endif /* __XEN_PUBLIC_MEMORY_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/trace.h b/src/include/xen/trace.h new file mode 100644 index 00000000..bf8bf65a --- /dev/null +++ b/src/include/xen/trace.h @@ -0,0 +1,332 @@ +/****************************************************************************** + * include/public/trace.h + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Mark Williamson, (C) 2004 Intel Research Cambridge + * Copyright (C) 2005 Bin Ren + */ + +#ifndef __XEN_PUBLIC_TRACE_H__ +#define __XEN_PUBLIC_TRACE_H__ + +FILE_LICENCE ( MIT ); + +#define TRACE_EXTRA_MAX 7 +#define TRACE_EXTRA_SHIFT 28 + +/* Trace classes */ +#define TRC_CLS_SHIFT 16 +#define TRC_GEN 0x0001f000 /* General trace */ +#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ +#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ +#define TRC_HVM 0x0008f000 /* Xen HVM trace */ +#define TRC_MEM 0x0010f000 /* Xen memory trace */ +#define TRC_PV 0x0020f000 /* Xen PV traces */ +#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ +#define TRC_HW 0x0080f000 /* Xen hardware-related traces */ +#define TRC_GUEST 0x0800f000 /* Guest-generated traces */ +#define TRC_ALL 0x0ffff000 +#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) +#define TRC_HD_CYCLE_FLAG (1UL<<31) +#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) +#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) + +/* Trace subclasses */ +#define TRC_SUBCLS_SHIFT 12 + +/* trace subclasses for SVM */ +#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ +#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ +#define TRC_HVM_EMUL 0x00084000 /* emulated devices */ + +#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ +#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */ +#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ + +/* + * The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are + * reserved for encoding what scheduler produced the information. The + * actual event is encoded in the last 9 bits. + * + * This means we have 8 scheduling IDs available (which means at most 8 + * schedulers generating events) and, in each scheduler, up to 512 + * different events. 
+ */ +#define TRC_SCHED_ID_BITS 3 +#define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS) +#define TRC_SCHED_ID_MASK (((1UL<cpu_offset[cpu]). + */ +struct t_info { + uint16_t tbuf_size; /* Size in pages of each trace buffer */ + uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */ + /* MFN lists immediately after the header */ +}; + +#endif /* __XEN_PUBLIC_TRACE_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/version.h b/src/include/xen/version.h new file mode 100644 index 00000000..4e81ca0f --- /dev/null +++ b/src/include/xen/version.h @@ -0,0 +1,98 @@ +/****************************************************************************** + * version.h + * + * Xen version, type, and compile information. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Copyright (c) 2005, Nguyen Anh Quynh + * Copyright (c) 2005, Keir Fraser + */ + +#ifndef __XEN_PUBLIC_VERSION_H__ +#define __XEN_PUBLIC_VERSION_H__ + +FILE_LICENCE ( MIT ); + +#include "xen.h" + +/* NB. All ops return zero on success, except XENVER_{version,pagesize} */ + +/* arg == NULL; returns major:minor (16:16). */ +#define XENVER_version 0 + +/* arg == xen_extraversion_t. */ +#define XENVER_extraversion 1 +typedef char xen_extraversion_t[16]; +#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) + +/* arg == xen_compile_info_t. */ +#define XENVER_compile_info 2 +struct xen_compile_info { + char compiler[64]; + char compile_by[16]; + char compile_domain[32]; + char compile_date[32]; +}; +typedef struct xen_compile_info xen_compile_info_t; + +#define XENVER_capabilities 3 +typedef char xen_capabilities_info_t[1024]; +#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) + +#define XENVER_changeset 4 +typedef char xen_changeset_info_t[64]; +#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) + +#define XENVER_platform_parameters 5 +struct xen_platform_parameters { + xen_ulong_t virt_start; +}; +typedef struct xen_platform_parameters xen_platform_parameters_t; + +#define XENVER_get_features 6 +struct xen_feature_info { + unsigned int submap_idx; /* IN: which 32-bit submap to return */ + uint32_t submap; /* OUT: 32-bit submap */ +}; +typedef struct xen_feature_info xen_feature_info_t; + +/* Declares the features reported by XENVER_get_features. */ +#include "features.h" + +/* arg == NULL; returns host memory page size. */ +#define XENVER_pagesize 7 + +/* arg == xen_domain_handle_t. 
*/ +#define XENVER_guest_handle 8 + +#define XENVER_commandline 9 +typedef char xen_commandline_t[1024]; + +#endif /* __XEN_PUBLIC_VERSION_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/include/xen/xen-compat.h b/src/include/xen/xen-compat.h new file mode 100644 index 00000000..0ba6fca6 --- /dev/null +++ b/src/include/xen/xen-compat.h @@ -0,0 +1,46 @@ +/****************************************************************************** + * xen-compat.h + * + * Guest OS interface to Xen. Compatibility layer. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Copyright (c) 2006, Christian Limpach + */ + +#ifndef __XEN_PUBLIC_XEN_COMPAT_H__ +#define __XEN_PUBLIC_XEN_COMPAT_H__ + +FILE_LICENCE ( MIT ); + +#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040400 + +#if defined(__XEN__) || defined(__XEN_TOOLS__) +/* Xen is built with matching headers and implements the latest interface. */ +#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ +#elif !defined(__XEN_INTERFACE_VERSION__) +/* Guests which do not specify a version get the legacy interface. */ +#define __XEN_INTERFACE_VERSION__ 0x00000000 +#endif + +#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ +#error "These header files do not support the requested interface version." +#endif + +#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ diff --git a/src/include/xen/xen.h b/src/include/xen/xen.h new file mode 100644 index 00000000..2da521d2 --- /dev/null +++ b/src/include/xen/xen.h @@ -0,0 +1,901 @@ +/****************************************************************************** + * xen.h + * + * Guest OS interface to Xen. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (c) 2004, K A Fraser + */ + +#ifndef __XEN_PUBLIC_XEN_H__ +#define __XEN_PUBLIC_XEN_H__ + +FILE_LICENCE ( MIT ); + +#include "xen-compat.h" + +#if defined(__i386__) || defined(__x86_64__) +#include "arch-x86/xen.h" +#elif defined(__arm__) || defined (__aarch64__) +#include "arch-arm.h" +#else +#error "Unsupported architecture" +#endif + +#ifndef __ASSEMBLY__ +/* Guest handles for primitive C types. */ +DEFINE_XEN_GUEST_HANDLE(char); +__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); +DEFINE_XEN_GUEST_HANDLE(int); +__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); +#if __XEN_INTERFACE_VERSION__ < 0x00040300 +DEFINE_XEN_GUEST_HANDLE(long); +__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); +#endif +DEFINE_XEN_GUEST_HANDLE(void); + +DEFINE_XEN_GUEST_HANDLE(uint64_t); +DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); +DEFINE_XEN_GUEST_HANDLE(xen_ulong_t); +#endif + +/* + * HYPERCALLS + */ + +/* `incontents 100 hcalls List of hypercalls + * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*() + */ + +#define __HYPERVISOR_set_trap_table 0 +#define __HYPERVISOR_mmu_update 1 +#define __HYPERVISOR_set_gdt 2 +#define __HYPERVISOR_stack_switch 3 +#define __HYPERVISOR_set_callbacks 4 +#define __HYPERVISOR_fpu_taskswitch 5 +#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ +#define __HYPERVISOR_platform_op 7 +#define __HYPERVISOR_set_debugreg 8 +#define __HYPERVISOR_get_debugreg 9 +#define __HYPERVISOR_update_descriptor 10 +#define __HYPERVISOR_memory_op 12 +#define __HYPERVISOR_multicall 13 +#define __HYPERVISOR_update_va_mapping 14 +#define __HYPERVISOR_set_timer_op 15 +#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ +#define __HYPERVISOR_xen_version 
17 +#define __HYPERVISOR_console_io 18 +#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ +#define __HYPERVISOR_grant_table_op 20 +#define __HYPERVISOR_vm_assist 21 +#define __HYPERVISOR_update_va_mapping_otherdomain 22 +#define __HYPERVISOR_iret 23 /* x86 only */ +#define __HYPERVISOR_vcpu_op 24 +#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ +#define __HYPERVISOR_mmuext_op 26 +#define __HYPERVISOR_xsm_op 27 +#define __HYPERVISOR_nmi_op 28 +#define __HYPERVISOR_sched_op 29 +#define __HYPERVISOR_callback_op 30 +#define __HYPERVISOR_xenoprof_op 31 +#define __HYPERVISOR_event_channel_op 32 +#define __HYPERVISOR_physdev_op 33 +#define __HYPERVISOR_hvm_op 34 +#define __HYPERVISOR_sysctl 35 +#define __HYPERVISOR_domctl 36 +#define __HYPERVISOR_kexec_op 37 +#define __HYPERVISOR_tmem_op 38 +#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */ + +/* Architecture-specific hypercall definitions. */ +#define __HYPERVISOR_arch_0 48 +#define __HYPERVISOR_arch_1 49 +#define __HYPERVISOR_arch_2 50 +#define __HYPERVISOR_arch_3 51 +#define __HYPERVISOR_arch_4 52 +#define __HYPERVISOR_arch_5 53 +#define __HYPERVISOR_arch_6 54 +#define __HYPERVISOR_arch_7 55 + +/* ` } */ + +/* + * HYPERCALL COMPATIBILITY. + */ + +/* New sched_op hypercall introduced in 0x00030101. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030101 +#undef __HYPERVISOR_sched_op +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat +#endif + +/* New event-channel and physdev hypercalls introduced in 0x00030202. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030202 +#undef __HYPERVISOR_event_channel_op +#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat +#undef __HYPERVISOR_physdev_op +#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat +#endif + +/* New platform_op hypercall introduced in 0x00030204. 
*/ +#if __XEN_INTERFACE_VERSION__ < 0x00030204 +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op +#endif + +/* + * VIRTUAL INTERRUPTS + * + * Virtual interrupts that a guest OS may receive from Xen. + * + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. + * The latter can be allocated only once per guest: they must initially be + * allocated to VCPU0 but can subsequently be re-bound. + */ +/* ` enum virq { */ +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ +#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ +#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ +#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ +#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */ +#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */ +#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */ +#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */ + +/* Architecture-specific VIRQ definitions. */ +#define VIRQ_ARCH_0 16 +#define VIRQ_ARCH_1 17 +#define VIRQ_ARCH_2 18 +#define VIRQ_ARCH_3 19 +#define VIRQ_ARCH_4 20 +#define VIRQ_ARCH_5 21 +#define VIRQ_ARCH_6 22 +#define VIRQ_ARCH_7 23 +/* ` } */ + +#define NR_VIRQS 24 + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_mmu_update(const struct mmu_update reqs[], + * ` unsigned count, unsigned *done_out, + * ` unsigned foreigndom) + * ` + * @reqs is an array of mmu_update_t structures ((ptr, val) pairs). + * @count is the length of the above array. 
+ * @pdone is an output parameter indicating number of completed operations + * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this + * hypercall invocation. Can be DOMID_SELF. + * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced + * in this hypercall invocation. The value of this field + * (x) encodes the PFD as follows: + * x == 0 => PFD == DOMID_SELF + * x != 0 => PFD == x - 1 + * + * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command. + * ------------- + * ptr[1:0] == MMU_NORMAL_PT_UPDATE: + * Updates an entry in a page table belonging to PFD. If updating an L1 table, + * and the new table entry is valid/present, the mapped frame must belong to + * FD. If attempting to map an I/O page then the caller assumes the privilege + * of the FD. + * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. + * FD == DOMID_XEN: Map restricted areas of Xen's heap space. + * ptr[:2] -- Machine address of the page-table entry to modify. + * val -- Value to write. + * + * There also certain implicit requirements when using this hypercall. The + * pages that make up a pagetable must be mapped read-only in the guest. + * This prevents uncontrolled guest updates to the pagetable. Xen strictly + * enforces this, and will disallow any pagetable update which will end up + * mapping pagetable page RW, and will disallow using any writable page as a + * pagetable. In practice it means that when constructing a page table for a + * process, thread, etc, we MUST be very dilligient in following these rules: + * 1). Start with top-level page (PGD or in Xen language: L4). Fill out + * the entries. + * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD + * or L2). + * 3). Start filling out the PTE table (L1) with the PTE entries. Once + * done, make sure to set each of those entries to RO (so writeable bit + * is unset). 
Once that has been completed, set the PMD (L2) for this + * PTE table as RO. + * 4). When completed with all of the PMD (L2) entries, and all of them have + * been set to RO, make sure to set RO the PUD (L3). Do the same + * operation on PGD (L4) pagetable entries that have a PUD (L3) entry. + * 5). Now before you can use those pages (so setting the cr3), you MUST also + * pin them so that the hypervisor can verify the entries. This is done + * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame + * number of the PGD (L4)). And this point the HYPERVISOR_mmuext_op( + * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be + * issued. + * For 32-bit guests, the L4 is not used (as there is less pagetables), so + * instead use L3. + * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE + * hypercall. Also if so desired the OS can also try to write to the PTE + * and be trapped by the hypervisor (as the PTE entry is RO). + * + * To deallocate the pages, the operations are the reverse of the steps + * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the + * pagetable MUST not be in use (meaning that the cr3 is not set to it). + * + * ptr[1:0] == MMU_MACHPHYS_UPDATE: + * Updates an entry in the machine->pseudo-physical mapping table. + * ptr[:2] -- Machine address within the frame whose mapping to modify. + * The frame must belong to the FD, if one is specified. + * val -- Value to write into the mapping entry. + * + * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: + * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed + * with those in @val. + * + * @val is usually the machine frame number along with some attributes. + * The attributes by default follow the architecture defined bits. 
Meaning that + * if this is a X86_64 machine and four page table layout is used, the layout + * of val is: + * - 63 if set means No execute (NX) + * - 46-13 the machine frame number + * - 12 available for guest + * - 11 available for guest + * - 10 available for guest + * - 9 available for guest + * - 8 global + * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages) + * - 6 dirty + * - 5 accessed + * - 4 page cached disabled + * - 3 page write through + * - 2 userspace accessible + * - 1 writeable + * - 0 present + * + * The one bits that does not fit with the default layout is the PAGE_PSE + * also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the + * HYPERVISOR_mmuext_op serve as mechanism to set a pagetable to be 4MB + * (or 2MB) instead of using the PAGE_PSE bit. + * + * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen + * using it as the Page Attribute Table (PAT) bit - for details on it please + * refer to Intel SDM 10.12. The PAT allows to set the caching attributes of + * pages instead of using MTRRs. + * + * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits): + * PAT4 PAT0 + * +-----+-----+----+----+----+-----+----+----+ + * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux + * +-----+-----+----+----+----+-----+----+----+ + * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots) + * +-----+-----+----+----+----+-----+----+----+ + * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen + * +-----+-----+----+----+----+-----+----+----+ + * + * The lookup of this index table translates to looking up + * Bit 7, Bit 4, and Bit 3 of val entry: + * + * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3). + * + * If all bits are off, then we are using PAT0. If bit 3 turned on, + * then we are using PAT1, if bit 3 and bit 4, then PAT2.. + * + * As you can see, the Linux PAT1 translates to PAT4 under Xen. 
Which means + * that if a guest that follows Linux's PAT setup and would like to set Write + * Combined on pages it MUST use PAT4 entry. Meaning that Bit 7 (PAGE_PAT) is + * set. For example, under Linux it only uses PAT0, PAT1, and PAT2 for the + * caching as: + * + * WB = none (so PAT0) + * WC = PWT (bit 3 on) + * UC = PWT | PCD (bit 3 and 4 are on). + * + * To make it work with Xen, it needs to translate the WC bit as so: + * + * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3 + * + * And to translate back it would: + * + * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7. + */ +#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ +#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ +#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ + +/* + * MMU EXTENDED OPERATIONS + * + * ` enum neg_errnoval + * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[], + * ` unsigned int count, + * ` unsigned int *pdone, + * ` unsigned int foreigndom) + */ +/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. + * A foreigndom (FD) can be specified (or DOMID_SELF for none). + * Where the FD has some effect, it is described below. + * + * cmd: MMUEXT_(UN)PIN_*_TABLE + * mfn: Machine frame number to be (un)pinned as a p.t. page. + * The frame must belong to the FD, if one is specified. + * + * cmd: MMUEXT_NEW_BASEPTR + * mfn: Machine frame number of new page-table base to install in MMU. + * + * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] + * mfn: Machine frame number of new page-table base to install in MMU + * when in user space. + * + * cmd: MMUEXT_TLB_FLUSH_LOCAL + * No additional arguments. Flushes local TLB. + * + * cmd: MMUEXT_INVLPG_LOCAL + * linear_addr: Linear address to be flushed from the local TLB. + * + * cmd: MMUEXT_TLB_FLUSH_MULTI + * vcpumask: Pointer to bitmap of VCPUs to be flushed. + * + * cmd: MMUEXT_INVLPG_MULTI + * linear_addr: Linear address to be flushed. 
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed. + * + * cmd: MMUEXT_TLB_FLUSH_ALL + * No additional arguments. Flushes all VCPUs' TLBs. + * + * cmd: MMUEXT_INVLPG_ALL + * linear_addr: Linear address to be flushed from all VCPUs' TLBs. + * + * cmd: MMUEXT_FLUSH_CACHE + * No additional arguments. Writes back and flushes cache contents. + * + * cmd: MMUEXT_FLUSH_CACHE_GLOBAL + * No additional arguments. Writes back and flushes cache contents + * on all CPUs in the system. + * + * cmd: MMUEXT_SET_LDT + * linear_addr: Linear address of LDT base (NB. must be page-aligned). + * nr_ents: Number of entries in LDT. + * + * cmd: MMUEXT_CLEAR_PAGE + * mfn: Machine frame number to be cleared. + * + * cmd: MMUEXT_COPY_PAGE + * mfn: Machine frame number of the destination page. + * src_mfn: Machine frame number of the source page. + * + * cmd: MMUEXT_[UN]MARK_SUPER + * mfn: Machine frame number of head of superpage to be [un]marked. + */ +/* ` enum mmuext_cmd { */ +#define MMUEXT_PIN_L1_TABLE 0 +#define MMUEXT_PIN_L2_TABLE 1 +#define MMUEXT_PIN_L3_TABLE 2 +#define MMUEXT_PIN_L4_TABLE 3 +#define MMUEXT_UNPIN_TABLE 4 +#define MMUEXT_NEW_BASEPTR 5 +#define MMUEXT_TLB_FLUSH_LOCAL 6 +#define MMUEXT_INVLPG_LOCAL 7 +#define MMUEXT_TLB_FLUSH_MULTI 8 +#define MMUEXT_INVLPG_MULTI 9 +#define MMUEXT_TLB_FLUSH_ALL 10 +#define MMUEXT_INVLPG_ALL 11 +#define MMUEXT_FLUSH_CACHE 12 +#define MMUEXT_SET_LDT 13 +#define MMUEXT_NEW_USER_BASEPTR 15 +#define MMUEXT_CLEAR_PAGE 16 +#define MMUEXT_COPY_PAGE 17 +#define MMUEXT_FLUSH_CACHE_GLOBAL 18 +#define MMUEXT_MARK_SUPER 19 +#define MMUEXT_UNMARK_SUPER 20 +/* ` } */ + +#ifndef __ASSEMBLY__ +struct mmuext_op { + unsigned int cmd; /* => enum mmuext_cmd */ + union { + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR + * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */ + xen_pfn_t mfn; + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ + unsigned long linear_addr; + } arg1; + union { + /* SET_LDT */ + unsigned int nr_ents; + /* TLB_FLUSH_MULTI, INVLPG_MULTI */ +#if 
__XEN_INTERFACE_VERSION__ >= 0x00030205 + XEN_GUEST_HANDLE(const_void) vcpumask; +#else + const void *vcpumask; +#endif + /* COPY_PAGE */ + xen_pfn_t src_mfn; + } arg2; +}; +typedef struct mmuext_op mmuext_op_t; +DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); +#endif + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val, + * ` enum uvm_flags flags) + * ` + * ` enum neg_errnoval + * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val, + * ` enum uvm_flags flags, + * ` domid_t domid) + * ` + * ` @va: The virtual address whose mapping we want to change + * ` @val: The new page table entry, must contain a machine address + * ` @flags: Control TLB flushes + */ +/* These are passed as 'flags' to update_va_mapping. They can be ORed. */ +/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ +/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ +/* ` enum uvm_flags { */ +#define UVMF_NONE (0UL<<0) /* No flushing at all. */ +#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ +#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ +#define UVMF_FLUSHTYPE_MASK (3UL<<0) +#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ +#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ +#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ +/* ` } */ + +/* + * Commands to HYPERVISOR_console_io(). + */ +#define CONSOLEIO_write 0 +#define CONSOLEIO_read 1 + +/* + * Commands to HYPERVISOR_vm_assist(). + */ +#define VMASST_CMD_enable 0 +#define VMASST_CMD_disable 1 + +/* x86/32 guests: simulate full 4GB segment limits. */ +#define VMASST_TYPE_4gb_segments 0 + +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ +#define VMASST_TYPE_4gb_segments_notify 1 + +/* + * x86 guests: support writes to bottom-level PTEs. + * NB1. Page-directory entries cannot be written. + * NB2. Guest must continue to remove all writable mappings of PTEs. 
+ */ +#define VMASST_TYPE_writable_pagetables 2 + +/* x86/PAE guests: support PDPTs above 4GB. */ +#define VMASST_TYPE_pae_extended_cr3 3 + +#define MAX_VMASST_TYPE 3 + +#ifndef __ASSEMBLY__ + +typedef uint16_t domid_t; + +/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ +#define DOMID_FIRST_RESERVED (0x7FF0U) + +/* DOMID_SELF is used in certain contexts to refer to oneself. */ +#define DOMID_SELF (0x7FF0U) + +/* + * DOMID_IO is used to restrict page-table updates to mapping I/O memory. + * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO + * is useful to ensure that no mappings to the OS's own heap are accidentally + * installed. (e.g., in Linux this could cause havoc as reference counts + * aren't adjusted on the I/O-mapping code path). + * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can + * be specified by any calling domain. + */ +#define DOMID_IO (0x7FF1U) + +/* + * DOMID_XEN is used to allow privileged domains to map restricted parts of + * Xen's heap space (e.g., the machine_to_phys table). + * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if + * the caller is privileged. + */ +#define DOMID_XEN (0x7FF2U) + +/* + * DOMID_COW is used as the owner of sharable pages */ +#define DOMID_COW (0x7FF3U) + +/* DOMID_INVALID is used to identify pages with unknown owner. */ +#define DOMID_INVALID (0x7FF4U) + +/* Idle domain. */ +#define DOMID_IDLE (0x7FFFU) + +/* + * Send an array of these to HYPERVISOR_mmu_update(). + * NB. The fields are natural pointer/address size for this architecture. + */ +struct mmu_update { + uint64_t ptr; /* Machine address of PTE. */ + uint64_t val; /* New contents of PTE. */ +}; +typedef struct mmu_update mmu_update_t; +DEFINE_XEN_GUEST_HANDLE(mmu_update_t); + +/* + * ` enum neg_errnoval + * ` HYPERVISOR_multicall(multicall_entry_t call_list[], + * ` uint32_t nr_calls); + * + * NB. 
The fields are logically the natural register size for this + * architecture. In cases where xen_ulong_t is larger than this then + * any unused bits in the upper portion must be zero. + */ +struct multicall_entry { + xen_ulong_t op, result; + xen_ulong_t args[6]; +}; +typedef struct multicall_entry multicall_entry_t; +DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); + +#if __XEN_INTERFACE_VERSION__ < 0x00040400 +/* + * Event channel endpoints per domain (when using the 2-level ABI): + * 1024 if a long is 32 bits; 4096 if a long is 64 bits. + */ +#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS +#endif + +struct vcpu_time_info { + /* + * Updates to the following values are preceded and followed by an + * increment of 'version'. The guest can therefore detect updates by + * looking for changes to 'version'. If the least-significant bit of + * the version number is set then an update is in progress and the guest + * must wait to read a consistent set of values. + * The correct way to interact with the version number is similar to + * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. + */ + uint32_t version; + uint32_t pad0; + uint64_t tsc_timestamp; /* TSC at last update of time vals. */ + uint64_t system_time; /* Time, in nanosecs, since boot. */ + /* + * Current system time: + * system_time + + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) + * CPU frequency (Hz): + * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift + */ + uint32_t tsc_to_system_mul; + int8_t tsc_shift; + int8_t pad1[3]; +}; /* 32 bytes */ +typedef struct vcpu_time_info vcpu_time_info_t; + +struct vcpu_info { + /* + * 'evtchn_upcall_pending' is written non-zero by Xen to indicate + * a pending notification for a particular VCPU. It is then cleared + * by the guest OS /before/ checking for pending work, thus avoiding + * a set-and-check race. Note that the mask is only accessed by Xen + * on the CPU that is currently hosting the VCPU. 
This means that the + * pending and mask flags can be updated by the guest without special + * synchronisation (i.e., no need for the x86 LOCK prefix). + * This may seem suboptimal because if the pending flag is set by + * a different CPU then an IPI may be scheduled even when the mask + * is set. However, note: + * 1. The task of 'interrupt holdoff' is covered by the per-event- + * channel mask bits. A 'noisy' event that is continually being + * triggered can be masked at source at this very precise + * granularity. + * 2. The main purpose of the per-VCPU mask is therefore to restrict + * reentrant execution: whether for concurrency control, or to + * prevent unbounded stack usage. Whatever the purpose, we expect + * that the mask will be asserted only for short periods at a time, + * and so the likelihood of a 'spurious' IPI is suitably small. + * The mask is read before making an event upcall to the guest: a + * non-zero mask therefore guarantees that the VCPU will not receive + * an upcall activation. The mask is cleared when the VCPU requests + * to block: this avoids wakeup-waiting races. + */ + uint8_t evtchn_upcall_pending; +#ifdef XEN_HAVE_PV_UPCALL_MASK + uint8_t evtchn_upcall_mask; +#else /* XEN_HAVE_PV_UPCALL_MASK */ + uint8_t pad0; +#endif /* XEN_HAVE_PV_UPCALL_MASK */ + xen_ulong_t evtchn_pending_sel; + struct arch_vcpu_info arch; + struct vcpu_time_info time; +}; /* 64 bytes (x86) */ +#ifndef __XEN__ +typedef struct vcpu_info vcpu_info_t; +#endif + +/* + * `incontents 200 startofday_shared Start-of-day shared data structure + * Xen/kernel shared data -- pointer provided in start_info. + * + * This structure is defined to be both smaller than a page, and the + * only data on the shared page, but may vary in actual size even within + * compatible Xen versions; guests should not rely on the size + * of this structure remaining constant. 
+ */ +struct shared_info { + struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS]; + + /* + * A domain can create "event channels" on which it can send and receive + * asynchronous event notifications. There are three classes of event that + * are delivered by this mechanism: + * 1. Bi-directional inter- and intra-domain connections. Domains must + * arrange out-of-band to set up a connection (usually by allocating + * an unbound 'listener' port and avertising that via a storage service + * such as xenstore). + * 2. Physical interrupts. A domain with suitable hardware-access + * privileges can bind an event-channel port to a physical interrupt + * source. + * 3. Virtual interrupts ('events'). A domain can bind an event-channel + * port to a virtual interrupt source, such as the virtual-timer + * device or the emergency console. + * + * Event channels are addressed by a "port index". Each channel is + * associated with two bits of information: + * 1. PENDING -- notifies the domain that there is a pending notification + * to be processed. This bit is cleared by the guest. + * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING + * will cause an asynchronous upcall to be scheduled. This bit is only + * updated by the guest. It is read-only within Xen. If a channel + * becomes pending while the channel is masked then the 'edge' is lost + * (i.e., when the channel is unmasked, the guest must manually handle + * pending notifications as no upcall will be scheduled by Xen). + * + * To expedite scanning of pending notifications, any 0->1 pending + * transition on an unmasked channel causes a corresponding bit in a + * per-vcpu selector word to be set. Each bit in the selector covers a + * 'C long' in the PENDING bitfield array. + */ + xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8]; + xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8]; + + /* + * Wallclock time: updated only by control software. 
Guests should base + * their gettimeofday() syscall on this wallclock-base value. + */ + uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ + uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ + uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ + + struct arch_shared_info arch; + +}; +#ifndef __XEN__ +typedef struct shared_info shared_info_t; +#endif + +/* + * `incontents 200 startofday Start-of-day memory layout + * + * 1. The domain is started within contiguous virtual-memory region. + * 2. The contiguous region ends on an aligned 4MB boundary. + * 3. This the order of bootstrap elements in the initial virtual region: + * a. relocated kernel image + * b. initial ram disk [mod_start, mod_len] + * c. list of allocated page frames [mfn_list, nr_pages] + * (unless relocated due to XEN_ELFNOTE_INIT_P2M) + * d. start_info_t structure [register ESI (x86)] + * e. bootstrap page tables [pt_base and CR3 (x86)] + * f. bootstrap stack [register ESP (x86)] + * 4. Bootstrap elements are packed together, but each is 4kB-aligned. + * 5. The initial ram disk may be omitted. + * 6. The list of page frames forms a contiguous 'pseudo-physical' memory + * layout for the domain. In particular, the bootstrap virtual-memory + * region is a 1:1 mapping to the first section of the pseudo-physical map. + * 7. All bootstrap elements are mapped read-writable for the guest OS. The + * only exception is the bootstrap page table, which is mapped read-only. + * 8. There is guaranteed to be at least 512kB padding after the final + * bootstrap element. If necessary, the bootstrap virtual region is + * extended by an extra 4MB to ensure this. + * + * Note: Prior to 25833:bb85bbccb1c9. ("x86/32-on-64 adjust Dom0 initial page + * table layout") a bug caused the pt_base (3.e above) and cr3 to not point + * to the start of the guest page tables (it was offset by two pages). 
+ * This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU + * or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got + * allocated in the order: 'first L1','first L2', 'first L3', so the offset + * to the page table base is by two pages back. The initial domain if it is + * 32-bit and runs under a 64-bit hypervisor should _NOT_ use two of the + * pages preceding pt_base and mark them as reserved/unused. + */ +#ifdef XEN_HAVE_PV_GUEST_ENTRY +struct start_info { + /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ + char magic[32]; /* "xen--". */ + unsigned long nr_pages; /* Total pages allocated to this domain. */ + unsigned long shared_info; /* MACHINE address of shared info struct. */ + uint32_t flags; /* SIF_xxx flags. */ + xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ + uint32_t store_evtchn; /* Event channel for store communication. */ + union { + struct { + xen_pfn_t mfn; /* MACHINE page number of console page. */ + uint32_t evtchn; /* Event channel for console page. */ + } domU; + struct { + uint32_t info_off; /* Offset of console_info struct. */ + uint32_t info_size; /* Size of console_info struct from start.*/ + } dom0; + } console; + /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ + unsigned long pt_base; /* VIRTUAL address of page directory. */ + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ + unsigned long mod_start; /* VIRTUAL address of pre-loaded module */ + /* (PFN of pre-loaded module if */ + /* SIF_MOD_START_PFN set in flags). */ + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ +#define MAX_GUEST_CMDLINE 1024 + int8_t cmd_line[MAX_GUEST_CMDLINE]; + /* The pfn range here covers both page table and p->m table frames. */ + unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. 
*/ + unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ +}; +typedef struct start_info start_info_t; + +/* New console union for dom0 introduced in 0x00030203. */ +#if __XEN_INTERFACE_VERSION__ < 0x00030203 +#define console_mfn console.domU.mfn +#define console_evtchn console.domU.evtchn +#endif +#endif /* XEN_HAVE_PV_GUEST_ENTRY */ + +/* These flags are passed in the 'flags' field of start_info_t. */ +#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ +#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ +#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ +#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */ +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ + +/* + * A multiboot module is a package containing modules very similar to a + * multiboot module array. The only differences are: + * - the array of module descriptors is by convention simply at the beginning + * of the multiboot module, + * - addresses in the module descriptors are based on the beginning of the + * multiboot module, + * - the number of modules is determined by a termination descriptor that has + * mod_start == 0. + * + * This permits to both build it statically and reference it in a configuration + * file, and let the PV guest easily rebase the addresses to virtual addresses + * and at the same time count the number of modules. + */ +struct xen_multiboot_mod_list +{ + /* Address of first byte of the module */ + uint32_t mod_start; + /* Address of last byte of the module (inclusive) */ + uint32_t mod_end; + /* Address of zero-terminated command line */ + uint32_t cmdline; + /* Unused, must be zero */ + uint32_t pad; +}; +/* + * `incontents 200 startofday_dom0_console Dom0_console + * + * The console structure in start_info.console.dom0 + * + * This structure includes a variety of information required to + * have a working VGA/VESA console. 
+ */ +typedef struct dom0_vga_console_info { + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ +#define XEN_VGATYPE_TEXT_MODE_3 0x03 +#define XEN_VGATYPE_VESA_LFB 0x23 +#define XEN_VGATYPE_EFI_LFB 0x70 + + union { + struct { + /* Font height, in pixels. */ + uint16_t font_height; + /* Cursor location (column, row). */ + uint16_t cursor_x, cursor_y; + /* Number of rows and columns (dimensions in characters). */ + uint16_t rows, columns; + } text_mode_3; + + struct { + /* Width and height, in pixels. */ + uint16_t width, height; + /* Bytes per scan line. */ + uint16_t bytes_per_line; + /* Bits per pixel. */ + uint16_t bits_per_pixel; + /* LFB physical address, and size (in units of 64kB). */ + uint32_t lfb_base; + uint32_t lfb_size; + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ + uint8_t red_pos, red_size; + uint8_t green_pos, green_size; + uint8_t blue_pos, blue_size; + uint8_t rsvd_pos, rsvd_size; +#if __XEN_INTERFACE_VERSION__ >= 0x00030206 + /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ + uint32_t gbl_caps; + /* Mode attributes (offset 0x0, VESA command 0x4f01). */ + uint16_t mode_attrs; +#endif + } vesa_lfb; + } u; +} dom0_vga_console_info_t; +#define xen_vga_console_info dom0_vga_console_info +#define xen_vga_console_info_t dom0_vga_console_info_t + +typedef uint8_t xen_domain_handle_t[16]; + +/* Turn a plain number into a C unsigned long constant. */ +#define __mk_unsigned_long(x) x ## UL +#define mk_unsigned_long(x) __mk_unsigned_long(x) + +__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); +__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); +__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); +__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); + +#else /* __ASSEMBLY__ */ + +/* In assembly code we cannot use C numeric constant suffixes. */ +#define mk_unsigned_long(x) x + +#endif /* !__ASSEMBLY__ */ + +/* Default definitions for macros used by domctl/sysctl. 
*/ +#if defined(__XEN__) || defined(__XEN_TOOLS__) + +#ifndef uint64_aligned_t +#define uint64_aligned_t uint64_t +#endif +#ifndef XEN_GUEST_HANDLE_64 +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) +#endif + +#ifndef __ASSEMBLY__ +struct xenctl_bitmap { + XEN_GUEST_HANDLE_64(uint8) bitmap; + uint32_t nr_bits; +}; +#endif + +#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ + +#endif /* __XEN_PUBLIC_XEN_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/src/interface/efi/efi_acpi.c b/src/interface/efi/efi_acpi.c new file mode 100644 index 00000000..a347eaf3 --- /dev/null +++ b/src/interface/efi/efi_acpi.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * iPXE ACPI API for EFI + * + */ + +#include +#include +#include +#include + +/** ACPI configuration table */ +static EFI_ACPI_1_0_ROOT_SYSTEM_DESCRIPTION_POINTER *rsdp; +EFI_USE_TABLE ( ACPI_10_TABLE, &rsdp, 0 ); + +/** + * Locate ACPI root system description table + * + * @ret rsdt ACPI root system description table, or UNULL + */ +static userptr_t efi_find_rsdt ( void ) { + + /* Locate RSDT via ACPI configuration table, if available */ + if ( rsdp ) + return phys_to_user ( rsdp->RsdtAddress ); + + return UNULL; +} + +PROVIDE_ACPI ( efi, acpi_find_rsdt, efi_find_rsdt ); diff --git a/src/interface/efi/efi_autoboot.c b/src/interface/efi/efi_autoboot.c new file mode 100644 index 00000000..a9e807e2 --- /dev/null +++ b/src/interface/efi/efi_autoboot.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * EFI autoboot device + * + */ + +/** + * Identify autoboot device + * + */ +void efi_set_autoboot ( void ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_SIMPLE_NETWORK_PROTOCOL *snp; + void *interface; + } snp; + EFI_SIMPLE_NETWORK_MODE *mode; + EFI_STATUS efirc; + + /* Look for an SNP instance on the image's device handle */ + if ( ( efirc = bs->OpenProtocol ( efi_loaded_image->DeviceHandle, + &efi_simple_network_protocol_guid, + &snp.interface, efi_image_handle, + NULL, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + DBGC ( efi_loaded_image, "EFI found no autoboot device\n" ); + return; + } + + /* Record autoboot device */ + mode = snp.snp->Mode; + set_autoboot_ll_addr ( &mode->CurrentAddress, mode->HwAddressSize ); + DBGC ( efi_loaded_image, "EFI found autoboot link-layer address:\n" ); + DBGC_HDA ( efi_loaded_image, 0, &mode->CurrentAddress, + mode->HwAddressSize ); + + /* Close protocol */ + bs->CloseProtocol ( efi_loaded_image->DeviceHandle, + &efi_simple_network_protocol_guid, + efi_image_handle, NULL ); +} diff --git a/src/interface/efi/efi_blacklist.c b/src/interface/efi/efi_blacklist.c new file mode 100644 index 00000000..292b28e8 --- /dev/null +++ b/src/interface/efi/efi_blacklist.c @@ -0,0 +1,237 @@ +/* + * Copyright (C) 2019 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI driver blacklist + * + */ + +/** A blacklisted driver */ +struct efi_blacklist { + /** Name */ + const char *name; + /** + * Check if driver is blacklisted + * + * @v binding Driver binding protocol + * @v loaded Loaded image protocol + * @v wtf Component name protocol, if present + * @ret blacklisted Driver is the blacklisted driver + */ + int ( * blacklist ) ( EFI_DRIVER_BINDING_PROTOCOL *binding, + EFI_LOADED_IMAGE_PROTOCOL *loaded, + EFI_COMPONENT_NAME_PROTOCOL *wtf ); +}; + +/** + * Blacklist Dell Ip4ConfigDxe driver + * + * @v binding Driver binding protocol + * @v loaded Loaded image protocol + * @v wtf Component name protocol, if present + * @ret blacklisted Driver is the blacklisted driver + */ +static int +efi_blacklist_dell_ip4config ( EFI_DRIVER_BINDING_PROTOCOL *binding __unused, + EFI_LOADED_IMAGE_PROTOCOL *loaded __unused, + EFI_COMPONENT_NAME_PROTOCOL *wtf ) { + static const CHAR16 ip4cfg[] = L"IP4 CONFIG Network Service Driver"; + static const char dell[] = "Dell Inc."; + char manufacturer[ sizeof ( dell ) ]; + CHAR16 *name; + + /* Check driver name */ + if ( ! 
wtf ) + return 0; + if ( wtf->GetDriverName ( wtf, "eng", &name ) != 0 ) + return 0; + if ( memcmp ( name, ip4cfg, sizeof ( ip4cfg ) ) != 0 ) + return 0; + + /* Check manufacturer */ + fetch_string_setting ( NULL, &manufacturer_setting, manufacturer, + sizeof ( manufacturer ) ); + if ( strcmp ( manufacturer, dell ) != 0 ) + return 0; + + return 1; +} + +/** Blacklisted drivers */ +static struct efi_blacklist efi_blacklists[] = { + { + .name = "Dell Ip4Config", + .blacklist = efi_blacklist_dell_ip4config, + }, +}; + +/** + * Find driver blacklisting, if any + * + * @v driver Driver binding handle + * @ret blacklist Driver blacklisting, or NULL + * @ret rc Return status code + */ +static int efi_blacklist ( EFI_HANDLE driver, + struct efi_blacklist **blacklist ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_DRIVER_BINDING_PROTOCOL *binding; + void *interface; + } binding; + union { + EFI_LOADED_IMAGE_PROTOCOL *loaded; + void *interface; + } loaded; + union { + EFI_COMPONENT_NAME_PROTOCOL *wtf; + void *interface; + } wtf; + unsigned int i; + EFI_HANDLE image; + EFI_STATUS efirc; + int rc; + + DBGC2 ( &efi_blacklists, "EFIBL checking %s\n", + efi_handle_name ( driver ) ); + + /* Mark as not blacklisted */ + *blacklist = NULL; + + /* Open driver binding protocol */ + if ( ( efirc = bs->OpenProtocol ( + driver, &efi_driver_binding_protocol_guid, + &binding.interface, efi_image_handle, driver, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIBL %s could not open driver binding " + "protocol: %s\n", efi_handle_name ( driver ), + strerror ( rc ) ); + goto err_binding; + } + image = binding.binding->ImageHandle; + + /* Open loaded image protocol */ + if ( ( efirc = bs->OpenProtocol ( + image, &efi_loaded_image_protocol_guid, + &loaded.interface, efi_image_handle, image, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIBL %s could not open", + efi_handle_name ( driver ) 
); + DBGC ( driver, " %s loaded image protocol: %s\n", + efi_handle_name ( image ), strerror ( rc ) ); + goto err_loaded; + } + + /* Open component name protocol, if present*/ + if ( ( efirc = bs->OpenProtocol ( + driver, &efi_component_name_protocol_guid, + &wtf.interface, efi_image_handle, driver, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + /* Ignore failure; is not required to be present */ + wtf.interface = NULL; + } + + /* Check blacklistings */ + for ( i = 0 ; i < ( sizeof ( efi_blacklists ) / + sizeof ( efi_blacklists[0] ) ) ; i++ ) { + if ( efi_blacklists[i].blacklist ( binding.binding, + loaded.loaded, wtf.wtf ) ) { + *blacklist = &efi_blacklists[i]; + break; + } + } + + /* Success */ + rc = 0; + + /* Close protocols */ + if ( wtf.wtf ) { + bs->CloseProtocol ( driver, &efi_component_name_protocol_guid, + efi_image_handle, driver ); + } + bs->CloseProtocol ( image, &efi_loaded_image_protocol_guid, + efi_image_handle, image ); + err_loaded: + bs->CloseProtocol ( driver, &efi_driver_binding_protocol_guid, + efi_image_handle, driver ); + err_binding: + return rc; +} + +/** + * Unload any blacklisted drivers + * + */ +void efi_unload_blacklist ( void ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_blacklist *blacklist; + EFI_HANDLE *drivers; + EFI_HANDLE driver; + UINTN num_drivers; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Locate all driver binding protocol handles */ + if ( ( efirc = bs->LocateHandleBuffer ( + ByProtocol, &efi_driver_binding_protocol_guid, + NULL, &num_drivers, &drivers ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efi_blacklists, "EFIBL could not list all drivers: " + "%s\n", strerror ( rc ) ); + return; + } + + /* Unload any blacklisted drivers */ + for ( i = 0 ; i < num_drivers ; i++ ) { + driver = drivers[i]; + if ( ( rc = efi_blacklist ( driver, &blacklist ) ) != 0 ) { + DBGC ( driver, "EFIBL could not determine " + "blacklisting for %s: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + 
continue; + } + if ( ! blacklist ) + continue; + DBGC ( driver, "EFIBL unloading %s (%s)\n", + efi_handle_name ( driver ), blacklist->name ); + if ( ( efirc = bs->UnloadImage ( driver ) ) != 0 ) { + DBGC ( driver, "EFIBL could not unload %s: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + } + } + + /* Free handle list */ + bs->FreePool ( drivers ); +} diff --git a/src/interface/efi/efi_block.c b/src/interface/efi/efi_block.c new file mode 100644 index 00000000..19f669fd --- /dev/null +++ b/src/interface/efi/efi_block.c @@ -0,0 +1,684 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * EFI block device protocols + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** ACPI table protocol protocol */ +static EFI_ACPI_TABLE_PROTOCOL *acpi; +EFI_REQUEST_PROTOCOL ( EFI_ACPI_TABLE_PROTOCOL, &acpi ); + +/** Boot filename */ +static wchar_t efi_block_boot_filename[] = EFI_REMOVABLE_MEDIA_FILE_NAME; + +/** EFI SAN device private data */ +struct efi_block_data { + /** SAN device */ + struct san_device *sandev; + /** EFI handle */ + EFI_HANDLE handle; + /** Media descriptor */ + EFI_BLOCK_IO_MEDIA media; + /** Block I/O protocol */ + EFI_BLOCK_IO_PROTOCOL block_io; + /** Device path protocol */ + EFI_DEVICE_PATH_PROTOCOL *path; +}; + +/** + * Read from or write to EFI block device + * + * @v sandev SAN device + * @v lba Starting LBA + * @v data Data buffer + * @v len Size of buffer + * @v sandev_rw SAN device read/write method + * @ret rc Return status code + */ +static int efi_block_rw ( struct san_device *sandev, uint64_t lba, + void *data, size_t len, + int ( * sandev_rw ) ( struct san_device *sandev, + uint64_t lba, unsigned int count, + userptr_t buffer ) ) { + struct efi_block_data *block = sandev->priv; + unsigned int count; + int rc; + + /* Sanity check */ + count = ( len / block->media.BlockSize ); + if ( ( count * block->media.BlockSize ) != len ) { + DBGC ( sandev, "EFIBLK %#02x impossible length %#zx\n", + sandev->drive, len ); + return -EINVAL; + } + + /* Read from / write to block device */ + if ( ( rc = sandev_rw ( sandev, lba, count, + virt_to_user ( data ) ) ) != 0 ) { + DBGC ( sandev, "EFIBLK %#02x I/O failed: %s\n", + sandev->drive, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Reset EFI block device + * + * @v block_io 
Block I/O protocol + * @v verify Perform extended verification + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI efi_block_io_reset ( EFI_BLOCK_IO_PROTOCOL *block_io, + BOOLEAN verify __unused ) { + struct efi_block_data *block = + container_of ( block_io, struct efi_block_data, block_io ); + struct san_device *sandev = block->sandev; + int rc; + + DBGC2 ( sandev, "EFIBLK %#02x reset\n", sandev->drive ); + efi_snp_claim(); + rc = sandev_reset ( sandev ); + efi_snp_release(); + return EFIRC ( rc ); +} + +/** + * Read from EFI block device + * + * @v block_io Block I/O protocol + * @v media Media identifier + * @v lba Starting LBA + * @v len Size of buffer + * @v data Data buffer + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_block_io_read ( EFI_BLOCK_IO_PROTOCOL *block_io, UINT32 media __unused, + EFI_LBA lba, UINTN len, VOID *data ) { + struct efi_block_data *block = + container_of ( block_io, struct efi_block_data, block_io ); + struct san_device *sandev = block->sandev; + int rc; + + DBGC2 ( sandev, "EFIBLK %#02x read LBA %#08llx to %p+%#08zx\n", + sandev->drive, lba, data, ( ( size_t ) len ) ); + efi_snp_claim(); + rc = efi_block_rw ( sandev, lba, data, len, sandev_read ); + efi_snp_release(); + return EFIRC ( rc ); +} + +/** + * Write to EFI block device + * + * @v block_io Block I/O protocol + * @v media Media identifier + * @v lba Starting LBA + * @v len Size of buffer + * @v data Data buffer + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_block_io_write ( EFI_BLOCK_IO_PROTOCOL *block_io, UINT32 media __unused, + EFI_LBA lba, UINTN len, VOID *data ) { + struct efi_block_data *block = + container_of ( block_io, struct efi_block_data, block_io ); + struct san_device *sandev = block->sandev; + int rc; + + DBGC2 ( sandev, "EFIBLK %#02x write LBA %#08llx from %p+%#08zx\n", + sandev->drive, lba, data, ( ( size_t ) len ) ); + efi_snp_claim(); + rc = efi_block_rw ( sandev, lba, data, len, sandev_write ); + 
efi_snp_release(); + return EFIRC ( rc ); +} + +/** + * Flush data to EFI block device + * + * @v block_io Block I/O protocol + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_block_io_flush ( EFI_BLOCK_IO_PROTOCOL *block_io ) { + struct efi_block_data *block = + container_of ( block_io, struct efi_block_data, block_io ); + struct san_device *sandev = block->sandev; + + DBGC2 ( sandev, "EFIBLK %#02x flush\n", sandev->drive ); + + /* Nothing to do */ + return 0; +} + +/** + * Connect all possible drivers to EFI block device + * + * @v sandev SAN device + */ +static void efi_block_connect ( struct san_device *sandev ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_block_data *block = sandev->priv; + EFI_STATUS efirc; + int rc; + + /* Try to connect all possible drivers to this block device */ + if ( ( efirc = bs->ConnectController ( block->handle, NULL, + NULL, TRUE ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( sandev, "EFIBLK %#02x could not connect drivers: %s\n", + sandev->drive, strerror ( rc ) ); + /* May not be an error; may already be connected */ + } + DBGC2 ( sandev, "EFIBLK %#02x supports protocols:\n", sandev->drive ); + DBGC2_EFI_PROTOCOLS ( sandev, block->handle ); +} + +/** + * Hook EFI block device + * + * @v drive Drive number + * @v uris List of URIs + * @v count Number of URIs + * @v flags Flags + * @ret drive Drive number, or negative error + */ +static int efi_block_hook ( unsigned int drive, struct uri **uris, + unsigned int count, unsigned int flags ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct san_device *sandev; + struct efi_block_data *block; + int leak = 0; + EFI_STATUS efirc; + int rc; + + /* Sanity check */ + if ( ! count ) { + DBG ( "EFIBLK has no URIs\n" ); + rc = -ENOTTY; + goto err_no_uris; + } + + /* Allocate and initialise structure */ + sandev = alloc_sandev ( uris, count, sizeof ( *block ) ); + if ( ! 
sandev ) { + rc = -ENOMEM; + goto err_alloc; + } + block = sandev->priv; + block->sandev = sandev; + block->media.MediaPresent = 1; + block->media.LogicalBlocksPerPhysicalBlock = 1; + block->block_io.Revision = EFI_BLOCK_IO_PROTOCOL_REVISION3; + block->block_io.Media = &block->media; + block->block_io.Reset = efi_block_io_reset; + block->block_io.ReadBlocks = efi_block_io_read; + block->block_io.WriteBlocks = efi_block_io_write; + block->block_io.FlushBlocks = efi_block_io_flush; + + /* Register SAN device */ + if ( ( rc = register_sandev ( sandev, drive, flags ) ) != 0 ) { + DBGC ( sandev, "EFIBLK %#02x could not register: %s\n", + drive, strerror ( rc ) ); + goto err_register; + } + + /* Update media descriptor */ + block->media.BlockSize = + ( sandev->capacity.blksize << sandev->blksize_shift ); + block->media.LastBlock = + ( ( sandev->capacity.blocks >> sandev->blksize_shift ) - 1 ); + + /* Construct device path */ + if ( ! sandev->active ) { + rc = -ENODEV; + DBGC ( sandev, "EFIBLK %#02x not active after registration\n", + drive ); + goto err_active; + } + block->path = efi_describe ( &sandev->active->block ); + if ( ! 
block->path ) { + rc = -ENODEV; + DBGC ( sandev, "EFIBLK %#02x has no device path\n", drive ); + goto err_describe; + } + DBGC ( sandev, "EFIBLK %#02x has device path %s\n", + drive, efi_devpath_text ( block->path ) ); + + /* Install protocols */ + if ( ( efirc = bs->InstallMultipleProtocolInterfaces ( + &block->handle, + &efi_block_io_protocol_guid, &block->block_io, + &efi_device_path_protocol_guid, block->path, + NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( sandev, "EFIBLK %#02x could not install protocols: %s\n", + sandev->drive, strerror ( rc ) ); + goto err_install; + } + + /* Connect all possible protocols */ + efi_block_connect ( sandev ); + + return drive; + + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( + block->handle, + &efi_block_io_protocol_guid, &block->block_io, + &efi_device_path_protocol_guid, block->path, + NULL ) ) != 0 ) { + DBGC ( sandev, "EFIBLK %#02x could not uninstall protocols: " + "%s\n", sandev->drive, strerror ( -EEFI ( efirc ) ) ); + efi_nullify_block ( &block->block_io ); + leak = 1; + } + err_install: + if ( ! leak ) { + free ( block->path ); + block->path = NULL; + } + err_describe: + err_active: + unregister_sandev ( sandev ); + err_register: + if ( ! leak ) + sandev_put ( sandev ); + err_alloc: + err_no_uris: + if ( leak ) { + DBGC ( sandev, "EFIBLK %#02x nullified and leaked\n", + sandev->drive ); + } + return rc; +} + +/** + * Unhook EFI block device + * + * @v drive Drive number + */ +static void efi_block_unhook ( unsigned int drive ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct san_device *sandev; + struct efi_block_data *block; + int leak = 0; + EFI_STATUS efirc; + + /* Find SAN device */ + sandev = sandev_find ( drive ); + if ( ! 
sandev ) { + DBG ( "EFIBLK cannot find drive %#02x\n", drive ); + return; + } + block = sandev->priv; + + /* Uninstall protocols */ + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( + block->handle, + &efi_block_io_protocol_guid, &block->block_io, + &efi_device_path_protocol_guid, block->path, + NULL ) ) != 0 ) { + DBGC ( sandev, "EFIBLK %#02x could not uninstall protocols: " + "%s\n", sandev->drive, strerror ( -EEFI ( efirc ) ) ); + efi_nullify_block ( &block->block_io ); + leak = 1; + } + + /* Free device path */ + if ( ! leak ) { + free ( block->path ); + block->path = NULL; + } + + /* Unregister SAN device */ + unregister_sandev ( sandev ); + + /* Drop reference to drive */ + if ( ! leak ) + sandev_put ( sandev ); + + /* Report leakage, if applicable */ + if ( leak ) { + DBGC ( sandev, "EFIBLK %#02x nullified and leaked\n", + sandev->drive ); + } +} + +/** An installed ACPI table */ +struct efi_acpi_table { + /** List of installed tables */ + struct list_head list; + /** Table key */ + UINTN key; +}; + +/** List of installed ACPI tables */ +static LIST_HEAD ( efi_acpi_tables ); + +/** + * Install ACPI table + * + * @v hdr ACPI description header + * @ret rc Return status code + */ +static int efi_block_install ( struct acpi_header *hdr ) { + size_t len = le32_to_cpu ( hdr->length ); + struct efi_acpi_table *installed; + EFI_STATUS efirc; + int rc; + + /* Allocate installed table record */ + installed = zalloc ( sizeof ( *installed ) ); + if ( ! 
installed ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Fill in common parameters */ + strncpy ( hdr->oem_id, "FENSYS", sizeof ( hdr->oem_id ) ); + strncpy ( hdr->oem_table_id, "iPXE", sizeof ( hdr->oem_table_id ) ); + + /* Fix up ACPI checksum */ + acpi_fix_checksum ( hdr ); + + /* Install table */ + if ( ( efirc = acpi->InstallAcpiTable ( acpi, hdr, len, + &installed->key ) ) != 0 ){ + rc = -EEFI ( efirc ); + DBGC ( acpi, "EFIBLK could not install %s: %s\n", + acpi_name ( hdr->signature ), strerror ( rc ) ); + DBGC2_HDA ( acpi, 0, hdr, len ); + goto err_install; + } + + /* Add to list of installed tables */ + list_add_tail ( &installed->list, &efi_acpi_tables ); + + DBGC ( acpi, "EFIBLK installed %s as ACPI table %#lx\n", + acpi_name ( hdr->signature ), + ( ( unsigned long ) installed->key ) ); + DBGC2_HDA ( acpi, 0, hdr, len ); + return 0; + + list_del ( &installed->list ); + err_install: + free ( installed ); + err_alloc: + return rc; +} + +/** + * Describe EFI block devices + * + * @ret rc Return status code + */ +static int efi_block_describe ( void ) { + struct efi_acpi_table *installed; + struct efi_acpi_table *tmp; + UINTN key; + EFI_STATUS efirc; + int rc; + + /* Sanity check */ + if ( ! 
acpi ) { + DBG ( "EFIBLK has no ACPI table protocol\n" ); + return -ENOTSUP; + } + + /* Uninstall any existing ACPI tables */ + list_for_each_entry_safe ( installed, tmp, &efi_acpi_tables, list ) { + key = installed->key; + if ( ( efirc = acpi->UninstallAcpiTable ( acpi, key ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( acpi, "EFIBLK could not uninstall ACPI table " + "%#lx: %s\n", ( ( unsigned long ) key ), + strerror ( rc ) ); + /* Continue anyway */ + } + list_del ( &installed->list ); + free ( installed ); + } + + /* Install ACPI tables */ + if ( ( rc = acpi_install ( efi_block_install ) ) != 0 ) { + DBGC ( acpi, "EFIBLK could not install ACPI tables: %s\n", + strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Try booting from child device of EFI block device + * + * @v sandev SAN device + * @v handle EFI handle + * @v filename Filename (or NULL to use default) + * @v image Image handle to fill in + * @ret rc Return status code + */ +static int efi_block_boot_image ( struct san_device *sandev, EFI_HANDLE handle, + const char *filename, EFI_HANDLE *image ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_block_data *block = sandev->priv; + union { + EFI_DEVICE_PATH_PROTOCOL *path; + void *interface; + } path; + EFI_DEVICE_PATH_PROTOCOL *boot_path; + FILEPATH_DEVICE_PATH *filepath; + EFI_DEVICE_PATH_PROTOCOL *end; + size_t prefix_len; + size_t filepath_len; + size_t boot_path_len; + EFI_STATUS efirc; + int rc; + + /* Identify device path */ + if ( ( efirc = bs->OpenProtocol ( handle, + &efi_device_path_protocol_guid, + &path.interface, efi_image_handle, + handle, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + DBGC ( sandev, "EFIBLK %#02x found filesystem with no device " + "path??", sandev->drive ); + rc = -EEFI ( efirc ); + goto err_open_device_path; + } + + /* Check if this device is a child of our block device */ + prefix_len = efi_path_len ( block->path ); + if ( memcmp ( path.path, block->path, prefix_len ) != 0 ) { + /* Not a 
child device */ + rc = -ENOTTY; + goto err_not_child; + } + DBGC ( sandev, "EFIBLK %#02x found child device %s\n", + sandev->drive, efi_devpath_text ( path.path ) ); + + /* Construct device path for boot image */ + end = efi_path_end ( path.path ); + prefix_len = ( ( ( void * ) end ) - ( ( void * ) path.path ) ); + filepath_len = ( SIZE_OF_FILEPATH_DEVICE_PATH + + ( filename ? + ( ( strlen ( filename ) + 1 /* NUL */ ) * + sizeof ( filepath->PathName[0] ) ) : + sizeof ( efi_block_boot_filename ) ) ); + boot_path_len = ( prefix_len + filepath_len + sizeof ( *end ) ); + boot_path = zalloc ( boot_path_len ); + if ( ! boot_path ) { + rc = -ENOMEM; + goto err_alloc_path; + } + memcpy ( boot_path, path.path, prefix_len ); + filepath = ( ( ( void * ) boot_path ) + prefix_len ); + filepath->Header.Type = MEDIA_DEVICE_PATH; + filepath->Header.SubType = MEDIA_FILEPATH_DP; + filepath->Header.Length[0] = ( filepath_len & 0xff ); + filepath->Header.Length[1] = ( filepath_len >> 8 ); + if ( filename ) { + efi_sprintf ( filepath->PathName, "%s", filename ); + } else { + memcpy ( filepath->PathName, efi_block_boot_filename, + sizeof ( efi_block_boot_filename ) ); + } + end = ( ( ( void * ) filepath ) + filepath_len ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + DBGC ( sandev, "EFIBLK %#02x trying to load %s\n", + sandev->drive, efi_devpath_text ( boot_path ) ); + + /* Try loading boot image from this device */ + *image = NULL; + if ( ( efirc = bs->LoadImage ( FALSE, efi_image_handle, boot_path, + NULL, 0, image ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( sandev, "EFIBLK %#02x could not load image: %s\n", + sandev->drive, strerror ( rc ) ); + if ( efirc == EFI_SECURITY_VIOLATION ) + bs->UnloadImage ( *image ); + goto err_load_image; + } + + /* Success */ + rc = 0; + + err_load_image: + free ( boot_path ); + err_alloc_path: + err_not_child: + err_open_device_path: + return rc; +} + +/** + * Boot from EFI 
block device + * + * @v drive Drive number + * @v filename Filename (or NULL to use default) + * @ret rc Return status code + */ +static int efi_block_boot ( unsigned int drive, const char *filename ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct san_device *sandev; + EFI_HANDLE *handles; + EFI_HANDLE image = NULL; + UINTN count; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Find SAN device */ + sandev = sandev_find ( drive ); + if ( ! sandev ) { + DBG ( "EFIBLK cannot find drive %#02x\n", drive ); + rc = -ENODEV; + goto err_sandev_find; + } + + /* Release SNP devices */ + efi_snp_release(); + + /* Connect all possible protocols */ + efi_block_connect ( sandev ); + + /* Locate all handles supporting the Simple File System protocol */ + if ( ( efirc = bs->LocateHandleBuffer ( + ByProtocol, &efi_simple_file_system_protocol_guid, + NULL, &count, &handles ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( sandev, "EFIBLK %#02x cannot locate file systems: %s\n", + sandev->drive, strerror ( rc ) ); + goto err_locate_file_systems; + } + + /* Try booting from any available child device containing a + * suitable boot image. This is something of a wild stab in + * the dark, but should end up conforming to user expectations + * most of the time. + */ + rc = -ENOENT; + for ( i = 0 ; i < count ; i++ ) { + if ( ( rc = efi_block_boot_image ( sandev, handles[i], filename, + &image ) ) != 0 ) + continue; + DBGC ( sandev, "EFIBLK %#02x found boot image\n", + sandev->drive ); + efirc = bs->StartImage ( image, NULL, NULL ); + rc = ( efirc ? 
-EEFI ( efirc ) : 0 ); + bs->UnloadImage ( image ); + DBGC ( sandev, "EFIBLK %#02x boot image returned: %s\n", + sandev->drive, strerror ( rc ) ); + break; + } + + bs->FreePool ( handles ); + err_locate_file_systems: + efi_snp_claim(); + err_sandev_find: + return rc; +} + +PROVIDE_SANBOOT ( efi, san_hook, efi_block_hook ); +PROVIDE_SANBOOT ( efi, san_unhook, efi_block_unhook ); +PROVIDE_SANBOOT ( efi, san_describe, efi_block_describe ); +PROVIDE_SANBOOT ( efi, san_boot, efi_block_boot ); diff --git a/src/interface/efi/efi_fbcon.c b/src/interface/efi/efi_fbcon.c new file mode 100644 index 00000000..abc5a939 --- /dev/null +++ b/src/interface/efi/efi_fbcon.c @@ -0,0 +1,573 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * EFI frame buffer console + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Avoid dragging in EFI console if not otherwise used */ +extern struct console_driver efi_console; +struct console_driver efi_console __attribute__ (( weak )); + +/* Set default console usage if applicable + * + * We accept either CONSOLE_FRAMEBUFFER or CONSOLE_EFIFB. + */ +#if ( defined ( CONSOLE_FRAMEBUFFER ) && ! defined ( CONSOLE_EFIFB ) ) +#define CONSOLE_EFIFB CONSOLE_FRAMEBUFFER +#endif +#if ! ( defined ( CONSOLE_EFIFB ) && CONSOLE_EXPLICIT ( CONSOLE_EFIFB ) ) +#undef CONSOLE_EFIFB +#define CONSOLE_EFIFB ( CONSOLE_USAGE_ALL & ~CONSOLE_USAGE_LOG ) +#endif + +/* Forward declaration */ +struct console_driver efifb_console __console_driver; + +/** An EFI frame buffer */ +struct efifb { + /** EFI graphics output protocol */ + EFI_GRAPHICS_OUTPUT_PROTOCOL *gop; + /** EFI HII font protocol */ + EFI_HII_FONT_PROTOCOL *hiifont; + /** Saved mode */ + UINT32 saved_mode; + + /** Frame buffer console */ + struct fbcon fbcon; + /** Physical start address */ + physaddr_t start; + /** Pixel geometry */ + struct fbcon_geometry pixel; + /** Colour mapping */ + struct fbcon_colour_map map; + /** Font definition */ + struct fbcon_font font; + /** Character glyphs */ + userptr_t glyphs; +}; + +/** The EFI frame buffer */ +static struct efifb efifb; + +/** + * Get character glyph + * + * @v character Character + * @v glyph Character glyph to fill in + */ +static void efifb_glyph ( unsigned int character, uint8_t *glyph ) { + size_t offset = ( character * efifb.font.height ); + + copy_from_user ( glyph, efifb.glyphs, offset, efifb.font.height ); +} + +/** + * Get character glyphs + * + * @ret rc Return status code + */ +static int efifb_glyphs ( void ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + 
EFI_IMAGE_OUTPUT *blt; + EFI_GRAPHICS_OUTPUT_BLT_PIXEL *pixel; + size_t offset; + size_t len; + uint8_t bitmask; + unsigned int character; + unsigned int x; + unsigned int y; + EFI_STATUS efirc; + int rc; + + /* Get font height. The GetFontInfo() call nominally returns + * this information in an EFI_FONT_DISPLAY_INFO structure, but + * is known to fail on many UEFI implementations. Instead, we + * iterate over all printable characters to find the maximum + * height. + */ + efifb.font.height = 0; + for ( character = 0 ; character < 256 ; character++ ) { + + /* Skip non-printable characters */ + if ( ! isprint ( character ) ) + continue; + + /* Get glyph */ + blt = NULL; + if ( ( efirc = efifb.hiifont->GetGlyph ( efifb.hiifont, + character, NULL, &blt, + NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not get glyph %d: %s\n", + character, strerror ( rc ) ); + continue; + } + assert ( blt != NULL ); + + /* Calculate maximum height */ + if ( efifb.font.height < blt->Height ) + efifb.font.height = blt->Height; + + /* Free glyph */ + bs->FreePool ( blt ); + } + if ( ! efifb.font.height ) { + DBGC ( &efifb, "EFIFB could not get font height\n" ); + return -ENOENT; + } + + /* Allocate glyph data */ + len = ( 256 * efifb.font.height * sizeof ( bitmask ) ); + efifb.glyphs = umalloc ( len ); + if ( ! efifb.glyphs ) { + rc = -ENOMEM; + goto err_alloc; + } + memset_user ( efifb.glyphs, 0, 0, len ); + + /* Get font data */ + for ( character = 0 ; character < 256 ; character++ ) { + + /* Skip non-printable characters */ + if ( ! 
isprint ( character ) ) + continue; + + /* Get glyph */ + blt = NULL; + if ( ( efirc = efifb.hiifont->GetGlyph ( efifb.hiifont, + character, NULL, &blt, + NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not get glyph %d: %s\n", + character, strerror ( rc ) ); + continue; + } + assert ( blt != NULL ); + + /* Sanity check */ + if ( blt->Width > 8 ) { + DBGC ( &efifb, "EFIFB glyph %d invalid width %d\n", + character, blt->Width ); + continue; + } + if ( blt->Height > efifb.font.height ) { + DBGC ( &efifb, "EFIFB glyph %d invalid height %d\n", + character, blt->Height ); + continue; + } + + /* Convert glyph to bitmap */ + pixel = blt->Image.Bitmap; + offset = ( character * efifb.font.height ); + for ( y = 0 ; y < blt->Height ; y++ ) { + bitmask = 0; + for ( x = 0 ; x < blt->Width ; x++ ) { + bitmask = rol8 ( bitmask, 1 ); + if ( pixel->Blue || pixel->Green || pixel->Red ) + bitmask |= 0x01; + pixel++; + } + copy_to_user ( efifb.glyphs, offset++, &bitmask, + sizeof ( bitmask ) ); + } + + /* Free glyph */ + bs->FreePool ( blt ); + } + + efifb.font.glyph = efifb_glyph; + return 0; + + ufree ( efifb.glyphs ); + err_alloc: + return rc; +} + +/** + * Generate colour mapping for a single colour component + * + * @v mask Mask value + * @v scale Scale value to fill in + * @v lsb LSB value to fill in + * @ret rc Return status code + */ +static int efifb_colour_map_mask ( uint32_t mask, uint8_t *scale, + uint8_t *lsb ) { + uint32_t check; + + /* Fill in LSB and scale */ + *lsb = ( mask ? ( ffs ( mask ) - 1 ) : 0 ); + *scale = ( mask ? 
( 8 - ( fls ( mask ) - *lsb ) ) : 8 ); + + /* Check that original mask was contiguous */ + check = ( ( 0xff >> *scale ) << *lsb ); + if ( check != mask ) + return -ENOTSUP; + + return 0; +} + +/** + * Generate colour mapping + * + * @v info EFI mode information + * @v map Colour mapping to fill in + * @ret bpp Number of bits per pixel, or negative error + */ +static int efifb_colour_map ( EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *info, + struct fbcon_colour_map *map ) { + static EFI_PIXEL_BITMASK rgb_mask = { + 0x000000ffUL, 0x0000ff00UL, 0x00ff0000UL, 0xff000000UL + }; + static EFI_PIXEL_BITMASK bgr_mask = { + 0x00ff0000UL, 0x0000ff00UL, 0x000000ffUL, 0xff000000UL + }; + EFI_PIXEL_BITMASK *mask; + uint8_t reserved_scale; + uint8_t reserved_lsb; + int rc; + + /* Determine applicable mask */ + switch ( info->PixelFormat ) { + case PixelRedGreenBlueReserved8BitPerColor: + mask = &rgb_mask; + break; + case PixelBlueGreenRedReserved8BitPerColor: + mask = &bgr_mask; + break; + case PixelBitMask: + mask = &info->PixelInformation; + break; + default: + DBGC ( &efifb, "EFIFB unrecognised pixel format %d\n", + info->PixelFormat ); + return -ENOTSUP; + } + + /* Map each colour component */ + if ( ( rc = efifb_colour_map_mask ( mask->RedMask, &map->red_scale, + &map->red_lsb ) ) != 0 ) + return rc; + if ( ( rc = efifb_colour_map_mask ( mask->GreenMask, &map->green_scale, + &map->green_lsb ) ) != 0 ) + return rc; + if ( ( rc = efifb_colour_map_mask ( mask->BlueMask, &map->blue_scale, + &map->blue_lsb ) ) != 0 ) + return rc; + if ( ( rc = efifb_colour_map_mask ( mask->ReservedMask, &reserved_scale, + &reserved_lsb ) ) != 0 ) + return rc; + + /* Calculate total number of bits per pixel */ + return ( 32 - ( reserved_scale + map->red_scale + map->green_scale + + map->blue_scale ) ); +} + +/** + * Select video mode + * + * @v min_width Minimum required width (in pixels) + * @v min_height Minimum required height (in pixels) + * @v min_bpp Minimum required colour depth (in bits per 
pixel) + * @ret mode_number Mode number, or negative error + */ +static int efifb_select_mode ( unsigned int min_width, unsigned int min_height, + unsigned int min_bpp ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct fbcon_colour_map map; + EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *info; + int best_mode_number = -ENOENT; + unsigned int best_score = INT_MAX; + unsigned int score; + unsigned int mode; + int bpp; + UINTN size; + EFI_STATUS efirc; + int rc; + + /* Find the best mode */ + for ( mode = 0 ; mode < efifb.gop->Mode->MaxMode ; mode++ ) { + + /* Get mode information */ + if ( ( efirc = efifb.gop->QueryMode ( efifb.gop, mode, &size, + &info ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not get mode %d " + "information: %s\n", mode, strerror ( rc ) ); + goto err_query; + } + + /* Skip unusable modes */ + bpp = efifb_colour_map ( info, &map ); + if ( bpp < 0 ) { + rc = bpp; + DBGC ( &efifb, "EFIFB could not build colour map for " + "mode %d: %s\n", mode, strerror ( rc ) ); + goto err_map; + } + + /* Skip modes not meeting the requirements */ + if ( ( info->HorizontalResolution < min_width ) || + ( info->VerticalResolution < min_height ) || + ( ( ( unsigned int ) bpp ) < min_bpp ) ) { + goto err_requirements; + } + + /* Select this mode if it has the best (i.e. lowest) + * score. We choose the scoring system to favour + * modes close to the specified width and height; + * within modes of the same width and height we prefer + * a higher colour depth. 
+ */ + score = ( ( info->HorizontalResolution * + info->VerticalResolution ) - bpp ); + if ( score < best_score ) { + best_mode_number = mode; + best_score = score; + } + + err_requirements: + err_map: + bs->FreePool ( info ); + err_query: + continue; + } + + if ( best_mode_number < 0 ) + DBGC ( &efifb, "EFIFB found no suitable mode\n" ); + return best_mode_number; +} + +/** + * Restore video mode + * + * @ret rc Return status code + */ +static int efifb_restore ( void ) { + EFI_STATUS efirc; + int rc; + + /* Restore original mode */ + if ( ( efirc = efifb.gop->SetMode ( efifb.gop, + efifb.saved_mode ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not restore mode %d: %s\n", + efifb.saved_mode, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Initialise EFI frame buffer + * + * @v config Console configuration (must be non-NULL) + * @ret rc Return status code + */ +static int efifb_init ( struct console_configuration *config ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_GRAPHICS_OUTPUT_MODE_INFORMATION *info; + void *interface; + int mode; + int bpp; + EFI_STATUS efirc; + int rc; + + /* Locate graphics output protocol */ + if ( ( efirc = bs->LocateProtocol ( &efi_graphics_output_protocol_guid, + NULL, &interface ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not locate graphics output " + "protocol: %s\n", strerror ( rc ) ); + goto err_locate_gop; + } + efifb.gop = interface; + + /* Locate HII font protocol */ + if ( ( efirc = bs->LocateProtocol ( &efi_hii_font_protocol_guid, + NULL, &interface ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not locate HII font protocol: %s\n", + strerror ( rc ) ); + goto err_locate_hiifont; + } + efifb.hiifont = interface; + + /* Locate glyphs */ + if ( ( rc = efifb_glyphs() ) != 0 ) + goto err_glyphs; + + /* Save original mode */ + efifb.saved_mode = efifb.gop->Mode->Mode; + + /* Select mode */ + if ( ( mode = efifb_select_mode ( config->width, 
config->height, + config->depth ) ) < 0 ) { + rc = mode; + goto err_select_mode; + } + + /* Set mode */ + if ( ( efirc = efifb.gop->SetMode ( efifb.gop, mode ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efifb, "EFIFB could not set mode %d: %s\n", + mode, strerror ( rc ) ); + goto err_set_mode; + } + info = efifb.gop->Mode->Info; + + /* Populate colour map */ + bpp = efifb_colour_map ( info, &efifb.map ); + if ( bpp < 0 ) { + rc = bpp; + DBGC ( &efifb, "EFIFB could not build colour map for " + "mode %d: %s\n", mode, strerror ( rc ) ); + goto err_map; + } + + /* Populate pixel geometry */ + efifb.pixel.width = info->HorizontalResolution; + efifb.pixel.height = info->VerticalResolution; + efifb.pixel.len = ( ( bpp + 7 ) / 8 ); + efifb.pixel.stride = ( efifb.pixel.len * info->PixelsPerScanLine ); + + /* Populate frame buffer address */ + efifb.start = efifb.gop->Mode->FrameBufferBase; + DBGC ( &efifb, "EFIFB using mode %d (%dx%d %dbpp at %#08lx)\n", + mode, efifb.pixel.width, efifb.pixel.height, bpp, efifb.start ); + + /* Initialise frame buffer console */ + if ( ( rc = fbcon_init ( &efifb.fbcon, phys_to_user ( efifb.start ), + &efifb.pixel, &efifb.map, &efifb.font, + config ) ) != 0 ) + goto err_fbcon_init; + + return 0; + + fbcon_fini ( &efifb.fbcon ); + err_fbcon_init: + err_map: + efifb_restore(); + err_set_mode: + err_select_mode: + ufree ( efifb.glyphs ); + err_glyphs: + err_locate_hiifont: + err_locate_gop: + return rc; +} + +/** + * Finalise EFI frame buffer + * + */ +static void efifb_fini ( void ) { + + /* Finalise frame buffer console */ + fbcon_fini ( &efifb.fbcon ); + + /* Restore saved mode */ + efifb_restore(); + + /* Free glyphs */ + ufree ( efifb.glyphs ); +} + +/** + * Print a character to current cursor position + * + * @v character Character + */ +static void efifb_putchar ( int character ) { + + fbcon_putchar ( &efifb.fbcon, character ); +} + +/** + * Configure console + * + * @v config Console configuration, or NULL to reset + * @ret rc Return 
status code + */ +static int efifb_configure ( struct console_configuration *config ) { + int rc; + + /* Reset console, if applicable */ + if ( ! efifb_console.disabled ) { + efifb_fini(); + efi_console.disabled &= ~CONSOLE_DISABLED_OUTPUT; + ansicol_reset_magic(); + } + efifb_console.disabled = CONSOLE_DISABLED; + + /* Do nothing more unless we have a usable configuration */ + if ( ( config == NULL ) || + ( config->width == 0 ) || ( config->height == 0 ) ) { + return 0; + } + + /* Initialise EFI frame buffer */ + if ( ( rc = efifb_init ( config ) ) != 0 ) + return rc; + + /* Mark console as enabled */ + efifb_console.disabled = 0; + efi_console.disabled |= CONSOLE_DISABLED_OUTPUT; + + /* Set magic colour to transparent if we have a background picture */ + if ( config->pixbuf ) + ansicol_set_magic_transparent(); + + return 0; +} + +/** EFI graphics output protocol console driver */ +struct console_driver efifb_console __console_driver = { + .usage = CONSOLE_EFIFB, + .putchar = efifb_putchar, + .configure = efifb_configure, + .disabled = CONSOLE_DISABLED, +}; diff --git a/src/interface/efi/efi_fdt.c b/src/interface/efi/efi_fdt.c new file mode 100644 index 00000000..cd3f109d --- /dev/null +++ b/src/interface/efi/efi_fdt.c @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2019 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * EFI Flattened Device Tree + * + */ + +#define DEVICE_TREE_TABLE_GUID \ + { 0xb1b621d5, 0xf19c, 0x41a5, \ + { 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 } } + +/** EFI Flattened Device Tree configuration table */ +static struct fdt_header *efi_fdt; +EFI_USE_TABLE ( DEVICE_TREE_TABLE, &efi_fdt, 0 ); + +/** + * Initialise EFI Flattened Device Tree + * + */ +static void efi_fdt_init ( void ) { + int rc; + + /* Do nothing if no configuration table is present */ + if ( ! efi_fdt ) { + DBGC ( &efi_fdt, "EFIFDT has no configuration table\n" ); + return; + } + DBGC ( &efi_fdt, "EFIFDT configuration table at %p\n", efi_fdt ); + + /* Register device tree */ + if ( ( rc = register_fdt ( efi_fdt ) ) != 0 ) { + DBGC ( &efi_fdt, "EFIFDT could not register: %s\n", + strerror ( rc ) ); + return; + } +} + +/** EFI Flattened Device Tree initialisation function */ +struct init_fn efi_fdt_init_fn __init_fn ( INIT_EARLY ) = { + .initialise = efi_fdt_init, +}; diff --git a/src/interface/efi/efi_guid.c b/src/interface/efi/efi_guid.c new file mode 100644 index 00000000..663585dc --- /dev/null +++ b/src/interface/efi/efi_guid.c @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI GUIDs + * + */ + +/* TrEE protocol GUID definition in EDK2 headers is broken (missing braces) */ +#define EFI_TREE_PROTOCOL_GUID \ + { 0x607f766c, 0x7455, 0x42be, \ + { 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f } } + +/** Absolute pointer protocol GUID */ +EFI_GUID efi_absolute_pointer_protocol_guid + = EFI_ABSOLUTE_POINTER_PROTOCOL_GUID; + +/** ACPI table protocol GUID */ +EFI_GUID efi_acpi_table_protocol_guid + = EFI_ACPI_TABLE_PROTOCOL_GUID; + +/** Apple NetBoot protocol GUID */ +EFI_GUID efi_apple_net_boot_protocol_guid + = EFI_APPLE_NET_BOOT_PROTOCOL_GUID; + +/** ARP protocol GUID */ +EFI_GUID efi_arp_protocol_guid + = EFI_ARP_PROTOCOL_GUID; + +/** ARP service binding protocol GUID */ +EFI_GUID efi_arp_service_binding_protocol_guid + 
= EFI_ARP_SERVICE_BINDING_PROTOCOL_GUID; + +/** Block I/O protocol GUID */ +EFI_GUID efi_block_io_protocol_guid + = EFI_BLOCK_IO_PROTOCOL_GUID; + +/** Block I/O version 2 protocol GUID */ +EFI_GUID efi_block_io2_protocol_guid + = EFI_BLOCK_IO2_PROTOCOL_GUID; + +/** Bus specific driver override protocol GUID */ +EFI_GUID efi_bus_specific_driver_override_protocol_guid + = EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_GUID; + +/** Component name protocol GUID */ +EFI_GUID efi_component_name_protocol_guid + = EFI_COMPONENT_NAME_PROTOCOL_GUID; + +/** Component name 2 protocol GUID */ +EFI_GUID efi_component_name2_protocol_guid + = EFI_COMPONENT_NAME2_PROTOCOL_GUID; + +/** Console control protocol GUID */ +EFI_GUID efi_console_control_protocol_guid + = EFI_CONSOLE_CONTROL_PROTOCOL_GUID; + +/** Device path protocol GUID */ +EFI_GUID efi_device_path_protocol_guid + = EFI_DEVICE_PATH_PROTOCOL_GUID; + +/** DHCPv4 protocol GUID */ +EFI_GUID efi_dhcp4_protocol_guid + = EFI_DHCP4_PROTOCOL_GUID; + +/** DHCPv4 service binding protocol GUID */ +EFI_GUID efi_dhcp4_service_binding_protocol_guid + = EFI_DHCP4_SERVICE_BINDING_PROTOCOL_GUID; + +/** Disk I/O protocol GUID */ +EFI_GUID efi_disk_io_protocol_guid + = EFI_DISK_IO_PROTOCOL_GUID; + +/** Driver binding protocol GUID */ +EFI_GUID efi_driver_binding_protocol_guid + = EFI_DRIVER_BINDING_PROTOCOL_GUID; + +/** Graphics output protocol GUID */ +EFI_GUID efi_graphics_output_protocol_guid + = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; + +/** HII configuration access protocol GUID */ +EFI_GUID efi_hii_config_access_protocol_guid + = EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID; + +/** HII font protocol GUID */ +EFI_GUID efi_hii_font_protocol_guid + = EFI_HII_FONT_PROTOCOL_GUID; + +/** IPv4 protocol GUID */ +EFI_GUID efi_ip4_protocol_guid + = EFI_IP4_PROTOCOL_GUID; + +/** IPv4 configuration protocol GUID */ +EFI_GUID efi_ip4_config_protocol_guid + = EFI_IP4_CONFIG_PROTOCOL_GUID; + +/** IPv4 service binding protocol GUID */ +EFI_GUID 
efi_ip4_service_binding_protocol_guid + = EFI_IP4_SERVICE_BINDING_PROTOCOL_GUID; + +/** Load file protocol GUID */ +EFI_GUID efi_load_file_protocol_guid + = EFI_LOAD_FILE_PROTOCOL_GUID; + +/** Load file 2 protocol GUID */ +EFI_GUID efi_load_file2_protocol_guid + = EFI_LOAD_FILE2_PROTOCOL_GUID; + +/** Loaded image protocol GUID */ +EFI_GUID efi_loaded_image_protocol_guid + = EFI_LOADED_IMAGE_PROTOCOL_GUID; + +/** Loaded image device path protocol GUID */ +EFI_GUID efi_loaded_image_device_path_protocol_guid + = EFI_LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID; + +/** Managed network protocol GUID */ +EFI_GUID efi_managed_network_protocol_guid + = EFI_MANAGED_NETWORK_PROTOCOL_GUID; + +/** Managed network service binding protocol GUID */ +EFI_GUID efi_managed_network_service_binding_protocol_guid + = EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL_GUID; + +/** MTFTPv4 protocol GUID */ +EFI_GUID efi_mtftp4_protocol_guid + = EFI_MTFTP4_PROTOCOL_GUID; + +/** MTFTPv4 service binding protocol GUID */ +EFI_GUID efi_mtftp4_service_binding_protocol_guid + = EFI_MTFTP4_SERVICE_BINDING_PROTOCOL_GUID; + +/** Network interface identifier protocol GUID (old version) */ +EFI_GUID efi_nii_protocol_guid + = EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID; + +/** Network interface identifier protocol GUID (new version) */ +EFI_GUID efi_nii31_protocol_guid + = EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID_31; + +/** PCI I/O protocol GUID */ +EFI_GUID efi_pci_io_protocol_guid + = EFI_PCI_IO_PROTOCOL_GUID; + +/** PCI root bridge I/O protocol GUID */ +EFI_GUID efi_pci_root_bridge_io_protocol_guid + = EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID; + +/** PXE base code protocol GUID */ +EFI_GUID efi_pxe_base_code_protocol_guid + = EFI_PXE_BASE_CODE_PROTOCOL_GUID; + +/** Serial I/O protocol GUID */ +EFI_GUID efi_serial_io_protocol_guid + = EFI_SERIAL_IO_PROTOCOL_GUID; + +/** Simple file system protocol GUID */ +EFI_GUID efi_simple_file_system_protocol_guid + = EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID; + +/** 
Simple network protocol GUID */ +EFI_GUID efi_simple_network_protocol_guid + = EFI_SIMPLE_NETWORK_PROTOCOL_GUID; + +/** Simple pointer protocol GUID */ +EFI_GUID efi_simple_pointer_protocol_guid + = EFI_SIMPLE_POINTER_PROTOCOL_GUID; + +/** Simple text input protocol GUID */ +EFI_GUID efi_simple_text_input_protocol_guid + = EFI_SIMPLE_TEXT_INPUT_PROTOCOL_GUID; + +/** Simple text input extension protocol GUID */ +EFI_GUID efi_simple_text_input_ex_protocol_guid + = EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL_GUID; + +/** Simple text output protocol GUID */ +EFI_GUID efi_simple_text_output_protocol_guid + = EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL_GUID; + +/** TCG protocol GUID */ +EFI_GUID efi_tcg_protocol_guid + = EFI_TCG_PROTOCOL_GUID; + +/** TCPv4 protocol GUID */ +EFI_GUID efi_tcp4_protocol_guid + = EFI_TCP4_PROTOCOL_GUID; + +/** TCPv4 service binding protocol GUID */ +EFI_GUID efi_tcp4_service_binding_protocol_guid + = EFI_TCP4_SERVICE_BINDING_PROTOCOL_GUID; + +/** TrEE protocol GUID */ +EFI_GUID efi_tree_protocol_guid + = EFI_TREE_PROTOCOL_GUID; + +/** UDPv4 protocol GUID */ +EFI_GUID efi_udp4_protocol_guid + = EFI_UDP4_PROTOCOL_GUID; + +/** UDPv4 service binding protocol GUID */ +EFI_GUID efi_udp4_service_binding_protocol_guid + = EFI_UDP4_SERVICE_BINDING_PROTOCOL_GUID; + +/** UGA draw protocol GUID */ +EFI_GUID efi_uga_draw_protocol_guid + = EFI_UGA_DRAW_PROTOCOL_GUID; + +/** Unicode collation protocol GUID */ +EFI_GUID efi_unicode_collation_protocol_guid + = EFI_UNICODE_COLLATION_PROTOCOL_GUID; + +/** USB host controller protocol GUID */ +EFI_GUID efi_usb_hc_protocol_guid + = EFI_USB_HC_PROTOCOL_GUID; + +/** USB2 host controller protocol GUID */ +EFI_GUID efi_usb2_hc_protocol_guid + = EFI_USB2_HC_PROTOCOL_GUID; + +/** USB I/O protocol GUID */ +EFI_GUID efi_usb_io_protocol_guid + = EFI_USB_IO_PROTOCOL_GUID; + +/** VLAN configuration protocol GUID */ +EFI_GUID efi_vlan_config_protocol_guid + = EFI_VLAN_CONFIG_PROTOCOL_GUID; + +/** File information GUID */ +EFI_GUID 
efi_file_info_id = EFI_FILE_INFO_ID; + +/** File system information GUID */ +EFI_GUID efi_file_system_info_id = EFI_FILE_SYSTEM_INFO_ID; diff --git a/src/interface/efi/efi_local.c b/src/interface/efi/efi_local.c new file mode 100644 index 00000000..4ebca572 --- /dev/null +++ b/src/interface/efi/efi_local.c @@ -0,0 +1,587 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI local file access + * + */ + +/** Download blocksize */ +#define EFI_LOCAL_BLKSIZE 4096 + +/** An EFI local file */ +struct efi_local { + /** Reference count */ + struct refcnt refcnt; + /** Data transfer interface */ + struct interface xfer; + /** Download process */ + struct process process; + + /** EFI root directory */ + EFI_FILE_PROTOCOL *root; + /** EFI file */ + EFI_FILE_PROTOCOL *file; + /** Length of file */ + size_t len; +}; + +/** + * Close local file + * + * @v local Local file + * @v rc Reason for close + */ +static void efi_local_close ( struct efi_local *local, int rc ) { + + /* Stop process */ + process_del ( &local->process ); + + /* Shut down data transfer interface */ + intf_shutdown ( &local->xfer, rc ); + + /* Close EFI file */ + if ( local->file ) { + local->file->Close ( local->file ); + local->file = NULL; + } + + /* Close EFI root directory */ + if ( local->root ) { + local->root->Close ( local->root ); + local->root = NULL; + } +} + +/** + * Local file process + * + * @v local Local file + */ +static void efi_local_step ( struct efi_local *local ) { + EFI_FILE_PROTOCOL *file = local->file; + struct io_buffer *iobuf = NULL; + size_t remaining; + size_t frag_len; + UINTN size; + EFI_STATUS efirc; + int rc; + + /* Wait until data transfer interface is ready */ + if ( ! 
xfer_window ( &local->xfer ) ) + return; + + /* Presize receive buffer */ + remaining = local->len; + xfer_seek ( &local->xfer, remaining ); + xfer_seek ( &local->xfer, 0 ); + + /* Get file contents */ + while ( remaining ) { + + /* Calculate length for this fragment */ + frag_len = remaining; + if ( frag_len > EFI_LOCAL_BLKSIZE ) + frag_len = EFI_LOCAL_BLKSIZE; + + /* Allocate I/O buffer */ + iobuf = xfer_alloc_iob ( &local->xfer, frag_len ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err; + } + + /* Read block */ + size = frag_len; + if ( ( efirc = file->Read ( file, &size, iobuf->data ) ) != 0 ){ + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not read from file: %s\n", + local, strerror ( rc ) ); + goto err; + } + assert ( size <= frag_len ); + iob_put ( iobuf, size ); + + /* Deliver data */ + if ( ( rc = xfer_deliver_iob ( &local->xfer, + iob_disown ( iobuf ) ) ) != 0 ) { + DBGC ( local, "LOCAL %p could not deliver data: %s\n", + local, strerror ( rc ) ); + goto err; + } + + /* Move to next block */ + remaining -= frag_len; + } + + /* Close download */ + efi_local_close ( local, 0 ); + + return; + + err: + free_iob ( iobuf ); + efi_local_close ( local, rc ); +} + +/** Data transfer interface operations */ +static struct interface_operation efi_local_operations[] = { + INTF_OP ( xfer_window_changed, struct efi_local *, efi_local_step ), + INTF_OP ( intf_close, struct efi_local *, efi_local_close ), +}; + +/** Data transfer interface descriptor */ +static struct interface_descriptor efi_local_xfer_desc = + INTF_DESC ( struct efi_local, xfer, efi_local_operations ); + +/** Process descriptor */ +static struct process_descriptor efi_local_process_desc = + PROC_DESC_ONCE ( struct efi_local, process, efi_local_step ); + +/** + * Check for matching volume name + * + * @v local Local file + * @v device Device handle + * @v root Root filesystem handle + * @v volume Volume name + * @ret rc Return status code + */ +static int efi_local_check_volume_name ( struct 
efi_local *local, + EFI_HANDLE device, + EFI_FILE_PROTOCOL *root, + const char *volume ) { + EFI_FILE_SYSTEM_INFO *info; + UINTN size; + char *label; + EFI_STATUS efirc; + int rc; + + /* Get length of file system information */ + size = 0; + root->GetInfo ( root, &efi_file_system_info_id, &size, NULL ); + + /* Allocate file system information */ + info = malloc ( size ); + if ( ! info ) { + rc = -ENOMEM; + goto err_alloc_info; + } + + /* Get file system information */ + if ( ( efirc = root->GetInfo ( root, &efi_file_system_info_id, &size, + info ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not get file system info on %s: " + "%s\n", local, efi_handle_name ( device ), + strerror ( rc ) ); + goto err_get_info; + } + DBGC2 ( local, "LOCAL %p found %s with label \"%ls\"\n", + local, efi_handle_name ( device ), info->VolumeLabel ); + + /* Construct volume label for comparison */ + if ( asprintf ( &label, "%ls", info->VolumeLabel ) < 0 ) { + rc = -ENOMEM; + goto err_alloc_label; + } + + /* Compare volume label */ + if ( strcasecmp ( volume, label ) != 0 ) { + rc = -ENOENT; + goto err_compare; + } + + /* Success */ + rc = 0; + + err_compare: + free ( label ); + err_alloc_label: + err_get_info: + free ( info ); + err_alloc_info: + return rc; +} + +/** + * Open root filesystem + * + * @v local Local file + * @v device Device handle + * @v root Root filesystem handle to fill in + * @ret rc Return status code + */ +static int efi_local_open_root ( struct efi_local *local, EFI_HANDLE device, + EFI_FILE_PROTOCOL **root ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + void *interface; + EFI_SIMPLE_FILE_SYSTEM_PROTOCOL *fs; + } u; + EFI_STATUS efirc; + int rc; + + /* Open file system protocol */ + if ( ( efirc = bs->OpenProtocol ( device, + &efi_simple_file_system_protocol_guid, + &u.interface, efi_image_handle, + device, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not open filesystem 
on %s: %s\n", + local, efi_handle_name ( device ), strerror ( rc ) ); + goto err_filesystem; + } + + /* Open root directory */ + if ( ( efirc = u.fs->OpenVolume ( u.fs, root ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not open volume on %s: %s\n", + local, efi_handle_name ( device ), strerror ( rc ) ); + goto err_volume; + } + + /* Success */ + rc = 0; + + err_volume: + bs->CloseProtocol ( device, &efi_simple_file_system_protocol_guid, + efi_image_handle, device ); + err_filesystem: + return rc; +} + +/** + * Open root filesystem of specified volume + * + * @v local Local file + * @v volume Volume name, or NULL to use loaded image's device + * @ret rc Return status code + */ +static int efi_local_open_volume ( struct efi_local *local, + const char *volume ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_GUID *protocol = &efi_simple_file_system_protocol_guid; + int ( * check ) ( struct efi_local *local, EFI_HANDLE device, + EFI_FILE_PROTOCOL *root, const char *volume ); + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_FILE_PROTOCOL *root; + EFI_HANDLE *handles; + EFI_HANDLE device; + UINTN num_handles; + UINTN i; + EFI_STATUS efirc; + int rc; + + /* Identify candidate handles */ + if ( volume ) { + /* Locate all filesystem handles */ + if ( ( efirc = bs->LocateHandleBuffer ( ByProtocol, protocol, + NULL, &num_handles, + &handles ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not enumerate handles: " + "%s\n", local, strerror ( rc ) ); + return rc; + } + check = efi_local_check_volume_name; + } else { + /* Locate filesystem from which we were loaded */ + path = efi_loaded_image_path; + if ( ( efirc = bs->LocateDevicePath ( protocol, &path, + &device ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not locate file system " + "on %s: %s\n", local, + efi_devpath_text ( efi_loaded_image_path ), + strerror ( rc ) ); + return rc; + } + handles = &device; + num_handles = 1; + check = NULL; + } + + /* 
Find matching handle */ + for ( i = 0 ; i < num_handles ; i++ ) { + + /* Get this device handle */ + device = handles[i]; + + /* Open root directory */ + if ( ( rc = efi_local_open_root ( local, device, &root ) ) != 0) + continue; + + /* Check volume name, if applicable */ + if ( ( check == NULL ) || + ( ( rc = check ( local, device, root, volume ) ) == 0 ) ) { + DBGC ( local, "LOCAL %p using %s", + local, efi_handle_name ( device ) ); + if ( volume ) + DBGC ( local, " with label \"%s\"", volume ); + DBGC ( local, "\n" ); + local->root = root; + break; + } + + /* Close root directory */ + root->Close ( root ); + } + + /* Free handles, if applicable */ + if ( volume ) + bs->FreePool ( handles ); + + /* Fail if we found no matching handle */ + if ( ! local->root ) { + DBGC ( local, "LOCAL %p found no matching handle\n", local ); + return -ENOENT; + } + + return 0; +} + +/** + * Open fully-resolved path + * + * @v local Local file + * @v resolved Resolved path + * @ret rc Return status code + */ +static int efi_local_open_resolved ( struct efi_local *local, + const char *resolved ) { + size_t name_len = strlen ( resolved ); + CHAR16 name[ name_len + 1 /* wNUL */ ]; + EFI_FILE_PROTOCOL *file; + EFI_STATUS efirc; + int rc; + + /* Construct filename */ + efi_snprintf ( name, ( name_len + 1 /* wNUL */ ), "%s", resolved ); + + /* Open file */ + if ( ( efirc = local->root->Open ( local->root, &file, name, + EFI_FILE_MODE_READ, 0 ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not open \"%s\": %s\n", + local, resolved, strerror ( rc ) ); + return rc; + } + local->file = file; + + return 0; +} + +/** + * Open specified path + * + * @v local Local file + * @v path Path to file + * @ret rc Return status code + */ +static int efi_local_open_path ( struct efi_local *local, const char *path ) { + FILEPATH_DEVICE_PATH *fp = container_of ( efi_loaded_image->FilePath, + FILEPATH_DEVICE_PATH, Header); + size_t fp_len = ( fp ? 
efi_path_len ( &fp->Header ) : 0 ); + char base[ fp_len / 2 /* Cannot exceed this length */ ]; + size_t remaining = sizeof ( base ); + size_t len; + char *resolved; + char *tmp; + int rc; + + /* Construct base path to our own image, if possible */ + memset ( base, 0, sizeof ( base ) ); + tmp = base; + while ( fp && ( fp->Header.Type != END_DEVICE_PATH_TYPE ) ) { + len = snprintf ( tmp, remaining, "%ls", fp->PathName ); + assert ( len < remaining ); + tmp += len; + remaining -= len; + fp = ( ( ( void * ) fp ) + ( ( fp->Header.Length[1] << 8 ) | + fp->Header.Length[0] ) ); + } + DBGC2 ( local, "LOCAL %p base path \"%s\"\n", + local, base ); + + /* Convert to sane path separators */ + for ( tmp = base ; *tmp ; tmp++ ) { + if ( *tmp == '\\' ) + *tmp = '/'; + } + + /* Resolve path */ + resolved = resolve_path ( base, path ); + if ( ! resolved ) { + rc = -ENOMEM; + goto err_resolve; + } + + /* Convert to insane path separators */ + for ( tmp = resolved ; *tmp ; tmp++ ) { + if ( *tmp == '/' ) + *tmp = '\\'; + } + DBGC ( local, "LOCAL %p using \"%s\"\n", + local, resolved ); + + /* Open resolved path */ + if ( ( rc = efi_local_open_resolved ( local, resolved ) ) != 0 ) + goto err_open; + + err_open: + free ( resolved ); + err_resolve: + return rc; +} + +/** + * Get file length + * + * @v local Local file + * @ret rc Return status code + */ +static int efi_local_len ( struct efi_local *local ) { + EFI_FILE_PROTOCOL *file = local->file; + EFI_FILE_INFO *info; + EFI_STATUS efirc; + UINTN size; + int rc; + + /* Get size of file information */ + size = 0; + file->GetInfo ( file, &efi_file_info_id, &size, NULL ); + + /* Allocate file information */ + info = malloc ( size ); + if ( ! 
info ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Get file information */ + if ( ( efirc = file->GetInfo ( file, &efi_file_info_id, &size, + info ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not get file info: %s\n", + local, strerror ( rc ) ); + goto err_info; + } + + /* Record file length */ + local->len = info->FileSize; + + /* Success */ + rc = 0; + + err_info: + free ( info ); + err_alloc: + return rc; +} + +/** + * Open local file + * + * @v xfer Data transfer interface + * @v uri Request URI + * @ret rc Return status code + */ +static int efi_local_open ( struct interface *xfer, struct uri *uri ) { + struct efi_local *local; + const char *volume; + const char *path; + int rc; + + /* Parse URI */ + volume = ( ( uri->host && uri->host[0] ) ? uri->host : NULL ); + path = ( uri->opaque ? uri->opaque : uri->path ); + + /* Allocate and initialise structure */ + local = zalloc ( sizeof ( *local ) ); + if ( ! local ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &local->refcnt, NULL ); + intf_init ( &local->xfer, &efi_local_xfer_desc, &local->refcnt ); + process_init_stopped ( &local->process, &efi_local_process_desc, + &local->refcnt ); + + /* Open specified volume */ + if ( ( rc = efi_local_open_volume ( local, volume ) ) != 0 ) + goto err_open_root; + + /* Open specified path */ + if ( ( rc = efi_local_open_path ( local, path ) ) != 0 ) + goto err_open_file; + + /* Get length of file */ + if ( ( rc = efi_local_len ( local ) ) != 0 ) + goto err_len; + + /* Start download process */ + process_add ( &local->process ); + + /* Attach to parent interface, mortalise self, and return */ + intf_plug_plug ( &local->xfer, xfer ); + ref_put ( &local->refcnt ); + return 0; + + err_len: + err_open_file: + err_open_root: + efi_local_close ( local, 0 ); + ref_put ( &local->refcnt ); + err_alloc: + return rc; +} + +/** EFI local file URI opener */ +struct uri_opener efi_local_uri_opener __uri_opener = { + .scheme = "file", + .open = 
efi_local_open, +}; diff --git a/src/interface/efi/efi_null.c b/src/interface/efi/efi_null.c new file mode 100644 index 00000000..aa27ab67 --- /dev/null +++ b/src/interface/efi/efi_null.c @@ -0,0 +1,532 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * EFI null interfaces + * + */ + +/****************************************************************************** + * + * Simple Network Protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_snp_start ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_stop ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_initialize ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINTN extra_rx_bufsize __unused, + UINTN extra_tx_bufsize __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_reset ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN ext_verify __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_shutdown ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_receive_filters ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINT32 enable __unused, + UINT32 disable __unused, + BOOLEAN mcast_reset __unused, + UINTN mcast_count __unused, + EFI_MAC_ADDRESS *mcast __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_station_address ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN reset __unused, + EFI_MAC_ADDRESS *new __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_statistics ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN reset __unused, UINTN *stats_len __unused, + EFI_NETWORK_STATISTICS *stats __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_mcast_ip_to_mac ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN ipv6 __unused, + EFI_IP_ADDRESS *ip __unused, + EFI_MAC_ADDRESS *mac __unused ) { + return EFI_UNSUPPORTED; +} + 
+static EFI_STATUS EFIAPI +efi_null_snp_nvdata ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN read __unused, UINTN offset __unused, + UINTN len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_get_status ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINT32 *interrupts __unused, VOID **txbuf __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_transmit ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINTN ll_header_len __unused, UINTN len __unused, + VOID *data __unused, EFI_MAC_ADDRESS *ll_src __unused, + EFI_MAC_ADDRESS *ll_dest __unused, + UINT16 *net_proto __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_receive ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINTN *ll_header_len __unused, UINTN *len __unused, + VOID *data __unused, EFI_MAC_ADDRESS *ll_src __unused, + EFI_MAC_ADDRESS *ll_dest __unused, + UINT16 *net_proto __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_SIMPLE_NETWORK_PROTOCOL efi_null_snp = { + .Revision = EFI_SIMPLE_NETWORK_PROTOCOL_REVISION, + .Start = efi_null_snp_start, + .Stop = efi_null_snp_stop, + .Initialize = efi_null_snp_initialize, + .Reset = efi_null_snp_reset, + .Shutdown = efi_null_snp_shutdown, + .ReceiveFilters = efi_null_snp_receive_filters, + .StationAddress = efi_null_snp_station_address, + .Statistics = efi_null_snp_statistics, + .MCastIpToMac = efi_null_snp_mcast_ip_to_mac, + .NvData = efi_null_snp_nvdata, + .GetStatus = efi_null_snp_get_status, + .Transmit = efi_null_snp_transmit, + .Receive = efi_null_snp_receive, +}; + +/** + * Nullify SNP interface + * + * @v snp SNP interface + */ +void efi_nullify_snp ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { + + memcpy ( snp, &efi_null_snp, + offsetof ( typeof ( *snp ), WaitForPacket ) ); + snp->Mode->State = EfiSimpleNetworkStopped; +} + +/****************************************************************************** + * + * Network Interface 
Identification protocol + * + ****************************************************************************** + */ + +static EFIAPI VOID efi_null_undi_issue ( UINT64 cdb_phys ) { + PXE_CDB *cdb = ( ( void * ) ( intptr_t ) cdb_phys ); + + cdb->StatCode = PXE_STATCODE_UNSUPPORTED; + cdb->StatFlags = PXE_STATFLAGS_COMMAND_FAILED; +} + +static PXE_SW_UNDI efi_null_undi __attribute__ (( aligned ( 16 ) )) = { + .Signature = PXE_ROMID_SIGNATURE, + .Len = sizeof ( efi_null_undi ), + .Rev = PXE_ROMID_REV, + .MajorVer = PXE_ROMID_MAJORVER, + .MinorVer = PXE_ROMID_MINORVER, + .Implementation = PXE_ROMID_IMP_SW_VIRT_ADDR, +}; + +/** + * Nullify NII interface + * + * @v nii NII interface + */ +void efi_nullify_nii ( EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL *nii ) { + efi_null_undi.EntryPoint = ( ( intptr_t ) efi_null_undi_issue ); + nii->Id = ( ( intptr_t ) &efi_null_undi ); +} + +/****************************************************************************** + * + * Component name protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_get_driver_name ( EFI_COMPONENT_NAME2_PROTOCOL *name2 __unused, + CHAR8 *language __unused, + CHAR16 **driver_name __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_get_controller_name ( EFI_COMPONENT_NAME2_PROTOCOL *name2 __unused, + EFI_HANDLE device __unused, + EFI_HANDLE child __unused, + CHAR8 *language __unused, + CHAR16 **controller_name __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_COMPONENT_NAME2_PROTOCOL efi_null_name2 = { + .GetDriverName = efi_null_get_driver_name, + .GetControllerName = efi_null_get_controller_name, + .SupportedLanguages = "", +}; + +/** + * Nullify Component Name Protocol interface + * + * @v name2 Component name protocol + */ +void efi_nullify_name2 ( EFI_COMPONENT_NAME2_PROTOCOL *name2 ) { + + memcpy ( name2, &efi_null_name2, sizeof ( *name2 ) ); +} + 
+/****************************************************************************** + * + * Load file protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_load_file ( EFI_LOAD_FILE_PROTOCOL *load_file __unused, + EFI_DEVICE_PATH_PROTOCOL *path __unused, + BOOLEAN booting __unused, UINTN *len __unused, + VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +/** + * Nullify Load File Protocol interface + * + * @v load_file Load file protocol + */ +void efi_nullify_load_file ( EFI_LOAD_FILE_PROTOCOL *load_file ) { + load_file->LoadFile = efi_null_load_file; +} + +/****************************************************************************** + * + * HII configuration access protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_hii_extract ( const EFI_HII_CONFIG_ACCESS_PROTOCOL *hii __unused, + EFI_STRING request __unused, + EFI_STRING *progress __unused, + EFI_STRING *results __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_hii_route ( const EFI_HII_CONFIG_ACCESS_PROTOCOL *hii __unused, + EFI_STRING config __unused, + EFI_STRING *progress __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_hii_callback ( const EFI_HII_CONFIG_ACCESS_PROTOCOL *hii __unused, + EFI_BROWSER_ACTION action __unused, + EFI_QUESTION_ID question_id __unused, + UINT8 type __unused, EFI_IFR_TYPE_VALUE *value __unused, + EFI_BROWSER_ACTION_REQUEST *action_request __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_HII_CONFIG_ACCESS_PROTOCOL efi_null_hii = { + .ExtractConfig = efi_null_hii_extract, + .RouteConfig = efi_null_hii_route, + .Callback = efi_null_hii_callback, +}; + +/** + * Nullify HII configuration access protocol + * + * @v hii HII configuration access protocol + */ +void efi_nullify_hii ( EFI_HII_CONFIG_ACCESS_PROTOCOL *hii ) { + + memcpy ( hii, 
&efi_null_hii, sizeof ( *hii ) ); +} + +/****************************************************************************** + * + * Block I/O protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_block_reset ( EFI_BLOCK_IO_PROTOCOL *block __unused, + BOOLEAN verify __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_block_read ( EFI_BLOCK_IO_PROTOCOL *block __unused, + UINT32 media __unused, EFI_LBA lba __unused, + UINTN len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_block_write ( EFI_BLOCK_IO_PROTOCOL *block __unused, + UINT32 media __unused, EFI_LBA lba __unused, + UINTN len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_block_flush ( EFI_BLOCK_IO_PROTOCOL *block __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_BLOCK_IO_MEDIA efi_null_block_media; + +static EFI_BLOCK_IO_PROTOCOL efi_null_block = { + .Revision = EFI_BLOCK_IO_INTERFACE_REVISION, + .Media = &efi_null_block_media, + .Reset = efi_null_block_reset, + .ReadBlocks = efi_null_block_read, + .WriteBlocks = efi_null_block_write, + .FlushBlocks = efi_null_block_flush, +}; + +/** + * Nullify block I/O protocol + * + * @v block Block I/O protocol + */ +void efi_nullify_block ( EFI_BLOCK_IO_PROTOCOL *block ) { + + memcpy ( block, &efi_null_block, sizeof ( *block ) ); +} + +/****************************************************************************** + * + * PXE base code protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_pxe_start ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN use_ipv6 __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_stop ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_dhcp ( 
EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN sort __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_discover ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + UINT16 type __unused, UINT16 *layer __unused, + BOOLEAN bis __unused, + EFI_PXE_BASE_CODE_DISCOVER_INFO *info __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_mtftp ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_PXE_BASE_CODE_TFTP_OPCODE opcode __unused, + VOID *data __unused, BOOLEAN overwrite __unused, + UINT64 *len __unused, UINTN *blksize __unused, + EFI_IP_ADDRESS *ip __unused, UINT8 *filename __unused, + EFI_PXE_BASE_CODE_MTFTP_INFO *info __unused, + BOOLEAN callback __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_udp_write ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + UINT16 flags __unused, + EFI_IP_ADDRESS *dest_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *dest_port __unused, + EFI_IP_ADDRESS *gateway __unused, + EFI_IP_ADDRESS *src_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *src_port __unused, + UINTN *hdr_len __unused, VOID *hdr __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_udp_read ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + UINT16 flags __unused, + EFI_IP_ADDRESS *dest_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *dest_port __unused, + EFI_IP_ADDRESS *src_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *src_port __unused, + UINTN *hdr_len __unused, VOID *hdr __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_ip_filter ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_PXE_BASE_CODE_IP_FILTER *filter __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_arp ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_IP_ADDRESS *ip __unused, + EFI_MAC_ADDRESS *mac __unused ) { + return EFI_UNSUPPORTED; +} + +static 
EFI_STATUS EFIAPI +efi_null_pxe_set_parameters ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN *autoarp __unused, + BOOLEAN *sendguid __unused, UINT8 *ttl __unused, + UINT8 *tos __unused, + BOOLEAN *callback __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_station_ip ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_IP_ADDRESS *ip __unused, + EFI_IP_ADDRESS *netmask __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_packets ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN *dhcpdisc_ok __unused, + BOOLEAN *dhcpack_ok __unused, + BOOLEAN *proxyoffer_ok __unused, + BOOLEAN *pxebsdisc_ok __unused, + BOOLEAN *pxebsack_ok __unused, + BOOLEAN *pxebsbis_ok __unused, + EFI_PXE_BASE_CODE_PACKET *dhcpdisc __unused, + EFI_PXE_BASE_CODE_PACKET *dhcpack __unused, + EFI_PXE_BASE_CODE_PACKET *proxyoffer __unused, + EFI_PXE_BASE_CODE_PACKET *pxebsdisc __unused, + EFI_PXE_BASE_CODE_PACKET *pxebsack __unused, + EFI_PXE_BASE_CODE_PACKET *pxebsbis __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_PXE_BASE_CODE_PROTOCOL efi_null_pxe = { + .Revision = EFI_PXE_BASE_CODE_PROTOCOL_REVISION, + .Start = efi_null_pxe_start, + .Stop = efi_null_pxe_stop, + .Dhcp = efi_null_pxe_dhcp, + .Discover = efi_null_pxe_discover, + .Mtftp = efi_null_pxe_mtftp, + .UdpWrite = efi_null_pxe_udp_write, + .UdpRead = efi_null_pxe_udp_read, + .SetIpFilter = efi_null_pxe_set_ip_filter, + .Arp = efi_null_pxe_arp, + .SetParameters = efi_null_pxe_set_parameters, + .SetStationIp = efi_null_pxe_set_station_ip, + .SetPackets = efi_null_pxe_set_packets, +}; + +/** + * Nullify PXE base code protocol + * + * @v pxe PXE base code protocol + */ +void efi_nullify_pxe ( EFI_PXE_BASE_CODE_PROTOCOL *pxe ) { + + memcpy ( pxe, &efi_null_pxe, offsetof ( typeof ( *pxe ), Mode ) ); + pxe->Mode->Started = FALSE; +} + +/****************************************************************************** + * + * Apple Net Boot protocol + * + 
****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_apple_dhcp ( EFI_APPLE_NET_BOOT_PROTOCOL *apple __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_apple_bsdp ( EFI_APPLE_NET_BOOT_PROTOCOL *apple __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_APPLE_NET_BOOT_PROTOCOL efi_null_apple = { + .GetDhcpResponse = efi_null_apple_dhcp, + .GetBsdpResponse = efi_null_apple_bsdp, +}; + +/** + * Nullify Apple Net Boot protocol + * + * @v apple Apple Net Boot protocol + */ +void efi_nullify_apple ( EFI_APPLE_NET_BOOT_PROTOCOL *apple ) { + + memcpy ( apple, &efi_null_apple, sizeof ( *apple ) ); +} diff --git a/src/interface/efi/efi_path.c b/src/interface/efi/efi_path.c new file mode 100644 index 00000000..bae0ac4b --- /dev/null +++ b/src/interface/efi/efi_path.c @@ -0,0 +1,506 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI device paths + * + */ + +/** + * Find end of device path + * + * @v path Path to device + * @ret path_end End of device path + */ +EFI_DEVICE_PATH_PROTOCOL * efi_path_end ( EFI_DEVICE_PATH_PROTOCOL *path ) { + + while ( path->Type != END_DEVICE_PATH_TYPE ) { + path = ( ( ( void * ) path ) + + /* There's this amazing new-fangled thing known as + * a UINT16, but who wants to use one of those? */ + ( ( path->Length[1] << 8 ) | path->Length[0] ) ); + } + + return path; +} + +/** + * Find length of device path (excluding terminator) + * + * @v path Path to device + * @ret path_len Length of device path + */ +size_t efi_path_len ( EFI_DEVICE_PATH_PROTOCOL *path ) { + EFI_DEVICE_PATH_PROTOCOL *end = efi_path_end ( path ); + + return ( ( ( void * ) end ) - ( ( void * ) path ) ); +} + +/** + * Concatenate EFI device paths + * + * @v ... List of device paths (NULL terminated) + * @ret path Concatenated device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_paths ( EFI_DEVICE_PATH_PROTOCOL *first, ... ) { + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *src; + EFI_DEVICE_PATH_PROTOCOL *dst; + EFI_DEVICE_PATH_PROTOCOL *end; + va_list args; + size_t len; + + /* Calculate device path length */ + va_start ( args, first ); + len = 0; + src = first; + while ( src ) { + len += efi_path_len ( src ); + src = va_arg ( args, EFI_DEVICE_PATH_PROTOCOL * ); + } + va_end ( args ); + + /* Allocate device path */ + path = zalloc ( len + sizeof ( *end ) ); + if ( ! 
path ) + return NULL; + + /* Populate device path */ + va_start ( args, first ); + dst = path; + src = first; + while ( src ) { + len = efi_path_len ( src ); + memcpy ( dst, src, len ); + dst = ( ( ( void * ) dst ) + len ); + src = va_arg ( args, EFI_DEVICE_PATH_PROTOCOL * ); + } + va_end ( args ); + end = dst; + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for network device + * + * @v netdev Network device + * @ret path EFI device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_netdev_path ( struct net_device *netdev ) { + struct efi_device *efidev; + EFI_DEVICE_PATH_PROTOCOL *path; + MAC_ADDR_DEVICE_PATH *macpath; + VLAN_DEVICE_PATH *vlanpath; + EFI_DEVICE_PATH_PROTOCOL *end; + unsigned int tag; + size_t prefix_len; + size_t len; + + /* Find parent EFI device */ + efidev = efidev_parent ( netdev->dev ); + if ( ! efidev ) + return NULL; + + /* Calculate device path length */ + prefix_len = efi_path_len ( efidev->path ); + len = ( prefix_len + sizeof ( *macpath ) + sizeof ( *vlanpath ) + + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
path ) + return NULL; + + /* Construct device path */ + memcpy ( path, efidev->path, prefix_len ); + macpath = ( ( ( void * ) path ) + prefix_len ); + macpath->Header.Type = MESSAGING_DEVICE_PATH; + macpath->Header.SubType = MSG_MAC_ADDR_DP; + macpath->Header.Length[0] = sizeof ( *macpath ); + assert ( netdev->ll_protocol->ll_addr_len < + sizeof ( macpath->MacAddress ) ); + memcpy ( &macpath->MacAddress, netdev->ll_addr, + netdev->ll_protocol->ll_addr_len ); + macpath->IfType = ntohs ( netdev->ll_protocol->ll_proto ); + if ( ( tag = vlan_tag ( netdev ) ) ) { + vlanpath = ( ( ( void * ) macpath ) + sizeof ( *macpath ) ); + vlanpath->Header.Type = MESSAGING_DEVICE_PATH; + vlanpath->Header.SubType = MSG_VLAN_DP; + vlanpath->Header.Length[0] = sizeof ( *vlanpath ); + vlanpath->VlanId = tag; + end = ( ( ( void * ) vlanpath ) + sizeof ( *vlanpath ) ); + } else { + end = ( ( ( void * ) macpath ) + sizeof ( *macpath ) ); + } + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for URI + * + * @v uri URI + * @ret path EFI device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_uri_path ( struct uri *uri ) { + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + URI_DEVICE_PATH *uripath; + size_t uri_len; + size_t uripath_len; + size_t len; + + /* Calculate device path length */ + uri_len = ( format_uri ( uri, NULL, 0 ) + 1 /* NUL */ ); + uripath_len = ( sizeof ( *uripath ) + uri_len ); + len = ( uripath_len + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
path ) + return NULL; + + /* Construct device path */ + uripath = ( ( void * ) path ); + uripath->Header.Type = MESSAGING_DEVICE_PATH; + uripath->Header.SubType = MSG_URI_DP; + uripath->Header.Length[0] = ( uripath_len & 0xff ); + uripath->Header.Length[1] = ( uripath_len >> 8 ); + format_uri ( uri, uripath->Uri, uri_len ); + end = ( ( ( void * ) path ) + uripath_len ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for iSCSI device + * + * @v iscsi iSCSI session + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_iscsi_path ( struct iscsi_session *iscsi ) { + struct sockaddr_tcpip *st_target; + struct net_device *netdev; + EFI_DEVICE_PATH_PROTOCOL *netpath; + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + ISCSI_DEVICE_PATH *iscsipath; + char *name; + size_t prefix_len; + size_t name_len; + size_t iscsi_len; + size_t len; + + /* Get network device associated with target address */ + st_target = ( ( struct sockaddr_tcpip * ) &iscsi->target_sockaddr ); + netdev = tcpip_netdev ( st_target ); + if ( ! netdev ) + goto err_netdev; + + /* Get network device path */ + netpath = efi_netdev_path ( netdev ); + if ( ! netpath ) + goto err_netpath; + + /* Calculate device path length */ + prefix_len = efi_path_len ( netpath ); + name_len = ( strlen ( iscsi->target_iqn ) + 1 /* NUL */ ); + iscsi_len = ( sizeof ( *iscsipath ) + name_len ); + len = ( prefix_len + iscsi_len + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
path ) + goto err_alloc; + + /* Construct device path */ + memcpy ( path, netpath, prefix_len ); + iscsipath = ( ( ( void * ) path ) + prefix_len ); + iscsipath->Header.Type = MESSAGING_DEVICE_PATH; + iscsipath->Header.SubType = MSG_ISCSI_DP; + iscsipath->Header.Length[0] = iscsi_len; + iscsipath->LoginOption = ISCSI_LOGIN_OPTION_AUTHMETHOD_NON; + memcpy ( &iscsipath->Lun, &iscsi->lun, sizeof ( iscsipath->Lun ) ); + name = ( ( ( void * ) iscsipath ) + sizeof ( *iscsipath ) ); + memcpy ( name, iscsi->target_iqn, name_len ); + end = ( ( ( void * ) name ) + name_len ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + /* Free temporary paths */ + free ( netpath ); + + return path; + + err_alloc: + free ( netpath ); + err_netpath: + err_netdev: + return NULL; +} + +/** + * Construct EFI device path for AoE device + * + * @v aoedev AoE device + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_aoe_path ( struct aoe_device *aoedev ) { + struct { + SATA_DEVICE_PATH sata; + EFI_DEVICE_PATH_PROTOCOL end; + } satapath; + EFI_DEVICE_PATH_PROTOCOL *netpath; + EFI_DEVICE_PATH_PROTOCOL *path; + + /* Get network device path */ + netpath = efi_netdev_path ( aoedev->netdev ); + if ( ! netpath ) + goto err_netdev; + + /* Construct SATA path */ + memset ( &satapath, 0, sizeof ( satapath ) ); + satapath.sata.Header.Type = MESSAGING_DEVICE_PATH; + satapath.sata.Header.SubType = MSG_SATA_DP; + satapath.sata.Header.Length[0] = sizeof ( satapath.sata ); + satapath.sata.HBAPortNumber = aoedev->major; + satapath.sata.PortMultiplierPortNumber = aoedev->minor; + satapath.end.Type = END_DEVICE_PATH_TYPE; + satapath.end.SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + satapath.end.Length[0] = sizeof ( satapath.end ); + + /* Construct overall device path */ + path = efi_paths ( netpath, &satapath, NULL ); + if ( ! 
path ) + goto err_paths; + + /* Free temporary paths */ + free ( netpath ); + + return path; + + err_paths: + free ( netpath ); + err_netdev: + return NULL; +} + +/** + * Construct EFI device path for Fibre Channel device + * + * @v desc FCP device description + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_fcp_path ( struct fcp_description *desc ) { + struct { + FIBRECHANNELEX_DEVICE_PATH fc; + EFI_DEVICE_PATH_PROTOCOL end; + } __attribute__ (( packed )) *path; + + /* Allocate device path */ + path = zalloc ( sizeof ( *path ) ); + if ( ! path ) + return NULL; + + /* Construct device path */ + path->fc.Header.Type = MESSAGING_DEVICE_PATH; + path->fc.Header.SubType = MSG_FIBRECHANNELEX_DP; + path->fc.Header.Length[0] = sizeof ( path->fc ); + memcpy ( path->fc.WWN, &desc->wwn, sizeof ( path->fc.WWN ) ); + memcpy ( path->fc.Lun, &desc->lun, sizeof ( path->fc.Lun ) ); + path->end.Type = END_DEVICE_PATH_TYPE; + path->end.SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + path->end.Length[0] = sizeof ( path->end ); + + return &path->fc.Header; +} + +/** + * Construct EFI device path for Infiniband SRP device + * + * @v ib_srp Infiniband SRP device + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_ib_srp_path ( struct ib_srp_device *ib_srp ) { + const struct ipxe_ib_sbft *sbft = &ib_srp->sbft; + union ib_srp_target_port_id *id = + container_of ( &sbft->srp.target, union ib_srp_target_port_id, + srp ); + struct efi_device *efidev; + EFI_DEVICE_PATH_PROTOCOL *path; + INFINIBAND_DEVICE_PATH *ibpath; + EFI_DEVICE_PATH_PROTOCOL *end; + size_t prefix_len; + size_t len; + + /* Find parent EFI device */ + efidev = efidev_parent ( ib_srp->ibdev->dev ); + if ( ! efidev ) + return NULL; + + /* Calculate device path length */ + prefix_len = efi_path_len ( efidev->path ); + len = ( prefix_len + sizeof ( *ibpath ) + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
path ) + return NULL; + + /* Construct device path */ + memcpy ( path, efidev->path, prefix_len ); + ibpath = ( ( ( void * ) path ) + prefix_len ); + ibpath->Header.Type = MESSAGING_DEVICE_PATH; + ibpath->Header.SubType = MSG_INFINIBAND_DP; + ibpath->Header.Length[0] = sizeof ( *ibpath ); + ibpath->ResourceFlags = INFINIBAND_RESOURCE_FLAG_STORAGE_PROTOCOL; + memcpy ( ibpath->PortGid, &sbft->ib.dgid, sizeof ( ibpath->PortGid ) ); + memcpy ( &ibpath->ServiceId, &sbft->ib.service_id, + sizeof ( ibpath->ServiceId ) ); + memcpy ( &ibpath->TargetPortId, &id->ib.ioc_guid, + sizeof ( ibpath->TargetPortId ) ); + memcpy ( &ibpath->DeviceId, &id->ib.id_ext, + sizeof ( ibpath->DeviceId ) ); + end = ( ( ( void * ) ibpath ) + sizeof ( *ibpath ) ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for USB function + * + * @v func USB function + * @ret path EFI device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_usb_path ( struct usb_function *func ) { + struct usb_device *usb = func->usb; + struct efi_device *efidev; + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + USB_DEVICE_PATH *usbpath; + unsigned int count; + size_t prefix_len; + size_t len; + + /* Sanity check */ + assert ( func->desc.count >= 1 ); + + /* Find parent EFI device */ + efidev = efidev_parent ( &func->dev ); + if ( ! efidev ) + return NULL; + + /* Calculate device path length */ + count = ( usb_depth ( usb ) + 1 ); + prefix_len = efi_path_len ( efidev->path ); + len = ( prefix_len + ( count * sizeof ( *usbpath ) ) + + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
path ) + return NULL; + + /* Construct device path */ + memcpy ( path, efidev->path, prefix_len ); + end = ( ( ( void * ) path ) + len - sizeof ( *end ) ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + usbpath = ( ( ( void * ) end ) - sizeof ( *usbpath ) ); + usbpath->InterfaceNumber = func->interface[0]; + for ( ; usb ; usbpath--, usb = usb->port->hub->usb ) { + usbpath->Header.Type = MESSAGING_DEVICE_PATH; + usbpath->Header.SubType = MSG_USB_DP; + usbpath->Header.Length[0] = sizeof ( *usbpath ); + usbpath->ParentPortNumber = ( usb->port->address - 1 ); + } + + return path; +} + +/** + * Describe object as an EFI device path + * + * @v intf Interface + * @ret path EFI device path, or NULL + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_describe ( struct interface *intf ) { + struct interface *dest; + efi_describe_TYPE ( void * ) *op = + intf_get_dest_op ( intf, efi_describe, &dest ); + void *object = intf_object ( dest ); + EFI_DEVICE_PATH_PROTOCOL *path; + + if ( op ) { + path = op ( object ); + } else { + path = NULL; + } + + intf_put ( dest ); + return path; +} diff --git a/src/interface/efi/efi_pxe.c b/src/interface/efi/efi_pxe.c new file mode 100644 index 00000000..4422dd28 --- /dev/null +++ b/src/interface/efi/efi_pxe.c @@ -0,0 +1,1711 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI PXE base code protocol + * + */ + +/* Downgrade user experience if configured to do so + * + * See comments in efi_snp.c + */ +#ifdef EFI_DOWNGRADE_UX +static EFI_GUID dummy_pxe_base_code_protocol_guid = { + 0x70647523, 0x2320, 0x7477, + { 0x66, 0x20, 0x23, 0x6d, 0x6f, 0x72, 0x6f, 0x6e } +}; +#define efi_pxe_base_code_protocol_guid dummy_pxe_base_code_protocol_guid +#endif + +/** A PXE base code */ +struct efi_pxe { + /** Reference count */ + struct refcnt refcnt; + /** Underlying network device */ + struct net_device *netdev; + /** Name */ + const char *name; + /** List of PXE base codes */ + struct list_head list; + + /** Installed handle */ + EFI_HANDLE handle; + /** PXE base code protocol */ + EFI_PXE_BASE_CODE_PROTOCOL base; + /** PXE base code mode */ + EFI_PXE_BASE_CODE_MODE mode; + /** Apple NetBoot protocol */ + EFI_APPLE_NET_BOOT_PROTOCOL apple; + + /** TCP/IP network-layer protocol */ + struct tcpip_net_protocol *tcpip; + /** Network-layer protocol */ + struct net_protocol *net; + + /** Data transfer buffer */ + struct xfer_buffer buf; + + /** (M)TFTP download interface */ + struct interface tftp; + /** Block size (for TFTP) */ + size_t blksize; + /** Overall return status */ + int rc; + + /** UDP interface */ + struct 
interface udp; + /** List of received UDP packets */ + struct list_head queue; + /** UDP interface closer process */ + struct process process; +}; + +/** + * Free PXE base code + * + * @v refcnt Reference count + */ +static void efi_pxe_free ( struct refcnt *refcnt ) { + struct efi_pxe *pxe = container_of ( refcnt, struct efi_pxe, refcnt ); + + netdev_put ( pxe->netdev ); + free ( pxe ); +} + +/** List of PXE base codes */ +static LIST_HEAD ( efi_pxes ); + +/** + * Locate PXE base code + * + * @v handle EFI handle + * @ret pxe PXE base code, or NULL + */ +static struct efi_pxe * efi_pxe_find ( EFI_HANDLE handle ) { + struct efi_pxe *pxe; + + /* Locate base code */ + list_for_each_entry ( pxe, &efi_pxes, list ) { + if ( pxe->handle == handle ) + return pxe; + } + + return NULL; +} + +/****************************************************************************** + * + * IP addresses + * + ****************************************************************************** + */ + +/** + * An EFI socket address + * + */ +struct sockaddr_efi { + /** Socket address family (part of struct @c sockaddr) */ + sa_family_t se_family; + /** Flags (part of struct @c sockaddr_tcpip) */ + uint16_t se_flags; + /** TCP/IP port (part of struct @c sockaddr_tcpip) */ + uint16_t se_port; + /** Scope ID (part of struct @c sockaddr_tcpip) + * + * For link-local or multicast addresses, this is the network + * device index. + */ + uint16_t se_scope_id; + /** IP address */ + EFI_IP_ADDRESS se_addr; + /** Padding + * + * This ensures that a struct @c sockaddr_tcpip is large + * enough to hold a socket address for any TCP/IP address + * family. 
+ */ + char pad[ sizeof ( struct sockaddr ) - + ( sizeof ( sa_family_t ) /* se_family */ + + sizeof ( uint16_t ) /* se_flags */ + + sizeof ( uint16_t ) /* se_port */ + + sizeof ( uint16_t ) /* se_scope_id */ + + sizeof ( EFI_IP_ADDRESS ) /* se_addr */ ) ]; +} __attribute__ (( packed, may_alias )); + +/** + * Populate socket address from EFI IP address + * + * @v pxe PXE base code + * @v ip EFI IP address + * @v sa Socket address to fill in + */ +static void efi_pxe_ip_sockaddr ( struct efi_pxe *pxe, EFI_IP_ADDRESS *ip, + struct sockaddr *sa ) { + union { + struct sockaddr sa; + struct sockaddr_efi se; + } *sockaddr = container_of ( sa, typeof ( *sockaddr ), sa ); + + /* Initialise socket address */ + memset ( sockaddr, 0, sizeof ( *sockaddr ) ); + sockaddr->sa.sa_family = pxe->tcpip->sa_family; + memcpy ( &sockaddr->se.se_addr, ip, pxe->net->net_addr_len ); + sockaddr->se.se_scope_id = pxe->netdev->index; +} + +/** + * Transcribe EFI IP address (for debugging) + * + * @v pxe PXE base code + * @v ip EFI IP address + * @ret text Transcribed IP address + */ +static const char * efi_pxe_ip_ntoa ( struct efi_pxe *pxe, + EFI_IP_ADDRESS *ip ) { + + return pxe->net->ntoa ( ip ); +} + +/** + * Populate local IP address + * + * @v pxe PXE base code + * @ret rc Return status code + */ +static int efi_pxe_ip ( struct efi_pxe *pxe ) { + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + struct in_addr address; + struct in_addr netmask; + + /* It's unclear which of the potentially many IPv6 addresses + * is supposed to be used. 
+ */ + if ( mode->UsingIpv6 ) + return -ENOTSUP; + + /* Fetch IP address and subnet mask */ + fetch_ipv4_setting ( netdev_settings ( pxe->netdev ), &ip_setting, + &address ); + fetch_ipv4_setting ( netdev_settings ( pxe->netdev ), &netmask_setting, + &netmask ); + + /* Populate IP address and subnet mask */ + memset ( &mode->StationIp, 0, sizeof ( mode->StationIp ) ); + memcpy ( &mode->StationIp, &address, sizeof ( address ) ); + memset ( &mode->SubnetMask, 0, sizeof ( mode->SubnetMask ) ); + memcpy ( &mode->SubnetMask, &netmask, sizeof ( netmask ) ); + + return 0; +} + +/** + * Check if IP address matches filter + * + * @v pxe PXE base code + * @v ip EFI IP address + * @ret is_match IP address matches filter + */ +static int efi_pxe_ip_filter ( struct efi_pxe *pxe, EFI_IP_ADDRESS *ip ) { + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + EFI_PXE_BASE_CODE_IP_FILTER *filter = &mode->IpFilter; + uint8_t filters = filter->Filters; + union { + EFI_IP_ADDRESS ip; + struct in_addr in; + struct in6_addr in6; + } *u = container_of ( ip, typeof ( *u ), ip ); + size_t addr_len = pxe->net->net_addr_len; + unsigned int i; + + /* Match everything, if applicable */ + if ( filters & EFI_PXE_BASE_CODE_IP_FILTER_PROMISCUOUS ) + return 1; + + /* Match all multicasts, if applicable */ + if ( filters & EFI_PXE_BASE_CODE_IP_FILTER_PROMISCUOUS_MULTICAST ) { + if ( mode->UsingIpv6 ) { + if ( IN6_IS_ADDR_MULTICAST ( &u->in6 ) ) + return 1; + } else { + if ( IN_IS_MULTICAST ( u->in.s_addr ) ) + return 1; + } + } + + /* Match IPv4 broadcasts, if applicable */ + if ( filters & EFI_PXE_BASE_CODE_IP_FILTER_BROADCAST ) { + if ( ( ! 
mode->UsingIpv6 ) && + ( u->in.s_addr == INADDR_BROADCAST ) ) + return 1; + } + + /* Match station address, if applicable */ + if ( filters & EFI_PXE_BASE_CODE_IP_FILTER_STATION_IP ) { + if ( memcmp ( ip, &mode->StationIp, addr_len ) == 0 ) + return 1; + } + + /* Match explicit addresses, if applicable */ + for ( i = 0 ; i < filter->IpCnt ; i++ ) { + if ( memcmp ( ip, &filter->IpList[i], addr_len ) == 0 ) + return 1; + } + + return 0; +} + +/****************************************************************************** + * + * Data transfer buffer + * + ****************************************************************************** + */ + +/** + * Reallocate PXE data transfer buffer + * + * @v xferbuf Data transfer buffer + * @v len New length (or zero to free buffer) + * @ret rc Return status code + */ +static int efi_pxe_buf_realloc ( struct xfer_buffer *xferbuf __unused, + size_t len __unused ) { + + /* Can never reallocate: return EFI_BUFFER_TOO_SMALL */ + return -ERANGE; +} + +/** + * Write data to PXE data transfer buffer + * + * @v xferbuf Data transfer buffer + * @v offset Starting offset + * @v data Data to copy + * @v len Length of data + */ +static void efi_pxe_buf_write ( struct xfer_buffer *xferbuf, size_t offset, + const void *data, size_t len ) { + + /* Copy data to buffer */ + memcpy ( ( xferbuf->data + offset ), data, len ); +} + +/** PXE data transfer buffer operations */ +static struct xfer_buffer_operations efi_pxe_buf_operations = { + .realloc = efi_pxe_buf_realloc, + .write = efi_pxe_buf_write, +}; + +/****************************************************************************** + * + * (M)TFTP download interface + * + ****************************************************************************** + */ + +/** + * Close PXE (M)TFTP download interface + * + * @v pxe PXE base code + * @v rc Reason for close + */ +static void efi_pxe_tftp_close ( struct efi_pxe *pxe, int rc ) { + + /* Restart interface */ + intf_restart ( &pxe->tftp, rc ); + + /* 
Record overall status */ + pxe->rc = rc; +} + +/** + * Check PXE (M)TFTP download flow control window + * + * @v pxe PXE base code + * @ret len Length of window + */ +static size_t efi_pxe_tftp_window ( struct efi_pxe *pxe ) { + + /* Return requested blocksize */ + return pxe->blksize; +} + +/** + * Receive new PXE (M)TFTP download data + * + * @v pxe PXE base code + * @v iobuf I/O buffer + * @v meta Transfer metadata + * @ret rc Return status code + */ +static int efi_pxe_tftp_deliver ( struct efi_pxe *pxe, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + int rc; + + /* Deliver to data transfer buffer */ + if ( ( rc = xferbuf_deliver ( &pxe->buf, iob_disown ( iobuf ), + meta ) ) != 0 ) + goto err_deliver; + + return 0; + + err_deliver: + efi_pxe_tftp_close ( pxe, rc ); + return rc; +} + +/** PXE file data transfer interface operations */ +static struct interface_operation efi_pxe_tftp_operations[] = { + INTF_OP ( xfer_deliver, struct efi_pxe *, efi_pxe_tftp_deliver ), + INTF_OP ( xfer_window, struct efi_pxe *, efi_pxe_tftp_window ), + INTF_OP ( intf_close, struct efi_pxe *, efi_pxe_tftp_close ), +}; + +/** PXE file data transfer interface descriptor */ +static struct interface_descriptor efi_pxe_tftp_desc = + INTF_DESC ( struct efi_pxe, tftp, efi_pxe_tftp_operations ); + +/** + * Open (M)TFTP download interface + * + * @v pxe PXE base code + * @v ip EFI IP address + * @v filename Filename + * @ret rc Return status code + */ +static int efi_pxe_tftp_open ( struct efi_pxe *pxe, EFI_IP_ADDRESS *ip, + const char *filename ) { + struct sockaddr server; + struct uri *uri; + int rc; + + /* Parse server address and filename */ + efi_pxe_ip_sockaddr ( pxe, ip, &server ); + uri = pxe_uri ( &server, filename ); + if ( ! 
uri ) { + DBGC ( pxe, "PXE %s could not parse %s:%s\n", pxe->name, + efi_pxe_ip_ntoa ( pxe, ip ), filename ); + rc = -ENOTSUP; + goto err_parse; + } + + /* Open URI */ + if ( ( rc = xfer_open_uri ( &pxe->tftp, uri ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not open: %s\n", + pxe->name, strerror ( rc ) ); + goto err_open; + } + + err_open: + uri_put ( uri ); + err_parse: + return rc; +} + +/****************************************************************************** + * + * UDP interface + * + ****************************************************************************** + */ + +/** EFI UDP pseudo-header */ +struct efi_pxe_udp_pseudo_header { + /** Network-layer protocol */ + struct net_protocol *net; + /** Destination port */ + uint16_t dest_port; + /** Source port */ + uint16_t src_port; +} __attribute__ (( packed )); + +/** + * Close UDP interface + * + * @v pxe PXE base code + * @v rc Reason for close + */ +static void efi_pxe_udp_close ( struct efi_pxe *pxe, int rc ) { + struct io_buffer *iobuf; + struct io_buffer *tmp; + + /* Release our claim on SNP devices, if applicable */ + if ( process_running ( &pxe->process ) ) + efi_snp_release(); + + /* Stop process */ + process_del ( &pxe->process ); + + /* Restart UDP interface */ + intf_restart ( &pxe->udp, rc ); + + /* Flush any received UDP packets */ + list_for_each_entry_safe ( iobuf, tmp, &pxe->queue, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } +} + +/** + * Receive UDP packet + * + * @v pxe PXE base code + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int efi_pxe_udp_deliver ( struct efi_pxe *pxe, struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + struct sockaddr_efi *se_src; + struct sockaddr_efi *se_dest; + struct tcpip_net_protocol *tcpip; + struct net_protocol *net; + struct efi_pxe_udp_pseudo_header *pshdr; + size_t addr_len; + size_t pshdr_len; + int rc; + + /* Sanity checks */ + assert ( meta != NULL ); + se_src = ( ( 
struct sockaddr_efi * ) meta->src ); + assert ( se_src != NULL ); + se_dest = ( ( struct sockaddr_efi * ) meta->dest ); + assert ( se_dest != NULL ); + assert ( se_src->se_family == se_dest->se_family ); + + /* Determine protocol */ + tcpip = tcpip_net_protocol ( se_src->se_family ); + if ( ! tcpip ) { + rc = -ENOTSUP; + goto err_unsupported; + } + net = tcpip->net_protocol; + addr_len = net->net_addr_len; + + /* Construct pseudo-header */ + pshdr_len = ( sizeof ( *pshdr ) + ( 2 * addr_len ) ); + if ( ( rc = iob_ensure_headroom ( iobuf, pshdr_len ) ) != 0 ) + goto err_headroom; + memcpy ( iob_push ( iobuf, addr_len ), &se_src->se_addr, addr_len ); + memcpy ( iob_push ( iobuf, addr_len ), &se_dest->se_addr, addr_len ); + pshdr = iob_push ( iobuf, sizeof ( *pshdr ) ); + pshdr->net = net; + pshdr->dest_port = ntohs ( se_dest->se_port ); + pshdr->src_port = ntohs ( se_src->se_port ); + + /* Add to queue */ + list_add_tail ( &iobuf->list, &pxe->queue ); + + return 0; + + err_unsupported: + err_headroom: + free_iob ( iobuf ); + return rc; +} + +/** PXE UDP interface operations */ +static struct interface_operation efi_pxe_udp_operations[] = { + INTF_OP ( xfer_deliver, struct efi_pxe *, efi_pxe_udp_deliver ), + INTF_OP ( intf_close, struct efi_pxe *, efi_pxe_udp_close ), +}; + +/** PXE UDP interface descriptor */ +static struct interface_descriptor efi_pxe_udp_desc = + INTF_DESC ( struct efi_pxe, udp, efi_pxe_udp_operations ); + +/** + * Open UDP interface + * + * @v pxe PXE base code + * @ret rc Return status code + */ +static int efi_pxe_udp_open ( struct efi_pxe *pxe ) { + int rc; + + /* If interface is already open, then cancel the scheduled close */ + if ( process_running ( &pxe->process ) ) { + process_del ( &pxe->process ); + return 0; + } + + /* Open promiscuous UDP interface */ + if ( ( rc = udp_open_promisc ( &pxe->udp ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not open UDP connection: %s\n", + pxe->name, strerror ( rc ) ); + return rc; + } + + /* Claim network 
devices */ + efi_snp_claim(); + + return 0; +} + +/** + * Schedule close of UDP interface + * + * @v pxe PXE base code + */ +static void efi_pxe_udp_schedule_close ( struct efi_pxe *pxe ) { + + /* The EFI PXE base code protocol does not provide any + * explicit UDP open/close methods. To avoid the overhead of + * reopening a socket for each read/write operation, we start + * a process which will close the socket immediately if the + * next call into iPXE is anything other than a UDP + * read/write. + */ + process_add ( &pxe->process ); +} + +/** + * Scheduled close of UDP interface + * + * @v pxe PXE base code + */ +static void efi_pxe_udp_scheduled_close ( struct efi_pxe *pxe ) { + + /* Close UDP interface */ + efi_pxe_udp_close ( pxe, 0 ); +} + +/** UDP close process descriptor */ +static struct process_descriptor efi_pxe_process_desc = + PROC_DESC_ONCE ( struct efi_pxe, process, efi_pxe_udp_scheduled_close ); + +/****************************************************************************** + * + * Fake DHCP packets + * + ****************************************************************************** + */ + +/** + * Name fake DHCP packet + * + * @v pxe PXE base code + * @v packet Packet + * @ret name Name of packet + */ +static const char * efi_pxe_fake_name ( struct efi_pxe *pxe, + EFI_PXE_BASE_CODE_PACKET *packet ) { + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + + if ( packet == &mode->DhcpDiscover ) { + return "DhcpDiscover"; + } else if ( packet == &mode->DhcpAck ) { + return "DhcpAck"; + } else if ( packet == &mode->ProxyOffer ) { + return "ProxyOffer"; + } else if ( packet == &mode->PxeDiscover ) { + return "PxeDiscover"; + } else if ( packet == &mode->PxeReply ) { + return "PxeReply"; + } else if ( packet == &mode->PxeBisReply ) { + return "PxeBisReply"; + } else { + return ""; + } +} + +/** + * Construct fake DHCP packet and flag + * + * @v pxe PXE base code + * @v fake Fake packet constructor + * @v packet Packet to fill in + * @ret exists Packet 
existence flag + */ +static BOOLEAN efi_pxe_fake ( struct efi_pxe *pxe, + int ( * fake ) ( struct net_device *netdev, + void *data, size_t len ), + EFI_PXE_BASE_CODE_PACKET *packet ) { + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + struct dhcp_packet dhcppkt; + struct dhcphdr *dhcphdr; + unsigned int len; + int rc; + + /* The fake packet constructors do not support IPv6 */ + if ( mode->UsingIpv6 ) + return FALSE; + + /* Attempt to construct packet */ + if ( ( rc = fake ( pxe->netdev, packet, sizeof ( *packet ) ) != 0 ) ) { + DBGC ( pxe, "PXE %s could not fake %s: %s\n", pxe->name, + efi_pxe_fake_name ( pxe, packet ), strerror ( rc ) ); + return FALSE; + } + + /* The WDS bootstrap wdsmgfw.efi has a buggy DHCPv4 packet + * parser which does not correctly handle DHCP padding bytes. + * Specifically, if a padding byte (i.e. a zero) is + * encountered, the parse will first increment the pointer by + * one to skip over the padding byte but will then drop into + * the code path for handling normal options, which increments + * the pointer by two to skip over the (already-skipped) type + * field and the (non-existent) length field. + * + * The upshot of this bug in WDS is that the parser will fail + * with an error 0xc0000023 if the number of spare bytes after + * the end of the options is not an exact multiple of three. + * + * Work around this buggy parser by adding an explicit + * DHCP_END tag. 
+ */ + dhcphdr = container_of ( &packet->Dhcpv4.BootpOpcode, + struct dhcphdr, op ); + dhcppkt_init ( &dhcppkt, dhcphdr, sizeof ( *packet ) ); + len = dhcppkt_len ( &dhcppkt ); + if ( len < sizeof ( *packet ) ) + packet->Raw[len] = DHCP_END; + + return TRUE; +} + +/** + * Construct fake DHCP packets + * + * @v pxe PXE base code + */ +static void efi_pxe_fake_all ( struct efi_pxe *pxe ) { + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + + /* Construct fake packets */ + mode->DhcpDiscoverValid = + efi_pxe_fake ( pxe, create_fakedhcpdiscover, + &mode->DhcpDiscover ); + mode->DhcpAckReceived = + efi_pxe_fake ( pxe, create_fakedhcpack, + &mode->DhcpAck ); + mode->PxeReplyReceived = + efi_pxe_fake ( pxe, create_fakepxebsack, + &mode->PxeReply ); +} + +/****************************************************************************** + * + * Base code protocol + * + ****************************************************************************** + */ + +/** + * Start PXE base code + * + * @v base PXE base code protocol + * @v use_ipv6 Use IPv6 + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI efi_pxe_start ( EFI_PXE_BASE_CODE_PROTOCOL *base, + BOOLEAN use_ipv6 ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + struct tcpip_net_protocol *ipv6 = tcpip_net_protocol ( AF_INET6 ); + sa_family_t family = ( use_ipv6 ? AF_INET6 : AF_INET ); + int rc; + + DBGC ( pxe, "PXE %s START %s\n", pxe->name, ( ipv6 ? 
"IPv6" : "IPv4" )); + + /* Initialise mode structure */ + memset ( mode, 0, sizeof ( *mode ) ); + mode->AutoArp = TRUE; + mode->TTL = DEFAULT_TTL; + mode->ToS = DEFAULT_ToS; + mode->IpFilter.Filters = + ( EFI_PXE_BASE_CODE_IP_FILTER_STATION_IP | + EFI_PXE_BASE_CODE_IP_FILTER_BROADCAST | + EFI_PXE_BASE_CODE_IP_FILTER_PROMISCUOUS | + EFI_PXE_BASE_CODE_IP_FILTER_PROMISCUOUS_MULTICAST ); + + /* Check for IPv4/IPv6 support */ + mode->Ipv6Supported = ( ipv6 != NULL ); + mode->Ipv6Available = ( ipv6 != NULL ); + pxe->tcpip = tcpip_net_protocol ( family ); + if ( ! pxe->tcpip ) { + DBGC ( pxe, "PXE %s has no support for %s\n", + pxe->name, socket_family_name ( family ) ); + return EFI_UNSUPPORTED; + } + pxe->net = pxe->tcpip->net_protocol; + mode->UsingIpv6 = use_ipv6; + + /* Populate station IP address */ + if ( ( rc = efi_pxe_ip ( pxe ) ) != 0 ) + return rc; + + /* Construct fake DHCP packets */ + efi_pxe_fake_all ( pxe ); + + /* Record that base code is started */ + mode->Started = TRUE; + DBGC ( pxe, "PXE %s using %s\n", + pxe->name, pxe->net->ntoa ( &mode->StationIp ) ); + + return 0; +} + +/** + * Stop PXE base code + * + * @v base PXE base code protocol + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI efi_pxe_stop ( EFI_PXE_BASE_CODE_PROTOCOL *base ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + + DBGC ( pxe, "PXE %s STOP\n", pxe->name ); + + /* Record that base code is stopped */ + mode->Started = FALSE; + + /* Close TFTP */ + efi_pxe_tftp_close ( pxe, 0 ); + + /* Close UDP */ + efi_pxe_udp_close ( pxe, 0 ); + + return 0; +} + +/** + * Perform DHCP + * + * @v base PXE base code protocol + * @v sort Offers should be sorted + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI efi_pxe_dhcp ( EFI_PXE_BASE_CODE_PROTOCOL *base, + BOOLEAN sort ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + struct net_device *netdev = pxe->netdev; + int rc; + + DBGC ( 
pxe, "PXE %s DHCP %s\n", + pxe->name, ( sort ? "sorted" : "unsorted" ) ); + + /* Claim network devices */ + efi_snp_claim(); + + /* Initiate configuration */ + if ( ( rc = netdev_configure_all ( netdev ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not initiate configuration: %s\n", + pxe->name, strerror ( rc ) ); + goto err_configure; + } + + /* Wait for configuration to complete (or time out) */ + while ( netdev_configuration_in_progress ( netdev ) ) + step(); + + /* Report timeout if configuration failed */ + if ( ! netdev_configuration_ok ( netdev ) ) { + rc = -ETIMEDOUT; + goto err_timeout; + } + + /* Update station IP address */ + if ( ( rc = efi_pxe_ip ( pxe ) ) != 0 ) + goto err_ip; + + /* Update faked DHCP packets */ + efi_pxe_fake_all ( pxe ); + + err_ip: + err_timeout: + err_configure: + efi_snp_release(); + return EFIRC ( rc ); +} + +/** + * Perform boot server discovery + * + * @v base PXE base code protocol + * @v type Boot server type + * @v layer Boot server layer + * @v bis Use boot integrity services + * @v info Additional information + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_discover ( EFI_PXE_BASE_CODE_PROTOCOL *base, UINT16 type, UINT16 *layer, + BOOLEAN bis, EFI_PXE_BASE_CODE_DISCOVER_INFO *info ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_IP_ADDRESS *ip; + unsigned int i; + + DBGC ( pxe, "PXE %s DISCOVER type %d layer %d%s\n", + pxe->name, type, *layer, ( bis ? " bis" : "" ) ); + if ( info ) { + DBGC ( pxe, "%s%s%s%s %s", + ( info->UseMCast ? " mcast" : "" ), + ( info->UseBCast ? " bcast" : "" ), + ( info->UseUCast ? " ucast" : "" ), + ( info->MustUseList ? " list" : "" ), + efi_pxe_ip_ntoa ( pxe, &info->ServerMCastIp ) ); + for ( i = 0 ; i < info->IpCnt ; i++ ) { + ip = &info->SrvList[i].IpAddr; + DBGC ( pxe, " %d%s:%s", info->SrvList[i].Type, + ( info->SrvList[i].AcceptAnyResponse ? 
+ ":any" : "" ), efi_pxe_ip_ntoa ( pxe, ip ) ); + } + } + DBGC ( pxe, "\n" ); + + /* Not used by any bootstrap I can find to test with */ + return EFI_UNSUPPORTED; +} + +/** + * Perform (M)TFTP + * + * @v base PXE base code protocol + * @v opcode TFTP opcode + * @v data Data buffer + * @v overwrite Overwrite file + * @v len Length of data buffer + * @v blksize Block size + * @v ip Server address + * @v filename Filename + * @v info Additional information + * @v callback Pass packets to callback instead of data buffer + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_mtftp ( EFI_PXE_BASE_CODE_PROTOCOL *base, + EFI_PXE_BASE_CODE_TFTP_OPCODE opcode, VOID *data, + BOOLEAN overwrite, UINT64 *len, UINTN *blksize, + EFI_IP_ADDRESS *ip, UINT8 *filename, + EFI_PXE_BASE_CODE_MTFTP_INFO *info, BOOLEAN callback ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + int rc; + + DBGC ( pxe, "PXE %s MTFTP %d%s %p+%llx", pxe->name, opcode, + ( overwrite ? " overwrite" : "" ), data, *len ); + if ( blksize ) + DBGC ( pxe, " blksize %zd", ( ( size_t ) *blksize ) ); + DBGC ( pxe, " %s:%s", efi_pxe_ip_ntoa ( pxe, ip ), filename ); + if ( info ) { + DBGC ( pxe, " %s:%d:%d:%d:%d", + efi_pxe_ip_ntoa ( pxe, &info->MCastIp ), + info->CPort, info->SPort, info->ListenTimeout, + info->TransmitTimeout ); + } + DBGC ( pxe, "%s\n", ( callback ? " callback" : "" ) ); + + /* Fail unless operation is supported */ + if ( ! ( ( opcode == EFI_PXE_BASE_CODE_TFTP_READ_FILE ) || + ( opcode == EFI_PXE_BASE_CODE_MTFTP_READ_FILE ) ) ) { + DBGC ( pxe, "PXE %s unsupported MTFTP opcode %d\n", + pxe->name, opcode ); + rc = -ENOTSUP; + goto err_opcode; + } + + /* Claim network devices */ + efi_snp_claim(); + + /* Determine block size. Ignore the requested block size + * unless we are using callbacks, since limiting HTTP to a + * 512-byte TCP window is not sensible. + */ + pxe->blksize = ( ( callback && blksize ) ? 
*blksize : -1UL ); + + /* Initialise data transfer buffer */ + pxe->buf.data = data; + pxe->buf.len = *len; + + /* Open download */ + if ( ( rc = efi_pxe_tftp_open ( pxe, ip, + ( ( const char * ) filename ) ) ) != 0 ) + goto err_open; + + /* Wait for download to complete */ + pxe->rc = -EINPROGRESS; + while ( pxe->rc == -EINPROGRESS ) + step(); + if ( ( rc = pxe->rc ) != 0 ) { + DBGC ( pxe, "PXE %s download failed: %s\n", + pxe->name, strerror ( rc ) ); + goto err_download; + } + + err_download: + efi_pxe_tftp_close ( pxe, rc ); + err_open: + efi_snp_release(); + err_opcode: + return EFIRC ( rc ); +} + +/** + * Transmit UDP packet + * + * @v base PXE base code protocol + * @v flags Operation flags + * @v dest_ip Destination address + * @v dest_port Destination port + * @v gateway Gateway address + * @v src_ip Source address + * @v src_port Source port + * @v hdr_len Header length + * @v hdr Header data + * @v len Length + * @v data Data + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_udp_write ( EFI_PXE_BASE_CODE_PROTOCOL *base, UINT16 flags, + EFI_IP_ADDRESS *dest_ip, + EFI_PXE_BASE_CODE_UDP_PORT *dest_port, + EFI_IP_ADDRESS *gateway, EFI_IP_ADDRESS *src_ip, + EFI_PXE_BASE_CODE_UDP_PORT *src_port, + UINTN *hdr_len, VOID *hdr, UINTN *len, VOID *data ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + struct io_buffer *iobuf; + struct xfer_metadata meta; + union { + struct sockaddr_tcpip st; + struct sockaddr sa; + } dest; + union { + struct sockaddr_tcpip st; + struct sockaddr sa; + } src; + int rc; + + DBGC2 ( pxe, "PXE %s UDP WRITE ", pxe->name ); + if ( src_ip ) + DBGC2 ( pxe, "%s", efi_pxe_ip_ntoa ( pxe, src_ip ) ); + DBGC2 ( pxe, ":" ); + if ( src_port && + ( ! 
( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_PORT ) ) ) { + DBGC2 ( pxe, "%d", *src_port ); + } else { + DBGC2 ( pxe, "*" ); + } + DBGC2 ( pxe, "->%s:%d", efi_pxe_ip_ntoa ( pxe, dest_ip ), *dest_port ); + if ( gateway ) + DBGC2 ( pxe, " via %s", efi_pxe_ip_ntoa ( pxe, gateway ) ); + if ( hdr_len ) + DBGC2 ( pxe, " %p+%zx", hdr, ( ( size_t ) *hdr_len ) ); + DBGC2 ( pxe, " %p+%zx", data, ( ( size_t ) *len ) ); + if ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_MAY_FRAGMENT ) + DBGC2 ( pxe, " frag" ); + DBGC2 ( pxe, "\n" ); + + /* Open UDP connection (if applicable) */ + if ( ( rc = efi_pxe_udp_open ( pxe ) ) != 0 ) + goto err_open; + + /* Construct destination address */ + efi_pxe_ip_sockaddr ( pxe, dest_ip, &dest.sa ); + dest.st.st_port = htons ( *dest_port ); + + /* Construct source address */ + efi_pxe_ip_sockaddr ( pxe, ( src_ip ? src_ip : &mode->StationIp ), + &src.sa ); + if ( src_port && + ( ! ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_PORT ) ) ) { + src.st.st_port = htons ( *src_port ); + } else { + /* The API does not allow for a sensible concept of + * binding to a local port, so just use a random value. + */ + src.st.st_port = ( random() | htons ( 1024 ) ); + if ( src_port ) + *src_port = ntohs ( src.st.st_port ); + } + + /* Allocate I/O buffer */ + iobuf = xfer_alloc_iob ( &pxe->udp, + ( *len + ( hdr_len ? *hdr_len : 0 ) ) ); + if ( ! 
iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Populate I/O buffer */ + if ( hdr_len ) + memcpy ( iob_put ( iobuf, *hdr_len ), hdr, *hdr_len ); + memcpy ( iob_put ( iobuf, *len ), data, *len ); + + /* Construct metadata */ + memset ( &meta, 0, sizeof ( meta ) ); + meta.src = &src.sa; + meta.dest = &dest.sa; + meta.netdev = pxe->netdev; + + /* Deliver I/O buffer */ + if ( ( rc = xfer_deliver ( &pxe->udp, iob_disown ( iobuf ), + &meta ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not transmit: %s\n", + pxe->name, strerror ( rc ) ); + goto err_deliver; + } + + err_deliver: + free_iob ( iobuf ); + err_alloc: + efi_pxe_udp_schedule_close ( pxe ); + err_open: + return EFIRC ( rc ); +} + +/** + * Receive UDP packet + * + * @v base PXE base code protocol + * @v flags Operation flags + * @v dest_ip Destination address + * @v dest_port Destination port + * @v src_ip Source address + * @v src_port Source port + * @v hdr_len Header length + * @v hdr Header data + * @v len Length + * @v data Data + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_udp_read ( EFI_PXE_BASE_CODE_PROTOCOL *base, UINT16 flags, + EFI_IP_ADDRESS *dest_ip, + EFI_PXE_BASE_CODE_UDP_PORT *dest_port, + EFI_IP_ADDRESS *src_ip, + EFI_PXE_BASE_CODE_UDP_PORT *src_port, + UINTN *hdr_len, VOID *hdr, UINTN *len, VOID *data ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + struct io_buffer *iobuf; + struct efi_pxe_udp_pseudo_header *pshdr; + EFI_IP_ADDRESS *actual_dest_ip; + EFI_IP_ADDRESS *actual_src_ip; + size_t addr_len; + size_t frag_len; + int rc; + + DBGC2 ( pxe, "PXE %s UDP READ ", pxe->name ); + if ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_USE_FILTER ) { + DBGC2 ( pxe, "(filter)" ); + } else if ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_DEST_IP ) { + DBGC2 ( pxe, "*" ); + } else if ( dest_ip ) { + DBGC2 ( pxe, "%s", efi_pxe_ip_ntoa ( pxe, dest_ip ) ); + } + DBGC2 ( pxe, ":" ); + if ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_DEST_PORT ) { + DBGC2 ( pxe, "*" ); 
+ } else if ( dest_port ) { + DBGC2 ( pxe, "%d", *dest_port ); + } else { + DBGC2 ( pxe, "" ); + } + DBGC2 ( pxe, "<-" ); + if ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_IP ) { + DBGC2 ( pxe, "*" ); + } else if ( src_ip ) { + DBGC2 ( pxe, "%s", efi_pxe_ip_ntoa ( pxe, src_ip ) ); + } else { + DBGC2 ( pxe, "" ); + } + DBGC2 ( pxe, ":" ); + if ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_PORT ) { + DBGC2 ( pxe, "*" ); + } else if ( src_port ) { + DBGC2 ( pxe, "%d", *src_port ); + } else { + DBGC2 ( pxe, "" ); + } + if ( hdr_len ) + DBGC2 ( pxe, " %p+%zx", hdr, ( ( size_t ) *hdr_len ) ); + DBGC2 ( pxe, " %p+%zx\n", data, ( ( size_t ) *len ) ); + + /* Open UDP connection (if applicable) */ + if ( ( rc = efi_pxe_udp_open ( pxe ) ) != 0 ) + goto err_open; + + /* Try receiving a packet, if the queue is empty */ + if ( list_empty ( &pxe->queue ) ) + step(); + + /* Remove first packet from the queue */ + iobuf = list_first_entry ( &pxe->queue, struct io_buffer, list ); + if ( ! iobuf ) { + rc = -ETIMEDOUT; /* "no packet" */ + goto err_empty; + } + list_del ( &iobuf->list ); + + /* Strip pseudo-header */ + pshdr = iobuf->data; + addr_len = ( pshdr->net->net_addr_len ); + iob_pull ( iobuf, sizeof ( *pshdr ) ); + actual_dest_ip = iobuf->data; + iob_pull ( iobuf, addr_len ); + actual_src_ip = iobuf->data; + iob_pull ( iobuf, addr_len ); + DBGC2 ( pxe, "PXE %s UDP RX %s:%d", pxe->name, + pshdr->net->ntoa ( actual_dest_ip ), pshdr->dest_port ); + DBGC2 ( pxe, "<-%s:%d len %#zx\n", pshdr->net->ntoa ( actual_src_ip ), + pshdr->src_port, iob_len ( iobuf ) ); + + /* Filter based on network-layer protocol */ + if ( pshdr->net != pxe->net ) { + DBGC2 ( pxe, "PXE %s filtered out %s packet\n", + pxe->name, pshdr->net->name ); + rc = -ETIMEDOUT; /* "no packet" */ + goto err_filter; + } + + /* Filter based on port numbers */ + if ( ! 
( ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_DEST_PORT ) || + ( dest_port && ( *dest_port == pshdr->dest_port ) ) ) ) { + DBGC2 ( pxe, "PXE %s filtered out destination port %d\n", + pxe->name, pshdr->dest_port ); + rc = -ETIMEDOUT; /* "no packet" */ + goto err_filter; + } + if ( ! ( ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_PORT ) || + ( src_port && ( *src_port == pshdr->src_port ) ) ) ) { + DBGC2 ( pxe, "PXE %s filtered out source port %d\n", + pxe->name, pshdr->src_port ); + rc = -ETIMEDOUT; /* "no packet" */ + goto err_filter; + } + + /* Filter based on source IP address */ + if ( ! ( ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_SRC_IP ) || + ( src_ip && + ( memcmp ( src_ip, actual_src_ip, addr_len ) == 0 ) ) ) ) { + DBGC2 ( pxe, "PXE %s filtered out source IP %s\n", + pxe->name, pshdr->net->ntoa ( actual_src_ip ) ); + rc = -ETIMEDOUT; /* "no packet" */ + goto err_filter; + } + + /* Filter based on destination IP address */ + if ( ! ( ( ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_USE_FILTER ) && + efi_pxe_ip_filter ( pxe, actual_dest_ip ) ) || + ( ( ! 
( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_USE_FILTER ) ) && + ( ( flags & EFI_PXE_BASE_CODE_UDP_OPFLAGS_ANY_DEST_IP ) || + ( dest_ip && ( memcmp ( dest_ip, actual_dest_ip, + addr_len ) == 0 ) ) ) ) ) ) { + DBGC2 ( pxe, "PXE %s filtered out destination IP %s\n", + pxe->name, pshdr->net->ntoa ( actual_dest_ip ) ); + rc = -ETIMEDOUT; /* "no packet" */ + goto err_filter; + } + + /* Fill in addresses and port numbers */ + if ( dest_ip ) + memcpy ( dest_ip, actual_dest_ip, addr_len ); + if ( dest_port ) + *dest_port = pshdr->dest_port; + if ( src_ip ) + memcpy ( src_ip, actual_src_ip, addr_len ); + if ( src_port ) + *src_port = pshdr->src_port; + + /* Fill in header, if applicable */ + if ( hdr_len ) { + frag_len = iob_len ( iobuf ); + if ( frag_len > *hdr_len ) + frag_len = *hdr_len; + memcpy ( hdr, iobuf->data, frag_len ); + iob_pull ( iobuf, frag_len ); + *hdr_len = frag_len; + } + + /* Fill in data buffer */ + frag_len = iob_len ( iobuf ); + if ( frag_len > *len ) + frag_len = *len; + memcpy ( data, iobuf->data, frag_len ); + iob_pull ( iobuf, frag_len ); + *len = frag_len; + + /* Check for overflow */ + if ( iob_len ( iobuf ) ) { + rc = -ERANGE; + goto err_too_short; + } + + /* Success */ + rc = 0; + + err_too_short: + err_filter: + free_iob ( iobuf ); + err_empty: + efi_pxe_udp_schedule_close ( pxe ); + err_open: + return EFIRC ( rc ); +} + +/** + * Set receive filter + * + * @v base PXE base code protocol + * @v filter Receive filter + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_set_ip_filter ( EFI_PXE_BASE_CODE_PROTOCOL *base, + EFI_PXE_BASE_CODE_IP_FILTER *filter ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + unsigned int i; + + DBGC ( pxe, "PXE %s SET IP FILTER %02x", + pxe->name, filter->Filters ); + for ( i = 0 ; i < filter->IpCnt ; i++ ) { + DBGC ( pxe, " %s", + efi_pxe_ip_ntoa ( pxe, &filter->IpList[i] ) ); + } + DBGC ( pxe, "\n" ); + + /* Update filter */ + 
memcpy ( &mode->IpFilter, filter, sizeof ( mode->IpFilter ) ); + + return 0; +} + +/** + * Resolve MAC address + * + * @v base PXE base code protocol + * @v ip IP address + * @v mac MAC address to fill in + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI efi_pxe_arp ( EFI_PXE_BASE_CODE_PROTOCOL *base, + EFI_IP_ADDRESS *ip, + EFI_MAC_ADDRESS *mac ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + + DBGC ( pxe, "PXE %s ARP %s %p\n", + pxe->name, efi_pxe_ip_ntoa ( pxe, ip ), mac ); + + /* Not used by any bootstrap I can find to test with */ + return EFI_UNSUPPORTED; +} + +/** + * Set parameters + * + * @v base PXE base code protocol + * @v autoarp Automatic ARP packet generation + * @v sendguid Send GUID as client hardware address + * @v ttl IP time to live + * @v tos IP type of service + * @v callback Make callbacks + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_set_parameters ( EFI_PXE_BASE_CODE_PROTOCOL *base, + BOOLEAN *autoarp, BOOLEAN *sendguid, UINT8 *ttl, + UINT8 *tos, BOOLEAN *callback ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + + DBGC ( pxe, "PXE %s SET PARAMETERS", pxe->name ); + if ( autoarp ) + DBGC ( pxe, " %s", ( *autoarp ? "autoarp" : "noautoarp" ) ); + if ( sendguid ) + DBGC ( pxe, " %s", ( *sendguid ? "sendguid" : "sendmac" ) ); + if ( ttl ) + DBGC ( pxe, " ttl %d", *ttl ); + if ( tos ) + DBGC ( pxe, " tos %d", *tos ); + if ( callback ) { + DBGC ( pxe, " %s", + ( *callback ? 
"callback" : "nocallback" ) ); + } + DBGC ( pxe, "\n" ); + + /* Update parameters */ + if ( autoarp ) + mode->AutoArp = *autoarp; + if ( sendguid ) + mode->SendGUID = *sendguid; + if ( ttl ) + mode->TTL = *ttl; + if ( tos ) + mode->ToS = *tos; + if ( callback ) + mode->MakeCallbacks = *callback; + + return 0; +} + +/** + * Set IP address + * + * @v base PXE base code protocol + * @v ip IP address + * @v netmask Subnet mask + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_set_station_ip ( EFI_PXE_BASE_CODE_PROTOCOL *base, + EFI_IP_ADDRESS *ip, EFI_IP_ADDRESS *netmask ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + + DBGC ( pxe, "PXE %s SET STATION IP ", pxe->name ); + if ( ip ) + DBGC ( pxe, "%s", efi_pxe_ip_ntoa ( pxe, ip ) ); + if ( netmask ) + DBGC ( pxe, "/%s", efi_pxe_ip_ntoa ( pxe, netmask ) ); + DBGC ( pxe, "\n" ); + + /* Update IP address and netmask */ + if ( ip ) + memcpy ( &mode->StationIp, ip, sizeof ( mode->StationIp ) ); + if ( netmask ) + memcpy ( &mode->SubnetMask, netmask, sizeof (mode->SubnetMask)); + + return 0; +} + +/** + * Update cached DHCP packets + * + * @v base PXE base code protocol + * @v dhcpdisc_ok DHCPDISCOVER is valid + * @v dhcpack_ok DHCPACK received + * @v proxyoffer_ok ProxyDHCPOFFER received + * @v pxebsdisc_ok PxeBsDISCOVER valid + * @v pxebsack_ok PxeBsACK received + * @v pxebsbis_ok PxeBsBIS received + * @v dhcpdisc DHCPDISCOVER packet + * @v dhcpack DHCPACK packet + * @v proxyoffer ProxyDHCPOFFER packet + * @v pxebsdisc PxeBsDISCOVER packet + * @v pxebsack PxeBsACK packet + * @v pxebsbis PxeBsBIS packet + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_pxe_set_packets ( EFI_PXE_BASE_CODE_PROTOCOL *base, BOOLEAN *dhcpdisc_ok, + BOOLEAN *dhcpack_ok, BOOLEAN *proxyoffer_ok, + BOOLEAN *pxebsdisc_ok, BOOLEAN *pxebsack_ok, + BOOLEAN *pxebsbis_ok, EFI_PXE_BASE_CODE_PACKET *dhcpdisc, + EFI_PXE_BASE_CODE_PACKET *dhcpack, + 
EFI_PXE_BASE_CODE_PACKET *proxyoffer, + EFI_PXE_BASE_CODE_PACKET *pxebsdisc, + EFI_PXE_BASE_CODE_PACKET *pxebsack, + EFI_PXE_BASE_CODE_PACKET *pxebsbis ) { + struct efi_pxe *pxe = container_of ( base, struct efi_pxe, base ); + EFI_PXE_BASE_CODE_MODE *mode = &pxe->mode; + + DBGC ( pxe, "PXE %s SET PACKETS\n", pxe->name ); + + /* Update fake packet flags */ + if ( dhcpdisc_ok ) + mode->DhcpDiscoverValid = *dhcpdisc_ok; + if ( dhcpack_ok ) + mode->DhcpAckReceived = *dhcpack_ok; + if ( proxyoffer_ok ) + mode->ProxyOfferReceived = *proxyoffer_ok; + if ( pxebsdisc_ok ) + mode->PxeDiscoverValid = *pxebsdisc_ok; + if ( pxebsack_ok ) + mode->PxeReplyReceived = *pxebsack_ok; + if ( pxebsbis_ok ) + mode->PxeBisReplyReceived = *pxebsbis_ok; + + /* Update fake packet contents */ + if ( dhcpdisc ) + memcpy ( &mode->DhcpDiscover, dhcpdisc, sizeof ( *dhcpdisc ) ); + if ( dhcpack ) + memcpy ( &mode->DhcpAck, dhcpack, sizeof ( *dhcpack ) ); + if ( proxyoffer ) + memcpy ( &mode->ProxyOffer, proxyoffer, sizeof ( *proxyoffer )); + if ( pxebsdisc ) + memcpy ( &mode->PxeDiscover, pxebsdisc, sizeof ( *pxebsdisc ) ); + if ( pxebsack ) + memcpy ( &mode->PxeReply, pxebsack, sizeof ( *pxebsack ) ); + if ( pxebsbis ) + memcpy ( &mode->PxeBisReply, pxebsbis, sizeof ( *pxebsbis ) ); + + return 0; +} + +/** PXE base code protocol */ +static EFI_PXE_BASE_CODE_PROTOCOL efi_pxe_base_code_protocol = { + .Revision = EFI_PXE_BASE_CODE_PROTOCOL_REVISION, + .Start = efi_pxe_start, + .Stop = efi_pxe_stop, + .Dhcp = efi_pxe_dhcp, + .Discover = efi_pxe_discover, + .Mtftp = efi_pxe_mtftp, + .UdpWrite = efi_pxe_udp_write, + .UdpRead = efi_pxe_udp_read, + .SetIpFilter = efi_pxe_set_ip_filter, + .Arp = efi_pxe_arp, + .SetParameters = efi_pxe_set_parameters, + .SetStationIp = efi_pxe_set_station_ip, + .SetPackets = efi_pxe_set_packets, +}; + +/****************************************************************************** + * + * Apple NetBoot protocol + * + 
****************************************************************************** + */ + +/** + * Get DHCP/BSDP response + * + * @v packet Packet + * @v len Length of data buffer + * @v data Data buffer + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_apple_get_response ( EFI_PXE_BASE_CODE_PACKET *packet, UINTN *len, + VOID *data ) { + + /* Check length */ + if ( *len < sizeof ( *packet ) ) { + *len = sizeof ( *packet ); + return EFI_BUFFER_TOO_SMALL; + } + + /* Copy packet */ + memcpy ( data, packet, sizeof ( *packet ) ); + *len = sizeof ( *packet ); + + return EFI_SUCCESS; +} + +/** + * Get DHCP response + * + * @v apple Apple NetBoot protocol + * @v len Length of data buffer + * @v data Data buffer + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_apple_get_dhcp_response ( EFI_APPLE_NET_BOOT_PROTOCOL *apple, + UINTN *len, VOID *data ) { + struct efi_pxe *pxe = container_of ( apple, struct efi_pxe, apple ); + + return efi_apple_get_response ( &pxe->mode.DhcpAck, len, data ); +} + +/** + * Get BSDP response + * + * @v apple Apple NetBoot protocol + * @v len Length of data buffer + * @v data Data buffer + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_apple_get_bsdp_response ( EFI_APPLE_NET_BOOT_PROTOCOL *apple, + UINTN *len, VOID *data ) { + struct efi_pxe *pxe = container_of ( apple, struct efi_pxe, apple ); + + return efi_apple_get_response ( &pxe->mode.PxeReply, len, data ); +} + +/** Apple NetBoot protocol */ +static EFI_APPLE_NET_BOOT_PROTOCOL efi_apple_net_boot_protocol = { + .GetDhcpResponse = efi_apple_get_dhcp_response, + .GetBsdpResponse = efi_apple_get_bsdp_response, +}; + +/****************************************************************************** + * + * Installer + * + ****************************************************************************** + */ + +/** + * Install PXE base code protocol + * + * @v handle EFI handle + * @v netdev Underlying network device + * @ret rc Return status code + */ 
+int efi_pxe_install ( EFI_HANDLE handle, struct net_device *netdev ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct tcpip_net_protocol *ipv6 = tcpip_net_protocol ( AF_INET6 ); + struct efi_pxe *pxe; + struct in_addr ip; + BOOLEAN use_ipv6; + int leak = 0; + EFI_STATUS efirc; + int rc; + + /* Allocate and initialise structure */ + pxe = zalloc ( sizeof ( *pxe ) ); + if ( ! pxe ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &pxe->refcnt, efi_pxe_free ); + pxe->netdev = netdev_get ( netdev ); + pxe->name = netdev->name; + pxe->handle = handle; + memcpy ( &pxe->base, &efi_pxe_base_code_protocol, sizeof ( pxe->base )); + pxe->base.Mode = &pxe->mode; + memcpy ( &pxe->apple, &efi_apple_net_boot_protocol, + sizeof ( pxe->apple ) ); + pxe->buf.op = &efi_pxe_buf_operations; + intf_init ( &pxe->tftp, &efi_pxe_tftp_desc, &pxe->refcnt ); + intf_init ( &pxe->udp, &efi_pxe_udp_desc, &pxe->refcnt ); + INIT_LIST_HEAD ( &pxe->queue ); + process_init_stopped ( &pxe->process, &efi_pxe_process_desc, + &pxe->refcnt ); + + /* Crude heuristic: assume that we prefer to use IPv4 if we + * have an IPv4 address for the network device, otherwise + * prefer IPv6 (if available). + */ + fetch_ipv4_setting ( netdev_settings ( netdev ), &ip_setting, &ip ); + use_ipv6 = ( ip.s_addr ? 
FALSE : ( ipv6 != NULL ) ); + + /* Start base code */ + efi_pxe_start ( &pxe->base, use_ipv6 ); + + /* Install PXE base code protocol */ + if ( ( efirc = bs->InstallMultipleProtocolInterfaces ( + &handle, + &efi_pxe_base_code_protocol_guid, &pxe->base, + &efi_apple_net_boot_protocol_guid, &pxe->apple, + NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( pxe, "PXE %s could not install base code protocol: %s\n", + pxe->name, strerror ( rc ) ); + goto err_install_protocol; + } + + /* Transfer reference to list and return */ + list_add_tail ( &pxe->list, &efi_pxes ); + DBGC ( pxe, "PXE %s installed for %s\n", + pxe->name, efi_handle_name ( handle ) ); + return 0; + + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( + handle, + &efi_pxe_base_code_protocol_guid, &pxe->base, + &efi_apple_net_boot_protocol_guid, &pxe->apple, + NULL ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not uninstall: %s\n", + pxe->name, strerror ( -EEFI ( efirc ) ) ); + efi_nullify_pxe ( &pxe->base ); + efi_nullify_apple ( &pxe->apple ); + leak = 1; + } + err_install_protocol: + if ( ! leak ) + ref_put ( &pxe->refcnt ); + err_alloc: + if ( leak ) + DBGC ( pxe, "PXE %s nullified and leaked\n", pxe->name ); + return rc; +} + +/** + * Uninstall PXE base code protocol + * + * @v handle EFI handle + */ +void efi_pxe_uninstall ( EFI_HANDLE handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_pxe *pxe; + int leak = 0; + EFI_STATUS efirc; + + /* Locate PXE base code */ + pxe = efi_pxe_find ( handle ); + if ( ! 
handle ) { + DBG ( "PXE could not find base code for %s\n", + efi_handle_name ( handle ) ); + return; + } + + /* Stop base code */ + efi_pxe_stop ( &pxe->base ); + + /* Uninstall PXE base code protocol */ + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( + handle, + &efi_pxe_base_code_protocol_guid, &pxe->base, + &efi_apple_net_boot_protocol_guid, &pxe->apple, + NULL ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not uninstall: %s\n", + pxe->name, strerror ( -EEFI ( efirc ) ) ); + efi_nullify_pxe ( &pxe->base ); + efi_nullify_apple ( &pxe->apple ); + leak = 1; + } + + /* Remove from list and drop list's reference */ + list_del ( &pxe->list ); + if ( ! leak ) + ref_put ( &pxe->refcnt ); + + /* Report leakage, if applicable */ + if ( leak ) + DBGC ( pxe, "PXE %s nullified and leaked\n", pxe->name ); +} diff --git a/src/interface/efi/efi_time.c b/src/interface/efi/efi_time.c new file mode 100644 index 00000000..983a0ef5 --- /dev/null +++ b/src/interface/efi/efi_time.c @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** @file + * + * EFI time source + * + */ + +/** + * Get current time in seconds + * + * @ret time Time, in seconds + */ +static time_t efi_get_time ( void ) { + EFI_RUNTIME_SERVICES *rs = efi_systab->RuntimeServices; + EFI_TIME time; + struct tm tm; + EFI_STATUS efirc; + int rc; + + /* Get current time and date */ + if ( ( efirc = rs->GetTime ( &time, NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( rs, "EFITIME could not get system time: %s\n", + strerror ( rc ) ); + /* Nothing meaningful we can return */ + return 0; + } + + /* Construct broken-down time */ + memset ( &tm, 0, sizeof ( tm ) ); + tm.tm_sec = time.Second; + tm.tm_min = time.Minute; + tm.tm_hour = time.Hour; + tm.tm_mday = time.Day; + tm.tm_mon = ( time.Month - 1 ); + tm.tm_year = ( time.Year - 1900 ); + DBGC ( rs, "EFITIME is %04d-%02d-%02d %02d:%02d:%02d\n", + ( tm.tm_year + 1900 ), ( tm.tm_mon + 1 ), + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec ); + + /* Convert to seconds since the Epoch */ + return mktime ( &tm ); +} + +PROVIDE_TIME ( efi, time_now, efi_get_time ); diff --git a/src/interface/efi/efi_usb.c b/src/interface/efi/efi_usb.c new file mode 100644 index 00000000..55b5bc88 --- /dev/null +++ b/src/interface/efi/efi_usb.c @@ -0,0 +1,1339 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI USB I/O PROTOCOL + * + */ + +/** + * Transcribe data direction (for debugging) + * + * @v direction Data direction + * @ret text Transcribed data direction + */ +static const char * efi_usb_direction_name ( EFI_USB_DATA_DIRECTION direction ){ + + switch ( direction ) { + case EfiUsbDataIn: return "in"; + case EfiUsbDataOut: return "out"; + case EfiUsbNoData: return "none"; + default: return ""; + } +} + +/****************************************************************************** + * + * Endpoints + * + ****************************************************************************** + */ + +/** + * Poll USB bus (from endpoint event timer) + * + * @v event EFI event + * @v context EFI USB endpoint + */ +static VOID EFIAPI efi_usb_timer ( EFI_EVENT event __unused, + VOID *context ) { + struct efi_usb_endpoint *usbep = context; + struct usb_function *func = usbep->usbintf->usbdev->func; + + /* Poll bus */ + usb_poll ( func->usb->port->hub->bus ); + + /* Refill endpoint */ + if ( usbep->ep.open ) + usb_refill ( &usbep->ep ); +} + +/** + * Get endpoint MTU + * + * @v usbintf EFI USB interface + * @v endpoint Endpoint address + * @ret mtu Endpoint MTU, or negative error + */ +static int efi_usb_mtu ( struct efi_usb_interface *usbintf, + unsigned int endpoint ) { + struct efi_usb_device *usbdev = usbintf->usbdev; + struct usb_interface_descriptor *interface; + 
struct usb_endpoint_descriptor *desc; + + /* Locate cached interface descriptor */ + interface = usb_interface_descriptor ( usbdev->config, + usbintf->interface, + usbintf->alternate ); + if ( ! interface ) { + DBGC ( usbdev, "USBDEV %s alt %d has no interface descriptor\n", + usbintf->name, usbintf->alternate ); + return -ENOENT; + } + + /* Locate and copy cached endpoint descriptor */ + for_each_interface_descriptor ( desc, usbdev->config, interface ) { + if ( ( desc->header.type == USB_ENDPOINT_DESCRIPTOR ) && + ( desc->endpoint == endpoint ) ) + return USB_ENDPOINT_MTU ( le16_to_cpu ( desc->sizes ) ); + } + + DBGC ( usbdev, "USBDEV %s alt %d ep %02x has no descriptor\n", + usbintf->name, usbintf->alternate, endpoint ); + return -ENOENT; +} + +/** + * Check if endpoint is open + * + * @v usbintf EFI USB interface + * @v endpoint Endpoint address + * @ret is_open Endpoint is open + */ +static int efi_usb_is_open ( struct efi_usb_interface *usbintf, + unsigned int endpoint ) { + unsigned int index = USB_ENDPOINT_IDX ( endpoint ); + struct efi_usb_endpoint *usbep = usbintf->endpoint[index]; + + return ( usbep && usbep->ep.open ); +} + +/** + * Open endpoint + * + * @v usbintf EFI USB interface + * @v endpoint Endpoint address + * @v attributes Endpoint attributes + * @v interval Interval (in milliseconds) + * @v driver Driver operations + * @ret rc Return status code + */ +static int efi_usb_open ( struct efi_usb_interface *usbintf, + unsigned int endpoint, unsigned int attributes, + unsigned int interval, + struct usb_endpoint_driver_operations *driver ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_device *usbdev = usbintf->usbdev; + struct efi_usb_endpoint *usbep; + unsigned int index = USB_ENDPOINT_IDX ( endpoint ); + int mtu; + EFI_STATUS efirc; + int rc; + + /* Allocate structure, if needed. 
Once allocated, we leave + * the endpoint structure in place until the device is + * removed, to work around external UEFI code that closes the + * endpoint at illegal times. + */ + usbep = usbintf->endpoint[index]; + if ( ! usbep ) { + usbep = zalloc ( sizeof ( *usbep ) ); + if ( ! usbep ) { + rc = -ENOMEM; + goto err_alloc; + } + usbep->usbintf = usbintf; + usbintf->endpoint[index] = usbep; + } + + /* Get endpoint MTU */ + mtu = efi_usb_mtu ( usbintf, endpoint ); + if ( mtu < 0 ) { + rc = mtu; + goto err_mtu; + } + + /* Allocate and initialise structure */ + usb_endpoint_init ( &usbep->ep, usbdev->func->usb, driver ); + usb_endpoint_describe ( &usbep->ep, endpoint, attributes, mtu, 0, + ( interval << 3 /* microframes */ ) ); + + /* Open endpoint */ + if ( ( rc = usb_endpoint_open ( &usbep->ep ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s %s could not open: %s\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ), + strerror ( rc ) ); + goto err_open; + } + DBGC ( usbdev, "USBDEV %s %s opened\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ) ); + + /* Create event */ + if ( ( efirc = bs->CreateEvent ( ( EVT_TIMER | EVT_NOTIFY_SIGNAL ), + TPL_CALLBACK, efi_usb_timer, usbep, + &usbep->event ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbdev, "USBDEV %s %s could not create event: %s\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ), + strerror ( rc ) ); + goto err_event; + } + + return 0; + + bs->CloseEvent ( usbep->event ); + err_event: + usb_endpoint_close ( &usbep->ep ); + err_open: + err_mtu: + err_alloc: + return rc; +} + +/** + * Close endpoint + * + * @v usbep EFI USB endpoint + */ +static void efi_usb_close ( struct efi_usb_endpoint *usbep ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_interface *usbintf = usbep->usbintf; + struct efi_usb_device *usbdev = usbintf->usbdev; + unsigned int index = USB_ENDPOINT_IDX ( usbep->ep.address ); + + /* Sanity check */ + assert ( usbintf->endpoint[index] == usbep ); + + /* Cancel timer (if 
applicable) and close event */ + bs->SetTimer ( usbep->event, TimerCancel, 0 ); + bs->CloseEvent ( usbep->event ); + + /* Close endpoint */ + usb_endpoint_close ( &usbep->ep ); + DBGC ( usbdev, "USBDEV %s %s closed\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ) ); +} + +/** + * Close all endpoints + * + * @v usbintf EFI USB interface + */ +static void efi_usb_close_all ( struct efi_usb_interface *usbintf ) { + struct efi_usb_endpoint *usbep; + unsigned int i; + + for ( i = 0 ; i < ( sizeof ( usbintf->endpoint ) / + sizeof ( usbintf->endpoint[0] ) ) ; i++ ) { + usbep = usbintf->endpoint[i]; + if ( usbep && usbep->ep.open ) + efi_usb_close ( usbep ); + } +} + +/** + * Free all endpoints + * + * @v usbintf EFI USB interface + */ +static void efi_usb_free_all ( struct efi_usb_interface *usbintf ) { + struct efi_usb_endpoint *usbep; + unsigned int i; + + for ( i = 0 ; i < ( sizeof ( usbintf->endpoint ) / + sizeof ( usbintf->endpoint[0] ) ) ; i++ ) { + usbep = usbintf->endpoint[i]; + if ( usbep ) { + assert ( ! 
usbep->ep.open ); + free ( usbep ); + usbintf->endpoint[i] = NULL; + } + } +} + +/** + * Complete synchronous transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void efi_usb_sync_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf __unused, int rc ) { + struct efi_usb_endpoint *usbep = + container_of ( ep, struct efi_usb_endpoint, ep ); + + /* Record completion status */ + usbep->rc = rc; +} + +/** Synchronous endpoint operations */ +static struct usb_endpoint_driver_operations efi_usb_sync_driver = { + .complete = efi_usb_sync_complete, +}; + +/** + * Perform synchronous transfer + * + * @v usbintf USB endpoint + * @v endpoint Endpoint address + * @v attributes Endpoint attributes + * @v timeout Timeout (in milliseconds) + * @v data Data buffer + * @v len Length of data buffer + * @ret rc Return status code + */ +static int efi_usb_sync_transfer ( struct efi_usb_interface *usbintf, + unsigned int endpoint, + unsigned int attributes, + unsigned int timeout, + void *data, size_t *len ) { + struct efi_usb_device *usbdev = usbintf->usbdev; + struct efi_usb_endpoint *usbep; + struct io_buffer *iobuf; + unsigned int index = USB_ENDPOINT_IDX ( endpoint ); + unsigned int i; + int rc; + + /* Open endpoint, if applicable */ + if ( ( ! efi_usb_is_open ( usbintf, endpoint ) ) && + ( ( rc = efi_usb_open ( usbintf, endpoint, attributes, 0, + &efi_usb_sync_driver ) ) != 0 ) ) { + goto err_open; + } + usbep = usbintf->endpoint[index]; + + /* Allocate and construct I/O buffer */ + iobuf = alloc_iob ( *len ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + iob_put ( iobuf, *len ); + if ( ! 
( endpoint & USB_ENDPOINT_IN ) ) + memcpy ( iobuf->data, data, *len ); + + /* Initialise completion status */ + usbep->rc = -EINPROGRESS; + + /* Enqueue transfer */ + if ( ( rc = usb_stream ( &usbep->ep, iobuf, 0 ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s %s could not enqueue: %s\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ), + strerror ( rc ) ); + goto err_stream; + } + + /* Wait for completion */ + rc = -ETIMEDOUT; + for ( i = 0 ; ( ( timeout == 0 ) || ( i < timeout ) ) ; i++ ) { + + /* Poll bus */ + usb_poll ( usbdev->func->usb->port->hub->bus ); + + /* Check for completion */ + if ( usbep->rc != -EINPROGRESS ) { + rc = usbep->rc; + break; + } + + /* Delay */ + mdelay ( 1 ); + } + + /* Check for errors */ + if ( rc != 0 ) { + DBGC ( usbdev, "USBDEV %s %s failed: %s\n", usbintf->name, + usb_endpoint_name ( &usbep->ep ), strerror ( rc ) ); + goto err_completion; + } + + /* Copy completion to data buffer, if applicable */ + assert ( iob_len ( iobuf ) <= *len ); + if ( endpoint & USB_ENDPOINT_IN ) + memcpy ( data, iobuf->data, iob_len ( iobuf ) ); + *len = iob_len ( iobuf ); + + /* Free I/O buffer */ + free_iob ( iobuf ); + + /* Leave endpoint open */ + return 0; + + err_completion: + err_stream: + free_iob ( iobuf ); + err_alloc: + efi_usb_close ( usbep ); + err_open: + return EFIRC ( rc ); +} + +/** + * Complete asynchronous transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void efi_usb_async_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct efi_usb_endpoint *usbep = + container_of ( ep, struct efi_usb_endpoint, ep ); + UINT32 status; + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! ep->open ) + goto drop; + + /* Construct status */ + status = ( ( rc == 0 ) ? 
0 : EFI_USB_ERR_SYSTEM ); + + /* Report completion */ + usbep->callback ( iobuf->data, iob_len ( iobuf ), usbep->context, + status ); + + drop: + /* Recycle or free I/O buffer */ + if ( usbep->ep.open ) { + usb_recycle ( &usbep->ep, iobuf ); + } else { + free_iob ( iobuf ); + } +} + +/** Asynchronous endpoint operations */ +static struct usb_endpoint_driver_operations efi_usb_async_driver = { + .complete = efi_usb_async_complete, +}; + +/** + * Start asynchronous transfer + * + * @v usbintf EFI USB interface + * @v endpoint Endpoint address + * @v interval Interval (in milliseconds) + * @v len Transfer length + * @v callback Callback function + * @v context Context for callback function + * @ret rc Return status code + */ +static int efi_usb_async_start ( struct efi_usb_interface *usbintf, + unsigned int endpoint, unsigned int interval, + size_t len, + EFI_ASYNC_USB_TRANSFER_CALLBACK callback, + void *context ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_device *usbdev = usbintf->usbdev; + struct efi_usb_endpoint *usbep; + unsigned int index = USB_ENDPOINT_IDX ( endpoint ); + EFI_STATUS efirc; + int rc; + + /* Fail if endpoint is already open */ + if ( efi_usb_is_open ( usbintf, endpoint ) ) { + rc = -EINVAL; + goto err_already_open; + } + + /* Open endpoint */ + if ( ( rc = efi_usb_open ( usbintf, endpoint, + USB_ENDPOINT_ATTR_INTERRUPT, interval, + &efi_usb_async_driver ) ) != 0 ) + goto err_open; + usbep = usbintf->endpoint[index]; + + /* Record callback parameters */ + usbep->callback = callback; + usbep->context = context; + + /* Prefill endpoint */ + usb_refill_init ( &usbep->ep, 0, len, EFI_USB_ASYNC_FILL ); + if ( ( rc = usb_prefill ( &usbep->ep ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s %s could not prefill: %s\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ), + strerror ( rc ) ); + goto err_prefill; + } + + /* Start timer */ + if ( ( efirc = bs->SetTimer ( usbep->event, TimerPeriodic, + ( interval * 10000 ) ) ) != 0 ) { + rc 
= -EEFI ( efirc ); + DBGC ( usbdev, "USBDEV %s %s could not set timer: %s\n", + usbintf->name, usb_endpoint_name ( &usbep->ep ), + strerror ( rc ) ); + goto err_timer; + } + + return 0; + + bs->SetTimer ( usbep->event, TimerCancel, 0 ); + err_timer: + err_prefill: + efi_usb_close ( usbep ); + err_open: + err_already_open: + return rc; +} + +/** + * Stop asynchronous transfer + * + * @v usbintf EFI USB interface + * @v endpoint Endpoint address + */ +static void efi_usb_async_stop ( struct efi_usb_interface *usbintf, + unsigned int endpoint ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_endpoint *usbep; + unsigned int index = USB_ENDPOINT_IDX ( endpoint ); + + /* Do nothing if endpoint is already closed */ + if ( ! efi_usb_is_open ( usbintf, endpoint ) ) + return; + usbep = usbintf->endpoint[index]; + + /* Stop timer */ + bs->SetTimer ( usbep->event, TimerCancel, 0 ); + + /* Close endpoint */ + efi_usb_close ( usbep ); +} + +/****************************************************************************** + * + * USB I/O protocol + * + ****************************************************************************** + */ + +/** + * Perform control transfer + * + * @v usbio USB I/O protocol + * @v packet Setup packet + * @v direction Data direction + * @v timeout Timeout (in milliseconds) + * @v data Data buffer + * @v len Length of data + * @ret status Transfer status + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio, + EFI_USB_DEVICE_REQUEST *packet, + EFI_USB_DATA_DIRECTION direction, + UINT32 timeout, VOID *data, UINTN len, + UINT32 *status ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + unsigned int request = ( packet->RequestType | + USB_REQUEST_TYPE ( packet->Request ) ); + unsigned int value = le16_to_cpu ( 
packet->Value ); + unsigned int index = le16_to_cpu ( packet->Index ); + EFI_TPL saved_tpl; + int rc; + + DBGC2 ( usbdev, "USBDEV %s control %04x:%04x:%04x:%04x %s %dms " + "%p+%zx\n", usbintf->name, request, value, index, + le16_to_cpu ( packet->Length ), + efi_usb_direction_name ( direction ), timeout, data, + ( ( size_t ) len ) ); + + /* Raise TPL */ + saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Clear status */ + *status = 0; + + /* Block attempts to change the device configuration, since + * this is logically impossible to do given the constraints of + * the EFI_USB_IO_PROTOCOL design. + */ + if ( ( request == USB_SET_CONFIGURATION ) && + ( value != usbdev->config->config ) ) { + DBGC ( usbdev, "USBDEV %s cannot set configuration %d: not " + "logically possible\n", usbintf->name, index ); + rc = -ENOTSUP; + goto err_change_config; + } + + /* If we are selecting a new alternate setting then close all + * open endpoints. + */ + if ( ( request == USB_SET_INTERFACE ) && + ( value != usbintf->alternate ) ) + efi_usb_close_all ( usbintf ); + + /* Issue control transfer */ + if ( ( rc = usb_control ( usbdev->func->usb, request, value, index, + data, len ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s control %04x:%04x:%04x:%04x %p+%zx " + "failed: %s\n", usbintf->name, request, value, index, + le16_to_cpu ( packet->Length ), data, ( ( size_t ) len ), + strerror ( rc ) ); + *status = EFI_USB_ERR_SYSTEM; + goto err_control; + } + + /* Update alternate setting, if applicable */ + if ( request == USB_SET_INTERFACE ) { + usbintf->alternate = value; + DBGC ( usbdev, "USBDEV %s alt %d selected\n", + usbintf->name, usbintf->alternate ); + } + + err_control: + err_change_config: + bs->RestoreTPL ( saved_tpl ); + return EFIRC ( rc ); +} + +/** + * Perform bulk transfer + * + * @v usbio USB I/O protocol + * @v endpoint Endpoint address + * @v data Data buffer + * @v len Length of data + * @v timeout Timeout (in milliseconds) + * @ret status Transfer status + * @ret efirc EFI 
status code + */ +static EFI_STATUS EFIAPI +efi_usb_bulk_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, VOID *data, + UINTN *len, UINTN timeout, UINT32 *status ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + size_t actual = *len; + EFI_TPL saved_tpl; + int rc; + + DBGC2 ( usbdev, "USBDEV %s bulk %s %p+%zx %dms\n", usbintf->name, + ( ( endpoint & USB_ENDPOINT_IN ) ? "IN" : "OUT" ), data, + ( ( size_t ) *len ), ( ( unsigned int ) timeout ) ); + + /* Raise TPL */ + saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Clear status */ + *status = 0; + + /* Perform synchronous transfer */ + if ( ( rc = efi_usb_sync_transfer ( usbintf, endpoint, + USB_ENDPOINT_ATTR_BULK, timeout, + data, &actual ) ) != 0 ) { + /* Assume that any error represents a timeout */ + *status = EFI_USB_ERR_TIMEOUT; + goto err_transfer; + } + + err_transfer: + bs->RestoreTPL ( saved_tpl ); + return EFIRC ( rc ); +} + +/** + * Perform synchronous interrupt transfer + * + * @v usbio USB I/O protocol + * @v endpoint Endpoint address + * @v data Data buffer + * @v len Length of data + * @v timeout Timeout (in milliseconds) + * @ret status Transfer status + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_sync_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, + VOID *data, UINTN *len, UINTN timeout, + UINT32 *status ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + size_t actual = *len; + EFI_TPL saved_tpl; + int rc; + + DBGC2 ( usbdev, "USBDEV %s sync intr %s %p+%zx %dms\n", usbintf->name, + ( ( endpoint & USB_ENDPOINT_IN ) ? 
"IN" : "OUT" ), data, + ( ( size_t ) *len ), ( ( unsigned int ) timeout ) ); + + /* Raise TPL */ + saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Clear status */ + *status = 0; + + /* Perform synchronous transfer */ + if ( ( rc = efi_usb_sync_transfer ( usbintf, endpoint, + USB_ENDPOINT_ATTR_INTERRUPT, + timeout, data, &actual ) ) != 0 ) { + /* Assume that any error represents a timeout */ + *status = EFI_USB_ERR_TIMEOUT; + goto err_transfer; + } + + err_transfer: + bs->RestoreTPL ( saved_tpl ); + return EFIRC ( rc ); +} + +/** + * Perform asynchronous interrupt transfer + * + * @v usbio USB I/O protocol + * @v endpoint Endpoint address + * @v start Start (rather than stop) transfer + * @v interval Polling interval (in milliseconds) + * @v len Data length + * @v callback Callback function + * @v context Context for callback function + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_async_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, + BOOLEAN start, UINTN interval, UINTN len, + EFI_ASYNC_USB_TRANSFER_CALLBACK callback, + VOID *context ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + EFI_TPL saved_tpl; + int rc; + + DBGC2 ( usbdev, "USBDEV %s async intr %s len %#zx int %d %p/%p\n", + usbintf->name, + ( ( endpoint & USB_ENDPOINT_IN ) ? 
"IN" : "OUT" ), + ( ( size_t ) len ), ( ( unsigned int ) interval ), + callback, context ); + + /* Raise TPL */ + saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Start/stop transfer as applicable */ + if ( start ) { + + /* Start new transfer */ + if ( ( rc = efi_usb_async_start ( usbintf, endpoint, interval, + len, callback, + context ) ) != 0 ) + goto err_start; + + } else { + + /* Stop transfer */ + efi_usb_async_stop ( usbintf, endpoint ); + + /* Success */ + rc = 0; + + } + + err_start: + bs->RestoreTPL ( saved_tpl ); + return EFIRC ( rc ); +} + +/** + * Perform synchronous isochronous transfer + * + * @v usbio USB I/O protocol + * @v endpoint Endpoint address + * @v data Data buffer + * @v len Length of data + * @ret status Transfer status + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_isochronous_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, + VOID *data, UINTN len, UINT32 *status ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC2 ( usbdev, "USBDEV %s sync iso %s %p+%zx\n", usbintf->name, + ( ( endpoint & USB_ENDPOINT_IN ) ? 
"IN" : "OUT" ), data, + ( ( size_t ) len ) ); + + /* Clear status */ + *status = 0; + + /* Not supported */ + return EFI_UNSUPPORTED; +} + +/** + * Perform asynchronous isochronous transfers + * + * @v usbio USB I/O protocol + * @v endpoint Endpoint address + * @v data Data buffer + * @v len Length of data + * @v callback Callback function + * @v context Context for callback function + * @ret status Transfer status + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_async_isochronous_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, + VOID *data, UINTN len, + EFI_ASYNC_USB_TRANSFER_CALLBACK callback, + VOID *context ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC2 ( usbdev, "USBDEV %s async iso %s %p+%zx %p/%p\n", usbintf->name, + ( ( endpoint & USB_ENDPOINT_IN ) ? "IN" : "OUT" ), data, + ( ( size_t ) len ), callback, context ); + + /* Not supported */ + return EFI_UNSUPPORTED; +} + +/** + * Get device descriptor + * + * @v usbio USB I/O protocol + * @ret efidesc EFI device descriptor + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_get_device_descriptor ( EFI_USB_IO_PROTOCOL *usbio, + EFI_USB_DEVICE_DESCRIPTOR *efidesc ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC2 ( usbdev, "USBDEV %s get device descriptor\n", usbintf->name ); + + /* Copy cached device descriptor */ + memcpy ( efidesc, &usbdev->func->usb->device, sizeof ( *efidesc ) ); + + return 0; +} + +/** + * Get configuration descriptor + * + * @v usbio USB I/O protocol + * @ret efidesc EFI interface descriptor + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_get_config_descriptor ( EFI_USB_IO_PROTOCOL *usbio, + EFI_USB_CONFIG_DESCRIPTOR *efidesc ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct 
efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC2 ( usbdev, "USBDEV %s get configuration descriptor\n", + usbintf->name ); + + /* Copy cached configuration descriptor */ + memcpy ( efidesc, usbdev->config, sizeof ( *efidesc ) ); + + return 0; +} + +/** + * Get interface descriptor + * + * @v usbio USB I/O protocol + * @ret efidesc EFI interface descriptor + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_get_interface_descriptor ( EFI_USB_IO_PROTOCOL *usbio, + EFI_USB_INTERFACE_DESCRIPTOR *efidesc ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + struct usb_interface_descriptor *desc; + + DBGC2 ( usbdev, "USBDEV %s get interface descriptor\n", usbintf->name ); + + /* Locate cached interface descriptor */ + desc = usb_interface_descriptor ( usbdev->config, usbintf->interface, + usbintf->alternate ); + if ( ! desc ) { + DBGC ( usbdev, "USBDEV %s alt %d has no interface descriptor\n", + usbintf->name, usbintf->alternate ); + return -ENOENT; + } + + /* Copy cached interface descriptor */ + memcpy ( efidesc, desc, sizeof ( *efidesc ) ); + + return 0; +} + +/** + * Get endpoint descriptor + * + * @v usbio USB I/O protocol + * @v address Endpoint index + * @ret efidesc EFI interface descriptor + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_get_endpoint_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT8 index, + EFI_USB_ENDPOINT_DESCRIPTOR *efidesc ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + struct usb_interface_descriptor *interface; + struct usb_endpoint_descriptor *desc; + + DBGC2 ( usbdev, "USBDEV %s get endpoint %d descriptor\n", + usbintf->name, index ); + + /* Locate cached interface descriptor */ + interface = usb_interface_descriptor ( usbdev->config, + usbintf->interface, + 
usbintf->alternate ); + if ( ! interface ) { + DBGC ( usbdev, "USBDEV %s alt %d has no interface descriptor\n", + usbintf->name, usbintf->alternate ); + return -ENOENT; + } + + /* Locate and copy cached endpoint descriptor */ + for_each_interface_descriptor ( desc, usbdev->config, interface ) { + if ( ( desc->header.type == USB_ENDPOINT_DESCRIPTOR ) && + ( index-- == 0 ) ) { + memcpy ( efidesc, desc, sizeof ( *efidesc ) ); + return 0; + } + } + return -ENOENT; +} + +/** + * Get string descriptor + * + * @v usbio USB I/O protocol + * @v language Language ID + * @v index String index + * @ret string String + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, + UINT8 index, CHAR16 **string ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + struct usb_descriptor_header header; + VOID *buffer; + size_t len; + EFI_TPL saved_tpl; + EFI_STATUS efirc; + int rc; + + DBGC2 ( usbdev, "USBDEV %s get string %d:%d descriptor\n", + usbintf->name, language, index ); + + /* Raise TPL */ + saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Read descriptor header */ + if ( ( rc = usb_get_descriptor ( usbdev->func->usb, 0, + USB_STRING_DESCRIPTOR, index, + language, &header, + sizeof ( header ) ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s could not get string %d:%d " + "descriptor header: %s\n", usbintf->name, language, + index, strerror ( rc ) ); + goto err_get_header; + } + len = header.len; + if ( len < sizeof ( header ) ) { + DBGC ( usbdev, "USBDEV %s underlength string %d:%d\n", + usbintf->name, language, index ); + rc = -EINVAL; + goto err_len; + } + + /* Allocate buffer */ + if ( ( efirc = bs->AllocatePool ( EfiBootServicesData, len, + &buffer ) ) != 0 ) { + rc = -EEFI ( efirc ); + goto err_alloc; + } + + /* Read whole descriptor */ + if ( ( rc = 
usb_get_descriptor ( usbdev->func->usb, 0, + USB_STRING_DESCRIPTOR, index, + language, buffer, len ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s could not get string %d:%d " + "descriptor: %s\n", usbintf->name, language, + index, strerror ( rc ) ); + goto err_get_descriptor; + } + + /* Shuffle down and terminate string */ + memmove ( buffer, ( buffer + sizeof ( header ) ), + ( len - sizeof ( header ) ) ); + memset ( ( buffer + len - sizeof ( header ) ), 0, sizeof ( **string ) ); + + /* Restore TPL */ + bs->RestoreTPL ( saved_tpl ); + + /* Return allocated string */ + *string = buffer; + return 0; + + err_get_descriptor: + bs->FreePool ( buffer ); + err_alloc: + err_len: + err_get_header: + bs->RestoreTPL ( saved_tpl ); + return EFIRC ( rc ); +} + +/** + * Get supported languages + * + * @v usbio USB I/O protocol + * @ret languages Language ID table + * @ret len Length of language ID table + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_get_supported_languages ( EFI_USB_IO_PROTOCOL *usbio, + UINT16 **languages, UINT16 *len ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC2 ( usbdev, "USBDEV %s get supported languages\n", usbintf->name ); + + /* Return cached supported languages */ + *languages = usbdev->lang; + *len = usbdev->lang_len; + + return 0; +} + +/** + * Reset port + * + * @v usbio USB I/O protocol + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +efi_usb_port_reset ( EFI_USB_IO_PROTOCOL *usbio ) { + struct efi_usb_interface *usbintf = + container_of ( usbio, struct efi_usb_interface, usbio ); + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC2 ( usbdev, "USBDEV %s reset port\n", usbintf->name ); + + /* This is logically impossible to do, since resetting the + * port may destroy state belonging to other + * EFI_USB_IO_PROTOCOL instances belonging to the same USB + * device. 
(This is yet another artifact of the incredibly + * poor design of the EFI_USB_IO_PROTOCOL.) + */ + return EFI_INVALID_PARAMETER; +} + +/** USB I/O protocol */ +static EFI_USB_IO_PROTOCOL efi_usb_io_protocol = { + .UsbControlTransfer = efi_usb_control_transfer, + .UsbBulkTransfer = efi_usb_bulk_transfer, + .UsbAsyncInterruptTransfer = efi_usb_async_interrupt_transfer, + .UsbSyncInterruptTransfer = efi_usb_sync_interrupt_transfer, + .UsbIsochronousTransfer = efi_usb_isochronous_transfer, + .UsbAsyncIsochronousTransfer = efi_usb_async_isochronous_transfer, + .UsbGetDeviceDescriptor = efi_usb_get_device_descriptor, + .UsbGetConfigDescriptor = efi_usb_get_config_descriptor, + .UsbGetInterfaceDescriptor = efi_usb_get_interface_descriptor, + .UsbGetEndpointDescriptor = efi_usb_get_endpoint_descriptor, + .UsbGetStringDescriptor = efi_usb_get_string_descriptor, + .UsbGetSupportedLanguages = efi_usb_get_supported_languages, + .UsbPortReset = efi_usb_port_reset, +}; + +/****************************************************************************** + * + * USB driver + * + ****************************************************************************** + */ + +/** + * Install interface + * + * @v usbdev EFI USB device + * @v interface Interface number + * @ret rc Return status code + */ +static int efi_usb_install ( struct efi_usb_device *usbdev, + unsigned int interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct usb_function *func = usbdev->func; + struct efi_usb_interface *usbintf; + EFI_STATUS efirc; + int rc; + + /* Allocate and initialise structure */ + usbintf = zalloc ( sizeof ( *usbintf ) ); + if ( ! 
usbintf ) { + rc = -ENOMEM; + goto err_alloc; + } + snprintf ( usbintf->name, sizeof ( usbintf->name ), "%s[%d]", + usbdev->name, interface ); + usbintf->usbdev = usbdev; + usbintf->interface = interface; + memcpy ( &usbintf->usbio, &efi_usb_io_protocol, + sizeof ( usbintf->usbio ) ); + + /* Construct device path */ + usbintf->path = efi_usb_path ( func ); + if ( ! usbintf->path ) { + rc = -ENODEV; + goto err_path; + } + + /* Add to list of interfaces */ + list_add_tail ( &usbintf->list, &usbdev->interfaces ); + + /* Install protocols */ + if ( ( efirc = bs->InstallMultipleProtocolInterfaces ( + &usbintf->handle, + &efi_usb_io_protocol_guid, &usbintf->usbio, + &efi_device_path_protocol_guid, usbintf->path, + NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( usbdev, "USBDEV %s could not install protocols: %s\n", + usbintf->name, strerror ( rc ) ); + goto err_install_protocol; + } + + DBGC ( usbdev, "USBDEV %s installed as %s\n", + usbintf->name, efi_handle_name ( usbintf->handle ) ); + return 0; + + bs->UninstallMultipleProtocolInterfaces ( + usbintf->handle, + &efi_usb_io_protocol_guid, &usbintf->usbio, + &efi_device_path_protocol_guid, usbintf->path, + NULL ); + err_install_protocol: + efi_usb_close_all ( usbintf ); + efi_usb_free_all ( usbintf ); + list_del ( &usbintf->list ); + free ( usbintf->path ); + err_path: + free ( usbintf ); + err_alloc: + return rc; +} + +/** + * Uninstall interface + * + * @v usbintf EFI USB interface + */ +static void efi_usb_uninstall ( struct efi_usb_interface *usbintf ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_device *usbdev = usbintf->usbdev; + + DBGC ( usbdev, "USBDEV %s uninstalling %s\n", + usbintf->name, efi_handle_name ( usbintf->handle ) ); + + /* Disconnect controllers. This should not be necessary, but + * seems to be required on some platforms to avoid failures + * when uninstalling protocols. 
+ */ + bs->DisconnectController ( usbintf->handle, NULL, NULL ); + + /* Uninstall protocols */ + bs->UninstallMultipleProtocolInterfaces ( + usbintf->handle, + &efi_usb_io_protocol_guid, &usbintf->usbio, + &efi_device_path_protocol_guid, usbintf->path, + NULL ); + + /* Close and free all endpoints */ + efi_usb_close_all ( usbintf ); + efi_usb_free_all ( usbintf ); + + /* Remove from list of interfaces */ + list_del ( &usbintf->list ); + + /* Free device path */ + free ( usbintf->path ); + + /* Free interface */ + free ( usbintf ); +} + +/** + * Uninstall all interfaces + * + * @v usbdev EFI USB device + */ +static void efi_usb_uninstall_all ( struct efi_usb_device *efiusb ) { + struct efi_usb_interface *usbintf; + + /* Uninstall all interfaces */ + while ( ( usbintf = list_first_entry ( &efiusb->interfaces, + struct efi_usb_interface, + list ) ) ) { + efi_usb_uninstall ( usbintf ); + } +} + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int efi_usb_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct usb_device *usb = func->usb; + struct efi_usb_device *usbdev; + struct efi_usb_interface *usbintf; + struct usb_descriptor_header header; + struct usb_descriptor_header *lang; + size_t config_len; + size_t lang_len; + unsigned int i; + int rc; + + /* Get configuration length */ + config_len = le16_to_cpu ( config->len ); + + /* Get supported languages descriptor header */ + if ( ( rc = usb_get_descriptor ( usb, 0, USB_STRING_DESCRIPTOR, 0, 0, + &header, sizeof ( header ) ) ) != 0 ) { + /* Assume no strings are present */ + header.len = 0; + } + lang_len = ( ( header.len >= sizeof ( header ) ) ? + ( header.len - sizeof ( header ) ) : 0 ); + + /* Allocate and initialise structure */ + usbdev = zalloc ( sizeof ( *usbdev ) + config_len + + sizeof ( *lang ) + lang_len ); + if ( ! 
usbdev ) { + rc = -ENOMEM; + goto err_alloc; + } + usb_func_set_drvdata ( func, usbdev ); + usbdev->name = func->name; + usbdev->func = func; + usbdev->config = ( ( ( void * ) usbdev ) + sizeof ( *usbdev ) ); + memcpy ( usbdev->config, config, config_len ); + lang = ( ( ( void * ) usbdev->config ) + config_len ); + usbdev->lang = ( ( ( void * ) lang ) + sizeof ( *lang ) ); + usbdev->lang_len = lang_len; + INIT_LIST_HEAD ( &usbdev->interfaces ); + + /* Get supported languages descriptor, if applicable */ + if ( lang_len && + ( ( rc = usb_get_descriptor ( usb, 0, USB_STRING_DESCRIPTOR, + 0, 0, lang, header.len ) ) != 0 ) ) { + DBGC ( usbdev, "USBDEV %s could not get supported languages: " + "%s\n", usbdev->name, strerror ( rc ) ); + goto err_get_languages; + } + + /* Install interfaces */ + for ( i = 0 ; i < func->desc.count ; i++ ) { + if ( ( rc = efi_usb_install ( usbdev, + func->interface[i] ) ) != 0 ) + goto err_install; + } + + /* Connect any external drivers */ + list_for_each_entry ( usbintf, &usbdev->interfaces, list ) + bs->ConnectController ( usbintf->handle, NULL, NULL, TRUE ); + + return 0; + + err_install: + efi_usb_uninstall_all ( usbdev ); + assert ( list_empty ( &usbdev->interfaces ) ); + err_get_languages: + free ( usbdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void efi_usb_remove ( struct usb_function *func ) { + struct efi_usb_device *usbdev = usb_func_get_drvdata ( func ); + + /* Uninstall all interfaces */ + efi_usb_uninstall_all ( usbdev ); + assert ( list_empty ( &usbdev->interfaces ) ); + + /* Free device */ + free ( usbdev ); +} + +/** USB I/O protocol device IDs */ +static struct usb_device_id efi_usb_ids[] = { + { + .name = "usbio", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** USB I/O protocol driver */ +struct usb_driver usbio_driver __usb_fallback_driver = { + .ids = efi_usb_ids, + .id_count = ( sizeof ( efi_usb_ids ) / sizeof ( efi_usb_ids[0] ) ), + .class = 
USB_CLASS_ID ( USB_ANY_ID, USB_ANY_ID, USB_ANY_ID ), + .score = USB_SCORE_FALLBACK, + .probe = efi_usb_probe, + .remove = efi_usb_remove, +}; diff --git a/src/interface/efi/efi_utils.c b/src/interface/efi/efi_utils.c new file mode 100644 index 00000000..f8ffce91 --- /dev/null +++ b/src/interface/efi/efi_utils.c @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2011 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI utilities + * + */ + +/** + * Locate parent device supporting a given protocol + * + * @v device EFI device handle + * @v protocol Protocol GUID + * @v parent Parent EFI device handle to fill in + * @ret rc Return status code + */ +int efi_locate_device ( EFI_HANDLE device, EFI_GUID *protocol, + EFI_HANDLE *parent ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_DEVICE_PATH_PROTOCOL *path; + void *interface; + } path; + EFI_DEVICE_PATH_PROTOCOL *devpath; + EFI_STATUS efirc; + int rc; + + /* Get device path */ + if ( ( efirc = bs->OpenProtocol ( device, + &efi_device_path_protocol_guid, + &path.interface, + efi_image_handle, device, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( device, "EFIDEV %s cannot open device path: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_open_device_path; + } + devpath = path.path; + + /* Check for presence of specified protocol */ + if ( ( efirc = bs->LocateDevicePath ( protocol, &devpath, + parent ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( device, "EFIDEV %s has no parent supporting %s: %s\n", + efi_handle_name ( device ), + efi_guid_ntoa ( protocol ), strerror ( rc ) ); + goto err_locate_protocol; + } + + /* Success */ + rc = 0; + + err_locate_protocol: + bs->CloseProtocol ( device, &efi_device_path_protocol_guid, + efi_image_handle, device ); + err_open_device_path: + return rc; +} + +/** + * Add EFI device as child of another EFI device + * + * @v parent EFI parent device handle + * @v child EFI child device handle + * @ret rc Return status code + */ +int efi_child_add ( EFI_HANDLE parent, EFI_HANDLE child ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *devpath; + EFI_STATUS efirc; + int rc; + + /* Re-open the device path protocol */ + if ( ( efirc = bs->OpenProtocol ( parent, + &efi_device_path_protocol_guid, + 
&devpath, + efi_image_handle, child, + EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER + ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( parent, "EFIDEV %s could not add child", + efi_handle_name ( parent ) ); + DBGC ( parent, " %s: %s\n", + efi_handle_name ( child ), strerror ( rc ) ); + DBGC_EFI_OPENERS ( parent, parent, + &efi_device_path_protocol_guid ); + return rc; + } + + DBGC2 ( parent, "EFIDEV %s added child", efi_handle_name ( parent ) ); + DBGC2 ( parent, " %s\n", efi_handle_name ( child ) ); + return 0; +} + +/** + * Remove EFI device as child of another EFI device + * + * @v parent EFI parent device handle + * @v child EFI child device handle + */ +void efi_child_del ( EFI_HANDLE parent, EFI_HANDLE child ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + bs->CloseProtocol ( parent, &efi_device_path_protocol_guid, + efi_image_handle, child ); + DBGC2 ( parent, "EFIDEV %s removed child", efi_handle_name ( parent ) ); + DBGC2 ( parent, " %s\n", efi_handle_name ( child ) ); +} + +/** + * Get underlying PCI device information + * + * @v device EFI device handle + * @v prefix Device name prefix + * @v dev Generic device to fill in + * @ret rc Return status code + */ +static int efi_pci_info ( EFI_HANDLE device, const char *prefix, + struct device *dev ) { + EFI_HANDLE pci_device; + struct pci_device pci; + int rc; + + /* Find parent PCI device */ + if ( ( rc = efi_locate_device ( device, &efi_pci_io_protocol_guid, + &pci_device ) ) != 0 ) { + DBGC ( device, "EFIDEV %s is not a PCI device: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + return rc; + } + + /* Get PCI device information */ + if ( ( rc = efipci_info ( pci_device, &pci ) ) != 0 ) { + DBGC ( device, "EFIDEV %s could not get PCI information: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + return rc; + } + + /* Populate device information */ + memcpy ( &dev->desc, &pci.dev.desc, sizeof ( dev->desc ) ); + snprintf ( dev->name, sizeof ( dev->name ), "%s-%s", + prefix, pci.dev.name ); 
+ + return 0; +} + +/** + * Get underlying device information + * + * @v device EFI device handle + * @v prefix Device name prefix + * @v dev Generic device to fill in + */ +void efi_device_info ( EFI_HANDLE device, const char *prefix, + struct device *dev ) { + int rc; + + /* Try getting underlying PCI device information */ + if ( ( rc = efi_pci_info ( device, prefix, dev ) ) == 0 ) + return; + + /* If we cannot get any underlying device information, fall + * back to providing information about the EFI handle. + */ + DBGC ( device, "EFIDEV %s could not get underlying device " + "information\n", efi_handle_name ( device ) ); + dev->desc.bus_type = BUS_TYPE_EFI; + snprintf ( dev->name, sizeof ( dev->name ), "%s-%p", prefix, device ); +} diff --git a/src/interface/efi/efi_watchdog.c b/src/interface/efi/efi_watchdog.c new file mode 100644 index 00000000..7061f81d --- /dev/null +++ b/src/interface/efi/efi_watchdog.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * EFI watchdog holdoff timer + * + */ + +#include +#include +#include +#include +#include +#include + +/** Watchdog holdoff interval (in seconds) */ +#define WATCHDOG_HOLDOFF_SECS 10 + +/** Watchdog timeout (in seconds) */ +#define WATCHDOG_TIMEOUT_SECS ( 5 * 60 ) + +/** Watchdog code (to be logged on watchdog timeout) */ +#define WATCHDOG_CODE 0x6950584544454144ULL + +/** Watchdog data (to be logged on watchdog timeout) */ +#define WATCHDOG_DATA L"iPXE"; + +/** + * Hold off watchdog timer + * + * @v retry Retry timer + * @v over Failure indicator + */ +static void efi_watchdog_expired ( struct retry_timer *timer, + int over __unused ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + static CHAR16 data[] = WATCHDOG_DATA; + EFI_STATUS efirc; + int rc; + + DBGC2 ( timer, "EFI holding off watchdog timer\n" ); + + /* Restart this holdoff timer */ + start_timer_fixed ( timer, ( WATCHDOG_HOLDOFF_SECS * TICKS_PER_SEC ) ); + + /* Reset watchdog timer */ + if ( ( efirc = bs->SetWatchdogTimer ( WATCHDOG_TIMEOUT_SECS, + WATCHDOG_CODE, sizeof ( data ), + data ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( timer, "EFI could not set watchdog timer: %s\n", + strerror ( rc ) ); + return; + } +} + +/** Watchdog holdoff timer */ +struct retry_timer efi_watchdog = TIMER_INIT ( efi_watchdog_expired ); diff --git a/src/interface/efi/efi_wrap.c b/src/interface/efi/efi_wrap.c new file mode 100644 index 00000000..5c02a7ee --- /dev/null +++ b/src/interface/efi/efi_wrap.c @@ -0,0 +1,1249 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * EFI image wrapping + * + */ + +#include +#include +#include +#include +#include +#include + +/** Colour for debug messages */ +#define colour &efi_systab + +/** + * Convert EFI status code to text + * + * @v efirc EFI status code + * @ret text EFI status code text + */ +static const char * efi_status ( EFI_STATUS efirc ) { + static char buf[ 19 /* "0xXXXXXXXXXXXXXXXX" + NUL */ ]; + + switch ( efirc ) { + case EFI_SUCCESS : return "0"; + case EFI_LOAD_ERROR : return "LOAD_ERROR"; + case EFI_INVALID_PARAMETER : return "INVALID_PARAMETER"; + case EFI_UNSUPPORTED : return "UNSUPPORTED"; + case EFI_BAD_BUFFER_SIZE : return "BAD_BUFFER_SIZE"; + case EFI_BUFFER_TOO_SMALL : return "BUFFER_TOO_SMALL"; + case EFI_NOT_READY : return "NOT_READY"; + case EFI_DEVICE_ERROR : return "DEVICE_ERROR"; + case EFI_WRITE_PROTECTED : return "WRITE_PROTECTED"; + case EFI_OUT_OF_RESOURCES : return "OUT_OF_RESOURCES"; + case EFI_VOLUME_CORRUPTED : return "VOLUME_CORRUPTED"; + case EFI_VOLUME_FULL : return "VOLUME_FULL"; + case EFI_NO_MEDIA : return "NO_MEDIA"; + case EFI_MEDIA_CHANGED : return "MEDIA_CHANGED"; + case EFI_NOT_FOUND : return "NOT_FOUND"; + case EFI_ACCESS_DENIED : return "ACCESS_DENIED"; + case EFI_NO_RESPONSE : return 
"NO_RESPONSE"; + case EFI_NO_MAPPING : return "NO_MAPPING"; + case EFI_TIMEOUT : return "TIMEOUT"; + case EFI_NOT_STARTED : return "NOT_STARTED"; + case EFI_ALREADY_STARTED : return "ALREADY_STARTED"; + case EFI_ABORTED : return "ABORTED"; + case EFI_ICMP_ERROR : return "ICMP_ERROR"; + case EFI_TFTP_ERROR : return "TFTP_ERROR"; + case EFI_PROTOCOL_ERROR : return "PROTOCOL_ERROR"; + case EFI_INCOMPATIBLE_VERSION : return "INCOMPATIBLE_VERSION"; + case EFI_SECURITY_VIOLATION : return "SECURITY_VIOLATION"; + case EFI_CRC_ERROR : return "CRC_ERROR"; + case EFI_END_OF_MEDIA : return "END_OF_MEDIA"; + case EFI_END_OF_FILE : return "END_OF_FILE"; + case EFI_INVALID_LANGUAGE : return "INVALID_LANGUAGE"; + case EFI_COMPROMISED_DATA : return "COMPROMISED_DATA"; + case EFI_WARN_UNKNOWN_GLYPH : return "WARN_UNKNOWN_GLYPH"; + case EFI_WARN_DELETE_FAILURE : return "WARN_DELETE_FAILURE"; + case EFI_WARN_WRITE_FAILURE : return "WARN_WRITE_FAILURE"; + case EFI_WARN_BUFFER_TOO_SMALL : return "WARN_BUFFER_TOO_SMALL"; + case EFI_WARN_STALE_DATA : return "WARN_STALE_DATA"; + default: + snprintf ( buf, sizeof ( buf ), "%#lx", + ( unsigned long ) efirc ); + return buf; + } +} + +/** + * Convert EFI boolean to text + * + * @v boolean Boolean value + * @ret text Boolean value text + */ +static const char * efi_boolean ( BOOLEAN boolean ) { + + return ( boolean ? 
"TRUE" : "FALSE" ); +} + +/** + * Convert EFI TPL to text + * + * @v tpl Task priority level + * @ret text Task priority level as text + */ +static const char * efi_tpl ( EFI_TPL tpl ) { + static char buf[ 19 /* "0xXXXXXXXXXXXXXXXX" + NUL */ ]; + + switch ( tpl ) { + case TPL_APPLICATION: return "Application"; + case TPL_CALLBACK: return "Callback"; + case TPL_NOTIFY: return "Notify"; + case TPL_HIGH_LEVEL: return "HighLevel"; + default: + snprintf ( buf, sizeof ( buf ), "%#lx", + ( unsigned long ) tpl ); + return buf; + } +} + +/** + * Convert EFI allocation type to text + * + * @v type Allocation type + * @ret text Allocation type as text + */ +static const char * efi_allocate_type ( EFI_ALLOCATE_TYPE type ) { + static char buf[ 11 /* "0xXXXXXXXX" + NUL */ ]; + + switch ( type ) { + case AllocateAnyPages: return "AnyPages"; + case AllocateMaxAddress: return "MaxAddress"; + case AllocateAddress: return "Address"; + default: + snprintf ( buf, sizeof ( buf ), "%#x", type ); + return buf; + } +} + +/** + * Convert EFI memory type to text + * + * @v type Memory type + * @ret text Memory type as text + */ +static const char * efi_memory_type ( EFI_MEMORY_TYPE type ) { + static char buf[ 11 /* "0xXXXXXXXX" + NUL */ ]; + + switch ( type ) { + case EfiReservedMemoryType: return "Reserved"; + case EfiLoaderCode: return "LoaderCode"; + case EfiLoaderData: return "LoaderData"; + case EfiBootServicesCode: return "BootCode"; + case EfiBootServicesData: return "BootData"; + case EfiRuntimeServicesCode: return "RuntimeCode"; + case EfiRuntimeServicesData: return "RuntimeData"; + case EfiConventionalMemory: return "Conventional"; + case EfiUnusableMemory: return "Unusable"; + case EfiACPIReclaimMemory: return "ACPIReclaim"; + case EfiACPIMemoryNVS: return "ACPINVS"; + case EfiMemoryMappedIO: return "MMIO"; + case EfiMemoryMappedIOPortSpace:return "PIO"; + case EfiPalCode: return "PalCode"; + case EfiPersistentMemory: return "Persistent"; + default: + snprintf ( buf, sizeof ( buf 
), "%#x", type ); + return buf; + } +} + +/** + * Convert EFI timer delay type to text + * + * @v type Timer delay type + * @ret text Timer delay type as text + */ +static const char * efi_timer_delay ( EFI_TIMER_DELAY type ) { + static char buf[ 11 /* "0xXXXXXXXX" + NUL */ ]; + + switch ( type ) { + case TimerCancel: return "Cancel"; + case TimerPeriodic: return "Periodic"; + case TimerRelative: return "Relative"; + default: + snprintf ( buf, sizeof ( buf ), "%#x", type ); + return buf; + } +} + +/** + * Wrap RaiseTPL() + * + */ +static EFI_TPL EFIAPI +efi_raise_tpl_wrapper ( EFI_TPL new_tpl ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_TPL old_tpl; + + DBGCP ( colour, "RaiseTPL ( %s ) ", efi_tpl ( new_tpl ) ); + old_tpl = bs->RaiseTPL ( new_tpl ); + DBGCP ( colour, "= %s -> %p\n", efi_tpl ( old_tpl ), retaddr ); + return old_tpl; +} + +/** + * Wrap RestoreTPL() + * + */ +static VOID EFIAPI +efi_restore_tpl_wrapper ( EFI_TPL old_tpl ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + + DBGCP ( colour, "RestoreTPL ( %s ) ", efi_tpl ( old_tpl ) ); + bs->RestoreTPL ( old_tpl ); + DBGCP ( colour, "-> %p\n", retaddr ); +} + +/** + * Wrap AllocatePages() + * + */ +static EFI_STATUS EFIAPI +efi_allocate_pages_wrapper ( EFI_ALLOCATE_TYPE type, + EFI_MEMORY_TYPE memory_type, UINTN pages, + EFI_PHYSICAL_ADDRESS *memory ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "AllocatePages ( %s, %s, %#llx, %#llx ) ", + efi_allocate_type ( type ), efi_memory_type ( memory_type ), + ( ( unsigned long long ) pages ), + ( ( unsigned long long ) *memory ) ); + efirc = bs->AllocatePages ( type, memory_type, pages, memory ); + DBGC2 ( colour, "= %s ( %#llx ) -> %p\n", efi_status ( efirc ), + ( ( unsigned long long ) *memory ), retaddr ); + return efirc; +} + +/** + * Wrap 
FreePages() + * + */ +static EFI_STATUS EFIAPI +efi_free_pages_wrapper ( EFI_PHYSICAL_ADDRESS memory, UINTN pages ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "FreePages ( %#llx, %#llx ) ", + ( ( unsigned long long ) memory ), + ( ( unsigned long long ) pages ) ); + efirc = bs->FreePages ( memory, pages ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap GetMemoryMap() + * + */ +static EFI_STATUS EFIAPI +efi_get_memory_map_wrapper ( UINTN *memory_map_size, + EFI_MEMORY_DESCRIPTOR *memory_map, UINTN *map_key, + UINTN *descriptor_size, + UINT32 *descriptor_version ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_MEMORY_DESCRIPTOR *desc; + size_t remaining; + EFI_STATUS efirc; + + DBGC ( colour, "GetMemoryMap ( %#llx, %p ) ", + ( ( unsigned long long ) *memory_map_size ), memory_map ); + efirc = bs->GetMemoryMap ( memory_map_size, memory_map, map_key, + descriptor_size, descriptor_version ); + DBGC ( colour, "= %s ( %#llx, %#llx, %#llx, v%d", + efi_status ( efirc ), + ( ( unsigned long long ) *memory_map_size ), + ( ( unsigned long long ) *map_key ), + ( ( unsigned long long ) *descriptor_size ), + *descriptor_version ); + if ( DBG_EXTRA && ( efirc == 0 ) ) { + DBGC2 ( colour, ",\n" ); + for ( desc = memory_map, remaining = *memory_map_size ; + remaining >= *descriptor_size ; + desc = ( ( ( void * ) desc ) + *descriptor_size ), + remaining -= *descriptor_size ) { + DBGC2 ( colour, "%#016llx+%#08llx %#016llx " + "%s\n", desc->PhysicalStart, + ( desc->NumberOfPages * EFI_PAGE_SIZE ), + desc->Attribute, + efi_memory_type ( desc->Type ) ); + } + } else { + DBGC ( colour, " " ); + } + DBGC ( colour, ") -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap AllocatePool() + * + */ +static EFI_STATUS EFIAPI +efi_allocate_pool_wrapper ( EFI_MEMORY_TYPE 
pool_type, UINTN size, + VOID **buffer ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "AllocatePool ( %s, %#llx ) ", + efi_memory_type ( pool_type ), + ( ( unsigned long long ) size ) ); + efirc = bs->AllocatePool ( pool_type, size, buffer ); + DBGC2 ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *buffer, retaddr ); + return efirc; +} + +/** + * Wrap FreePool() + * + */ +static EFI_STATUS EFIAPI +efi_free_pool_wrapper ( VOID *buffer ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "FreePool ( %p ) ", buffer ); + efirc = bs->FreePool ( buffer ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CreateEvent() + * + */ +static EFI_STATUS EFIAPI +efi_create_event_wrapper ( UINT32 type, EFI_TPL notify_tpl, + EFI_EVENT_NOTIFY notify_function, + VOID *notify_context, EFI_EVENT *event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CreateEvent ( %#x, %s, %p, %p ) ", + type, efi_tpl ( notify_tpl ), notify_function, notify_context ); + efirc = bs->CreateEvent ( type, notify_tpl, notify_function, + notify_context, event ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *event, retaddr ); + return efirc; +} + +/** + * Wrap SetTimer() + * + */ +static EFI_STATUS EFIAPI +efi_set_timer_wrapper ( EFI_EVENT event, EFI_TIMER_DELAY type, + UINT64 trigger_time ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "SetTimer ( %p, %s, %ld.%07ld00s ) ", + event, efi_timer_delay ( type ), + ( ( unsigned long ) ( trigger_time / 10000000 ) ), + ( ( unsigned long ) ( trigger_time % 10000000 ) ) ); + efirc = bs->SetTimer ( event, type, 
trigger_time ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap WaitForEvent() + * + */ +static EFI_STATUS EFIAPI +efi_wait_for_event_wrapper ( UINTN number_of_events, EFI_EVENT *event, + UINTN *index ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "WaitForEvent (" ); + for ( i = 0 ; i < number_of_events ; i++ ) + DBGC ( colour, " %p", event[i] ); + DBGC ( colour, " ) " ); + efirc = bs->WaitForEvent ( number_of_events, event, index ); + DBGC ( colour, "= %s", efi_status ( efirc ) ); + if ( efirc == 0 ) + DBGC ( colour, " ( %p )", event[*index] ); + DBGC ( colour, " -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap SignalEvent() + * + */ +static EFI_STATUS EFIAPI +efi_signal_event_wrapper ( EFI_EVENT event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "SignalEvent ( %p ) ", event ); + efirc = bs->SignalEvent ( event ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CloseEvent() + * + */ +static EFI_STATUS EFIAPI +efi_close_event_wrapper ( EFI_EVENT event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CloseEvent ( %p ) ", event ); + efirc = bs->SignalEvent ( event ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} +/** + * Wrap CheckEvent() + * + */ +static EFI_STATUS EFIAPI +efi_check_event_wrapper ( EFI_EVENT event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGCP ( colour, "CheckEvent ( %p ) ", event ); + efirc = bs->SignalEvent ( event ); + DBGCP ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} 
+ +/** + * Wrap InstallProtocolInterface() + * + */ +static EFI_STATUS EFIAPI +efi_install_protocol_interface_wrapper ( EFI_HANDLE *handle, EFI_GUID *protocol, + EFI_INTERFACE_TYPE interface_type, + VOID *interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "InstallProtocolInterface ( %s, %s, %d, %p ) ", + efi_handle_name ( *handle ), efi_guid_ntoa ( protocol ), + interface_type, interface ); + efirc = bs->InstallProtocolInterface ( handle, protocol, interface_type, + interface ); + DBGC ( colour, "= %s ( %s ) -> %p\n", + efi_status ( efirc ), efi_handle_name ( *handle ), retaddr ); + return efirc; +} + +/** + * Wrap ReinstallProtocolInterface() + * + */ +static EFI_STATUS EFIAPI +efi_reinstall_protocol_interface_wrapper ( EFI_HANDLE handle, + EFI_GUID *protocol, + VOID *old_interface, + VOID *new_interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "ReinstallProtocolInterface ( %s, %s, %p, %p ) ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ), + old_interface, new_interface ); + efirc = bs->ReinstallProtocolInterface ( handle, protocol, + old_interface, new_interface ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap UninstallProtocolInterface() + * + */ +static EFI_STATUS EFIAPI +efi_uninstall_protocol_interface_wrapper ( EFI_HANDLE handle, + EFI_GUID *protocol, + VOID *interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "UninstallProtocolInterface ( %s, %s, %p ) ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ), + interface ); + efirc = bs->UninstallProtocolInterface ( handle, protocol, interface ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + 
+/** + * Wrap HandleProtocol() + * + */ +static EFI_STATUS EFIAPI +efi_handle_protocol_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, + VOID **interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "HandleProtocol ( %s, %s ) ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ) ); + efirc = bs->HandleProtocol ( handle, protocol, interface ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *interface, retaddr ); + return efirc; +} + +/** + * Wrap RegisterProtocolNotify() + * + */ +static EFI_STATUS EFIAPI +efi_register_protocol_notify_wrapper ( EFI_GUID *protocol, EFI_EVENT event, + VOID **registration ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "RegisterProtocolNotify ( %s, %p ) ", + efi_guid_ntoa ( protocol ), event ); + efirc = bs->RegisterProtocolNotify ( protocol, event, registration ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *registration, retaddr ); + return efirc; +} + +/** + * Wrap LocateHandle() + * + */ +static EFI_STATUS EFIAPI +efi_locate_handle_wrapper ( EFI_LOCATE_SEARCH_TYPE search_type, + EFI_GUID *protocol, VOID *search_key, + UINTN *buffer_size, EFI_HANDLE *buffer ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "LocateHandle ( %s, %s, %p, %zd ) ", + efi_locate_search_type_name ( search_type ), + efi_guid_ntoa ( protocol ), search_key, + ( ( size_t ) *buffer_size ) ); + efirc = bs->LocateHandle ( search_type, protocol, search_key, + buffer_size, buffer ); + DBGC ( colour, "= %s ( %zd", efi_status ( efirc ), + ( ( size_t ) *buffer_size ) ); + if ( efirc == 0 ) { + DBGC ( colour, ", {" ); + for ( i = 0; i < ( *buffer_size / sizeof ( buffer[0] ) ); i++ ){ + DBGC ( colour, "%s%s", ( i ? 
", " : " " ), + efi_handle_name ( buffer[i] ) ); + } + DBGC ( colour, " }" ); + } + DBGC ( colour, " ) -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap LocateDevicePath() + * + */ +static EFI_STATUS EFIAPI +efi_locate_device_path_wrapper ( EFI_GUID *protocol, + EFI_DEVICE_PATH_PROTOCOL **device_path, + EFI_HANDLE *device ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "LocateDevicePath ( %s, %s ) ", + efi_guid_ntoa ( protocol ), efi_devpath_text ( *device_path ) ); + efirc = bs->LocateDevicePath ( protocol, device_path, device ); + DBGC ( colour, "= %s ( %s, ", + efi_status ( efirc ), efi_devpath_text ( *device_path ) ); + DBGC ( colour, "%s ) -> %p\n", efi_handle_name ( *device ), retaddr ); + return efirc; +} + +/** + * Wrap InstallConfigurationTable() + * + */ +static EFI_STATUS EFIAPI +efi_install_configuration_table_wrapper ( EFI_GUID *guid, VOID *table ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "InstallConfigurationTable ( %s, %p ) ", + efi_guid_ntoa ( guid ), table ); + efirc = bs->InstallConfigurationTable ( guid, table ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap LoadImage() + * + */ +static EFI_STATUS EFIAPI +efi_load_image_wrapper ( BOOLEAN boot_policy, EFI_HANDLE parent_image_handle, + EFI_DEVICE_PATH_PROTOCOL *device_path, + VOID *source_buffer, UINTN source_size, + EFI_HANDLE *image_handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "LoadImage ( %s, %s, ", efi_boolean ( boot_policy ), + efi_handle_name ( parent_image_handle ) ); + DBGC ( colour, "%s, %p, %#llx ) ", + efi_devpath_text ( device_path ), source_buffer, + ( ( unsigned long long ) source_size ) ); + efirc = bs->LoadImage ( 
boot_policy, parent_image_handle, device_path, + source_buffer, source_size, image_handle ); + DBGC ( colour, "= %s ( ", efi_status ( efirc ) ); + if ( efirc == 0 ) + DBGC ( colour, "%s ", efi_handle_name ( *image_handle ) ); + DBGC ( colour, ") -> %p\n", retaddr ); + + /* Wrap the new image */ + if ( efirc == 0 ) + efi_wrap ( *image_handle ); + + return efirc; +} + +/** + * Wrap StartImage() + * + */ +static EFI_STATUS EFIAPI +efi_start_image_wrapper ( EFI_HANDLE image_handle, UINTN *exit_data_size, + CHAR16 **exit_data ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "StartImage ( %s ) ", efi_handle_name ( image_handle ) ); + efirc = bs->StartImage ( image_handle, exit_data_size, exit_data ); + DBGC ( colour, "= %s", efi_status ( efirc ) ); + if ( ( efirc != 0 ) && exit_data && *exit_data_size ) + DBGC ( colour, " ( \"%ls\" )", *exit_data ); + DBGC ( colour, " -> %p\n", retaddr ); + if ( ( efirc != 0 ) && exit_data && *exit_data_size ) + DBGC_HD ( colour, *exit_data, *exit_data_size ); + return efirc; +} + +/** + * Wrap Exit() + * + */ +static EFI_STATUS EFIAPI +efi_exit_wrapper ( EFI_HANDLE image_handle, EFI_STATUS exit_status, + UINTN exit_data_size, CHAR16 *exit_data ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + if ( ( exit_status != 0 ) && exit_data && exit_data_size ) + DBGC_HD ( colour, exit_data, exit_data_size ); + DBGC ( colour, "Exit ( %s, %s", + efi_handle_name ( image_handle ), efi_status ( exit_status ) ); + if ( ( exit_status != 0 ) && exit_data && exit_data_size ) + DBGC ( colour, ", \"%ls\"", exit_data ); + DBGC ( colour, " ) " ); + efirc = bs->Exit ( image_handle, exit_status, exit_data_size, + exit_data ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap UnloadImage() + * + */ +static EFI_STATUS EFIAPI 
+efi_unload_image_wrapper ( EFI_HANDLE image_handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "UnloadImage ( %s ) ", + efi_handle_name ( image_handle ) ); + efirc = bs->UnloadImage ( image_handle ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap ExitBootServices() + * + */ +static EFI_STATUS EFIAPI +efi_exit_boot_services_wrapper ( EFI_HANDLE image_handle, UINTN map_key ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "ExitBootServices ( %s, %#llx ) ", + efi_handle_name ( image_handle ), + ( ( unsigned long long ) map_key ) ); + efirc = bs->ExitBootServices ( image_handle, map_key ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap GetNextMonotonicCount() + * + */ +static EFI_STATUS EFIAPI +efi_get_next_monotonic_count_wrapper ( UINT64 *count ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGCP ( colour, "GetNextMonotonicCount() " ); + efirc = bs->GetNextMonotonicCount ( count ); + DBGCP ( colour, "= %s ( %#llx ) -> %p\n", + efi_status ( efirc ), *count, retaddr ); + return efirc; +} + +/** + * Wrap Stall() + * + */ +static EFI_STATUS EFIAPI +efi_stall_wrapper ( UINTN microseconds ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "Stall ( %ld.%06lds ) ", + ( ( unsigned long ) ( microseconds / 1000000 ) ), + ( ( unsigned long ) ( microseconds % 1000000 ) ) ); + efirc = bs->Stall ( microseconds ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap SetWatchdogTimer() + * + */ +static EFI_STATUS EFIAPI +efi_set_watchdog_timer_wrapper ( UINTN 
timeout, UINT64 watchdog_code, + UINTN data_size, CHAR16 *watchdog_data ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "SetWatchdogTimer ( %lds, %#llx, %#llx, %p ) ", + ( ( unsigned long ) timeout ), watchdog_code, + ( ( unsigned long long ) data_size ), watchdog_data ); + efirc = bs->SetWatchdogTimer ( timeout, watchdog_code, data_size, + watchdog_data ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap ConnectController() + * + */ +static EFI_STATUS EFIAPI +efi_connect_controller_wrapper ( EFI_HANDLE controller_handle, + EFI_HANDLE *driver_image_handle, + EFI_DEVICE_PATH_PROTOCOL *remaining_path, + BOOLEAN recursive ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_HANDLE *tmp; + EFI_STATUS efirc; + + DBGC ( colour, "ConnectController ( %s, {", + efi_handle_name ( controller_handle ) ); + if ( driver_image_handle ) { + for ( tmp = driver_image_handle ; *tmp ; tmp++ ) { + DBGC ( colour, "%s%s", + ( ( tmp == driver_image_handle ) ? 
" " : ", " ), + efi_handle_name ( *tmp ) ); + } + } + DBGC ( colour, " }, %s, %s ) ", efi_devpath_text ( remaining_path ), + efi_boolean ( recursive ) ); + efirc = bs->ConnectController ( controller_handle, driver_image_handle, + remaining_path, recursive ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap DisconnectController() + * + */ +static EFI_STATUS EFIAPI +efi_disconnect_controller_wrapper ( EFI_HANDLE controller_handle, + EFI_HANDLE driver_image_handle, + EFI_HANDLE child_handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "DisconnectController ( %s", + efi_handle_name ( controller_handle ) ); + DBGC ( colour, ", %s", efi_handle_name ( driver_image_handle ) ); + DBGC ( colour, ", %s ) ", efi_handle_name ( child_handle ) ); + efirc = bs->DisconnectController ( controller_handle, + driver_image_handle, + child_handle ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap OpenProtocol() + * + */ +static EFI_STATUS EFIAPI +efi_open_protocol_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, + VOID **interface, EFI_HANDLE agent_handle, + EFI_HANDLE controller_handle, UINT32 attributes ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "OpenProtocol ( %s, %s, ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ) ); + DBGC ( colour, "%s, ", efi_handle_name ( agent_handle ) ); + DBGC ( colour, "%s, %s ) ", efi_handle_name ( controller_handle ), + efi_open_attributes_name ( attributes ) ); + efirc = bs->OpenProtocol ( handle, protocol, interface, agent_handle, + controller_handle, attributes ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *interface, retaddr ); + return efirc; +} + +/** + * Wrap CloseProtocol() + * + */ +static EFI_STATUS EFIAPI 
+efi_close_protocol_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, + EFI_HANDLE agent_handle, + EFI_HANDLE controller_handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CloseProtocol ( %s, %s, ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ) ); + DBGC ( colour, "%s, ", efi_handle_name ( agent_handle ) ); + DBGC ( colour, "%s ) ", efi_handle_name ( controller_handle ) ); + efirc = bs->CloseProtocol ( handle, protocol, agent_handle, + controller_handle ); + DBGC ( colour, "= %s -> %p\n", + efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap OpenProtocolInformation() + * + */ +static EFI_STATUS EFIAPI +efi_open_protocol_information_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, + EFI_OPEN_PROTOCOL_INFORMATION_ENTRY + **entry_buffer, + UINTN *entry_count ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "OpenProtocolInformation ( %s, %s ) ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ) ); + efirc = bs->OpenProtocolInformation ( handle, protocol, entry_buffer, + entry_count ); + DBGC ( colour, "= %s ( %p, %#llx ) -> %p\n", + efi_status ( efirc ), *entry_buffer, + ( ( unsigned long long ) *entry_count ), retaddr ); + return efirc; +} + +/** + * Wrap ProtocolsPerHandle() + * + */ +static EFI_STATUS EFIAPI +efi_protocols_per_handle_wrapper ( EFI_HANDLE handle, + EFI_GUID ***protocol_buffer, + UINTN *protocol_buffer_count ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "ProtocolsPerHandle ( %s ) ", + efi_handle_name ( handle ) ); + efirc = bs->ProtocolsPerHandle ( handle, protocol_buffer, + protocol_buffer_count ); + DBGC ( colour, "= %s", efi_status ( efirc ) ); + if ( efirc == 0 ) { + DBGC ( colour, " ( {" ); + for ( i = 0 
; i < *protocol_buffer_count ; i++ ) { + DBGC ( colour, "%s%s", ( i ? ", " : " " ), + efi_guid_ntoa ( (*protocol_buffer)[i] ) ); + } + DBGC ( colour, " } )" ); + } + DBGC ( colour, " -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap LocateHandleBuffer() + * + */ +static EFI_STATUS EFIAPI +efi_locate_handle_buffer_wrapper ( EFI_LOCATE_SEARCH_TYPE search_type, + EFI_GUID *protocol, VOID *search_key, + UINTN *no_handles, EFI_HANDLE **buffer ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "LocateHandleBuffer ( %s, %s, %p ) ", + efi_locate_search_type_name ( search_type ), + efi_guid_ntoa ( protocol ), search_key ); + efirc = bs->LocateHandleBuffer ( search_type, protocol, search_key, + no_handles, buffer ); + DBGC ( colour, "= %s", efi_status ( efirc ) ); + if ( efirc == 0 ) { + DBGC ( colour, " ( %d, {", ( ( unsigned int ) *no_handles ) ); + for ( i = 0 ; i < *no_handles ; i++ ) { + DBGC ( colour, "%s%s", ( i ? 
", " : " " ), + efi_handle_name ( (*buffer)[i] ) ); + } + DBGC ( colour, " } )" ); + } + DBGC ( colour, " -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap LocateProtocol() + * + */ +static EFI_STATUS EFIAPI +efi_locate_protocol_wrapper ( EFI_GUID *protocol, VOID *registration, + VOID **interface ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "LocateProtocol ( %s, %p ) ", + efi_guid_ntoa ( protocol ), registration ); + efirc = bs->LocateProtocol ( protocol, registration, interface ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *interface, retaddr ); + return efirc; +} + +/** Maximum number of interfaces for wrapped ...MultipleProtocolInterfaces() */ +#define MAX_WRAP_MULTI 20 + +/** + * Wrap InstallMultipleProtocolInterfaces() + * + */ +static EFI_STATUS EFIAPI +efi_install_multiple_protocol_interfaces_wrapper ( EFI_HANDLE *handle, ... ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_GUID *protocol[ MAX_WRAP_MULTI + 1 ]; + VOID *interface[MAX_WRAP_MULTI]; + VA_LIST ap; + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "InstallMultipleProtocolInterfaces ( %s", + efi_handle_name ( *handle ) ); + memset ( protocol, 0, sizeof ( protocol ) ); + memset ( interface, 0, sizeof ( interface ) ); + VA_START ( ap, handle ); + for ( i = 0 ; ( protocol[i] = VA_ARG ( ap, EFI_GUID * ) ) ; i++ ) { + if ( i == MAX_WRAP_MULTI ) { + VA_END ( ap ); + efirc = EFI_OUT_OF_RESOURCES; + DBGC ( colour, " ) = %s " + "-> %p\n", efi_status ( efirc ), retaddr ); + return efirc; + } + interface[i] = VA_ARG ( ap, VOID * ); + DBGC ( colour, ", %s, %p", + efi_guid_ntoa ( protocol[i] ), interface[i] ); + } + VA_END ( ap ); + DBGC ( colour, " ) " ); + efirc = bs->InstallMultipleProtocolInterfaces ( handle, + protocol[0], interface[0], protocol[1], interface[1], + protocol[2], interface[2], protocol[3], 
interface[3], + protocol[4], interface[4], protocol[5], interface[5], + protocol[6], interface[6], protocol[7], interface[7], + protocol[8], interface[8], protocol[9], interface[9], + protocol[10], interface[10], protocol[11], interface[11], + protocol[12], interface[12], protocol[13], interface[13], + protocol[14], interface[14], protocol[15], interface[15], + protocol[16], interface[16], protocol[17], interface[17], + protocol[18], interface[18], protocol[19], interface[19], + NULL ); + DBGC ( colour, "= %s ( %s ) -> %p\n", + efi_status ( efirc ), efi_handle_name ( *handle ), retaddr ); + return efirc; +} + +/** + * Wrap UninstallMultipleProtocolInterfaces() + * + */ +static EFI_STATUS EFIAPI +efi_uninstall_multiple_protocol_interfaces_wrapper ( EFI_HANDLE handle, ... ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_GUID *protocol[ MAX_WRAP_MULTI + 1 ]; + VOID *interface[MAX_WRAP_MULTI]; + VA_LIST ap; + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "UninstallMultipleProtocolInterfaces ( %s", + efi_handle_name ( handle ) ); + memset ( protocol, 0, sizeof ( protocol ) ); + memset ( interface, 0, sizeof ( interface ) ); + VA_START ( ap, handle ); + for ( i = 0 ; ( protocol[i] = VA_ARG ( ap, EFI_GUID * ) ) ; i++ ) { + if ( i == MAX_WRAP_MULTI ) { + VA_END ( ap ); + efirc = EFI_OUT_OF_RESOURCES; + DBGC ( colour, " ) = %s " + "-> %p\n", efi_status ( efirc ), retaddr ); + return efirc; + } + interface[i] = VA_ARG ( ap, VOID * ); + DBGC ( colour, ", %s, %p", + efi_guid_ntoa ( protocol[i] ), interface[i] ); + } + VA_END ( ap ); + DBGC ( colour, " ) " ); + efirc = bs->UninstallMultipleProtocolInterfaces ( handle, + protocol[0], interface[0], protocol[1], interface[1], + protocol[2], interface[2], protocol[3], interface[3], + protocol[4], interface[4], protocol[5], interface[5], + protocol[6], interface[6], protocol[7], interface[7], + protocol[8], interface[8], protocol[9], interface[9], + 
protocol[10], interface[10], protocol[11], interface[11], + protocol[12], interface[12], protocol[13], interface[13], + protocol[14], interface[14], protocol[15], interface[15], + protocol[16], interface[16], protocol[17], interface[17], + protocol[18], interface[18], protocol[19], interface[19], + NULL ); + DBGC ( colour, "= %s -> %p\n", + efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CreateEventEx() + * + */ +static EFI_STATUS EFIAPI +efi_create_event_ex_wrapper ( UINT32 type, EFI_TPL notify_tpl, + EFI_EVENT_NOTIFY notify_function, + CONST VOID *notify_context, + CONST EFI_GUID *event_group, EFI_EVENT *event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CreateEventEx ( %#x, %s, %p, %p, %s ) ", + type, efi_tpl ( notify_tpl ), notify_function, notify_context, + efi_guid_ntoa ( event_group ) ); + efirc = bs->CreateEventEx ( type, notify_tpl, notify_function, + notify_context, event_group, event ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *event, retaddr ); + return efirc; +} + +/** + * Build table wrappers + * + * @ret systab Wrapped system table + */ +EFI_SYSTEM_TABLE * efi_wrap_systab ( void ) { + static EFI_SYSTEM_TABLE efi_systab_wrapper; + static EFI_BOOT_SERVICES efi_bs_wrapper; + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + /* Build boot services table wrapper */ + memcpy ( &efi_bs_wrapper, bs, sizeof ( efi_bs_wrapper ) ); + efi_bs_wrapper.RaiseTPL = efi_raise_tpl_wrapper; + efi_bs_wrapper.RestoreTPL = efi_restore_tpl_wrapper; + efi_bs_wrapper.AllocatePages = efi_allocate_pages_wrapper; + efi_bs_wrapper.FreePages = efi_free_pages_wrapper; + efi_bs_wrapper.GetMemoryMap = efi_get_memory_map_wrapper; + efi_bs_wrapper.AllocatePool = efi_allocate_pool_wrapper; + efi_bs_wrapper.FreePool = efi_free_pool_wrapper; + efi_bs_wrapper.CreateEvent = efi_create_event_wrapper; + efi_bs_wrapper.SetTimer = 
efi_set_timer_wrapper; + efi_bs_wrapper.WaitForEvent = efi_wait_for_event_wrapper; + efi_bs_wrapper.SignalEvent = efi_signal_event_wrapper; + efi_bs_wrapper.CloseEvent = efi_close_event_wrapper; + efi_bs_wrapper.CheckEvent = efi_check_event_wrapper; + efi_bs_wrapper.InstallProtocolInterface + = efi_install_protocol_interface_wrapper; + efi_bs_wrapper.ReinstallProtocolInterface + = efi_reinstall_protocol_interface_wrapper; + efi_bs_wrapper.UninstallProtocolInterface + = efi_uninstall_protocol_interface_wrapper; + efi_bs_wrapper.HandleProtocol = efi_handle_protocol_wrapper; + efi_bs_wrapper.RegisterProtocolNotify + = efi_register_protocol_notify_wrapper; + efi_bs_wrapper.LocateHandle = efi_locate_handle_wrapper; + efi_bs_wrapper.LocateDevicePath = efi_locate_device_path_wrapper; + efi_bs_wrapper.InstallConfigurationTable + = efi_install_configuration_table_wrapper; + efi_bs_wrapper.LoadImage = efi_load_image_wrapper; + efi_bs_wrapper.StartImage = efi_start_image_wrapper; + efi_bs_wrapper.Exit = efi_exit_wrapper; + efi_bs_wrapper.UnloadImage = efi_unload_image_wrapper; + efi_bs_wrapper.ExitBootServices = efi_exit_boot_services_wrapper; + efi_bs_wrapper.GetNextMonotonicCount + = efi_get_next_monotonic_count_wrapper; + efi_bs_wrapper.Stall = efi_stall_wrapper; + efi_bs_wrapper.SetWatchdogTimer = efi_set_watchdog_timer_wrapper; + efi_bs_wrapper.ConnectController + = efi_connect_controller_wrapper; + efi_bs_wrapper.DisconnectController + = efi_disconnect_controller_wrapper; + efi_bs_wrapper.OpenProtocol = efi_open_protocol_wrapper; + efi_bs_wrapper.CloseProtocol = efi_close_protocol_wrapper; + efi_bs_wrapper.OpenProtocolInformation + = efi_open_protocol_information_wrapper; + efi_bs_wrapper.ProtocolsPerHandle + = efi_protocols_per_handle_wrapper; + efi_bs_wrapper.LocateHandleBuffer + = efi_locate_handle_buffer_wrapper; + efi_bs_wrapper.LocateProtocol = efi_locate_protocol_wrapper; + efi_bs_wrapper.InstallMultipleProtocolInterfaces + = 
efi_install_multiple_protocol_interfaces_wrapper; + efi_bs_wrapper.UninstallMultipleProtocolInterfaces + = efi_uninstall_multiple_protocol_interfaces_wrapper; + efi_bs_wrapper.CreateEventEx = efi_create_event_ex_wrapper; + + /* Build system table wrapper */ + memcpy ( &efi_systab_wrapper, efi_systab, + sizeof ( efi_systab_wrapper ) ); + efi_systab_wrapper.BootServices = &efi_bs_wrapper; + + return &efi_systab_wrapper; +} + +/** + * Wrap the calls made by a loaded image + * + * @v handle Image handle + */ +void efi_wrap ( EFI_HANDLE handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_LOADED_IMAGE_PROTOCOL *image; + void *intf; + } loaded; + EFI_STATUS efirc; + int rc; + + /* Do nothing unless debugging is enabled */ + if ( ! DBG_LOG ) + return; + + /* Open loaded image protocol */ + if ( ( efirc = bs->OpenProtocol ( handle, + &efi_loaded_image_protocol_guid, + &loaded.intf, efi_image_handle, NULL, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( colour, "WRAP %s could not get loaded image protocol: " + "%s\n", efi_handle_name ( handle ), strerror ( rc ) ); + return; + } + + /* Provide system table wrapper to image */ + loaded.image->SystemTable = efi_wrap_systab(); + DBGC ( colour, "WRAP %s at base %p has protocols:\n", + efi_handle_name ( handle ), loaded.image->ImageBase ); + DBGC_EFI_PROTOCOLS ( colour, handle ); + DBGC ( colour, "WRAP %s parent", efi_handle_name ( handle ) ); + DBGC ( colour, " %s\n", efi_handle_name ( loaded.image->ParentHandle )); + DBGC ( colour, "WRAP %s device", efi_handle_name ( handle ) ); + DBGC ( colour, " %s\n", efi_handle_name ( loaded.image->DeviceHandle )); + DBGC ( colour, "WRAP %s file", efi_handle_name ( handle ) ); + DBGC ( colour, " %s\n", efi_devpath_text ( loaded.image->FilePath ) ); + + /* Close loaded image protocol */ + bs->CloseProtocol ( handle, &efi_loaded_image_protocol_guid, + efi_image_handle, NULL ); +} diff --git a/src/interface/efi/efidrvprefix.c 
b/src/interface/efi/efidrvprefix.c new file mode 100644 index 00000000..ac7d9437 --- /dev/null +++ b/src/interface/efi/efidrvprefix.c @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2009 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include + +/** + * EFI entry point + * + * @v image_handle Image handle + * @v systab System table + * @ret efirc EFI return status code + */ +EFI_STATUS EFIAPI _efidrv_start ( EFI_HANDLE image_handle, + EFI_SYSTEM_TABLE *systab ) { + EFI_BOOT_SERVICES *bs; + EFI_TPL saved_tpl; + EFI_STATUS efirc; + + /* Initialise stack cookie */ + efi_init_stack_guard ( image_handle ); + + /* Initialise EFI environment */ + if ( ( efirc = efi_init ( image_handle, systab ) ) != 0 ) + return efirc; + + /* Raise TPL */ + bs = efi_systab->BootServices; + saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + + /* Initialise iPXE environment */ + initialise(); + startup(); + + /* Restore TPL */ + bs->RestoreTPL ( saved_tpl ); + + return 0; +} + +/** + * Probe EFI root bus + * + * @v rootdev EFI root device + */ +static int efi_probe ( struct root_device *rootdev __unused ) { + + /* Do nothing */ + return 0; +} + +/** + * Remove EFI root bus + * + * @v rootdev EFI root device + */ +static void efi_remove ( struct 
root_device *rootdev __unused ) { + + efi_driver_disconnect_all(); +} + +/** EFI root device driver */ +static struct root_driver efi_root_driver = { + .probe = efi_probe, + .remove = efi_remove, +}; + +/** EFI root device */ +struct root_device efi_root_device __root_device = { + .dev = { .name = "EFI" }, + .driver = &efi_root_driver, +}; diff --git a/src/interface/efi/efiprefix.c b/src/interface/efi/efiprefix.c new file mode 100644 index 00000000..2c5a5b31 --- /dev/null +++ b/src/interface/efi/efiprefix.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2009 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * EFI entry point + * + * @v image_handle Image handle + * @v systab System table + * @ret efirc EFI return status code + */ +EFI_STATUS EFIAPI _efi_start ( EFI_HANDLE image_handle, + EFI_SYSTEM_TABLE *systab ) { + EFI_STATUS efirc; + int rc; + + /* Initialise stack cookie */ + efi_init_stack_guard ( image_handle ); + + /* Initialise EFI environment */ + if ( ( efirc = efi_init ( image_handle, systab ) ) != 0 ) + goto err_init; + + /* Record autoboot device (if any) */ + efi_set_autoboot(); + + /* Claim SNP devices for use by iPXE */ + efi_snp_claim(); + + /* Start watchdog holdoff timer */ + efi_watchdog_start(); + + /* Call to main() */ + if ( ( rc = main() ) != 0 ) { + efirc = EFIRC ( rc ); + goto err_main; + } + + err_main: + efi_watchdog_stop(); + efi_snp_release(); + efi_loaded_image->Unload ( image_handle ); + efi_driver_reconnect_all(); + err_init: + return efirc; +} + +/** + * Probe EFI root bus + * + * @v rootdev EFI root device + */ +static int efi_probe ( struct root_device *rootdev __unused ) { + + /* Unloaded any blacklisted drivers */ + efi_unload_blacklist(); + + /* Connect our drivers */ + return efi_driver_connect_all(); +} + +/** + * Remove EFI root bus + * + * @v rootdev EFI root device + */ +static void efi_remove ( struct root_device *rootdev __unused ) { + + /* Disconnect our drivers */ + efi_driver_disconnect_all(); +} + +/** EFI root device driver */ +static struct root_driver efi_root_driver = { + .probe = efi_probe, + .remove = efi_remove, +}; + +/** EFI root device */ +struct root_device efi_root_device __root_device = { + .dev = { .name = "EFI" }, + .driver = &efi_root_driver, +}; diff --git a/src/interface/hyperv/vmbus.c b/src/interface/hyperv/vmbus.c new file mode 100644 index 00000000..e50fe995 --- /dev/null +++ b/src/interface/hyperv/vmbus.c @@ -0,0 +1,1467 @@ +/* + * Copyright (C) 2014 Michael 
Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Hyper-V virtual machine bus + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** VMBus initial GPADL ID + * + * This is an opaque value with no meaning. The Linux kernel uses + * 0xe1e10. + */ +#define VMBUS_GPADL_MAGIC 0x18ae0000 + +/** Current (i.e. most recently issued) GPADL ID */ +static unsigned int vmbus_gpadl = VMBUS_GPADL_MAGIC; + +/** Obsolete GPADL ID threshold + * + * When the Hyper-V connection is reset, any previous GPADLs are + * automatically rendered obsolete. 
+ */ +unsigned int vmbus_obsolete_gpadl; + +/** + * Post message + * + * @v hv Hyper-V hypervisor + * @v header Message header + * @v len Length of message (including header) + * @ret rc Return status code + */ +static int vmbus_post_message ( struct hv_hypervisor *hv, + const struct vmbus_message_header *header, + size_t len ) { + struct vmbus *vmbus = hv->vmbus; + int rc; + + /* Post message */ + if ( ( rc = hv_post_message ( hv, VMBUS_MESSAGE_ID, VMBUS_MESSAGE_TYPE, + header, len ) ) != 0 ) { + DBGC ( vmbus, "VMBUS %p could not post message: %s\n", + vmbus, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Post empty message + * + * @v hv Hyper-V hypervisor + * @v type Message type + * @ret rc Return status code + */ +static int vmbus_post_empty_message ( struct hv_hypervisor *hv, + unsigned int type ) { + struct vmbus_message_header header = { .type = cpu_to_le32 ( type ) }; + + return vmbus_post_message ( hv, &header, sizeof ( header ) ); +} + +/** + * Wait for received message of any type + * + * @v hv Hyper-V hypervisor + * @ret rc Return status code + */ +static int vmbus_wait_for_any_message ( struct hv_hypervisor *hv ) { + struct vmbus *vmbus = hv->vmbus; + int rc; + + /* Wait for message */ + if ( ( rc = hv_wait_for_message ( hv, VMBUS_MESSAGE_SINT ) ) != 0 ) { + DBGC ( vmbus, "VMBUS %p failed waiting for message: %s\n", + vmbus, strerror ( rc ) ); + return rc; + } + + /* Sanity check */ + if ( hv->message->received.type != cpu_to_le32 ( VMBUS_MESSAGE_TYPE ) ){ + DBGC ( vmbus, "VMBUS %p invalid message type %d\n", + vmbus, le32_to_cpu ( hv->message->received.type ) ); + return -EINVAL; + } + + return 0; +} + +/** + * Wait for received message of a specified type, ignoring any others + * + * @v hv Hyper-V hypervisor + * @v type Message type + * @ret rc Return status code + */ +static int vmbus_wait_for_message ( struct hv_hypervisor *hv, + unsigned int type ) { + struct vmbus *vmbus = hv->vmbus; + const struct vmbus_message_header *header = 
&vmbus->message->header; + int rc; + + /* Loop until specified message arrives, or until an error occurs */ + while ( 1 ) { + + /* Wait for message */ + if ( ( rc = vmbus_wait_for_any_message ( hv ) ) != 0 ) + return rc; + + /* Check for requested message type */ + if ( header->type == cpu_to_le32 ( type ) ) + return 0; + + /* Ignore any other messages (e.g. due to additional + * channels being offered at runtime). + */ + DBGC ( vmbus, "VMBUS %p ignoring message type %d (expecting " + "%d)\n", vmbus, le32_to_cpu ( header->type ), type ); + } +} + +/** + * Initiate contact + * + * @v hv Hyper-V hypervisor + * @v raw VMBus protocol (raw) version + * @ret rc Return status code + */ +static int vmbus_initiate_contact ( struct hv_hypervisor *hv, + unsigned int raw ) { + struct vmbus *vmbus = hv->vmbus; + const struct vmbus_version_response *version = &vmbus->message->version; + struct vmbus_initiate_contact initiate; + int rc; + + /* Construct message */ + memset ( &initiate, 0, sizeof ( initiate ) ); + initiate.header.type = cpu_to_le32 ( VMBUS_INITIATE_CONTACT ); + initiate.version.raw = cpu_to_le32 ( raw ); + initiate.intr = virt_to_phys ( vmbus->intr ); + initiate.monitor_in = virt_to_phys ( vmbus->monitor_in ); + initiate.monitor_out = virt_to_phys ( vmbus->monitor_out ); + + /* Post message */ + if ( ( rc = vmbus_post_message ( hv, &initiate.header, + sizeof ( initiate ) ) ) != 0 ) + return rc; + + /* Wait for response */ + if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_VERSION_RESPONSE ) ) !=0) + return rc; + + /* Check response */ + if ( ! 
version->supported ) { + DBGC ( vmbus, "VMBUS %p requested version not supported\n", + vmbus ); + return -ENOTSUP; + } + + DBGC ( vmbus, "VMBUS %p initiated contact using version %d.%d\n", + vmbus, le16_to_cpu ( initiate.version.major ), + le16_to_cpu ( initiate.version.minor ) ); + return 0; +} + +/** + * Terminate contact + * + * @v hv Hyper-V hypervisor + * @ret rc Return status code + */ +static int vmbus_unload ( struct hv_hypervisor *hv ) { + int rc; + + /* Post message */ + if ( ( rc = vmbus_post_empty_message ( hv, VMBUS_UNLOAD ) ) != 0 ) + return rc; + + /* Wait for response */ + if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_UNLOAD_RESPONSE ) ) != 0) + return rc; + + return 0; +} + +/** + * Negotiate protocol version + * + * @v hv Hyper-V hypervisor + * @ret rc Return status code + */ +static int vmbus_negotiate_version ( struct hv_hypervisor *hv ) { + int rc; + + /* We require the ability to disconnect from and reconnect to + * VMBus; if we don't have this then there is no (viable) way + * for a loaded operating system to continue to use any VMBus + * devices. (There is also a small but non-zero risk that the + * host will continue to write to our interrupt and monitor + * pages, since the VMBUS_UNLOAD message in earlier versions + * is essentially a no-op.) + * + * This requires us to ensure that the host supports protocol + * version 3.0 (VMBUS_VERSION_WIN8_1). However, we can't + * actually _use_ protocol version 3.0, since doing so causes + * an iSCSI-booted Windows Server 2012 R2 VM to crash due to a + * NULL pointer dereference in vmbus.sys. + * + * To work around this problem, we first ensure that we can + * connect using protocol v3.0, then disconnect and reconnect + * using the oldest known protocol. 
+ */ + + /* Initiate contact to check for required protocol support */ + if ( ( rc = vmbus_initiate_contact ( hv, VMBUS_VERSION_WIN8_1 ) ) != 0 ) + return rc; + + /* Terminate contact */ + if ( ( rc = vmbus_unload ( hv ) ) != 0 ) + return rc; + + /* Reinitiate contact using the oldest known protocol version */ + if ( ( rc = vmbus_initiate_contact ( hv, VMBUS_VERSION_WS2008 ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Establish GPA descriptor list + * + * @v vmdev VMBus device + * @v data Data buffer + * @v len Length of data buffer + * @ret gpadl GPADL ID, or negative error + */ +int vmbus_establish_gpadl ( struct vmbus_device *vmdev, userptr_t data, + size_t len ) { + struct hv_hypervisor *hv = vmdev->hv; + struct vmbus *vmbus = hv->vmbus; + physaddr_t addr = user_to_phys ( data, 0 ); + unsigned int pfn_count = hv_pfn_count ( addr, len ); + struct { + struct vmbus_gpadl_header gpadlhdr; + struct vmbus_gpa_range range; + uint64_t pfn[pfn_count]; + } __attribute__ (( packed )) gpadlhdr; + const struct vmbus_gpadl_created *created = &vmbus->message->created; + unsigned int gpadl; + unsigned int i; + int rc; + + /* Allocate GPADL ID */ + gpadl = ++vmbus_gpadl; + + /* Construct message */ + memset ( &gpadlhdr, 0, sizeof ( gpadlhdr ) ); + gpadlhdr.gpadlhdr.header.type = cpu_to_le32 ( VMBUS_GPADL_HEADER ); + gpadlhdr.gpadlhdr.channel = cpu_to_le32 ( vmdev->channel ); + gpadlhdr.gpadlhdr.gpadl = cpu_to_le32 ( gpadl ); + gpadlhdr.gpadlhdr.range_len = + cpu_to_le16 ( ( sizeof ( gpadlhdr.range ) + + sizeof ( gpadlhdr.pfn ) ) ); + gpadlhdr.gpadlhdr.range_count = cpu_to_le16 ( 1 ); + gpadlhdr.range.len = cpu_to_le32 ( len ); + gpadlhdr.range.offset = cpu_to_le32 ( addr & ( PAGE_SIZE - 1 ) ); + for ( i = 0 ; i < pfn_count ; i++ ) + gpadlhdr.pfn[i] = ( ( addr / PAGE_SIZE ) + i ); + + /* Post message */ + if ( ( rc = vmbus_post_message ( hv, &gpadlhdr.gpadlhdr.header, + sizeof ( gpadlhdr ) ) ) != 0 ) + return rc; + + /* Wait for response */ + if ( ( rc = 
vmbus_wait_for_message ( hv, VMBUS_GPADL_CREATED ) ) != 0 ) + return rc; + + /* Check response */ + if ( created->channel != cpu_to_le32 ( vmdev->channel ) ) { + DBGC ( vmdev, "VMBUS %s unexpected GPADL channel %d\n", + vmdev->dev.name, le32_to_cpu ( created->channel ) ); + return -EPROTO; + } + if ( created->gpadl != cpu_to_le32 ( gpadl ) ) { + DBGC ( vmdev, "VMBUS %s unexpected GPADL ID %#08x\n", + vmdev->dev.name, le32_to_cpu ( created->gpadl ) ); + return -EPROTO; + } + if ( created->status != 0 ) { + DBGC ( vmdev, "VMBUS %s GPADL creation failed: %#08x\n", + vmdev->dev.name, le32_to_cpu ( created->status ) ); + return -EPROTO; + } + + DBGC ( vmdev, "VMBUS %s GPADL %#08x is [%08lx,%08lx)\n", + vmdev->dev.name, gpadl, addr, ( addr + len ) ); + return gpadl; +} + +/** + * Tear down GPA descriptor list + * + * @v vmdev VMBus device + * @v gpadl GPADL ID + * @ret rc Return status code + */ +int vmbus_gpadl_teardown ( struct vmbus_device *vmdev, unsigned int gpadl ) { + struct hv_hypervisor *hv = vmdev->hv; + struct vmbus *vmbus = hv->vmbus; + struct vmbus_gpadl_teardown teardown; + const struct vmbus_gpadl_torndown *torndown = &vmbus->message->torndown; + int rc; + + /* If GPADL is obsolete (i.e. was created before the most + * recent Hyper-V reset), then we will never receive a + * response to the teardown message. Since the GPADL is + * already destroyed as far as the hypervisor is concerned, no + * further action is required. 
+ */ + if ( vmbus_gpadl_is_obsolete ( gpadl ) ) + return 0; + + /* Construct message */ + memset ( &teardown, 0, sizeof ( teardown ) ); + teardown.header.type = cpu_to_le32 ( VMBUS_GPADL_TEARDOWN ); + teardown.channel = cpu_to_le32 ( vmdev->channel ); + teardown.gpadl = cpu_to_le32 ( gpadl ); + + /* Post message */ + if ( ( rc = vmbus_post_message ( hv, &teardown.header, + sizeof ( teardown ) ) ) != 0 ) + return rc; + + /* Wait for response */ + if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_GPADL_TORNDOWN ) ) != 0 ) + return rc; + + /* Check response */ + if ( torndown->gpadl != cpu_to_le32 ( gpadl ) ) { + DBGC ( vmdev, "VMBUS %s unexpected GPADL ID %#08x\n", + vmdev->dev.name, le32_to_cpu ( torndown->gpadl ) ); + return -EPROTO; + } + + return 0; +} + +/** + * Open VMBus channel + * + * @v vmdev VMBus device + * @v op Channel operations + * @v out_len Outbound ring buffer length + * @v in_len Inbound ring buffer length + * @v mtu Maximum expected data packet length (including headers) + * @ret rc Return status code + * + * Both outbound and inbound ring buffer lengths must be a power of + * two and a multiple of PAGE_SIZE. The requirement to be a power of + * two is a policy decision taken to simplify the ring buffer indexing + * logic. 
+ */ +int vmbus_open ( struct vmbus_device *vmdev, + struct vmbus_channel_operations *op, + size_t out_len, size_t in_len, size_t mtu ) { + struct hv_hypervisor *hv = vmdev->hv; + struct vmbus *vmbus = hv->vmbus; + struct vmbus_open_channel open; + const struct vmbus_open_channel_result *opened = + &vmbus->message->opened; + size_t len; + void *ring; + void *packet; + int gpadl; + uint32_t open_id; + int rc; + + /* Sanity checks */ + assert ( ( out_len % PAGE_SIZE ) == 0 ); + assert ( ( out_len & ( out_len - 1 ) ) == 0 ); + assert ( ( in_len % PAGE_SIZE ) == 0 ); + assert ( ( in_len & ( in_len - 1 ) ) == 0 ); + assert ( mtu >= ( sizeof ( struct vmbus_packet_header ) + + sizeof ( struct vmbus_packet_footer ) ) ); + + /* Allocate packet buffer */ + packet = malloc ( mtu ); + if ( ! packet ) { + rc = -ENOMEM; + goto err_alloc_packet; + } + + /* Allocate ring buffer */ + len = ( sizeof ( *vmdev->out ) + out_len + + sizeof ( *vmdev->in ) + in_len ); + assert ( ( len % PAGE_SIZE ) == 0 ); + ring = malloc_dma ( len, PAGE_SIZE ); + if ( ! 
ring ) { + rc = -ENOMEM; + goto err_alloc_ring; + } + memset ( ring, 0, len ); + + /* Establish GPADL for ring buffer */ + gpadl = vmbus_establish_gpadl ( vmdev, virt_to_user ( ring ), len ); + if ( gpadl < 0 ) { + rc = gpadl; + goto err_establish; + } + + /* Construct message */ + memset ( &open, 0, sizeof ( open ) ); + open.header.type = cpu_to_le32 ( VMBUS_OPEN_CHANNEL ); + open.channel = cpu_to_le32 ( vmdev->channel ); + open_id = random(); + open.id = open_id; /* Opaque random value: endianness irrelevant */ + open.gpadl = cpu_to_le32 ( gpadl ); + open.out_pages = ( ( sizeof ( *vmdev->out ) / PAGE_SIZE ) + + ( out_len / PAGE_SIZE ) ); + + /* Post message */ + if ( ( rc = vmbus_post_message ( hv, &open.header, + sizeof ( open ) ) ) != 0 ) + goto err_post_message; + + /* Wait for response */ + if ( ( rc = vmbus_wait_for_message ( hv, + VMBUS_OPEN_CHANNEL_RESULT ) ) != 0) + goto err_wait_for_message; + + /* Check response */ + if ( opened->channel != cpu_to_le32 ( vmdev->channel ) ) { + DBGC ( vmdev, "VMBUS %s unexpected opened channel %#08x\n", + vmdev->dev.name, le32_to_cpu ( opened->channel ) ); + rc = -EPROTO; + goto err_check_response; + } + if ( opened->id != open_id /* Non-endian */ ) { + DBGC ( vmdev, "VMBUS %s unexpected open ID %#08x\n", + vmdev->dev.name, le32_to_cpu ( opened->id ) ); + rc = -EPROTO; + goto err_check_response; + } + if ( opened->status != 0 ) { + DBGC ( vmdev, "VMBUS %s open failed: %#08x\n", + vmdev->dev.name, le32_to_cpu ( opened->status ) ); + rc = -EPROTO; + goto err_check_response; + } + + /* Store channel parameters */ + vmdev->out_len = out_len; + vmdev->in_len = in_len; + vmdev->out = ring; + vmdev->in = ( ring + sizeof ( *vmdev->out ) + out_len ); + vmdev->gpadl = gpadl; + vmdev->op = op; + vmdev->mtu = mtu; + vmdev->packet = packet; + + DBGC ( vmdev, "VMBUS %s channel GPADL %#08x ring " + "[%#08lx,%#08lx,%#08lx)\n", vmdev->dev.name, vmdev->gpadl, + virt_to_phys ( vmdev->out ), virt_to_phys ( vmdev->in ), + ( virt_to_phys ( 
vmdev->out ) + len ) ); + return 0; + + err_check_response: + err_wait_for_message: + err_post_message: + vmbus_gpadl_teardown ( vmdev, vmdev->gpadl ); + err_establish: + free_dma ( ring, len ); + err_alloc_ring: + free ( packet ); + err_alloc_packet: + return rc; +} + +/** + * Close VMBus channel + * + * @v vmdev VMBus device + */ +void vmbus_close ( struct vmbus_device *vmdev ) { + struct hv_hypervisor *hv = vmdev->hv; + struct vmbus_close_channel close; + size_t len; + int rc; + + /* Construct message */ + memset ( &close, 0, sizeof ( close ) ); + close.header.type = cpu_to_le32 ( VMBUS_CLOSE_CHANNEL ); + close.channel = cpu_to_le32 ( vmdev->channel ); + + /* Post message */ + if ( ( rc = vmbus_post_message ( hv, &close.header, + sizeof ( close ) ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s failed to close: %s\n", + vmdev->dev.name, strerror ( rc ) ); + /* Continue to attempt to tear down GPADL, so that our + * memory is no longer accessible by the remote VM. + */ + } + + /* Tear down GPADL */ + if ( ( rc = vmbus_gpadl_teardown ( vmdev, vmdev->gpadl ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s failed to tear down channel GPADL: " + "%s\n", vmdev->dev.name, strerror ( rc ) ); + /* We can't prevent the remote VM from continuing to + * access this memory, so leak it. 
+ */ + return; + } + + /* Free ring buffer */ + len = ( sizeof ( *vmdev->out ) + vmdev->out_len + + sizeof ( *vmdev->in ) + vmdev->in_len ); + free_dma ( vmdev->out, len ); + vmdev->out = NULL; + vmdev->in = NULL; + + /* Free packet buffer */ + free ( vmdev->packet ); + vmdev->packet = NULL; + + DBGC ( vmdev, "VMBUS %s closed\n", vmdev->dev.name ); +} + +/** + * Signal channel via monitor page + * + * @v vmdev VMBus device + */ +static void vmbus_signal_monitor ( struct vmbus_device *vmdev ) { + struct hv_hypervisor *hv = vmdev->hv; + struct vmbus *vmbus = hv->vmbus; + struct hv_monitor_trigger *trigger; + unsigned int group; + unsigned int bit; + + /* Set bit in monitor trigger group */ + group = ( vmdev->monitor / ( 8 * sizeof ( trigger->pending ) )); + bit = ( vmdev->monitor % ( 8 * sizeof ( trigger->pending ) ) ); + trigger = &vmbus->monitor_out->trigger[group]; + set_bit ( bit, trigger ); +} + +/** + * Signal channel via hypervisor event + * + * @v vmdev VMBus device + */ +static void vmbus_signal_event ( struct vmbus_device *vmdev ) { + struct hv_hypervisor *hv = vmdev->hv; + int rc; + + /* Signal hypervisor event */ + if ( ( rc = hv_signal_event ( hv, VMBUS_EVENT_ID, 0 ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not signal event: %s\n", + vmdev->dev.name, strerror ( rc ) ); + return; + } +} + +/** + * Fill outbound ring buffer + * + * @v vmdev VMBus device + * @v prod Producer index + * @v data Data + * @v len Length + * @ret prod New producer index + * + * The caller must ensure that there is sufficient space in the ring + * buffer. 
+ */ +static size_t vmbus_produce ( struct vmbus_device *vmdev, size_t prod, + const void *data, size_t len ) { + size_t first; + size_t second; + + /* Determine fragment lengths */ + first = ( vmdev->out_len - prod ); + if ( first > len ) + first = len; + second = ( len - first ); + + /* Copy fragment(s) */ + memcpy ( &vmdev->out->data[prod], data, first ); + if ( second ) + memcpy ( &vmdev->out->data[0], ( data + first ), second ); + + return ( ( prod + len ) & ( vmdev->out_len - 1 ) ); +} + +/** + * Consume inbound ring buffer + * + * @v vmdev VMBus device + * @v cons Consumer index + * @v data Data buffer, or NULL + * @v len Length to consume + * @ret cons New consumer index + */ +static size_t vmbus_consume ( struct vmbus_device *vmdev, size_t cons, + void *data, size_t len ) { + size_t first; + size_t second; + + /* Determine fragment lengths */ + first = ( vmdev->in_len - cons ); + if ( first > len ) + first = len; + second = ( len - first ); + + /* Copy fragment(s) */ + memcpy ( data, &vmdev->in->data[cons], first ); + if ( second ) + memcpy ( ( data + first ), &vmdev->in->data[0], second ); + + return ( ( cons + len ) & ( vmdev->in_len - 1 ) ); +} + +/** + * Send packet via ring buffer + * + * @v vmdev VMBus device + * @v header Packet header + * @v data Data + * @v len Length of data + * @ret rc Return status code + * + * Send a packet via the outbound ring buffer. All fields in the + * packet header must be filled in, with the exception of the total + * packet length. 
+ */ +static int vmbus_send ( struct vmbus_device *vmdev, + struct vmbus_packet_header *header, + const void *data, size_t len ) { + struct hv_hypervisor *hv = vmdev->hv; + struct vmbus *vmbus = hv->vmbus; + static uint8_t padding[ 8 - 1 ]; + struct vmbus_packet_footer footer; + size_t header_len; + size_t pad_len; + size_t footer_len; + size_t ring_len; + size_t cons; + size_t prod; + size_t old_prod; + size_t fill; + + /* Sanity check */ + assert ( vmdev->out != NULL ); + + /* Calculate lengths */ + header_len = ( le16_to_cpu ( header->hdr_qlen ) * 8 ); + pad_len = ( ( -len ) & ( 8 - 1 ) ); + footer_len = sizeof ( footer ); + ring_len = ( header_len + len + pad_len + footer_len ); + + /* Check that we have enough room in the outbound ring buffer */ + cons = le32_to_cpu ( vmdev->out->cons ); + prod = le32_to_cpu ( vmdev->out->prod ); + old_prod = prod; + fill = ( ( prod - cons ) & ( vmdev->out_len - 1 ) ); + if ( ( fill + ring_len ) >= vmdev->out_len ) { + DBGC ( vmdev, "VMBUS %s ring buffer full\n", vmdev->dev.name ); + return -ENOBUFS; + } + + /* Complete header */ + header->qlen = cpu_to_le16 ( ( ring_len - footer_len ) / 8 ); + + /* Construct footer */ + footer.reserved = 0; + footer.prod = vmdev->out->prod; + + /* Copy packet to buffer */ + DBGC2 ( vmdev, "VMBUS %s sending:\n", vmdev->dev.name ); + DBGC2_HDA ( vmdev, prod, header, header_len ); + prod = vmbus_produce ( vmdev, prod, header, header_len ); + DBGC2_HDA ( vmdev, prod, data, len ); + prod = vmbus_produce ( vmdev, prod, data, len ); + prod = vmbus_produce ( vmdev, prod, padding, pad_len ); + DBGC2_HDA ( vmdev, prod, &footer, sizeof ( footer ) ); + prod = vmbus_produce ( vmdev, prod, &footer, sizeof ( footer ) ); + assert ( ( ( prod - old_prod ) & ( vmdev->out_len - 1 ) ) == ring_len ); + + /* Update producer index */ + wmb(); + vmdev->out->prod = cpu_to_le32 ( prod ); + + /* Return if we do not need to signal the host. This follows + * the logic of hv_need_to_signal() in the Linux driver. 
+ */ + mb(); + if ( vmdev->out->intr_mask ) + return 0; + rmb(); + cons = le32_to_cpu ( vmdev->out->cons ); + if ( cons != old_prod ) + return 0; + + /* Set channel bit in interrupt page */ + set_bit ( vmdev->channel, vmbus->intr->out ); + + /* Signal the host */ + vmdev->signal ( vmdev ); + + return 0; +} + +/** + * Send control packet via ring buffer + * + * @v vmdev VMBus device + * @v xid Transaction ID (or zero to not request completion) + * @v data Data + * @v len Length of data + * @ret rc Return status code + * + * Send data using a VMBUS_DATA_INBAND packet. + */ +int vmbus_send_control ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ) { + struct vmbus_packet_header *header = vmdev->packet; + + /* Construct header in packet buffer */ + assert ( header != NULL ); + header->type = cpu_to_le16 ( VMBUS_DATA_INBAND ); + header->hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 ); + header->flags = ( xid ? + cpu_to_le16 ( VMBUS_COMPLETION_REQUESTED ) : 0 ); + header->xid = xid; /* Non-endian */ + + return vmbus_send ( vmdev, header, data, len ); +} + +/** + * Send data packet via ring buffer + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @v iobuf I/O buffer + * @ret rc Return status code + * + * Send data using a VMBUS_DATA_GPA_DIRECT packet. The caller is + * responsible for ensuring that the I/O buffer remains untouched + * until the corresponding completion has been received. 
+ */ +int vmbus_send_data ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len, struct io_buffer *iobuf ) { + physaddr_t addr = virt_to_phys ( iobuf->data ); + unsigned int pfn_count = hv_pfn_count ( addr, iob_len ( iobuf ) ); + struct { + struct vmbus_gpa_direct_header gpa; + struct vmbus_gpa_range range; + uint64_t pfn[pfn_count]; + } __attribute__ (( packed )) *header = vmdev->packet; + unsigned int i; + + /* Sanity check */ + assert ( header != NULL ); + assert ( sizeof ( *header ) <= vmdev->mtu ); + + /* Construct header in packet buffer */ + header->gpa.header.type = cpu_to_le16 ( VMBUS_DATA_GPA_DIRECT ); + header->gpa.header.hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 ); + header->gpa.header.flags = cpu_to_le16 ( VMBUS_COMPLETION_REQUESTED ); + header->gpa.header.xid = xid; /* Non-endian */ + header->gpa.range_count = 1; + header->range.len = cpu_to_le32 ( iob_len ( iobuf ) ); + header->range.offset = cpu_to_le32 ( addr & ( PAGE_SIZE - 1 ) ); + for ( i = 0 ; i < pfn_count ; i++ ) + header->pfn[i] = ( ( addr / PAGE_SIZE ) + i ); + + return vmbus_send ( vmdev, &header->gpa.header, data, len ); +} + +/** + * Send completion packet via ring buffer + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @v data Data + * @v len Length of data + * @ret rc Return status code + * + * Send data using a VMBUS_COMPLETION packet. 
+ */ +int vmbus_send_completion ( struct vmbus_device *vmdev, uint64_t xid, + const void *data, size_t len ) { + struct vmbus_packet_header *header = vmdev->packet; + + /* Construct header in packet buffer */ + assert ( header != NULL ); + header->type = cpu_to_le16 ( VMBUS_COMPLETION ); + header->hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 ); + header->flags = 0; + header->xid = xid; /* Non-endian */ + + return vmbus_send ( vmdev, header, data, len ); +} + +/** + * Send cancellation packet via ring buffer + * + * @v vmdev VMBus device + * @v xid Transaction ID + * @ret rc Return status code + * + * Send data using a VMBUS_CANCELLATION packet. + */ +int vmbus_send_cancellation ( struct vmbus_device *vmdev, uint64_t xid ) { + struct vmbus_packet_header *header = vmdev->packet; + + /* Construct header in packet buffer */ + assert ( header != NULL ); + header->type = cpu_to_le16 ( VMBUS_CANCELLATION ); + header->hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 ); + header->flags = 0; + header->xid = xid; /* Non-endian */ + + return vmbus_send ( vmdev, header, NULL, 0 ); +} + +/** + * Get transfer page set from pageset ID + * + * @v vmdev VMBus device + * @v pageset Page set ID (in protocol byte order) + * @ret pages Page set, or NULL if not found + */ +static struct vmbus_xfer_pages * vmbus_xfer_pages ( struct vmbus_device *vmdev, + uint16_t pageset ) { + struct vmbus_xfer_pages *pages; + + /* Locate page set */ + list_for_each_entry ( pages, &vmdev->pages, list ) { + if ( pages->pageset == pageset ) + return pages; + } + + DBGC ( vmdev, "VMBUS %s unrecognised page set ID %#04x\n", + vmdev->dev.name, le16_to_cpu ( pageset ) ); + return NULL; +} + +/** + * Construct I/O buffer list from transfer pages + * + * @v vmdev VMBus device + * @v header Transfer page header + * @v list I/O buffer list to populate + * @ret rc Return status code + */ +static int vmbus_xfer_page_iobufs ( struct vmbus_device *vmdev, + struct vmbus_packet_header *header, + struct list_head *list 
) { + struct vmbus_xfer_page_header *page_header = + container_of ( header, struct vmbus_xfer_page_header, header ); + struct vmbus_xfer_pages *pages; + struct io_buffer *iobuf; + struct io_buffer *tmp; + size_t len; + size_t offset; + unsigned int range_count; + unsigned int i; + int rc; + + /* Sanity check */ + assert ( header->type == cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) ); + + /* Locate page set */ + pages = vmbus_xfer_pages ( vmdev, page_header->pageset ); + if ( ! pages ) { + rc = -ENOENT; + goto err_pages; + } + + /* Allocate and populate I/O buffers */ + range_count = le32_to_cpu ( page_header->range_count ); + for ( i = 0 ; i < range_count ; i++ ) { + + /* Parse header */ + len = le32_to_cpu ( page_header->range[i].len ); + offset = le32_to_cpu ( page_header->range[i].offset ); + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( len ); + if ( ! iobuf ) { + DBGC ( vmdev, "VMBUS %s could not allocate %zd-byte " + "I/O buffer\n", vmdev->dev.name, len ); + rc = -ENOMEM; + goto err_alloc; + } + + /* Add I/O buffer to list */ + list_add ( &iobuf->list, list ); + + /* Populate I/O buffer */ + if ( ( rc = pages->op->copy ( pages, iob_put ( iobuf, len ), + offset, len ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not populate I/O buffer " + "range [%zd,%zd): %s\n", + vmdev->dev.name, offset, len, strerror ( rc ) ); + goto err_copy; + } + } + + return 0; + + err_copy: + err_alloc: + list_for_each_entry_safe ( iobuf, tmp, list, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } + err_pages: + return rc; +} + +/** + * Poll ring buffer + * + * @v vmdev VMBus device + * @ret rc Return status code + */ +int vmbus_poll ( struct vmbus_device *vmdev ) { + struct vmbus_packet_header *header = vmdev->packet; + struct list_head list; + void *data; + size_t header_len; + size_t len; + size_t footer_len; + size_t ring_len; + size_t cons; + size_t old_cons; + uint64_t xid; + int rc; + + /* Sanity checks */ + assert ( vmdev->packet != NULL ); + assert ( vmdev->in != 
NULL ); + + /* Return immediately if buffer is empty */ + if ( ! vmbus_has_data ( vmdev ) ) + return 0; + cons = le32_to_cpu ( vmdev->in->cons ); + old_cons = cons; + + /* Consume (start of) header */ + cons = vmbus_consume ( vmdev, cons, header, sizeof ( *header ) ); + + /* Parse and sanity check header */ + header_len = ( le16_to_cpu ( header->hdr_qlen ) * 8 ); + if ( header_len < sizeof ( *header ) ) { + DBGC ( vmdev, "VMBUS %s received underlength header (%zd " + "bytes)\n", vmdev->dev.name, header_len ); + return -EINVAL; + } + len = ( ( le16_to_cpu ( header->qlen ) * 8 ) - header_len ); + footer_len = sizeof ( struct vmbus_packet_footer ); + ring_len = ( header_len + len + footer_len ); + if ( ring_len > vmdev->mtu ) { + DBGC ( vmdev, "VMBUS %s received overlength packet (%zd " + "bytes)\n", vmdev->dev.name, ring_len ); + return -ERANGE; + } + xid = le64_to_cpu ( header->xid ); + + /* Consume remainder of packet */ + cons = vmbus_consume ( vmdev, cons, + ( ( ( void * ) header ) + sizeof ( *header ) ), + ( ring_len - sizeof ( *header ) ) ); + DBGC2 ( vmdev, "VMBUS %s received:\n", vmdev->dev.name ); + DBGC2_HDA ( vmdev, old_cons, header, ring_len ); + assert ( ( ( cons - old_cons ) & ( vmdev->in_len - 1 ) ) == ring_len ); + + /* Allocate I/O buffers, if applicable */ + INIT_LIST_HEAD ( &list ); + if ( header->type == cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) ) { + if ( ( rc = vmbus_xfer_page_iobufs ( vmdev, header, + &list ) ) != 0 ) + return rc; + } + + /* Update consumer index */ + rmb(); + vmdev->in->cons = cpu_to_le32 ( cons ); + + /* Handle packet */ + data = ( ( ( void * ) header ) + header_len ); + switch ( header->type ) { + + case cpu_to_le16 ( VMBUS_DATA_INBAND ) : + if ( ( rc = vmdev->op->recv_control ( vmdev, xid, data, + len ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not handle control " + "packet: %s\n", + vmdev->dev.name, strerror ( rc ) ); + return rc; + } + break; + + case cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) : + if ( ( rc = 
vmdev->op->recv_data ( vmdev, xid, data, len, + &list ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not handle data packet: " + "%s\n", vmdev->dev.name, strerror ( rc ) ); + return rc; + } + break; + + case cpu_to_le16 ( VMBUS_COMPLETION ) : + if ( ( rc = vmdev->op->recv_completion ( vmdev, xid, data, + len ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not handle completion: " + "%s\n", vmdev->dev.name, strerror ( rc ) ); + return rc; + } + break; + + case cpu_to_le16 ( VMBUS_CANCELLATION ) : + if ( ( rc = vmdev->op->recv_cancellation ( vmdev, xid ) ) != 0){ + DBGC ( vmdev, "VMBUS %s could not handle cancellation: " + "%s\n", vmdev->dev.name, strerror ( rc ) ); + return rc; + } + break; + + default: + DBGC ( vmdev, "VMBUS %s unknown packet type %d\n", + vmdev->dev.name, le16_to_cpu ( header->type ) ); + return -ENOTSUP; + } + + return 0; +} + +/** + * Dump channel status (for debugging) + * + * @v vmdev VMBus device + */ +void vmbus_dump_channel ( struct vmbus_device *vmdev ) { + size_t out_prod = le32_to_cpu ( vmdev->out->prod ); + size_t out_cons = le32_to_cpu ( vmdev->out->cons ); + size_t in_prod = le32_to_cpu ( vmdev->in->prod ); + size_t in_cons = le32_to_cpu ( vmdev->in->cons ); + size_t in_len; + size_t first; + size_t second; + + /* Dump ring status */ + DBGC ( vmdev, "VMBUS %s out %03zx:%03zx%s in %03zx:%03zx%s\n", + vmdev->dev.name, out_prod, out_cons, + ( vmdev->out->intr_mask ? "(m)" : "" ), in_prod, in_cons, + ( vmdev->in->intr_mask ? 
"(m)" : "" ) ); + + /* Dump inbound ring contents, if any */ + if ( in_prod != in_cons ) { + in_len = ( ( in_prod - in_cons ) & + ( vmdev->in_len - 1 ) ); + first = ( vmdev->in_len - in_cons ); + if ( first > in_len ) + first = in_len; + second = ( in_len - first ); + DBGC_HDA ( vmdev, in_cons, &vmdev->in->data[in_cons], first ); + DBGC_HDA ( vmdev, 0, &vmdev->in->data[0], second ); + } +} + +/** + * Find driver for VMBus device + * + * @v vmdev VMBus device + * @ret driver Driver, or NULL + */ +static struct vmbus_driver * vmbus_find_driver ( const union uuid *type ) { + struct vmbus_driver *vmdrv; + + for_each_table_entry ( vmdrv, VMBUS_DRIVERS ) { + if ( memcmp ( &vmdrv->type, type, sizeof ( *type ) ) == 0 ) + return vmdrv; + } + return NULL; +} + +/** + * Probe channels + * + * @v hv Hyper-V hypervisor + * @v parent Parent device + * @ret rc Return status code + */ +static int vmbus_probe_channels ( struct hv_hypervisor *hv, + struct device *parent ) { + struct vmbus *vmbus = hv->vmbus; + const struct vmbus_message_header *header = &vmbus->message->header; + const struct vmbus_offer_channel *offer = &vmbus->message->offer; + const union uuid *type; + union uuid instance; + struct vmbus_driver *driver; + struct vmbus_device *vmdev; + struct vmbus_device *tmp; + unsigned int channel; + int rc; + + /* Post message */ + if ( ( rc = vmbus_post_empty_message ( hv, VMBUS_REQUEST_OFFERS ) ) !=0) + goto err_post_message; + + /* Collect responses */ + while ( 1 ) { + + /* Wait for response */ + if ( ( rc = vmbus_wait_for_any_message ( hv ) ) != 0 ) + goto err_wait_for_any_message; + + /* Handle response */ + if ( header->type == cpu_to_le32 ( VMBUS_OFFER_CHANNEL ) ) { + + /* Parse offer */ + type = &offer->type; + channel = le32_to_cpu ( offer->channel ); + DBGC2 ( vmbus, "VMBUS %p offer %d type %s", + vmbus, channel, uuid_ntoa ( type ) ); + if ( offer->monitored ) + DBGC2 ( vmbus, " monitor %d", offer->monitor ); + DBGC2 ( vmbus, "\n" ); + + /* Look for a driver */ + 
driver = vmbus_find_driver ( type ); + if ( ! driver ) { + DBGC2 ( vmbus, "VMBUS %p has no driver for " + "type %s\n", vmbus, uuid_ntoa ( type )); + /* Not a fatal error */ + continue; + } + + /* Allocate and initialise device */ + vmdev = zalloc ( sizeof ( *vmdev ) ); + if ( ! vmdev ) { + rc = -ENOMEM; + goto err_alloc_vmdev; + } + memcpy ( &instance, &offer->instance, + sizeof ( instance ) ); + uuid_mangle ( &instance ); + snprintf ( vmdev->dev.name, sizeof ( vmdev->dev.name ), + "{%s}", uuid_ntoa ( &instance ) ); + vmdev->dev.desc.bus_type = BUS_TYPE_HV; + INIT_LIST_HEAD ( &vmdev->dev.children ); + list_add_tail ( &vmdev->dev.siblings, + &parent->children ); + vmdev->dev.parent = parent; + vmdev->hv = hv; + memcpy ( &vmdev->instance, &offer->instance, + sizeof ( vmdev->instance ) ); + vmdev->channel = channel; + vmdev->monitor = offer->monitor; + vmdev->signal = ( offer->monitored ? + vmbus_signal_monitor : + vmbus_signal_event ); + INIT_LIST_HEAD ( &vmdev->pages ); + vmdev->driver = driver; + vmdev->dev.driver_name = driver->name; + DBGC ( vmdev, "VMBUS %s has driver \"%s\"\n", + vmdev->dev.name, vmdev->driver->name ); + + } else if ( header->type == + cpu_to_le32 ( VMBUS_ALL_OFFERS_DELIVERED ) ) { + + /* End of offer list */ + break; + + } else { + DBGC ( vmbus, "VMBUS %p unexpected offer response type " + "%d\n", vmbus, le32_to_cpu ( header->type ) ); + rc = -EPROTO; + goto err_unexpected_offer; + } + } + + /* Probe all devices. We do this only after completing + * enumeration since devices will need to send and receive + * VMBus messages. 
+ */ + list_for_each_entry ( vmdev, &parent->children, dev.siblings ) { + if ( ( rc = vmdev->driver->probe ( vmdev ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not probe: %s\n", + vmdev->dev.name, strerror ( rc ) ); + goto err_probe; + } + } + + return 0; + + err_probe: + /* Remove driver from each device that was already probed */ + list_for_each_entry_continue_reverse ( vmdev, &parent->children, + dev.siblings ) { + vmdev->driver->remove ( vmdev ); + } + err_unexpected_offer: + err_alloc_vmdev: + err_wait_for_any_message: + /* Free any devices allocated (but potentially not yet probed) */ + list_for_each_entry_safe ( vmdev, tmp, &parent->children, + dev.siblings ) { + list_del ( &vmdev->dev.siblings ); + free ( vmdev ); + } + err_post_message: + return rc; +} + + +/** + * Reset channels + * + * @v hv Hyper-V hypervisor + * @v parent Parent device + * @ret rc Return status code + */ +static int vmbus_reset_channels ( struct hv_hypervisor *hv, + struct device *parent ) { + struct vmbus *vmbus = hv->vmbus; + const struct vmbus_message_header *header = &vmbus->message->header; + const struct vmbus_offer_channel *offer = &vmbus->message->offer; + const union uuid *type; + struct vmbus_device *vmdev; + unsigned int channel; + int rc; + + /* Post message */ + if ( ( rc = vmbus_post_empty_message ( hv, VMBUS_REQUEST_OFFERS ) ) !=0) + return rc; + + /* Collect responses */ + while ( 1 ) { + + /* Wait for response */ + if ( ( rc = vmbus_wait_for_any_message ( hv ) ) != 0 ) + return rc; + + /* Handle response */ + if ( header->type == cpu_to_le32 ( VMBUS_OFFER_CHANNEL ) ) { + + /* Parse offer */ + type = &offer->type; + channel = le32_to_cpu ( offer->channel ); + DBGC2 ( vmbus, "VMBUS %p offer %d type %s", + vmbus, channel, uuid_ntoa ( type ) ); + if ( offer->monitored ) + DBGC2 ( vmbus, " monitor %d", offer->monitor ); + DBGC2 ( vmbus, "\n" ); + + /* Do nothing with the offer; we already have all + * of the relevant state from the initial probe. 
+ */ + + } else if ( header->type == + cpu_to_le32 ( VMBUS_ALL_OFFERS_DELIVERED ) ) { + + /* End of offer list */ + break; + + } else { + DBGC ( vmbus, "VMBUS %p unexpected offer response type " + "%d\n", vmbus, le32_to_cpu ( header->type ) ); + return -EPROTO; + } + } + + /* Reset all devices */ + list_for_each_entry ( vmdev, &parent->children, dev.siblings ) { + if ( ( rc = vmdev->driver->reset ( vmdev ) ) != 0 ) { + DBGC ( vmdev, "VMBUS %s could not reset: %s\n", + vmdev->dev.name, strerror ( rc ) ); + /* Continue attempting to reset other devices */ + continue; + } + } + + return 0; +} + +/** + * Remove channels + * + * @v hv Hyper-V hypervisor + * @v parent Parent device + */ +static void vmbus_remove_channels ( struct hv_hypervisor *hv __unused, + struct device *parent ) { + struct vmbus_device *vmdev; + struct vmbus_device *tmp; + + /* Remove devices */ + list_for_each_entry_safe ( vmdev, tmp, &parent->children, + dev.siblings ) { + vmdev->driver->remove ( vmdev ); + assert ( list_empty ( &vmdev->dev.children ) ); + assert ( vmdev->out == NULL ); + assert ( vmdev->in == NULL ); + assert ( vmdev->packet == NULL ); + assert ( list_empty ( &vmdev->pages ) ); + list_del ( &vmdev->dev.siblings ); + free ( vmdev ); + } +} + +/** + * Probe Hyper-V virtual machine bus + * + * @v hv Hyper-V hypervisor + * @v parent Parent device + * @ret rc Return status code + */ +int vmbus_probe ( struct hv_hypervisor *hv, struct device *parent ) { + struct vmbus *vmbus; + int rc; + + /* Allocate and initialise structure */ + vmbus = zalloc ( sizeof ( *vmbus ) ); + if ( ! vmbus ) { + rc = -ENOMEM; + goto err_alloc; + } + hv->vmbus = vmbus; + + /* Initialise message buffer pointer + * + * We use a pointer to the fixed-size Hyper-V received message + * buffer. This allows us to access fields within received + * messages without first checking the message size: any + * fields beyond the end of the message will read as zero. 
+ */ + vmbus->message = ( ( void * ) hv->message->received.data ); + assert ( sizeof ( *vmbus->message ) <= + sizeof ( hv->message->received.data ) ); + + /* Allocate interrupt and monitor pages */ + if ( ( rc = hv_alloc_pages ( hv, &vmbus->intr, &vmbus->monitor_in, + &vmbus->monitor_out, NULL ) ) != 0 ) + goto err_alloc_pages; + + /* Enable message interrupt */ + hv_enable_sint ( hv, VMBUS_MESSAGE_SINT ); + + /* Negotiate protocol version */ + if ( ( rc = vmbus_negotiate_version ( hv ) ) != 0 ) + goto err_negotiate_version; + + /* Enumerate channels */ + if ( ( rc = vmbus_probe_channels ( hv, parent ) ) != 0 ) + goto err_probe_channels; + + return 0; + + vmbus_remove_channels ( hv, parent ); + err_probe_channels: + vmbus_unload ( hv ); + err_negotiate_version: + hv_disable_sint ( hv, VMBUS_MESSAGE_SINT ); + hv_free_pages ( hv, vmbus->intr, vmbus->monitor_in, vmbus->monitor_out, + NULL ); + err_alloc_pages: + free ( vmbus ); + err_alloc: + return rc; +} + +/** + * Reset Hyper-V virtual machine bus + * + * @v hv Hyper-V hypervisor + * @v parent Parent device + * @ret rc Return status code + */ +int vmbus_reset ( struct hv_hypervisor *hv, struct device *parent ) { + struct vmbus *vmbus = hv->vmbus; + int rc; + + /* Mark all existent GPADLs as obsolete */ + vmbus_obsolete_gpadl = vmbus_gpadl; + + /* Clear interrupt and monitor pages */ + memset ( vmbus->intr, 0, PAGE_SIZE ); + memset ( vmbus->monitor_in, 0, PAGE_SIZE ); + memset ( vmbus->monitor_out, 0, PAGE_SIZE ); + + /* Enable message interrupt */ + hv_enable_sint ( hv, VMBUS_MESSAGE_SINT ); + + /* Renegotiate protocol version */ + if ( ( rc = vmbus_negotiate_version ( hv ) ) != 0 ) + return rc; + + /* Reenumerate channels */ + if ( ( rc = vmbus_reset_channels ( hv, parent ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Remove Hyper-V virtual machine bus + * + * @v hv Hyper-V hypervisor + * @v parent Parent device + */ +void vmbus_remove ( struct hv_hypervisor *hv, struct device *parent ) { + struct vmbus 
*vmbus = hv->vmbus; + + vmbus_remove_channels ( hv, parent ); + vmbus_unload ( hv ); + hv_disable_sint ( hv, VMBUS_MESSAGE_SINT ); + hv_free_pages ( hv, vmbus->intr, vmbus->monitor_in, vmbus->monitor_out, + NULL ); + free ( vmbus ); +} diff --git a/src/interface/xen/xenbus.c b/src/interface/xen/xenbus.c new file mode 100644 index 00000000..5dd01dfa --- /dev/null +++ b/src/interface/xen/xenbus.c @@ -0,0 +1,400 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Xen device bus + * + */ + +/* Disambiguate the various error causes */ +#define ETIMEDOUT_UNKNOWN \ + __einfo_error ( EINFO_ETIMEDOUT_UNKNOWN ) +#define EINFO_ETIMEDOUT_UNKNOWN \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateUnknown, \ + "Unknown" ) +#define ETIMEDOUT_INITIALISING \ + __einfo_error ( EINFO_ETIMEDOUT_INITIALISING ) +#define EINFO_ETIMEDOUT_INITIALISING \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateInitialising, \ + "Initialising" ) +#define ETIMEDOUT_INITWAIT \ + __einfo_error ( EINFO_ETIMEDOUT_INITWAIT ) +#define EINFO_ETIMEDOUT_INITWAIT \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateInitWait, \ + "InitWait" ) +#define ETIMEDOUT_INITIALISED \ + __einfo_error ( EINFO_ETIMEDOUT_INITIALISED ) +#define EINFO_ETIMEDOUT_INITIALISED \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateInitialised, \ + "Initialised" ) +#define ETIMEDOUT_CONNECTED \ + __einfo_error ( EINFO_ETIMEDOUT_CONNECTED ) +#define EINFO_ETIMEDOUT_CONNECTED \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateConnected, \ + "Connected" ) +#define ETIMEDOUT_CLOSING \ + __einfo_error ( EINFO_ETIMEDOUT_CLOSING ) +#define EINFO_ETIMEDOUT_CLOSING \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateClosing, \ + "Closing" ) +#define ETIMEDOUT_CLOSED \ + __einfo_error ( EINFO_ETIMEDOUT_CLOSED ) +#define EINFO_ETIMEDOUT_CLOSED \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateClosed, \ + "Closed" ) +#define ETIMEDOUT_RECONFIGURING \ + __einfo_error ( EINFO_ETIMEDOUT_RECONFIGURING ) +#define EINFO_ETIMEDOUT_RECONFIGURING \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateReconfiguring, \ + "Reconfiguring" ) +#define ETIMEDOUT_RECONFIGURED \ + __einfo_error ( EINFO_ETIMEDOUT_RECONFIGURED ) +#define EINFO_ETIMEDOUT_RECONFIGURED \ + __einfo_uniqify ( EINFO_ETIMEDOUT, XenbusStateReconfigured, \ + "Reconfigured" ) +#define ETIMEDOUT_STATE( 
state ) \ + EUNIQ ( EINFO_ETIMEDOUT, (state), ETIMEDOUT_UNKNOWN, \ + ETIMEDOUT_INITIALISING, ETIMEDOUT_INITWAIT, \ + ETIMEDOUT_INITIALISED, ETIMEDOUT_CONNECTED, \ + ETIMEDOUT_CLOSING, ETIMEDOUT_CLOSED, \ + ETIMEDOUT_RECONFIGURING, ETIMEDOUT_RECONFIGURED ) + +/** Maximum time to wait for backend to reach a given state, in ticks */ +#define XENBUS_BACKEND_TIMEOUT ( 5 * TICKS_PER_SEC ) + +/** + * Set device state + * + * @v xendev Xen device + * @v state New state + * @ret rc Return status code + */ +int xenbus_set_state ( struct xen_device *xendev, int state ) { + int rc; + + /* Attempt to set state */ + if ( ( rc = xenstore_write_num ( xendev->xen, state, xendev->key, + "state", NULL ) ) != 0 ) { + DBGC ( xendev, "XENBUS %s could not set state=\"%d\": %s\n", + xendev->key, state, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Get backend state + * + * @v xendev Xen device + * @ret state Backend state, or negative error + */ +int xenbus_backend_state ( struct xen_device *xendev ) { + unsigned long state; + int rc; + + /* Attempt to get backend state */ + if ( ( rc = xenstore_read_num ( xendev->xen, &state, xendev->backend, + "state", NULL ) ) != 0 ) { + DBGC ( xendev, "XENBUS %s could not read %s/state: %s\n", + xendev->key, xendev->backend, strerror ( rc ) ); + return rc; + } + + return state; +} + +/** + * Wait for backend to reach a given state + * + * @v xendev Xen device + * @v state Desired backend state + * @ret rc Return status code + */ +int xenbus_backend_wait ( struct xen_device *xendev, int state ) { + unsigned long started = currticks(); + unsigned long elapsed; + unsigned int attempts = 0; + int current_state; + int rc; + + /* Wait for backend to reach this state */ + do { + + /* Get current backend state */ + current_state = xenbus_backend_state ( xendev ); + if ( current_state < 0 ) { + rc = current_state; + return rc; + } + if ( current_state == state ) + return 0; + + /* Allow time for backend to react */ + cpu_nap(); + + /* 
XenStore is a very slow interface; any fixed delay + * time would be dwarfed by the XenStore access time. + * We therefore use wall clock to time out this + * operation. + */ + elapsed = ( currticks() - started ); + attempts++; + + } while ( elapsed < XENBUS_BACKEND_TIMEOUT ); + + /* Construct status code from current backend state */ + rc = -ETIMEDOUT_STATE ( current_state ); + DBGC ( xendev, "XENBUS %s timed out after %d attempts waiting for " + "%s/state=\"%d\": %s\n", xendev->key, attempts, xendev->backend, + state, strerror ( rc ) ); + + return rc; +} + +/** + * Find driver for Xen device + * + * @v type Device type + * @ret driver Driver, or NULL + */ +static struct xen_driver * xenbus_find_driver ( const char *type ) { + struct xen_driver *xendrv; + + for_each_table_entry ( xendrv, XEN_DRIVERS ) { + if ( strcmp ( xendrv->type, type ) == 0 ) + return xendrv; + } + return NULL; +} + +/** + * Probe Xen device + * + * @v xen Xen hypervisor + * @v parent Parent device + * @v instance Device instance + * @v driver Device driver + * @ret rc Return status code + */ +static int xenbus_probe_device ( struct xen_hypervisor *xen, + struct device *parent, const char *instance, + struct xen_driver *driver ) { + const char *type = driver->type; + struct xen_device *xendev; + size_t key_len; + int rc; + + /* Allocate and initialise structure */ + key_len = ( 7 /* "device/" */ + strlen ( type ) + 1 /* "/" */ + + strlen ( instance ) + 1 /* NUL */ ); + xendev = zalloc ( sizeof ( *xendev ) + key_len ); + if ( ! 
xendev ) { + rc = -ENOMEM; + goto err_alloc; + } + snprintf ( xendev->dev.name, sizeof ( xendev->dev.name ), "%s/%s", + type, instance ); + xendev->dev.desc.bus_type = BUS_TYPE_XEN; + INIT_LIST_HEAD ( &xendev->dev.children ); + list_add_tail ( &xendev->dev.siblings, &parent->children ); + xendev->dev.parent = parent; + xendev->xen = xen; + xendev->key = ( ( void * ) ( xendev + 1 ) ); + snprintf ( xendev->key, key_len, "device/%s/%s", type, instance ); + xendev->driver = driver; + xendev->dev.driver_name = driver->name; + DBGC ( xendev, "XENBUS %s has driver \"%s\"\n", xendev->key, + xendev->driver->name ); + + /* Read backend key */ + if ( ( rc = xenstore_read ( xen, &xendev->backend, xendev->key, + "backend", NULL ) ) != 0 ) { + DBGC ( xendev, "XENBUS %s could not read backend: %s\n", + xendev->key, strerror ( rc ) ); + goto err_read_backend; + } + + /* Read backend domain ID */ + if ( ( rc = xenstore_read_num ( xen, &xendev->backend_id, xendev->key, + "backend-id", NULL ) ) != 0 ) { + DBGC ( xendev, "XENBUS %s could not read backend-id: %s\n", + xendev->key, strerror ( rc ) ); + goto err_read_backend_id; + } + DBGC ( xendev, "XENBUS %s backend=\"%s\" in domain %ld\n", + xendev->key, xendev->backend, xendev->backend_id ); + + /* Probe driver */ + if ( ( rc = xendev->driver->probe ( xendev ) ) != 0 ) { + DBGC ( xendev, "XENBUS could not probe %s: %s\n", + xendev->key, strerror ( rc ) ); + goto err_probe; + } + + return 0; + + xendev->driver->remove ( xendev ); + err_probe: + err_read_backend_id: + free ( xendev->backend ); + err_read_backend: + list_del ( &xendev->dev.siblings ); + free ( xendev ); + err_alloc: + return rc; +} + +/** + * Remove Xen device + * + * @v xendev Xen device + */ +static void xenbus_remove_device ( struct xen_device *xendev ) { + + /* Remove device */ + xendev->driver->remove ( xendev ); + free ( xendev->backend ); + list_del ( &xendev->dev.siblings ); + free ( xendev ); +} + +/** + * Probe Xen devices of a given type + * + * @v xen Xen 
hypervisor + * @v parent Parent device + * @v type Device type + * @ret rc Return status code + */ +static int xenbus_probe_type ( struct xen_hypervisor *xen, + struct device *parent, const char *type ) { + struct xen_driver *driver; + char *children; + char *child; + size_t len; + int rc; + + /* Look for a driver */ + driver = xenbus_find_driver ( type ); + if ( ! driver ) { + DBGC ( xen, "XENBUS has no driver for \"%s\" devices\n", type ); + /* Not a fatal error */ + rc = 0; + goto err_no_driver; + } + + /* Get children of this key */ + if ( ( rc = xenstore_directory ( xen, &children, &len, "device", + type, NULL ) ) != 0 ) { + DBGC ( xen, "XENBUS could not list \"%s\" devices: %s\n", + type, strerror ( rc ) ); + goto err_directory; + } + + /* Probe each child */ + for ( child = children ; child < ( children + len ) ; + child += ( strlen ( child ) + 1 /* NUL */ ) ) { + if ( ( rc = xenbus_probe_device ( xen, parent, child, + driver ) ) != 0 ) + goto err_probe_device; + } + + free ( children ); + return 0; + + err_probe_device: + free ( children ); + err_directory: + err_no_driver: + return rc; +} + +/** + * Probe Xen bus + * + * @v xen Xen hypervisor + * @v parent Parent device + * @ret rc Return status code + */ +int xenbus_probe ( struct xen_hypervisor *xen, struct device *parent ) { + char *types; + char *type; + size_t len; + int rc; + + /* Get children of "device" key */ + if ( ( rc = xenstore_directory ( xen, &types, &len, "device", + NULL ) ) != 0 ) { + DBGC ( xen, "XENBUS could not list device types: %s\n", + strerror ( rc ) ); + goto err_directory; + } + + /* Probe each child type */ + for ( type = types ; type < ( types + len ) ; + type += ( strlen ( type ) + 1 /* NUL */ ) ) { + if ( ( rc = xenbus_probe_type ( xen, parent, type ) ) != 0 ) + goto err_probe_type; + } + + free ( types ); + return 0; + + xenbus_remove ( xen, parent ); + err_probe_type: + free ( types ); + err_directory: + return rc; +} + +/** + * Remove Xen bus + * + * @v xen Xen hypervisor 
+ * @v parent Parent device + */ +void xenbus_remove ( struct xen_hypervisor *xen __unused, + struct device *parent ) { + struct xen_device *xendev; + struct xen_device *tmp; + + /* Remove devices */ + list_for_each_entry_safe ( xendev, tmp, &parent->children, + dev.siblings ) { + xenbus_remove_device ( xendev ); + } +} diff --git a/src/interface/xen/xengrant.c b/src/interface/xen/xengrant.c new file mode 100644 index 00000000..269cd583 --- /dev/null +++ b/src/interface/xen/xengrant.c @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Xen grant tables + * + */ + +/** Grant table version to try setting + * + * Using version 1 grant tables limits guests to using 16TB of + * grantable RAM, and prevents the use of subpage grants. 
Some + * versions of the Xen hypervisor refuse to allow the grant table + * version to be set after the first grant references have been + * created, so the loaded operating system may be stuck with whatever + * choice we make here. We therefore currently use version 2 grant + * tables, since they give the most flexibility to the loaded OS. + * + * Current versions (7.2.0) of the Windows PV drivers have no support + * for version 2 grant tables, and will merrily create version 1 + * entries in what the hypervisor believes to be a version 2 table. + * This causes some confusion. + * + * Avoid this problem by attempting to use version 1 tables, since + * otherwise we may render Windows unable to boot. + * + * Play nicely with other potential bootloaders by accepting either + * version 1 or version 2 grant tables (if we are unable to set our + * requested version). + */ +#define XENGRANT_TRY_VERSION 1 + +/** + * Initialise grant table + * + * @v xen Xen hypervisor + * @ret rc Return status code + */ +int xengrant_init ( struct xen_hypervisor *xen ) { + struct gnttab_query_size size; + struct gnttab_set_version set_version; + struct gnttab_get_version get_version; + struct grant_entry_v1 *v1; + union grant_entry_v2 *v2; + unsigned int version; + int xenrc; + int rc; + + /* Get grant table size */ + size.dom = DOMID_SELF; + if ( ( xenrc = xengrant_query_size ( xen, &size ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( xen, "XENGRANT could not get table size: %s\n", + strerror ( rc ) ); + return rc; + } + xen->grant.len = ( size.nr_frames * PAGE_SIZE ); + + /* Set grant table version, if applicable */ + set_version.version = XENGRANT_TRY_VERSION; + if ( ( xenrc = xengrant_set_version ( xen, &set_version ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( xen, "XENGRANT could not set version %d: %s\n", + XENGRANT_TRY_VERSION, strerror ( rc ) ); + /* Continue; use whatever version is current */ + } + + /* Get grant table version */ + get_version.dom = DOMID_SELF; + get_version.pad 
= 0; + if ( ( xenrc = xengrant_get_version ( xen, &get_version ) ) == 0 ) { + version = get_version.version; + switch ( version ) { + + case 0: + /* Version not yet specified: will be version 1 */ + version = 1; + break; + + case 1 : + /* Version 1 table: nothing special to do */ + break; + + case 2: + /* Version 2 table: configure shift appropriately */ + xen->grant.shift = ( fls ( sizeof ( *v2 ) / + sizeof ( *v1 ) ) - 1 ); + break; + + default: + /* Unsupported version */ + DBGC ( xen, "XENGRANT detected unsupported version " + "%d\n", version ); + return -ENOTSUP; + + } + } else { + rc = -EXEN ( xenrc ); + DBGC ( xen, "XENGRANT could not get version (assuming v1): " + "%s\n", strerror ( rc ) ); + version = 1; + } + + DBGC ( xen, "XENGRANT using v%d table with %d entries\n", + version, xengrant_entries ( xen ) ); + return 0; +} + +/** + * Allocate grant references + * + * @v xen Xen hypervisor + * @v refs Grant references to fill in + * @v count Number of references + * @ret rc Return status code + */ +int xengrant_alloc ( struct xen_hypervisor *xen, grant_ref_t *refs, + unsigned int count ) { + struct grant_entry_header *hdr; + unsigned int entries = xengrant_entries ( xen ); + unsigned int mask = ( entries - 1 ); + unsigned int check = 0; + unsigned int avail; + unsigned int ref; + + /* Fail unless we have enough references available */ + avail = ( entries - xen->grant.used - GNTTAB_NR_RESERVED_ENTRIES ); + if ( avail < count ) { + DBGC ( xen, "XENGRANT cannot allocate %d references (only %d " + "of %d available)\n", count, avail, entries ); + return -ENOBUFS; + } + DBGC ( xen, "XENGRANT allocating %d references (from %d of %d " + "available)\n", count, avail, entries ); + + /* Update number of references used */ + xen->grant.used += count; + + /* Find unused references */ + for ( ref = xen->grant.ref ; count ; ref = ( ( ref + 1 ) & mask ) ) { + + /* Sanity check */ + assert ( check++ < entries ); + + /* Skip reserved references */ + if ( ref < 
GNTTAB_NR_RESERVED_ENTRIES ) + continue; + + /* Skip in-use references */ + hdr = xengrant_header ( xen, ref ); + if ( readw ( &hdr->flags ) & GTF_type_mask ) + continue; + if ( readw ( &hdr->domid ) == DOMID_SELF ) + continue; + + /* Zero reference */ + xengrant_zero ( xen, hdr ); + + /* Mark reference as in-use. We leave the flags as + * empty (to avoid creating a valid grant table entry) + * and set the domid to DOMID_SELF. + */ + writew ( DOMID_SELF, &hdr->domid ); + DBGC2 ( xen, "XENGRANT allocated ref %d\n", ref ); + + /* Record reference */ + refs[--count] = ref; + } + + /* Update cursor */ + xen->grant.ref = ref; + + return 0; +} + +/** + * Free grant references + * + * @v xen Xen hypervisor + * @v refs Grant references + * @v count Number of references + */ +void xengrant_free ( struct xen_hypervisor *xen, grant_ref_t *refs, + unsigned int count ) { + struct grant_entry_header *hdr; + unsigned int ref; + unsigned int i; + + /* Free references */ + for ( i = 0 ; i < count ; i++ ) { + + /* Sanity check */ + ref = refs[i]; + assert ( ref < xengrant_entries ( xen ) ); + + /* Zero reference */ + hdr = xengrant_header ( xen, ref ); + xengrant_zero ( xen, hdr ); + DBGC2 ( xen, "XENGRANT freed ref %d\n", ref ); + } +} diff --git a/src/interface/xen/xenstore.c b/src/interface/xen/xenstore.c new file mode 100644 index 00000000..a14881fc --- /dev/null +++ b/src/interface/xen/xenstore.c @@ -0,0 +1,554 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * xs_wire.h attempts to define a static error table xsd_errors, which + * interacts badly with the dynamically generated error numbers used + * by iPXE. Prevent this table from being constructed by including + * errno.h only after including xs_wire.h. + * + */ +#include +#include + +/** @file + * + * XenStore interface + * + */ + +/** Request identifier */ +static uint32_t xenstore_req_id; + +/** + * Send XenStore request raw data + * + * @v xen Xen hypervisor + * @v data Data buffer + * @v len Length of data + */ +static void xenstore_send ( struct xen_hypervisor *xen, const void *data, + size_t len ) { + struct xenstore_domain_interface *intf = xen->store.intf; + XENSTORE_RING_IDX prod = readl ( &intf->req_prod ); + XENSTORE_RING_IDX cons; + XENSTORE_RING_IDX idx; + const char *bytes = data; + size_t offset = 0; + size_t fill; + + DBGCP ( intf, "XENSTORE raw request:\n" ); + DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( prod ), data, len ); + + /* Write one byte at a time */ + while ( offset < len ) { + + /* Wait for space to become available */ + while ( 1 ) { + cons = readl ( &intf->req_cons ); + fill = ( prod - cons ); + if ( fill < XENSTORE_RING_SIZE ) + break; + DBGC2 ( xen, "." 
); + cpu_nap(); + rmb(); + } + + /* Write byte */ + idx = MASK_XENSTORE_IDX ( prod++ ); + writeb ( bytes[offset++], &intf->req[idx] ); + } + + /* Update producer counter */ + wmb(); + writel ( prod, &intf->req_prod ); + wmb(); +} + +/** + * Send XenStore request string (excluding terminating NUL) + * + * @v xen Xen hypervisor + * @v string String + */ +static void xenstore_send_string ( struct xen_hypervisor *xen, + const char *string ) { + + xenstore_send ( xen, string, strlen ( string ) ); +} + +/** + * Receive XenStore response raw data + * + * @v xen Xen hypervisor + * @v data Data buffer, or NULL to discard data + * @v len Length of data + */ +static void xenstore_recv ( struct xen_hypervisor *xen, void *data, + size_t len ) { + struct xenstore_domain_interface *intf = xen->store.intf; + XENSTORE_RING_IDX cons = readl ( &intf->rsp_cons ); + XENSTORE_RING_IDX prod; + XENSTORE_RING_IDX idx; + char *bytes = data; + size_t offset = 0; + size_t fill; + + DBGCP ( intf, "XENSTORE raw response:\n" ); + + /* Read one byte at a time */ + while ( offset < len ) { + + /* Wait for data to be ready */ + while ( 1 ) { + prod = readl ( &intf->rsp_prod ); + fill = ( prod - cons ); + if ( fill > 0 ) + break; + DBGC2 ( xen, "." 
); + cpu_nap(); + rmb(); + } + + /* Read byte */ + idx = MASK_XENSTORE_IDX ( cons++ ); + if ( data ) + bytes[offset++] = readb ( &intf->rsp[idx] ); + } + if ( data ) + DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( cons - len ), data, len ); + + /* Update consumer counter */ + writel ( cons, &intf->rsp_cons ); + wmb(); +} + +/** + * Send XenStore request + * + * @v xen Xen hypervisor + * @v type Message type + * @v req_id Request ID + * @v value Value, or NULL to omit + * @v key Key path components + * @ret rc Return status code + */ +static int xenstore_request ( struct xen_hypervisor *xen, + enum xsd_sockmsg_type type, uint32_t req_id, + const char *value, va_list key ) { + struct xsd_sockmsg msg; + struct evtchn_send event; + const char *string; + va_list tmp; + int xenrc; + int rc; + + /* Construct message header */ + msg.type = type; + msg.req_id = req_id; + msg.tx_id = 0; + msg.len = 0; + DBGC2 ( xen, "XENSTORE request ID %d type %d ", req_id, type ); + + /* Calculate total length */ + va_copy ( tmp, key ); + while ( ( string = va_arg ( tmp, const char * ) ) != NULL ) { + DBGC2 ( xen, "%s%s", ( msg.len ? 
"/" : "" ), string ); + msg.len += ( strlen ( string ) + 1 /* '/' or NUL */ ); + } + va_end ( tmp ); + if ( value ) { + DBGC2 ( xen, " = \"%s\"", value ); + msg.len += strlen ( value ); + } + DBGC2 ( xen, "\n" ); + + /* Send message */ + xenstore_send ( xen, &msg, sizeof ( msg ) ); + string = va_arg ( key, const char * ); + assert ( string != NULL ); + xenstore_send_string ( xen, string ); + while ( ( string = va_arg ( key, const char * ) ) != NULL ) { + xenstore_send_string ( xen, "/" ); + xenstore_send_string ( xen, string ); + } + xenstore_send ( xen, "", 1 ); /* Separating NUL */ + if ( value ) + xenstore_send_string ( xen, value ); + + /* Notify the back end */ + event.port = xen->store.port; + if ( ( xenrc = xenevent_send ( xen, &event ) ) != 0 ) { + rc = -EXEN ( xenrc ); + DBGC ( xen, "XENSTORE could not notify back end: %s\n", + strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Receive XenStore response + * + * @v xen Xen hypervisor + * @v req_id Request ID + * @v value Value to fill in + * @v len Length to fill in + * @ret rc Return status code + * + * The caller is responsible for eventually calling free() on the + * returned value. Note that the value may comprise multiple + * NUL-terminated strings concatenated together. A terminating NUL + * will always be appended to the returned value. + */ +static int xenstore_response ( struct xen_hypervisor *xen, uint32_t req_id, + char **value, size_t *len ) { + struct xsd_sockmsg msg; + char *string; + int rc; + + /* Wait for response to become available */ + while ( ! xenevent_pending ( xen, xen->store.port ) ) + cpu_nap(); + + /* Receive message header */ + xenstore_recv ( xen, &msg, sizeof ( msg ) ); + *len = msg.len; + + /* Allocate space for response */ + *value = zalloc ( msg.len + 1 /* terminating NUL */ ); + + /* Receive data. Do this even if allocation failed, or if the + * request ID was incorrect, to avoid leaving data in the + * ring. 
+ */ + xenstore_recv ( xen, *value, msg.len ); + + /* Validate request ID */ + if ( msg.req_id != req_id ) { + DBGC ( xen, "XENSTORE response ID mismatch (got %d, expected " + "%d)\n", msg.req_id, req_id ); + rc = -EPROTO; + goto err_req_id; + } + + /* Check for allocation failure */ + if ( ! *value ) { + DBGC ( xen, "XENSTORE could not allocate %d bytes for " + "response\n", msg.len ); + rc = -ENOMEM; + goto err_alloc; + } + + /* Check for explicit errors */ + if ( msg.type == XS_ERROR ) { + DBGC ( xen, "XENSTORE response error \"%s\"\n", *value ); + rc = -EIO; + goto err_explicit; + } + + DBGC2 ( xen, "XENSTORE response ID %d\n", req_id ); + if ( DBG_EXTRA ) { + for ( string = *value ; string < ( *value + msg.len ) ; + string += ( strlen ( string ) + 1 /* NUL */ ) ) { + DBGC2 ( xen, " - \"%s\"\n", string ); + } + } + return 0; + + err_explicit: + err_alloc: + err_req_id: + free ( *value ); + *value = NULL; + return rc; +} + +/** + * Issue a XenStore message + * + * @v xen Xen hypervisor + * @v type Message type + * @v response Response value to fill in, or NULL to discard + * @v len Response length to fill in, or NULL to ignore + * @v request Request value, or NULL to omit + * @v key Key path components + * @ret rc Return status code + */ +static int xenstore_message ( struct xen_hypervisor *xen, + enum xsd_sockmsg_type type, char **response, + size_t *len, const char *request, va_list key ) { + char *response_value; + size_t response_len; + int rc; + + /* Send request */ + if ( ( rc = xenstore_request ( xen, type, ++xenstore_req_id, + request, key ) ) != 0 ) + return rc; + + /* Receive response */ + if ( ( rc = xenstore_response ( xen, xenstore_req_id, &response_value, + &response_len ) ) != 0 ) + return rc; + + /* Return response, if applicable */ + if ( response ) { + *response = response_value; + } else { + free ( response_value ); + } + if ( len ) + *len = response_len; + + return 0; +} + +/** + * Read XenStore value + * + * @v xen Xen hypervisor + * @v 
value Value to fill in + * @v key Key path components + * @ret rc Return status code + * + * On a successful return, the caller is responsible for calling + * free() on the returned value. + */ +static int xenstore_vread ( struct xen_hypervisor *xen, char **value, + va_list key ) { + + return xenstore_message ( xen, XS_READ, value, NULL, NULL, key ); +} + +/** + * Read XenStore value + * + * @v xen Xen hypervisor + * @v value Value to fill in + * @v ... Key path components + * @ret rc Return status code + * + * On a successful return, the caller is responsible for calling + * free() on the returned value. + */ +__attribute__ (( sentinel )) int +xenstore_read ( struct xen_hypervisor *xen, char **value, ... ) { + va_list key; + int rc; + + va_start ( key, value ); + rc = xenstore_vread ( xen, value, key ); + va_end ( key ); + return rc; +} + +/** + * Read XenStore numeric value + * + * @v xen Xen hypervisor + * @v num Numeric value to fill in + * @v ... Key path components + * @ret rc Return status code + */ +__attribute__ (( sentinel )) int +xenstore_read_num ( struct xen_hypervisor *xen, unsigned long *num, ... 
) { + va_list key; + char *value; + char *endp; + int rc; + + /* Try to read text value */ + va_start ( key, num ); + rc = xenstore_vread ( xen, &value, key ); + va_end ( key ); + if ( rc != 0 ) + goto err_read; + + /* Try to parse as numeric value */ + *num = strtoul ( value, &endp, 10 ); + if ( ( *value == '\0' ) || ( *endp != '\0' ) ) { + DBGC ( xen, "XENSTORE found invalid numeric value \"%s\"\n", + value ); + rc = -EINVAL; + goto err_strtoul; + } + + err_strtoul: + free ( value ); + err_read: + return rc; +} + +/** + * Write XenStore value + * + * @v xen Xen hypervisor + * @v value Value + * @v key Key path components + * @ret rc Return status code + */ +static int xenstore_vwrite ( struct xen_hypervisor *xen, const char *value, + va_list key ) { + + return xenstore_message ( xen, XS_WRITE, NULL, NULL, value, key ); +} + +/** + * Write XenStore value + * + * @v xen Xen hypervisor + * @v value Value + * @v ... Key path components + * @ret rc Return status code + */ +__attribute__ (( sentinel )) int +xenstore_write ( struct xen_hypervisor *xen, const char *value, ... ) { + va_list key; + int rc; + + va_start ( key, value ); + rc = xenstore_vwrite ( xen, value, key ); + va_end ( key ); + return rc; +} + +/** + * Write XenStore numeric value + * + * @v xen Xen hypervisor + * @v num Numeric value + * @v ... Key path components + * @ret rc Return status code + */ +__attribute__ (( sentinel )) int +xenstore_write_num ( struct xen_hypervisor *xen, unsigned long num, ... ) { + char value[ 21 /* "18446744073709551615" + NUL */ ]; + va_list key; + int rc; + + /* Construct value */ + snprintf ( value, sizeof ( value ), "%lu", num ); + + /* Write value */ + va_start ( key, num ); + rc = xenstore_vwrite ( xen, value, key ); + va_end ( key ); + return rc; +} + +/** + * Delete XenStore value + * + * @v xen Xen hypervisor + * @v ... Key path components + * @ret rc Return status code + */ +__attribute__ (( sentinel )) int +xenstore_rm ( struct xen_hypervisor *xen, ... 
) { + va_list key; + int rc; + + va_start ( key, xen ); + rc = xenstore_message ( xen, XS_RM, NULL, NULL, NULL, key ); + va_end ( key ); + return rc; +} + +/** + * Read XenStore directory + * + * @v xen Xen hypervisor + * @v children Child key names to fill in + * @v len Length of child key names to fill in + * @v ... Key path components + * @ret rc Return status code + */ +__attribute__ (( sentinel )) int +xenstore_directory ( struct xen_hypervisor *xen, char **children, size_t *len, + ... ) { + va_list key; + int rc; + + va_start ( key, len ); + rc = xenstore_message ( xen, XS_DIRECTORY, children, len, NULL, key ); + va_end ( key ); + return rc; +} + +/** + * Dump XenStore directory contents (for debugging) + * + * @v xen Xen hypervisor + * @v key Key + */ +void xenstore_dump ( struct xen_hypervisor *xen, const char *key ) { + char *value; + char *children; + char *child; + char *child_key; + size_t len; + int rc; + + /* Try to dump current key as a value */ + if ( ( rc = xenstore_read ( xen, &value, key, NULL ) ) == 0 ) { + DBGC ( xen, "%s = \"%s\"\n", key, value ); + free ( value ); + } + + /* Try to recurse into each child in turn */ + if ( ( rc = xenstore_directory ( xen, &children, &len, key, + NULL ) ) == 0 ) { + for ( child = children ; child < ( children + len ) ; + child += ( strlen ( child ) + 1 /* NUL */ ) ) { + + /* Construct child key */ + if ( asprintf ( &child_key, "%s/%s", key, child ) < 0 ){ + DBGC ( xen, "XENSTORE could not allocate child " + "key \"%s/%s\"\n", key, child ); + rc = -ENOMEM; + break; + } + + /* Recurse into child key, continuing on error */ + xenstore_dump ( xen, child_key ); + free ( child_key ); + } + free ( children ); + } +} diff --git a/src/libgcc/__divmoddi4.c b/src/libgcc/__divmoddi4.c new file mode 100644 index 00000000..c00acb5a --- /dev/null +++ b/src/libgcc/__divmoddi4.c @@ -0,0 +1,25 @@ +#include "libgcc.h" + +__libgcc int64_t __divmoddi4(int64_t num, int64_t den, int64_t *rem_p) +{ + int minus = 0; + int64_t v; + + 
if ( num < 0 ) { + num = -num; + minus = 3; + } + if ( den < 0 ) { + den = -den; + minus ^= 1; + } + + v = __udivmoddi4(num, den, (uint64_t *)rem_p); + /* Quotient sign is num^den; remainder takes the sign of num */ + if ( minus & 1 ) + v = -v; + if ( ( minus & 2 ) && rem_p ) + *rem_p = -(*rem_p); + + return v; +} diff --git a/src/libgcc/implicit.c b/src/libgcc/implicit.c new file mode 100644 index 00000000..645ae6d2 --- /dev/null +++ b/src/libgcc/implicit.c @@ -0,0 +1,26 @@ +/** @file + * + * gcc sometimes likes to insert implicit calls to memcpy() and + * memset(). Unfortunately, there doesn't seem to be any way to + * prevent it from doing this, or to force it to use the optimised + * versions as seen by C code; it insists on inserting symbol + * references to "memcpy" and "memset". We therefore include wrapper + * functions just to keep gcc happy. + * + */ + +#include + +void * gcc_implicit_memcpy ( void *dest, const void *src, + size_t len ) asm ( "memcpy" ); + +void * gcc_implicit_memcpy ( void *dest, const void *src, size_t len ) { + return memcpy ( dest, src, len ); +} + +void * gcc_implicit_memset ( void *dest, int character, + size_t len ) asm ( "memset" ); + +void * gcc_implicit_memset ( void *dest, int character, size_t len ) { + return memset ( dest, character, len ); +} diff --git a/src/net/infiniband/ib_service.c b/src/net/infiniband/ib_service.c new file mode 100644 index 00000000..f035382e --- /dev/null +++ b/src/net/infiniband/ib_service.c @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * Infiniband service records + * + */ + +/** + * Create service record management transaction + * + * @v ibdev Infiniband device + * @v mi Management interface + * @v name Service name + * @v op Management transaction operations + * @ret madx Management transaction, or NULL on error + */ +struct ib_mad_transaction * +ib_create_service_madx ( struct ib_device *ibdev, + struct ib_mad_interface *mi, const char *name, + struct ib_mad_transaction_operations *op ) { + union ib_mad mad; + struct ib_mad_sa *sa = &mad.sa; + struct ib_service_record *svc = &sa->sa_data.service_record; + + /* Construct service record request */ + memset ( sa, 0, sizeof ( *sa ) ); + sa->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; + sa->mad_hdr.class_version = IB_SA_CLASS_VERSION; + sa->mad_hdr.method = IB_MGMT_METHOD_GET; + sa->mad_hdr.attr_id = htons ( IB_SA_ATTR_SERVICE_REC ); + sa->sa_hdr.comp_mask[1] = htonl ( IB_SA_SERVICE_REC_NAME ); + snprintf ( svc->name, sizeof ( svc->name ), "%s", name ); + + /* Create management transaction */ + return ib_create_madx ( ibdev, mi, &mad, NULL, op ); +} diff --git a/src/net/infiniband/xsigo.c b/src/net/infiniband/xsigo.c new file mode 100644 index 00000000..0ee753c3 --- /dev/null +++ b/src/net/infiniband/xsigo.c @@ -0,0 +1,1859 @@ +/* + * Copyright (C) 2016 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Xsigo virtual Ethernet devices + * + */ + +/** A Xsigo device */ +struct xsigo_device { + /** Reference count */ + struct refcnt refcnt; + /** Underlying Infiniband device */ + struct ib_device *ibdev; + /** List of Xsigo devices */ + struct list_head list; + /** Device name */ + const char *name; + + /** Link opener timer */ + struct retry_timer opener; + + /** Discovery timer */ + struct retry_timer discovery; + /** Discovery management transaction (if any) */ + struct ib_mad_transaction *madx; + + /** List of configuration managers */ + struct list_head managers; +}; + +/** A Xsigo configuration manager */ +struct xsigo_manager { + /** Reference count */ + struct refcnt refcnt; + /** Xsigo device */ + struct xsigo_device *xdev; + /** List of managers */ + struct list_head list; + /** Device name */ + 
char name[16]; + /** Manager ID */ + struct xsigo_manager_id id; + + /** Data transfer interface */ + struct interface xfer; + /** Connection timer */ + struct retry_timer reopen; + /** Keepalive timer */ + struct retry_timer keepalive; + /** Transmission process */ + struct process process; + /** Pending transmissions */ + unsigned int pending; + /** Transmit sequence number */ + uint32_t seq; + + /** List of virtual Ethernet devices */ + struct list_head nics; +}; + +/** Configuration manager pending transmissions */ +enum xsigo_manager_pending { + /** Send connection request */ + XCM_TX_CONNECT = 0x0001, + /** Send registration message */ + XCM_TX_REGISTER = 0x0002, +}; + +/** A Xsigo virtual Ethernet device */ +struct xsigo_nic { + /** Configuration manager */ + struct xsigo_manager *xcm; + /** List of virtual Ethernet devices */ + struct list_head list; + /** Device name */ + char name[16]; + + /** Resource identifier */ + union ib_guid resource; + /** MAC address */ + uint8_t mac[ETH_ALEN]; + /** Network ID */ + unsigned long network; +}; + +/** Configuration manager service ID */ +static union ib_guid xcm_service_id = { + .bytes = XCM_SERVICE_ID, +}; + +/** List of all Xsigo devices */ +static LIST_HEAD ( xsigo_devices ); + +/** + * Free Xsigo device + * + * @v refcnt Reference count + */ +static void xsigo_free ( struct refcnt *refcnt ) { + struct xsigo_device *xdev = + container_of ( refcnt, struct xsigo_device, refcnt ); + + /* Sanity checks */ + assert ( ! timer_running ( &xdev->opener ) ); + assert ( ! 
timer_running ( &xdev->discovery ) ); + assert ( xdev->madx == NULL ); + assert ( list_empty ( &xdev->managers ) ); + + /* Drop reference to Infiniband device */ + ibdev_put ( xdev->ibdev ); + + /* Free device */ + free ( xdev ); +} + +/** + * Free configuration manager + * + * @v refcnt Reference count + */ +static void xcm_free ( struct refcnt *refcnt ) { + struct xsigo_manager *xcm = + container_of ( refcnt, struct xsigo_manager, refcnt ); + + /* Sanity checks */ + assert ( ! timer_running ( &xcm->reopen ) ); + assert ( ! timer_running ( &xcm->keepalive ) ); + assert ( ! process_running ( &xcm->process ) ); + assert ( list_empty ( &xcm->nics ) ); + + /* Drop reference to Xsigo device */ + ref_put ( &xcm->xdev->refcnt ); + + /* Free manager */ + free ( xcm ); +} + +/**************************************************************************** + * + * Virtual Ethernet (XVE) devices + * + **************************************************************************** + */ + +/** + * Create virtual Ethernet device + * + * @v xcm Configuration manager + * @v resource Resource identifier + * @v mac Ethernet MAC + * @v network Network identifier + * @v name Device name + * @ret rc Return status code + */ +static int xve_create ( struct xsigo_manager *xcm, union ib_guid *resource, + const uint8_t *mac, unsigned long network, + unsigned long qkey, const char *name ) { + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + struct xsigo_nic *xve; + struct ib_address_vector broadcast; + int rc; + + /* Allocate and initialise structure */ + xve = zalloc ( sizeof ( *xve ) ); + if ( ! 
xve ) { + rc = -ENOMEM; + goto err_alloc; + } + xve->xcm = xcm; + snprintf ( xve->name, sizeof ( xve->name ), "%s", name ); + memcpy ( &xve->resource, resource, sizeof ( xve->resource ) ); + memcpy ( xve->mac, mac, ETH_ALEN ); + xve->network = network; + DBGC ( xve, "XVE %s created for %s " IB_GUID_FMT "\n", + xve->name, xcm->name, IB_GUID_ARGS ( resource ) ); + DBGC ( xve, "XVE %s is MAC %s on network %ld\n", + xve->name, eth_ntoa ( mac ), network ); + + /* Construct broadcast address vector */ + memset ( &broadcast, 0, sizeof ( broadcast ) ); + broadcast.qpn = IB_QPN_BROADCAST; + broadcast.qkey = qkey; + broadcast.gid_present = 1; + broadcast.gid.dwords[0] = htonl ( XVE_PREFIX ); + broadcast.gid.words[2] = htons ( ibdev->pkey ); + broadcast.gid.dwords[3] = htonl ( network ); + + /* Create EoIB device */ + if ( ( rc = eoib_create ( ibdev, xve->mac, &broadcast, + xve->name ) ) != 0 ) { + DBGC ( xve, "XVE %s could not create EoIB device: %s\n", + xve->name, strerror ( rc ) ); + goto err_create; + } + + /* Add to list of virtual Ethernet devices. Do this only + * after creating the EoIB device, so that our net device + * notifier won't attempt to send an operational state update + * before we have acknowledged the installation. 
+ */ + list_add ( &xve->list, &xcm->nics ); + + return 0; + + list_del ( &xve->list ); + err_create: + free ( xve ); + err_alloc: + return rc; +} + +/** + * Find virtual Ethernet device + * + * @v xcm Configuration manager + * @v resource Resource identifier + * @ret xve Virtual Ethernet device, or NULL + */ +static struct xsigo_nic * xve_find ( struct xsigo_manager *xcm, + union ib_guid *resource ) { + struct xsigo_nic *xve; + + list_for_each_entry ( xve, &xcm->nics, list ) { + if ( memcmp ( resource, &xve->resource, + sizeof ( *resource ) ) == 0 ) + return xve; + } + return NULL; +} + +/** + * Destroy virtual Ethernet device + * + * @v xve Virtual Ethernet device + */ +static void xve_destroy ( struct xsigo_nic *xve ) { + struct xsigo_manager *xcm = xve->xcm; + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + struct eoib_device *eoib; + + /* Destroy corresponding EoIB device, if any */ + if ( ( eoib = eoib_find ( ibdev, xve->mac ) ) ) + eoib_destroy ( eoib ); + + /* Remove from list of virtual Ethernet devices */ + list_del ( &xve->list ); + + /* Free virtual Ethernet device */ + DBGC ( xve, "XVE %s destroyed\n", xve->name ); + free ( xve ); +} + +/** + * Update virtual Ethernet device MTU + * + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @v mtu New MTU (excluding Ethernet and EoIB headers) + * @ret rc Return status code + */ +static int xve_update_mtu ( struct xsigo_nic *xve, struct eoib_device *eoib, + size_t mtu ) { + struct net_device *netdev = eoib->netdev; + size_t max; + + /* Check that we can support this MTU */ + max = ( IB_MAX_PAYLOAD_SIZE - ( sizeof ( struct ethhdr ) + + sizeof ( struct eoib_header ) ) ); + if ( mtu > max ) { + DBGC ( xve, "XVE %s cannot support MTU %zd (max %zd)\n", + xve->name, mtu, max ); + return -ERANGE; + } + + /* Update MTU. No need to close/reopen the network device, + * since our Infiniband stack uses a fixed MTU anyway. 
Note + * that the network device sees the Ethernet frame header but + * not the EoIB header. + */ + netdev->max_pkt_len = ( mtu + sizeof ( struct ethhdr ) ); + netdev->mtu = mtu; + DBGC ( xve, "XVE %s has MTU %zd\n", xve->name, mtu ); + + return 0; +} + +/** + * Open virtual Ethernet device + * + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @v open New administrative state + * @ret rc Return status code + */ +static int xve_open ( struct xsigo_nic *xve, struct eoib_device *eoib ) { + struct net_device *netdev = eoib->netdev; + int rc; + + /* Do nothing if network device is already open */ + if ( netdev_is_open ( netdev ) ) + return 0; + DBGC ( xve, "XVE %s opening network device\n", xve->name ); + + /* Open network device */ + if ( ( rc = netdev_open ( netdev ) ) != 0 ) { + DBGC ( xve, "XVE %s could not open: %s\n", + xve->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Close virtual Ethernet device + * + * @v xve Virtual Ethernet device + * @v eoib EoIB device + */ +static void xve_close ( struct xsigo_nic *xve, struct eoib_device *eoib ) { + struct net_device *netdev = eoib->netdev; + + /* Do nothing if network device is already closed */ + if ( ! 
netdev_is_open ( netdev ) ) + return; + + /* Close network device */ + netdev_close ( netdev ); + DBGC ( xve, "XVE %s closed network device\n", xve->name ); +} + +/** + * Update virtual Ethernet device administrative state + * + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @v open New administrative state + * @ret rc Return status code + */ +static int xve_update_state ( struct xsigo_nic *xve, struct eoib_device *eoib, + int open ) { + + /* Open or close device, as applicable */ + if ( open ) { + return xve_open ( xve, eoib ); + } else { + xve_close ( xve, eoib ); + return 0; + } +} + +/** + * Update gateway (TCA) + * + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @v av Address vector, or NULL if no gateway + * @ret rc Return status code + */ +static int xve_update_tca ( struct xsigo_nic *xve, struct eoib_device *eoib, + struct ib_address_vector *av ) { + + /* Update gateway address */ + eoib_set_gateway ( eoib, av ); + if ( av ) { + DBGC ( xve, "XVE %s has TCA " IB_GID_FMT " data %#lx qkey " + "%#lx\n", xve->name, IB_GID_ARGS ( &av->gid ), av->qpn, + av->qkey ); + } else { + DBGC ( xve, "XVE %s has no TCA\n", xve->name ); + } + + /* The Linux driver will modify the local device's link state + * to reflect the EoIB-to-Ethernet gateway's link state, but + * this seems philosophically incorrect since communication + * within the EoIB broadcast domain still works regardless of + * the state of the gateway. 
+ */ + + return 0; +} + +/**************************************************************************** + * + * Server management protocol (XSMP) session messages + * + **************************************************************************** + */ + +/** + * Get session message name (for debugging) + * + * @v type Message type + * @ret name Message name + */ +static const char * xsmp_session_type ( unsigned int type ) { + static char buf[16]; + + switch ( type ) { + case XSMP_SESSION_TYPE_HELLO: return "HELLO"; + case XSMP_SESSION_TYPE_REGISTER: return "REGISTER"; + case XSMP_SESSION_TYPE_CONFIRM: return "CONFIRM"; + case XSMP_SESSION_TYPE_REJECT: return "REJECT"; + case XSMP_SESSION_TYPE_SHUTDOWN: return "SHUTDOWN"; + default: + snprintf ( buf, sizeof ( buf ), "UNKNOWN<%d>", type ); + return buf; + } +} + +/** + * Extract chassis name (for debugging) + * + * @v msg Session message + * @ret chassis Chassis name + */ +static const char * xsmp_chassis_name ( struct xsmp_session_message *msg ) { + static char chassis[ sizeof ( msg->chassis ) + 1 /* NUL */ ]; + + memcpy ( chassis, msg->chassis, sizeof ( msg->chassis ) ); + return chassis; +} + +/** + * Extract session name (for debugging) + * + * @v msg Session message + * @ret session Session name + */ +static const char * xsmp_session_name ( struct xsmp_session_message *msg ) { + static char session[ sizeof ( msg->session ) + 1 /* NUL */ ]; + + memcpy ( session, msg->session, sizeof ( msg->session ) ); + return session; +} + +/** + * Send session message + * + * @v xcm Configuration manager + * @v type Message type + * @ret rc Return status code + */ +static int xsmp_tx_session ( struct xsigo_manager *xcm, unsigned int type ) { + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + struct xsmp_session_message msg; + int rc; + + /* Construct session message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.hdr.type = XSMP_TYPE_SESSION; + msg.hdr.len = htons ( sizeof ( msg ) ); + msg.hdr.seq 
= htonl ( ++xcm->seq ); + memcpy ( &msg.hdr.src.guid, &ibdev->gid.s.guid, + sizeof ( msg.hdr.src.guid ) ); + memcpy ( &msg.hdr.dst.guid, &xcm->id.guid, + sizeof ( msg.hdr.dst.guid ) ); + msg.type = type; + msg.len = htons ( sizeof ( msg ) - sizeof ( msg.hdr ) ); + msg.os_type = XSIGO_OS_TYPE_GENERIC; + msg.resources = htons ( XSIGO_RESOURCE_XVE | + XSIGO_RESOURCE_NO_HA ); + msg.boot = htonl ( XSMP_BOOT_PXE ); + DBGCP ( xcm, "XCM %s TX[%d] session %s\n", xcm->name, + ntohl ( msg.hdr.seq ), xsmp_session_type ( msg.type ) ); + DBGCP_HDA ( xcm, 0, &msg, sizeof ( msg ) ); + + /* Send session message */ + if ( ( rc = xfer_deliver_raw ( &xcm->xfer, &msg, + sizeof ( msg ) ) ) != 0 ) { + DBGC ( xcm, "XCM %s TX session %s failed: %s\n", xcm->name, + xsmp_session_type ( msg.type ), strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Send registration message + * + * @v xcm Configuration manager + * @ret rc Return status code + */ +static inline int xsmp_tx_session_register ( struct xsigo_manager *xcm ) { + + DBGC ( xcm, "XCM %s registering with " IB_GUID_FMT "\n", + xcm->name, IB_GUID_ARGS ( &xcm->id.guid ) ); + + /* Send registration message */ + return xsmp_tx_session ( xcm, XSMP_SESSION_TYPE_REGISTER ); +} + +/** + * Send keepalive message + * + * @v xcm Configuration manager + * @ret rc Return status code + */ +static int xsmp_tx_session_hello ( struct xsigo_manager *xcm ) { + + /* Send keepalive message */ + return xsmp_tx_session ( xcm, XSMP_SESSION_TYPE_HELLO ); +} + +/** + * Handle received keepalive message + * + * @v xcm Configuration manager + * @v msg Keepalive message + * @ret rc Return status code + */ +static int xsmp_rx_session_hello ( struct xsigo_manager *xcm, + struct xsmp_session_message *msg __unused ) { + + /* Respond to keepalive message. Note that the XCM doesn't + * seem to actually ever send these. 
+ */ + return xsmp_tx_session_hello ( xcm ); +} + +/** + * Handle received registration confirmation message + * + * @v xcm Configuration manager + * @v msg Registration confirmation message + * @ret rc Return status code + */ +static int xsmp_rx_session_confirm ( struct xsigo_manager *xcm, + struct xsmp_session_message *msg ) { + + DBGC ( xcm, "XCM %s registered with \"%s\" as \"%s\"\n", xcm->name, + xsmp_chassis_name ( msg ), xsmp_session_name ( msg ) ); + + return 0; +} + +/** + * Handle received registration rejection message + * + * @v xcm Configuration manager + * @v msg Registration confirmation message + * @ret rc Return status code + */ +static int xsmp_rx_session_reject ( struct xsigo_manager *xcm, + struct xsmp_session_message *msg ) { + + DBGC ( xcm, "XCM %s rejected by \"%s\":\n", + xcm->name, xsmp_chassis_name ( msg ) ); + DBGC_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + + return -EPERM; +} + +/** + * Handle received shutdown message + * + * @v xcm Configuration manager + * @v msg Registration confirmation message + * @ret rc Return status code + */ +static int xsmp_rx_session_shutdown ( struct xsigo_manager *xcm, + struct xsmp_session_message *msg ) { + + DBGC ( xcm, "XCM %s shut down by \"%s\":\n", + xcm->name, xsmp_chassis_name ( msg ) ); + DBGC_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + + return -ENOTCONN; +} + +/** + * Handle received session message + * + * @v xcm Configuration manager + * @v msg Session message + * @ret rc Return status code + */ +static int xsmp_rx_session ( struct xsigo_manager *xcm, + struct xsmp_session_message *msg ) { + + DBGCP ( xcm, "XCM %s RX[%d] session %s\n", xcm->name, + ntohl ( msg->hdr.seq ), xsmp_session_type ( msg->type ) ); + DBGCP_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + + /* Handle message according to type */ + switch ( msg->type ) { + case XSMP_SESSION_TYPE_HELLO: + return xsmp_rx_session_hello ( xcm, msg ); + case XSMP_SESSION_TYPE_CONFIRM: + return xsmp_rx_session_confirm ( xcm, msg ); + case 
XSMP_SESSION_TYPE_REJECT: + return xsmp_rx_session_reject ( xcm, msg ); + case XSMP_SESSION_TYPE_SHUTDOWN: + return xsmp_rx_session_shutdown ( xcm, msg ); + default: + DBGC ( xcm, "XCM %s RX[%d] session unexpected %s:\n", xcm->name, + ntohl ( msg->hdr.seq ), xsmp_session_type ( msg->type )); + DBGC_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + return -EPROTO; + } +} + +/**************************************************************************** + * + * Server management protocol (XSMP) virtual Ethernet (XVE) messages + * + **************************************************************************** + */ + +/** + * Get virtual Ethernet message name (for debugging) + * + * @v type Message type + * @ret name Message name + */ +static const char * xsmp_xve_type ( unsigned int type ) { + static char buf[16]; + + switch ( type ) { + case XSMP_XVE_TYPE_INSTALL: return "INSTALL"; + case XSMP_XVE_TYPE_DELETE: return "DELETE"; + case XSMP_XVE_TYPE_UPDATE: return "UPDATE"; + case XSMP_XVE_TYPE_OPER_UP: return "OPER_UP"; + case XSMP_XVE_TYPE_OPER_DOWN: return "OPER_DOWN"; + case XSMP_XVE_TYPE_OPER_REQ: return "OPER_REQ"; + case XSMP_XVE_TYPE_READY: return "READY"; + default: + snprintf ( buf, sizeof ( buf ), "UNKNOWN<%d>", type ); + return buf; + } +} + +/** + * Send virtual Ethernet message + * + * @v xcm Configuration manager + * @v msg Partial message + * @ret rc Return status code + */ +static int xsmp_tx_xve ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + int rc; + + /* Fill in common header fields */ + msg->hdr.type = XSMP_TYPE_XVE; + msg->hdr.len = htons ( sizeof ( *msg ) ); + msg->hdr.seq = htonl ( ++xcm->seq ); + memcpy ( &msg->hdr.src.guid, &ibdev->gid.s.guid, + sizeof ( msg->hdr.src.guid ) ); + memcpy ( &msg->hdr.dst.guid, &xcm->id.guid, + sizeof ( msg->hdr.dst.guid ) ); + msg->len = htons ( sizeof ( *msg ) - sizeof ( msg->hdr ) ); + DBGCP ( xcm, "XCM %s TX[%d] xve %s code 
%#02x\n", xcm->name, + ntohl ( msg->hdr.seq ), xsmp_xve_type ( msg->type ), + msg->code ); + DBGCP_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + + /* Send virtual Ethernet message */ + if ( ( rc = xfer_deliver_raw ( &xcm->xfer, msg, + sizeof ( *msg ) ) ) != 0 ) { + DBGC ( xcm, "XCM %s TX xve %s failed: %s\n", xcm->name, + xsmp_xve_type ( msg->type ), strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Send virtual Ethernet message including current device parameters + * + * @v xcm Configuration manager + * @v msg Partial virtual Ethernet message + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @ret rc Return status code + */ +static int xsmp_tx_xve_params ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg, + struct xsigo_nic *xve, + struct eoib_device *eoib ) { + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + struct net_device *netdev = eoib->netdev; + + /* Set successful response code */ + msg->code = 0; + + /* Include network identifier, MTU, and current HCA parameters */ + msg->network = htonl ( xve->network ); + msg->mtu = htons ( netdev->max_pkt_len - sizeof ( struct ethhdr ) ); + msg->hca.prefix_le.qword = bswap_64 ( ibdev->gid.s.prefix.qword ); + msg->hca.pkey = htons ( ibdev->pkey ); + msg->hca.qkey = msg->tca.qkey; + if ( eoib->qp ) { + msg->hca.data = htonl ( eoib->qp->ext_qpn ); + msg->hca.qkey = htons ( eoib->qp->qkey ); + } + + /* The message type field is (ab)used to return the current + * operational status. + */ + if ( msg->type == XSMP_XVE_TYPE_OPER_REQ ) { + msg->type = ( netdev_is_open ( netdev ) ? 
+ XSMP_XVE_TYPE_OPER_UP : XSMP_XVE_TYPE_OPER_DOWN ); + } + + /* Send message */ + DBGC ( xve, "XVE %s network %d MTU %d ctrl %#x data %#x qkey %#04x " + "%s\n", xve->name, ntohl ( msg->network ), ntohs ( msg->mtu ), + ntohl ( msg->hca.ctrl ), ntohl ( msg->hca.data ), + ntohs ( msg->hca.qkey ), xsmp_xve_type ( msg->type ) ); + + return xsmp_tx_xve ( xcm, msg ); +} + +/** + * Send virtual Ethernet error response + * + * @v xcm Configuration manager + * @v msg Partial virtual Ethernet message + * @ret rc Return status code + */ +static inline int xsmp_tx_xve_nack ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + + /* Set error response code. (There aren't any meaningful + * detailed response codes defined by the wire protocol.) + */ + msg->code = XSMP_XVE_CODE_ERROR; + + /* Send message */ + return xsmp_tx_xve ( xcm, msg ); +} + +/** + * Send virtual Ethernet notification + * + * @v xcm Configuration manager + * @v type Message type + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @ret rc Return status code + */ +static int xsmp_tx_xve_notify ( struct xsigo_manager *xcm, + unsigned int type, + struct xsigo_nic *xve, + struct eoib_device *eoib ) { + struct xsmp_xve_message msg; + + /* Construct message */ + memset ( &msg, 0, sizeof ( msg ) ); + msg.type = type; + memcpy ( &msg.resource, &xve->resource, sizeof ( msg.resource ) ); + + /* Send message */ + return xsmp_tx_xve_params ( xcm, &msg, xve, eoib ); +} + +/** + * Send virtual Ethernet current operational state + * + * @v xcm Configuration manager + * @v xve Virtual Ethernet device + * @v eoib EoIB device + * @ret rc Return status code + */ +static inline int xsmp_tx_xve_oper ( struct xsigo_manager *xcm, + struct xsigo_nic *xve, + struct eoib_device *eoib ) { + + /* Send notification */ + return xsmp_tx_xve_notify ( xcm, XSMP_XVE_TYPE_OPER_REQ, xve, eoib ); +} + +/** + * Handle received virtual Ethernet modification message + * + * @v xcm Configuration manager + * @v msg Virtual 
Ethernet message + * @v update Update bitmask + * @ret rc Return status code + */ +static int xsmp_rx_xve_modify ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg, + unsigned int update ) { + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + struct xsigo_nic *xve; + struct eoib_device *eoib; + struct ib_address_vector tca; + size_t mtu; + int rc; + + /* Avoid returning uninitialised HCA parameters in response */ + memset ( &msg->hca, 0, sizeof ( msg->hca ) ); + + /* Find virtual Ethernet device */ + xve = xve_find ( xcm, &msg->resource ); + if ( ! xve ) { + DBGC ( xcm, "XCM %s unrecognised resource " IB_GUID_FMT "\n", + xcm->name, IB_GUID_ARGS ( &msg->resource ) ); + rc = -ENOENT; + goto err_no_xve; + } + + /* Find corresponding EoIB device */ + eoib = eoib_find ( ibdev, xve->mac ); + if ( ! eoib ) { + DBGC ( xve, "XVE %s has no EoIB device\n", xve->name ); + rc = -EPIPE; + goto err_no_eoib; + } + + /* The Xsigo management software fails to create the EoIB + * multicast group. This is a fundamental design flaw. + */ + eoib_force_group_creation ( eoib ); + + /* Extract modifiable parameters. Note that the TCA GID is + * erroneously transmitted as little-endian. 
+ */ + mtu = ntohs ( msg->mtu ); + tca.qpn = ntohl ( msg->tca.data ); + tca.qkey = ntohs ( msg->tca.qkey ); + tca.gid_present = 1; + tca.gid.s.prefix.qword = bswap_64 ( msg->tca.prefix_le.qword ); + tca.gid.s.guid.qword = bswap_64 ( msg->guid_le.qword ); + + /* Update MTU, if applicable */ + if ( ( update & XSMP_XVE_UPDATE_MTU ) && + ( ( rc = xve_update_mtu ( xve, eoib, mtu ) ) != 0 ) ) + goto err_mtu; + update &= ~XSMP_XVE_UPDATE_MTU; + + /* Update admin state, if applicable */ + if ( ( update & XSMP_XVE_UPDATE_STATE ) && + ( ( rc = xve_update_state ( xve, eoib, msg->state ) ) != 0 ) ) + goto err_state; + update &= ~XSMP_XVE_UPDATE_STATE; + + /* Remove gateway, if applicable */ + if ( ( update & XSMP_XVE_UPDATE_GW_DOWN ) && + ( ( rc = xve_update_tca ( xve, eoib, NULL ) ) != 0 ) ) + goto err_gw_down; + update &= ~XSMP_XVE_UPDATE_GW_DOWN; + + /* Update gateway, if applicable */ + if ( ( update & XSMP_XVE_UPDATE_GW_CHANGE ) && + ( ( rc = xve_update_tca ( xve, eoib, &tca ) ) != 0 ) ) + goto err_gw_change; + update &= ~XSMP_XVE_UPDATE_GW_CHANGE; + + /* Warn about unexpected updates */ + if ( update ) { + DBGC ( xve, "XVE %s unrecognised update(s) %#08x\n", + xve->name, update ); + } + + xsmp_tx_xve_params ( xcm, msg, xve, eoib ); + return 0; + + err_gw_change: + err_gw_down: + err_state: + err_mtu: + err_no_eoib: + err_no_xve: + /* Send NACK */ + xsmp_tx_xve_nack ( xcm, msg ); + return rc; +} + +/** + * Handle received virtual Ethernet installation message + * + * @v xcm Configuration manager + * @v msg Virtual Ethernet message + * @ret rc Return status code + */ +static int xsmp_rx_xve_install ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + union { + struct xsmp_xve_mac msg; + uint8_t raw[ETH_ALEN]; + } mac; + char name[ sizeof ( msg->name ) + 1 /* NUL */ ]; + unsigned long network; + unsigned long qkey; + unsigned int update; + int rc; + + /* Demangle MAC address (which is erroneously transmitted as + * little-endian). 
+ */ + mac.msg.high = bswap_16 ( msg->mac_le.high ); + mac.msg.low = bswap_32 ( msg->mac_le.low ); + + /* Extract interface name (which may not be NUL-terminated) */ + memcpy ( name, msg->name, ( sizeof ( name ) - 1 /* NUL */ ) ); + name[ sizeof ( name ) - 1 /* NUL */ ] = '\0'; + + /* Extract remaining message parameters */ + network = ntohl ( msg->network ); + qkey = ntohs ( msg->tca.qkey ); + DBGC2 ( xcm, "XCM %s " IB_GUID_FMT " install \"%s\" %s net %ld qkey " + "%#lx\n", xcm->name, IB_GUID_ARGS ( &msg->resource ), name, + eth_ntoa ( mac.raw ), network, qkey ); + + /* Create virtual Ethernet device, if applicable */ + if ( ( xve_find ( xcm, &msg->resource ) == NULL ) && + ( ( rc = xve_create ( xcm, &msg->resource, mac.raw, network, + qkey, name ) ) != 0 ) ) + goto err_create; + + /* Handle remaining parameters as for a modification message */ + update = XSMP_XVE_UPDATE_MTU; + if ( msg->uplink == XSMP_XVE_UPLINK ) + update |= XSMP_XVE_UPDATE_GW_CHANGE; + return xsmp_rx_xve_modify ( xcm, msg, update ); + + err_create: + /* Send NACK */ + xsmp_tx_xve_nack ( xcm, msg ); + return rc; +} + +/** + * Handle received virtual Ethernet deletion message + * + * @v xcm Configuration manager + * @v msg Virtual Ethernet message + * @ret rc Return status code + */ +static int xsmp_rx_xve_delete ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + struct xsigo_nic *xve; + + DBGC2 ( xcm, "XCM %s " IB_GUID_FMT " delete\n", + xcm->name, IB_GUID_ARGS ( &msg->resource ) ); + + /* Destroy virtual Ethernet device (if any) */ + if ( ( xve = xve_find ( xcm, &msg->resource ) ) ) + xve_destroy ( xve ); + + /* Send ACK */ + msg->code = 0; + xsmp_tx_xve ( xcm, msg ); + + return 0; +} + +/** + * Handle received virtual Ethernet update message + * + * @v xcm Configuration manager + * @v msg Virtual Ethernet message + * @ret rc Return status code + */ +static int xsmp_rx_xve_update ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + unsigned int update = ntohl ( 
msg->update ); + + DBGC2 ( xcm, "XCM %s " IB_GUID_FMT " update (%08x)\n", + xcm->name, IB_GUID_ARGS ( &msg->resource ), update ); + + /* Handle as a modification message */ + return xsmp_rx_xve_modify ( xcm, msg, update ); +} + +/** + * Handle received virtual Ethernet operational request message + * + * @v xcm Configuration manager + * @v msg Virtual Ethernet message + * @ret rc Return status code + */ +static int xsmp_rx_xve_oper_req ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + + DBGC2 ( xcm, "XCM %s " IB_GUID_FMT " operational request\n", + xcm->name, IB_GUID_ARGS ( &msg->resource ) ); + + /* Handle as a nullipotent modification message */ + return xsmp_rx_xve_modify ( xcm, msg, 0 ); +} + +/** + * Handle received virtual Ethernet readiness message + * + * @v xcm Configuration manager + * @v msg Virtual Ethernet message + * @ret rc Return status code + */ +static int xsmp_rx_xve_ready ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + int rc; + + DBGC2 ( xcm, "XCM %s " IB_GUID_FMT " ready\n", + xcm->name, IB_GUID_ARGS ( &msg->resource ) ); + + /* Handle as a nullipotent modification message */ + if ( ( rc = xsmp_rx_xve_modify ( xcm, msg, 0 ) ) != 0 ) + return rc; + + /* Send an unsolicited operational state update, since there + * is no other way to convey the current operational state. 
+ */ + msg->type = XSMP_XVE_TYPE_OPER_REQ; + if ( ( rc = xsmp_rx_xve_modify ( xcm, msg, 0 ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Handle received virtual Ethernet message + * + * @v xcm Configuration manager + * @v msg Virtual Ethernet message + * @ret rc Return status code + */ +static int xsmp_rx_xve ( struct xsigo_manager *xcm, + struct xsmp_xve_message *msg ) { + + DBGCP ( xcm, "XCM %s RX[%d] xve %s\n", xcm->name, + ntohl ( msg->hdr.seq ), xsmp_xve_type ( msg->type ) ); + DBGCP_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + + /* Handle message according to type */ + switch ( msg->type ) { + case XSMP_XVE_TYPE_INSTALL: + return xsmp_rx_xve_install ( xcm, msg ); + case XSMP_XVE_TYPE_DELETE: + return xsmp_rx_xve_delete ( xcm, msg ); + case XSMP_XVE_TYPE_UPDATE: + return xsmp_rx_xve_update ( xcm, msg ); + case XSMP_XVE_TYPE_OPER_REQ: + return xsmp_rx_xve_oper_req ( xcm, msg ); + case XSMP_XVE_TYPE_READY: + return xsmp_rx_xve_ready ( xcm, msg ); + default: + DBGC ( xcm, "XCM %s RX[%d] xve unexpected %s:\n", xcm->name, + ntohl ( msg->hdr.seq ), xsmp_xve_type ( msg->type ) ); + DBGC_HDA ( xcm, 0, msg, sizeof ( *msg ) ); + return -EPROTO; + } +} + +/**************************************************************************** + * + * Configuration managers (XCM) + * + **************************************************************************** + */ + +/** + * Close configuration manager connection + * + * @v xcm Configuration manager + * @v rc Reason for close + */ +static void xcm_close ( struct xsigo_manager *xcm, int rc ) { + + DBGC ( xcm, "XCM %s closed: %s\n", xcm->name, strerror ( rc ) ); + + /* Stop transmission process */ + process_del ( &xcm->process ); + + /* Stop keepalive timer */ + stop_timer ( &xcm->keepalive ); + + /* Restart data transfer interface */ + intf_restart ( &xcm->xfer, rc ); + + /* Schedule reconnection attempt */ + start_timer ( &xcm->reopen ); +} + +/** + * Send data to configuration manager + * + * @v xcm Configuration manager + */ 
+static void xcm_step ( struct xsigo_manager *xcm ) { + int rc; + + /* Do nothing unless we have something to send */ + if ( ! xcm->pending ) + return; + + /* Send (empty) connection request, if applicable */ + if ( xcm->pending & XCM_TX_CONNECT ) { + if ( ( rc = xfer_deliver_raw ( &xcm->xfer, NULL, 0 ) ) != 0 ) { + DBGC ( xcm, "XCM %s could not send connection request: " + "%s\n", xcm->name, strerror ( rc ) ); + goto err; + } + xcm->pending &= ~XCM_TX_CONNECT; + return; + } + + /* Wait until data transfer interface is connected */ + if ( ! xfer_window ( &xcm->xfer ) ) + return; + + /* Send registration message, if applicable */ + if ( xcm->pending & XCM_TX_REGISTER ) { + if ( ( rc = xsmp_tx_session_register ( xcm ) ) != 0 ) + goto err; + xcm->pending &= ~XCM_TX_REGISTER; + return; + } + + return; + + err: + xcm_close ( xcm, rc ); +} + +/** + * Receive data from configuration manager + * + * @v xcm Configuration manager + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int xcm_deliver ( struct xsigo_manager *xcm, struct io_buffer *iobuf, + struct xfer_metadata *meta __unused ) { + union xsmp_message *msg; + size_t len = iob_len ( iobuf ); + int rc; + + /* Sanity check */ + if ( len < sizeof ( msg->hdr ) ) { + DBGC ( xcm, "XCM %s underlength message:\n", xcm->name ); + DBGC_HDA ( xcm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EPROTO; + goto out; + } + msg = iobuf->data; + + /* Handle message according to type */ + if ( ! 
msg->hdr.type ) { + + /* Ignore unused communication manager private data blocks */ + rc = 0; + + } else if ( ( msg->hdr.type == XSMP_TYPE_SESSION ) && + ( len >= sizeof ( msg->sess ) ) ) { + + /* Session message */ + rc = xsmp_rx_session ( xcm, &msg->sess ); + + } else if ( ( msg->hdr.type == XSMP_TYPE_XVE ) && + ( len >= sizeof ( msg->xve ) ) ) { + + /* Virtual Ethernet message */ + xsmp_rx_xve ( xcm, &msg->xve ); + + /* Virtual Ethernet message errors are non-fatal */ + rc = 0; + + } else { + + /* Unknown message */ + DBGC ( xcm, "XCM %s unexpected message type %d:\n", + xcm->name, msg->hdr.type ); + DBGC_HDA ( xcm, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EPROTO; + } + + out: + free_iob ( iobuf ); + if ( rc != 0 ) + xcm_close ( xcm, rc ); + return rc; +} + +/** Configuration manager data transfer interface operations */ +static struct interface_operation xcm_xfer_op[] = { + INTF_OP ( xfer_deliver, struct xsigo_manager *, xcm_deliver ), + INTF_OP ( xfer_window_changed, struct xsigo_manager *, xcm_step ), + INTF_OP ( intf_close, struct xsigo_manager *, xcm_close ), +}; + +/** Configuration manager data transfer interface descriptor */ +static struct interface_descriptor xcm_xfer_desc = + INTF_DESC ( struct xsigo_manager, xfer, xcm_xfer_op ); + +/** Configuration manager process descriptor */ +static struct process_descriptor xcm_process_desc = + PROC_DESC_ONCE ( struct xsigo_manager, process, xcm_step ); + +/** + * Handle configuration manager connection timer expiry + * + * @v timer Connection timer + * @v fail Failure indicator + */ +static void xcm_reopen ( struct retry_timer *timer, int fail __unused ) { + struct xsigo_manager *xcm = + container_of ( timer, struct xsigo_manager, reopen ); + struct xsigo_device *xdev = xcm->xdev; + struct ib_device *ibdev = xdev->ibdev; + union ib_gid gid; + int rc; + + /* Stop transmission process */ + process_del ( &xcm->process ); + + /* Stop keepalive timer */ + stop_timer ( &xcm->keepalive ); + + /* Restart data 
transfer interface */ + intf_restart ( &xcm->xfer, -ECANCELED ); + + /* Reset sequence number */ + xcm->seq = 0; + + /* Construct GID */ + memcpy ( &gid.s.prefix, &ibdev->gid.s.prefix, sizeof ( gid.s.prefix ) ); + memcpy ( &gid.s.guid, &xcm->id.guid, sizeof ( gid.s.guid ) ); + DBGC ( xcm, "XCM %s connecting to " IB_GID_FMT "\n", + xcm->name, IB_GID_ARGS ( &gid ) ); + + /* Open CMRC connection */ + if ( ( rc = ib_cmrc_open ( &xcm->xfer, ibdev, &gid, + &xcm_service_id, xcm->name ) ) != 0 ) { + DBGC ( xcm, "XCM %s could not open CMRC connection: %s\n", + xcm->name, strerror ( rc ) ); + start_timer ( &xcm->reopen ); + return; + } + + /* Schedule transmissions */ + xcm->pending |= ( XCM_TX_CONNECT | XCM_TX_REGISTER ); + process_add ( &xcm->process ); + + /* Start keepalive timer */ + start_timer_fixed ( &xcm->keepalive, XSIGO_KEEPALIVE_INTERVAL ); + + return; +} + +/** + * Handle configuration manager keepalive timer expiry + * + * @v timer Connection timer + * @v fail Failure indicator + */ +static void xcm_keepalive ( struct retry_timer *timer, int fail __unused ) { + struct xsigo_manager *xcm = + container_of ( timer, struct xsigo_manager, keepalive ); + int rc; + + /* Send keepalive message. The server won't actually respond + * to these, but it gives the RC queue pair a chance to + * complain if it doesn't ever at least get an ACK. + */ + if ( ( rc = xsmp_tx_session_hello ( xcm ) ) != 0 ) { + xcm_close ( xcm, rc ); + return; + } + + /* Restart keepalive timer */ + start_timer_fixed ( &xcm->keepalive, XSIGO_KEEPALIVE_INTERVAL ); +} + +/** + * Create configuration manager + * + * @v xsigo Xsigo device + * @v id Configuration manager ID + * @ret rc Return status code + */ +static int xcm_create ( struct xsigo_device *xdev, + struct xsigo_manager_id *id ) { + struct xsigo_manager *xcm; + + /* Allocate and initialise structure */ + xcm = zalloc ( sizeof ( *xcm ) ); + if ( ! 
xcm ) + return -ENOMEM; + ref_init ( &xcm->refcnt, xcm_free ); + xcm->xdev = xdev; + ref_get ( &xcm->xdev->refcnt ); + snprintf ( xcm->name, sizeof ( xcm->name ), "%s:xcm-%d", + xdev->name, ntohs ( id->lid ) ); + memcpy ( &xcm->id, id, sizeof ( xcm->id ) ); + intf_init ( &xcm->xfer, &xcm_xfer_desc, &xcm->refcnt ); + timer_init ( &xcm->keepalive, xcm_keepalive, &xcm->refcnt ); + timer_init ( &xcm->reopen, xcm_reopen, &xcm->refcnt ); + process_init_stopped ( &xcm->process, &xcm_process_desc, &xcm->refcnt ); + INIT_LIST_HEAD ( &xcm->nics ); + + /* Start timer to open connection */ + start_timer_nodelay ( &xcm->reopen ); + + /* Add to list of managers and transfer reference to list */ + list_add ( &xcm->list, &xdev->managers ); + DBGC ( xcm, "XCM %s created for " IB_GUID_FMT " (LID %d)\n", xcm->name, + IB_GUID_ARGS ( &xcm->id.guid ), ntohs ( id->lid ) ); + return 0; +} + +/** + * Find configuration manager + * + * @v xsigo Xsigo device + * @v id Configuration manager ID + * @ret xcm Configuration manager, or NULL + */ +static struct xsigo_manager * xcm_find ( struct xsigo_device *xdev, + struct xsigo_manager_id *id ) { + struct xsigo_manager *xcm; + union ib_guid *guid = &id->guid; + + /* Find configuration manager */ + list_for_each_entry ( xcm, &xdev->managers, list ) { + if ( memcmp ( guid, &xcm->id.guid, sizeof ( *guid ) ) == 0 ) + return xcm; + } + return NULL; +} + +/** + * Destroy configuration manager + * + * @v xcm Configuration manager + */ +static void xcm_destroy ( struct xsigo_manager *xcm ) { + struct xsigo_nic *xve; + + /* Remove all EoIB NICs */ + while ( ( xve = list_first_entry ( &xcm->nics, struct xsigo_nic, + list ) ) ) { + xve_destroy ( xve ); + } + + /* Stop transmission process */ + process_del ( &xcm->process ); + + /* Stop timers */ + stop_timer ( &xcm->keepalive ); + stop_timer ( &xcm->reopen ); + + /* Shut down data transfer interface */ + intf_shutdown ( &xcm->xfer, 0 ); + + /* Remove from list of managers and drop list's reference */ + DBGC 
( xcm, "XCM %s destroyed\n", xcm->name ); + list_del ( &xcm->list ); + ref_put ( &xcm->refcnt ); +} + +/** + * Synchronise list of configuration managers + * + * @v xdev Xsigo device + * @v ids List of manager IDs + * @v count Number of manager IDs + * @ret rc Return status code + */ +static int xcm_list ( struct xsigo_device *xdev, struct xsigo_manager_id *ids, + unsigned int count ) { + struct xsigo_manager_id *id; + struct xsigo_manager *xcm; + struct xsigo_manager *tmp; + struct list_head list; + unsigned int i; + int rc; + + /* Create list of managers to be retained */ + INIT_LIST_HEAD ( &list ); + for ( i = 0, id = ids ; i < count ; i++, id++ ) { + if ( ( xcm = xcm_find ( xdev, id ) ) ) { + list_del ( &xcm->list ); + list_add_tail ( &xcm->list, &list ); + } + } + + /* Destroy any managers not in the list */ + list_for_each_entry_safe ( xcm, tmp, &xdev->managers, list ) + xcm_destroy ( xcm ); + list_splice ( &list, &xdev->managers ); + + /* Create any new managers in the list, and force reconnection + * for any changed LIDs. 
+ */ + for ( i = 0, id = ids ; i < count ; i++, id++ ) { + if ( ( xcm = xcm_find ( xdev, id ) ) ) { + if ( xcm->id.lid != id->lid ) + start_timer_nodelay ( &xcm->reopen ); + continue; + } + if ( ( rc = xcm_create ( xdev, id ) ) != 0 ) { + DBGC ( xdev, "XDEV %s could not create manager: %s\n", + xdev->name, strerror ( rc ) ); + return rc; + } + } + + return 0; +} + +/**************************************************************************** + * + * Configuration manager discovery + * + **************************************************************************** + */ + +/** A stage of discovery */ +struct xsigo_discovery { + /** Name */ + const char *name; + /** Management transaction operations */ + struct ib_mad_transaction_operations op; +}; + +/** + * Handle configuration manager lookup completion + * + * @v ibdev Infiniband device + * @v mi Management interface + * @v madx Management transaction + * @v rc Status code + * @v mad Received MAD (or NULL on error) + * @v av Source address vector (or NULL on error) + */ +static void xsigo_xcm_complete ( struct ib_device *ibdev, + struct ib_mad_interface *mi __unused, + struct ib_mad_transaction *madx, + int rc, union ib_mad *mad, + struct ib_address_vector *av __unused ) { + struct xsigo_device *xdev = ib_madx_get_ownerdata ( madx ); + union xsigo_mad *xsmad = container_of ( mad, union xsigo_mad, mad ); + struct xsigo_managers_reply *reply = &xsmad->reply; + + /* Check for failures */ + if ( ( rc == 0 ) && ( mad->hdr.status != htons ( IB_MGMT_STATUS_OK ) ) ) + rc = -ENODEV; + if ( rc != 0 ) { + DBGC ( xdev, "XDEV %s manager lookup failed: %s\n", + xdev->name, strerror ( rc ) ); + goto out; + } + + /* Sanity checks */ + if ( reply->count > ( sizeof ( reply->manager ) / + sizeof ( reply->manager[0] ) ) ) { + DBGC ( xdev, "XDEV %s has too many managers (%d)\n", + xdev->name, reply->count ); + goto out; + } + + /* Synchronise list of managers */ + if ( ( rc = xcm_list ( xdev, reply->manager, reply->count ) ) != 0 ) + 
goto out; + + /* Report an empty list of managers */ + if ( reply->count == 0 ) + DBGC ( xdev, "XDEV %s has no managers\n", xdev->name ); + + /* Delay next discovery attempt */ + start_timer_fixed ( &xdev->discovery, XSIGO_DISCOVERY_SUCCESS_DELAY ); + +out: + /* Destroy the completed transaction */ + ib_destroy_madx ( ibdev, ibdev->gsi, madx ); + xdev->madx = NULL; +} + +/** Configuration manager lookup discovery stage */ +static struct xsigo_discovery xsigo_xcm_discovery = { + .name = "manager", + .op = { + .complete = xsigo_xcm_complete, + }, +}; + +/** + * Handle directory service lookup completion + * + * @v ibdev Infiniband device + * @v mi Management interface + * @v madx Management transaction + * @v rc Status code + * @v mad Received MAD (or NULL on error) + * @v av Source address vector (or NULL on error) + */ +static void xsigo_xds_complete ( struct ib_device *ibdev, + struct ib_mad_interface *mi __unused, + struct ib_mad_transaction *madx, + int rc, union ib_mad *mad, + struct ib_address_vector *av __unused ) { + struct xsigo_device *xdev = ib_madx_get_ownerdata ( madx ); + union xsigo_mad *xsmad = container_of ( mad, union xsigo_mad, mad ); + struct xsigo_managers_request *request = &xsmad->request; + struct ib_service_record *svc; + struct ib_address_vector dest; + union ib_guid *guid; + + /* Allow for reuse of transaction pointer */ + xdev->madx = NULL; + + /* Check for failures */ + if ( ( rc == 0 ) && ( mad->hdr.status != htons ( IB_MGMT_STATUS_OK ) ) ) + rc = -ENODEV; + if ( rc != 0 ) { + DBGC ( xdev, "XDEV %s directory lookup failed: %s\n", + xdev->name, strerror ( rc ) ); + goto out; + } + + /* Construct address vector */ + memset ( &dest, 0, sizeof ( dest ) ); + svc = &mad->sa.sa_data.service_record; + dest.lid = ntohs ( svc->data16[0] ); + dest.sl = ibdev->sm_sl; + dest.qpn = IB_QPN_GSI; + dest.qkey = IB_QKEY_GSI; + guid = ( ( union ib_guid * ) &svc->data64[0] ); + DBGC2 ( xdev, "XDEV %s found directory at LID %d GUID " IB_GUID_FMT + "\n", 
xdev->name, dest.lid, IB_GUID_ARGS ( guid ) ); + + /* Construct request (reusing MAD buffer) */ + memset ( request, 0, sizeof ( *request ) ); + request->mad_hdr.mgmt_class = XSIGO_MGMT_CLASS; + request->mad_hdr.class_version = XSIGO_MGMT_CLASS_VERSION; + request->mad_hdr.method = IB_MGMT_METHOD_GET; + request->mad_hdr.attr_id = htons ( XSIGO_ATTR_XCM_REQUEST ); + memcpy ( &request->server.guid, &ibdev->gid.s.guid, + sizeof ( request->server.guid ) ); + snprintf ( request->os_version, sizeof ( request->os_version ), + "%s %s", product_short_name, product_version ); + snprintf ( request->arch, sizeof ( request->arch ), _S2 ( ARCH ) ); + request->os_type = XSIGO_OS_TYPE_GENERIC; + request->resources = htons ( XSIGO_RESOURCES_PRESENT | + XSIGO_RESOURCE_XVE | + XSIGO_RESOURCE_NO_HA ); + + /* The handling of this request on the server side is a + * textbook example of how not to design a wire protocol. The + * server uses the _driver_ version number to determine which + * fields are present. + */ + request->driver_version = htonl ( 0x2a2a2a ); + + /* The build version field is ignored unless it happens to + * contain the substring "xg-". + */ + snprintf ( request->build, sizeof ( request->build ), + "not-xg-%08lx", build_id ); + + /* The server side user interface occasionally has no way to + * refer to an entry with an empty hostname. + */ + fetch_string_setting ( NULL, &hostname_setting, request->hostname, + sizeof ( request->hostname ) ); + if ( ! request->hostname[0] ) { + snprintf ( request->hostname, sizeof ( request->hostname ), + "%s-" IB_GUID_FMT, product_short_name, + IB_GUID_ARGS ( &ibdev->gid.s.guid ) ); + } + + /* Start configuration manager lookup */ + xdev->madx = ib_create_madx ( ibdev, ibdev->gsi, mad, &dest, + &xsigo_xcm_discovery.op ); + if ( ! 
xdev->madx ) { + DBGC ( xdev, "XDEV %s could not start manager lookup\n", + xdev->name ); + goto out; + } + ib_madx_set_ownerdata ( xdev->madx, xdev ); + +out: + /* Destroy the completed transaction */ + ib_destroy_madx ( ibdev, ibdev->gsi, madx ); +} + +/** Directory service lookup discovery stage */ +static struct xsigo_discovery xsigo_xds_discovery = { + .name = "directory", + .op = { + .complete = xsigo_xds_complete, + }, +}; + +/** + * Discover configuration managers + * + * @v timer Retry timer + * @v over Failure indicator + */ +static void xsigo_discover ( struct retry_timer *timer, int over __unused ) { + struct xsigo_device *xdev = + container_of ( timer, struct xsigo_device, discovery ); + struct ib_device *ibdev = xdev->ibdev; + struct xsigo_discovery *discovery; + + /* Restart timer */ + start_timer_fixed ( &xdev->discovery, XSIGO_DISCOVERY_FAILURE_DELAY ); + + /* Cancel any pending discovery transaction */ + if ( xdev->madx ) { + discovery = container_of ( xdev->madx->op, + struct xsigo_discovery, op ); + DBGC ( xdev, "XDEV %s timed out waiting for %s lookup\n", + xdev->name, discovery->name ); + ib_destroy_madx ( ibdev, ibdev->gsi, xdev->madx ); + xdev->madx = NULL; + } + + /* Start directory service lookup */ + xdev->madx = ib_create_service_madx ( ibdev, ibdev->gsi, + XDS_SERVICE_NAME, + &xsigo_xds_discovery.op ); + if ( ! 
xdev->madx ) { + DBGC ( xdev, "XDEV %s could not start directory lookup\n", + xdev->name ); + return; + } + ib_madx_set_ownerdata ( xdev->madx, xdev ); +} + +/**************************************************************************** + * + * Infiniband device driver + * + **************************************************************************** + */ + +/** + * Open link and start discovery + * + * @v opener Link opener + * @v over Failure indicator + */ +static void xsigo_ib_open ( struct retry_timer *opener, int over __unused ) { + struct xsigo_device *xdev = + container_of ( opener, struct xsigo_device, opener ); + struct ib_device *ibdev = xdev->ibdev; + int rc; + + /* Open Infiniband device */ + if ( ( rc = ib_open ( ibdev ) ) != 0 ) { + DBGC ( xdev, "XDEV %s could not open: %s\n", + xdev->name, strerror ( rc ) ); + /* Delay and try again */ + start_timer_fixed ( &xdev->opener, XSIGO_OPEN_RETRY_DELAY ); + return; + } + + /* If link is already up, then start discovery */ + if ( ib_link_ok ( ibdev ) ) + start_timer_nodelay ( &xdev->discovery ); +} + +/** + * Probe Xsigo device + * + * @v ibdev Infiniband device + * @ret rc Return status code + */ +static int xsigo_ib_probe ( struct ib_device *ibdev ) { + struct xsigo_device *xdev; + + /* Allocate and initialise structure */ + xdev = zalloc ( sizeof ( *xdev ) ); + if ( ! xdev ) + return -ENOMEM; + ref_init ( &xdev->refcnt, xsigo_free ); + xdev->ibdev = ibdev_get ( ibdev ); + xdev->name = ibdev->name; + timer_init ( &xdev->opener, xsigo_ib_open, &xdev->refcnt ); + timer_init ( &xdev->discovery, xsigo_discover, &xdev->refcnt ); + INIT_LIST_HEAD ( &xdev->managers ); + + /* Start timer to open Infiniband device. (We are currently + * within the Infiniband device probe callback list; opening + * the device here would have interesting side-effects.) 
+ */ + start_timer_nodelay ( &xdev->opener ); + + /* Add to list of devices and transfer reference to list */ + list_add_tail ( &xdev->list, &xsigo_devices ); + DBGC ( xdev, "XDEV %s created for " IB_GUID_FMT "\n", + xdev->name, IB_GUID_ARGS ( &ibdev->gid.s.guid ) ); + return 0; +} + +/** + * Handle device or link status change + * + * @v ibdev Infiniband device + */ +static void xsigo_ib_notify ( struct ib_device *ibdev ) { + struct xsigo_device *xdev; + + /* Stop/restart discovery on any attached devices */ + list_for_each_entry ( xdev, &xsigo_devices, list ) { + + /* Skip non-attached devices */ + if ( xdev->ibdev != ibdev ) + continue; + + /* Stop any ongoing discovery */ + if ( xdev->madx ) { + ib_destroy_madx ( ibdev, ibdev->gsi, xdev->madx ); + xdev->madx = NULL; + } + stop_timer ( &xdev->discovery ); + + /* If link is up, then start discovery */ + if ( ib_link_ok ( ibdev ) ) + start_timer_nodelay ( &xdev->discovery ); + } +} + +/** + * Remove Xsigo device + * + * @v ibdev Infiniband device + */ +static void xsigo_ib_remove ( struct ib_device *ibdev ) { + struct xsigo_device *xdev; + struct xsigo_device *tmp; + + /* Remove any attached Xsigo devices */ + list_for_each_entry_safe ( xdev, tmp, &xsigo_devices, list ) { + + /* Skip non-attached devices */ + if ( xdev->ibdev != ibdev ) + continue; + + /* Stop any ongoing discovery */ + if ( xdev->madx ) { + ib_destroy_madx ( ibdev, ibdev->gsi, xdev->madx ); + xdev->madx = NULL; + } + stop_timer ( &xdev->discovery ); + + /* Destroy all configuration managers */ + xcm_list ( xdev, NULL, 0 ); + + /* Close Infiniband device, if applicable */ + if ( ! 
timer_running ( &xdev->opener ) ) + ib_close ( xdev->ibdev ); + + /* Stop link opener */ + stop_timer ( &xdev->opener ); + + /* Remove from list of devices and drop list's reference */ + DBGC ( xdev, "XDEV %s destroyed\n", xdev->name ); + list_del ( &xdev->list ); + ref_put ( &xdev->refcnt ); + } +} + +/** Xsigo Infiniband driver */ +struct ib_driver xsigo_ib_driver __ib_driver = { + .name = "Xsigo", + .probe = xsigo_ib_probe, + .notify = xsigo_ib_notify, + .remove = xsigo_ib_remove, +}; + +/**************************************************************************** + * + * Network device driver + * + **************************************************************************** + */ + +/** + * Handle device or link status change + * + * @v netdev Network device + */ +static void xsigo_net_notify ( struct net_device *netdev ) { + struct xsigo_device *xdev; + struct ib_device *ibdev; + struct xsigo_manager *xcm; + struct xsigo_nic *xve; + struct eoib_device *eoib; + + /* Send current operational state to XCM, if applicable */ + list_for_each_entry ( xdev, &xsigo_devices, list ) { + ibdev = xdev->ibdev; + list_for_each_entry ( xcm, &xdev->managers, list ) { + list_for_each_entry ( xve, &xcm->nics, list ) { + eoib = eoib_find ( ibdev, xve->mac ); + if ( ! eoib ) + continue; + if ( eoib->netdev != netdev ) + continue; + xsmp_tx_xve_oper ( xcm, xve, eoib ); + } + } + } +} + +/** Xsigo network driver */ +struct net_driver xsigo_net_driver __net_driver = { + .name = "Xsigo", + .notify = xsigo_net_notify, +}; diff --git a/src/net/pccrc.c b/src/net/pccrc.c new file mode 100644 index 00000000..4cd82cd1 --- /dev/null +++ b/src/net/pccrc.c @@ -0,0 +1,818 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Peer Content Caching and Retrieval: Content Identification [MS-PCCRC] + * + */ + +/****************************************************************************** + * + * Utility functions + * + ****************************************************************************** + */ + +/** + * Transcribe hash value (for debugging) + * + * @v info Content information + * @v hash Hash value + * @ret string Hash value string + */ +static inline const char * +peerdist_info_hash_ntoa ( const struct peerdist_info *info, const void *hash ) { + static char buf[ ( 2 * PEERDIST_DIGEST_MAX_SIZE ) + 1 /* NUL */ ]; + size_t digestsize = info->digestsize; + + /* Sanity check */ + assert ( info != NULL ); + assert ( digestsize != 0 ); + assert ( base16_encoded_len ( digestsize ) < sizeof ( buf ) ); + + /* Transcribe hash value */ + base16_encode ( hash, digestsize, buf, sizeof ( buf ) ); + return buf; +} + +/** + * Get raw data + * + * @v info Content information + * @v data Data buffer + * @v offset Starting offset + * @v len Length + * @ret rc Return status code + */ +static int peerdist_info_get ( const struct peerdist_info *info, void *data, + size_t 
offset, size_t len ) { + + /* Sanity check */ + if ( ( offset > info->raw.len ) || + ( len > ( info->raw.len - offset ) ) ) { + DBGC ( info, "PCCRC %p data underrun at [%zx,%zx) of %zx\n", + info, offset, ( offset + len ), info->raw.len ); + return -ERANGE; + } + + /* Copy data */ + copy_from_user ( data, info->raw.data, offset, len ); + + return 0; +} + +/** + * Populate segment hashes + * + * @v segment Content information segment to fill in + * @v hash Segment hash of data + * @v secret Segment secret + */ +static void peerdist_info_segment_hash ( struct peerdist_info_segment *segment, + const void *hash, const void *secret ){ + const struct peerdist_info *info = segment->info; + struct digest_algorithm *digest = info->digest; + uint8_t ctx[digest->ctxsize]; + size_t digestsize = info->digestsize; + size_t secretsize = digestsize; + static const uint16_t magic[] = PEERDIST_SEGMENT_ID_MAGIC; + + /* Sanity check */ + assert ( digestsize <= sizeof ( segment->hash ) ); + assert ( digestsize <= sizeof ( segment->secret ) ); + assert ( digestsize <= sizeof ( segment->id ) ); + + /* Get segment hash of data */ + memcpy ( segment->hash, hash, digestsize ); + + /* Get segment secret */ + memcpy ( segment->secret, secret, digestsize ); + + /* Calculate segment identifier */ + hmac_init ( digest, ctx, segment->secret, &secretsize ); + assert ( secretsize == digestsize ); + hmac_update ( digest, ctx, segment->hash, digestsize ); + hmac_update ( digest, ctx, magic, sizeof ( magic ) ); + hmac_final ( digest, ctx, segment->secret, &secretsize, segment->id ); + assert ( secretsize == digestsize ); +} + +/****************************************************************************** + * + * Content Information version 1 + * + ****************************************************************************** + */ + +/** + * Get number of blocks within a block description + * + * @v info Content information + * @v offset Block description offset + * @ret blocks Number of blocks, or 
negative error + */ +static int peerdist_info_v1_blocks ( const struct peerdist_info *info, + size_t offset ) { + struct peerdist_info_v1_block raw; + unsigned int blocks; + int rc; + + /* Get block description header */ + if ( ( rc = peerdist_info_get ( info, &raw, offset, + sizeof ( raw ) ) ) != 0 ) + return rc; + + /* Calculate number of blocks */ + blocks = le32_to_cpu ( raw.blocks ); + + return blocks; +} + +/** + * Locate block description + * + * @v info Content information + * @v index Segment index + * @ret offset Block description offset, or negative error + */ +static ssize_t peerdist_info_v1_block_offset ( const struct peerdist_info *info, + unsigned int index ) { + size_t digestsize = info->digestsize; + unsigned int i; + size_t offset; + int blocks; + int rc; + + /* Sanity check */ + assert ( index < info->segments ); + + /* Calculate offset of first block description */ + offset = ( sizeof ( struct peerdist_info_v1 ) + + ( info->segments * + sizeof ( peerdist_info_v1_segment_t ( digestsize ) ) ) ); + + /* Iterate over block descriptions until we find this segment */ + for ( i = 0 ; i < index ; i++ ) { + + /* Get number of blocks */ + blocks = peerdist_info_v1_blocks ( info, offset ); + if ( blocks < 0 ) { + rc = blocks; + DBGC ( info, "PCCRC %p segment %d could not get number " + "of blocks: %s\n", info, i, strerror ( rc ) ); + return rc; + } + + /* Move to next block description */ + offset += sizeof ( peerdist_info_v1_block_t ( digestsize, + blocks ) ); + } + + return offset; +} + +/** + * Populate content information + * + * @v info Content information to fill in + * @ret rc Return status code + */ +static int peerdist_info_v1 ( struct peerdist_info *info ) { + struct peerdist_info_v1 raw; + struct peerdist_info_segment first; + struct peerdist_info_segment last; + size_t first_skip; + size_t last_skip; + size_t last_read; + int rc; + + /* Get raw header */ + if ( ( rc = peerdist_info_get ( info, &raw, 0, sizeof ( raw ) ) ) != 0){ + DBGC ( info, 
"PCCRC %p could not get V1 content information: " + "%s\n", info, strerror ( rc ) ); + return rc; + } + assert ( raw.version.raw == cpu_to_le16 ( PEERDIST_INFO_V1 ) ); + + /* Determine hash algorithm */ + switch ( raw.hash ) { + case cpu_to_le32 ( PEERDIST_INFO_V1_HASH_SHA256 ) : + info->digest = &sha256_algorithm; + break; + case cpu_to_le32 ( PEERDIST_INFO_V1_HASH_SHA384 ) : + info->digest = &sha384_algorithm; + break; + case cpu_to_le32 ( PEERDIST_INFO_V1_HASH_SHA512 ) : + info->digest = &sha512_algorithm; + break; + default: + DBGC ( info, "PCCRC %p unsupported hash algorithm %#08x\n", + info, le32_to_cpu ( raw.hash ) ); + return -ENOTSUP; + } + info->digestsize = info->digest->digestsize; + assert ( info->digest != NULL ); + DBGC2 ( info, "PCCRC %p using %s[%zd]\n", + info, info->digest->name, ( info->digestsize * 8 ) ); + + /* Calculate number of segments */ + info->segments = le32_to_cpu ( raw.segments ); + + /* Get first segment */ + if ( ( rc = peerdist_info_segment ( info, &first, 0 ) ) != 0 ) + return rc; + + /* Calculate range start offset */ + info->range.start = first.range.start; + + /* Calculate trimmed range start offset */ + first_skip = le32_to_cpu ( raw.first ); + info->trim.start = ( first.range.start + first_skip ); + + /* Get last segment */ + if ( ( rc = peerdist_info_segment ( info, &last, + ( info->segments - 1 ) ) ) != 0 ) + return rc; + + /* Calculate range end offset */ + info->range.end = last.range.end; + + /* Calculate trimmed range end offset */ + if ( raw.last ) { + /* Explicit length to include from last segment is given */ + last_read = le32_to_cpu ( raw.last ); + last_skip = ( last.index ? 
0 : first_skip ); + info->trim.end = ( last.range.start + last_skip + last_read ); + } else { + /* No explicit length given: range extends to end of segment */ + info->trim.end = last.range.end; + } + + return 0; +} + +/** + * Populate content information segment + * + * @v segment Content information segment to fill in + * @ret rc Return status code + */ +static int peerdist_info_v1_segment ( struct peerdist_info_segment *segment ) { + const struct peerdist_info *info = segment->info; + size_t digestsize = info->digestsize; + peerdist_info_v1_segment_t ( digestsize ) raw; + ssize_t raw_offset; + int blocks; + int rc; + + /* Sanity checks */ + assert ( segment->index < info->segments ); + + /* Get raw description */ + raw_offset = ( sizeof ( struct peerdist_info_v1 ) + + ( segment->index * sizeof ( raw ) ) ); + if ( ( rc = peerdist_info_get ( info, &raw, raw_offset, + sizeof ( raw ) ) ) != 0 ) { + DBGC ( info, "PCCRC %p segment %d could not get segment " + "description: %s\n", info, segment->index, + strerror ( rc ) ); + return rc; + } + + /* Calculate start offset of this segment */ + segment->range.start = le64_to_cpu ( raw.segment.offset ); + + /* Calculate end offset of this segment */ + segment->range.end = ( segment->range.start + + le32_to_cpu ( raw.segment.len ) ); + + /* Calculate block size of this segment */ + segment->blksize = le32_to_cpu ( raw.segment.blksize ); + + /* Locate block description for this segment */ + raw_offset = peerdist_info_v1_block_offset ( info, segment->index ); + if ( raw_offset < 0 ) { + rc = raw_offset; + return rc; + } + + /* Get number of blocks */ + blocks = peerdist_info_v1_blocks ( info, raw_offset ); + if ( blocks < 0 ) { + rc = blocks; + DBGC ( info, "PCCRC %p segment %d could not get number of " + "blocks: %s\n", info, segment->index, strerror ( rc ) ); + return rc; + } + segment->blocks = blocks; + + /* Calculate segment hashes */ + peerdist_info_segment_hash ( segment, raw.hash, raw.secret ); + + return 0; +} + +/** + 
* Populate content information block + * + * @v block Content information block to fill in + * @ret rc Return status code + */ +static int peerdist_info_v1_block ( struct peerdist_info_block *block ) { + const struct peerdist_info_segment *segment = block->segment; + const struct peerdist_info *info = segment->info; + size_t digestsize = info->digestsize; + peerdist_info_v1_block_t ( digestsize, segment->blocks ) raw; + ssize_t raw_offset; + int rc; + + /* Sanity checks */ + assert ( block->index < segment->blocks ); + + /* Calculate start offset of this block */ + block->range.start = ( segment->range.start + + ( block->index * segment->blksize ) ); + + /* Calculate end offset of this block */ + block->range.end = ( block->range.start + segment->blksize ); + if ( block->range.end > segment->range.end ) + block->range.end = segment->range.end; + + /* Locate block description */ + raw_offset = peerdist_info_v1_block_offset ( info, segment->index ); + if ( raw_offset < 0 ) { + rc = raw_offset; + return rc; + } + + /* Get block hash */ + raw_offset += offsetof ( typeof ( raw ), hash[block->index] ); + if ( ( rc = peerdist_info_get ( info, block->hash, raw_offset, + digestsize ) ) != 0 ) { + DBGC ( info, "PCCRC %p segment %d block %d could not get " + "hash: %s\n", info, segment->index, block->index, + strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** Content information version 1 operations */ +static struct peerdist_info_operations peerdist_info_v1_operations = { + .info = peerdist_info_v1, + .segment = peerdist_info_v1_segment, + .block = peerdist_info_v1_block, +}; + +/****************************************************************************** + * + * Content Information version 2 + * + ****************************************************************************** + */ + +/** A segment cursor */ +struct peerdist_info_v2_cursor { + /** Raw data offset */ + size_t offset; + /** Number of segments remaining within this chunk */ + unsigned int remaining; + 
/** Accumulated segment length */ + size_t len; +}; + +/** + * Initialise segment cursor + * + * @v cursor Segment cursor + */ +static inline void +peerdist_info_v2_cursor_init ( struct peerdist_info_v2_cursor *cursor ) { + + /* Initialise cursor */ + cursor->offset = ( sizeof ( struct peerdist_info_v2 ) + + sizeof ( struct peerdist_info_v2_chunk ) ); + cursor->remaining = 0; + cursor->len = 0; +} + +/** + * Update segment cursor to next segment description + * + * @v info Content information + * @v offset Current offset + * @v remaining Number of segments remaining within this chunk + * @ret rc Return status code + */ +static int +peerdist_info_v2_cursor_next ( const struct peerdist_info *info, + struct peerdist_info_v2_cursor *cursor ) { + size_t digestsize = info->digestsize; + peerdist_info_v2_segment_t ( digestsize ) raw; + struct peerdist_info_v2_chunk chunk; + int rc; + + /* Get chunk description if applicable */ + if ( ! cursor->remaining ) { + + /* Get chunk description */ + if ( ( rc = peerdist_info_get ( info, &chunk, + ( cursor->offset - + sizeof ( chunk ) ), + sizeof ( chunk ) ) ) != 0 ) + return rc; + + /* Update number of segments remaining */ + cursor->remaining = ( be32_to_cpu ( chunk.len ) / + sizeof ( raw ) ); + } + + /* Get segment description header */ + if ( ( rc = peerdist_info_get ( info, &raw.segment, cursor->offset, + sizeof ( raw.segment ) ) ) != 0 ) + return rc; + + /* Update cursor */ + cursor->offset += sizeof ( raw ); + cursor->remaining--; + if ( ! 
cursor->remaining ) + cursor->offset += sizeof ( chunk ); + cursor->len += be32_to_cpu ( raw.segment.len ); + + return 0; +} + +/** + * Get number of segments and total length + * + * @v info Content information + * @v len Length to fill in + * @ret rc Number of segments, or negative error + */ +static int peerdist_info_v2_segments ( const struct peerdist_info *info, + size_t *len ) { + struct peerdist_info_v2_cursor cursor; + unsigned int segments; + int rc; + + /* Iterate over all segments */ + for ( peerdist_info_v2_cursor_init ( &cursor ), segments = 0 ; + cursor.offset < info->raw.len ; segments++ ) { + + /* Update segment cursor */ + if ( ( rc = peerdist_info_v2_cursor_next ( info, + &cursor ) ) != 0 ) { + DBGC ( info, "PCCRC %p segment %d could not update " + "segment cursor: %s\n", + info, segments, strerror ( rc ) ); + return rc; + } + } + + /* Record accumulated length */ + *len = cursor.len; + + return segments; +} + +/** + * Populate content information + * + * @v info Content information to fill in + * @ret rc Return status code + */ +static int peerdist_info_v2 ( struct peerdist_info *info ) { + struct peerdist_info_v2 raw; + size_t len = 0; + int segments; + int rc; + + /* Get raw header */ + if ( ( rc = peerdist_info_get ( info, &raw, 0, sizeof ( raw ) ) ) != 0){ + DBGC ( info, "PCCRC %p could not get V2 content information: " + "%s\n", info, strerror ( rc ) ); + return rc; + } + assert ( raw.version.raw == cpu_to_le16 ( PEERDIST_INFO_V2 ) ); + + /* Determine hash algorithm */ + switch ( raw.hash ) { + case PEERDIST_INFO_V2_HASH_SHA512_TRUNC : + info->digest = &sha512_algorithm; + info->digestsize = ( 256 / 8 ); + break; + default: + DBGC ( info, "PCCRC %p unsupported hash algorithm %#02x\n", + info, raw.hash ); + return -ENOTSUP; + } + assert ( info->digest != NULL ); + DBGC2 ( info, "PCCRC %p using %s[%zd]\n", + info, info->digest->name, ( info->digestsize * 8 ) ); + + /* Calculate number of segments and total length */ + segments = 
peerdist_info_v2_segments ( info, &len ); + if ( segments < 0 ) { + rc = segments; + DBGC ( info, "PCCRC %p could not get segment count and length: " + "%s\n", info, strerror ( rc ) ); + return rc; + } + info->segments = segments; + + /* Calculate range start offset */ + info->range.start = be64_to_cpu ( raw.offset ); + + /* Calculate trimmed range start offset */ + info->trim.start = ( info->range.start + be32_to_cpu ( raw.first ) ); + + /* Calculate range end offset */ + info->range.end = ( info->range.start + len ); + + /* Calculate trimmed range end offset */ + info->trim.end = ( raw.len ? be64_to_cpu ( raw.len ) : + info->range.end ); + + return 0; +} + +/** + * Populate content information segment + * + * @v segment Content information segment to fill in + * @ret rc Return status code + */ +static int peerdist_info_v2_segment ( struct peerdist_info_segment *segment ) { + const struct peerdist_info *info = segment->info; + size_t digestsize = info->digestsize; + peerdist_info_v2_segment_t ( digestsize ) raw; + struct peerdist_info_v2_cursor cursor; + unsigned int index; + size_t len; + int rc; + + /* Sanity checks */ + assert ( segment->index < info->segments ); + + /* Iterate over all segments before the target segment */ + for ( peerdist_info_v2_cursor_init ( &cursor ), index = 0 ; + index < segment->index ; index++ ) { + + /* Update segment cursor */ + if ( ( rc = peerdist_info_v2_cursor_next ( info, + &cursor ) ) != 0 ) { + DBGC ( info, "PCCRC %p segment %d could not update " + "segment cursor: %s\n", + info, index, strerror ( rc ) ); + return rc; + } + } + + /* Get raw description */ + if ( ( rc = peerdist_info_get ( info, &raw, cursor.offset, + sizeof ( raw ) ) ) != 0 ) { + DBGC ( info, "PCCRC %p segment %d could not get segment " + "description: %s\n", + info, segment->index, strerror ( rc ) ); + return rc; + } + + /* Calculate start offset of this segment */ + segment->range.start = ( info->range.start + cursor.len ); + + /* Calculate end offset of 
this segment */ + len = be32_to_cpu ( raw.segment.len ); + segment->range.end = ( segment->range.start + len ); + + /* Model as a segment containing a single block */ + segment->blocks = 1; + segment->blksize = len; + + /* Calculate segment hashes */ + peerdist_info_segment_hash ( segment, raw.hash, raw.secret ); + + return 0; +} + +/** + * Populate content information block + * + * @v block Content information block to fill in + * @ret rc Return status code + */ +static int peerdist_info_v2_block ( struct peerdist_info_block *block ) { + const struct peerdist_info_segment *segment = block->segment; + const struct peerdist_info *info = segment->info; + size_t digestsize = info->digestsize; + + /* Sanity checks */ + assert ( block->index < segment->blocks ); + + /* Model as a block covering the whole segment */ + memcpy ( &block->range, &segment->range, sizeof ( block->range ) ); + memcpy ( block->hash, segment->hash, digestsize ); + + return 0; +} + +/** Content information version 2 operations */ +static struct peerdist_info_operations peerdist_info_v2_operations = { + .block = peerdist_info_v2_block, + .segment = peerdist_info_v2_segment, + .info = peerdist_info_v2, +}; + +/****************************************************************************** + * + * Content Information + * + ****************************************************************************** + */ + +/** + * Populate content information + * + * @v data Raw data + * @v len Length of raw data + * @v info Content information to fill in + * @ret rc Return status code + */ +int peerdist_info ( userptr_t data, size_t len, struct peerdist_info *info ) { + union peerdist_info_version version; + int rc; + + /* Initialise structure */ + memset ( info, 0, sizeof ( *info ) ); + info->raw.data = data; + info->raw.len = len; + + /* Get version */ + if ( ( rc = peerdist_info_get ( info, &version, 0, + sizeof ( version ) ) ) != 0 ) { + DBGC ( info, "PCCRC %p could not get version: %s\n", + info, strerror ( rc 
) ); + return rc; + } + DBGC2 ( info, "PCCRC %p version %d.%d\n", + info, version.major, version.minor ); + + /* Determine version */ + switch ( version.raw ) { + case cpu_to_le16 ( PEERDIST_INFO_V1 ) : + info->op = &peerdist_info_v1_operations; + break; + case cpu_to_le16 ( PEERDIST_INFO_V2 ) : + info->op = &peerdist_info_v2_operations; + break; + default: + DBGC ( info, "PCCRC %p unsupported version %d.%d\n", + info, version.major, version.minor ); + return -ENOTSUP; + } + assert ( info->op != NULL ); + assert ( info->op->info != NULL ); + + /* Populate content information */ + if ( ( rc = info->op->info ( info ) ) != 0 ) + return rc; + + DBGC2 ( info, "PCCRC %p range [%08zx,%08zx) covers [%08zx,%08zx) with " + "%d segments\n", info, info->range.start, info->range.end, + info->trim.start, info->trim.end, info->segments ); + return 0; +} + +/** + * Populate content information segment + * + * @v info Content information + * @v segment Content information segment to fill in + * @v index Segment index + * @ret rc Return status code + */ +int peerdist_info_segment ( const struct peerdist_info *info, + struct peerdist_info_segment *segment, + unsigned int index ) { + int rc; + + /* Sanity checks */ + assert ( info != NULL ); + assert ( info->op != NULL ); + assert ( info->op->segment != NULL ); + if ( index >= info->segments ) { + DBGC ( info, "PCCRC %p segment %d of [0,%d) out of range\n", + info, index, info->segments ); + return -ERANGE; + } + + /* Initialise structure */ + memset ( segment, 0, sizeof ( *segment ) ); + segment->info = info; + segment->index = index; + + /* Populate content information segment */ + if ( ( rc = info->op->segment ( segment ) ) != 0 ) + return rc; + + DBGC2 ( info, "PCCRC %p segment %d range [%08zx,%08zx) with %d " + "blocks\n", info, segment->index, segment->range.start, + segment->range.end, segment->blocks ); + DBGC2 ( info, "PCCRC %p segment %d digest %s\n", info, segment->index, + peerdist_info_hash_ntoa ( info, segment->hash ) ); 
+ DBGC2 ( info, "PCCRC %p segment %d secret %s\n", info, segment->index, + peerdist_info_hash_ntoa ( info, segment->secret ) ); + DBGC2 ( info, "PCCRC %p segment %d identf %s\n", info, segment->index, + peerdist_info_hash_ntoa ( info, segment->id ) ); + return 0; +} + +/** + * Populate content information block + * + * @v segment Content information segment + * @v block Content information block to fill in + * @v index Block index + * @ret rc Return status code + */ +int peerdist_info_block ( const struct peerdist_info_segment *segment, + struct peerdist_info_block *block, + unsigned int index ) { + const struct peerdist_info *info = segment->info; + size_t start; + size_t end; + int rc; + + /* Sanity checks */ + assert ( segment != NULL ); + assert ( info != NULL ); + assert ( info->op != NULL ); + assert ( info->op->block != NULL ); + if ( index >= segment->blocks ) { + DBGC ( info, "PCCRC %p segment %d block %d of [0,%d) out of " + "range\n", info, segment->index, index, segment->blocks); + return -ERANGE; + } + + /* Initialise structure */ + memset ( block, 0, sizeof ( *block ) ); + block->segment = segment; + block->index = index; + + /* Populate content information block */ + if ( ( rc = info->op->block ( block ) ) != 0 ) + return rc; + + /* Calculate trimmed range */ + start = block->range.start; + if ( start < info->trim.start ) + start = info->trim.start; + end = block->range.end; + if ( end > info->trim.end ) + end = info->trim.end; + if ( end < start ) + end = start; + block->trim.start = start; + block->trim.end = end; + + DBGC2 ( info, "PCCRC %p segment %d block %d hash %s\n", + info, segment->index, block->index, + peerdist_info_hash_ntoa ( info, block->hash ) ); + DBGC2 ( info, "PCCRC %p segment %d block %d range [%08zx,%08zx) covers " + "[%08zx,%08zx)\n", info, segment->index, block->index, + block->range.start, block->range.end, block->trim.start, + block->trim.end ); + return 0; +} diff --git a/src/net/pccrd.c b/src/net/pccrd.c new file mode 
100644 index 00000000..04b5dd86 --- /dev/null +++ b/src/net/pccrd.c @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Peer Content Caching and Retrieval: Discovery Protocol [MS-PCCRD] + * + * This protocol manages to ingeniously combine the excessive + * verbosity of XML with a paucity of actual information. For + * example: even in version 2.0 of the protocol it is still not + * possible to discover which peers hold a specific block within a + * given segment. + * + * For added bonus points, version 1.0 of the protocol is specified to + * use a case-sensitive string comparison (for SHA2 digest values) but + * nothing specifies whether the strings in question should be in + * upper or lower case. 
There are example strings given in the + * specification, but the author skilfully manages to leave the issue + * unresolved by using the somewhat implausible digest value of + * "0200000000000000000000000000000000000000000000000000000000000000". + * + * Just in case you were thinking that the silver lining of the choice + * to use an XML-based protocol would be the ability to generate and + * process messages with standard tools, version 2.0 of the protocol + * places most of the critical information inside a Base64-encoded + * custom binary data structure. Within an XML element, naturally. + * + * I hereby announce this specification to be the 2015 winner of the + * prestigious "UEFI HII API" award for incompetent design. + */ + +/** Discovery request format */ +#define PEERDIST_DISCOVERY_REQUEST \ + "" \ + "" \ + "" \ + "" \ + "urn:schemas-xmlsoap-org:ws:2005:04:discovery" \ + "" \ + "" \ + "http://schemas.xmlsoap.org/ws/2005/04/discovery/Probe" \ + "" \ + "" \ + "urn:uuid:%s" \ + "" \ + "" \ + "" \ + "" \ + "" \ + "PeerDist:PeerDistData" \ + "" \ + "" \ + "%s" \ + "" \ + "" \ + "" \ + "" + +/** + * Construct discovery request + * + * @v uuid Message UUID string + * @v id Segment identifier string + * @ret request Discovery request, or NULL on failure + * + * The request is dynamically allocated; the caller must eventually + * free() the request. 
+ */ +char * peerdist_discovery_request ( const char *uuid, const char *id ) { + char *request; + int len; + + /* Construct request */ + len = asprintf ( &request, PEERDIST_DISCOVERY_REQUEST, uuid, id ); + if ( len < 0 ) + return NULL; + + return request; +} + +/** + * Locate discovery reply tag + * + * @v data Reply data (not NUL-terminated) + * @v len Length of reply data + * @v tag XML tag + * @ret found Found tag (or NULL if not found) + */ +static char * peerdist_discovery_reply_tag ( char *data, size_t len, + const char *tag ) { + size_t tag_len = strlen ( tag ); + + /* Search, allowing for the fact that the reply data is not + * cleanly NUL-terminated and may contain embedded NULs due to + * earlier parsing. + */ + for ( ; len >= tag_len ; data++, len-- ) { + if ( strncmp ( data, tag, tag_len ) == 0 ) + return data; + } + return NULL; +} + +/** + * Locate discovery reply values + * + * @v data Reply data (not NUL-terminated, will be modified) + * @v len Length of reply data + * @v name XML tag name + * @ret values Tag values (or NULL if not found) + * + * The reply data is modified by adding NULs and moving characters as + * needed to produce a NUL-separated list of values, terminated with a + * zero-length string. + * + * This is not supposed to be a full XML parser; it's supposed to + * include just enough functionality to allow PeerDist discovery to + * work with existing implementations. + */ +static char * peerdist_discovery_reply_values ( char *data, size_t len, + const char *name ) { + char buf[ 2 /* "" */ + 1 /* NUL */ ]; + char *open; + char *close; + char *start; + char *end; + char *in; + char *out; + char c; + + /* Locate opening tag */ + snprintf ( buf, sizeof ( buf ), "<%s>", name ); + open = peerdist_discovery_reply_tag ( data, len, buf ); + if ( ! 
open ) + return NULL; + start = ( open + strlen ( buf ) ); + len -= ( start - data ); + data = start; + + /* Locate closing tag */ + snprintf ( buf, sizeof ( buf ), "", name ); + close = peerdist_discovery_reply_tag ( data, len, buf ); + if ( ! close ) + return NULL; + assert ( close >= open ); + end = close; + + /* Strip initial whitespace, convert other whitespace + * sequences to single NULs, add terminating pair of NULs. + * This will probably overwrite part of the closing tag. + */ + for ( in = start, out = start ; in < end ; in++ ) { + c = *in; + if ( isspace ( c ) ) { + if ( ( out > start ) && ( out[-1] != '\0' ) ) + *(out++) = '\0'; + } else { + *(out++) = c; + } + } + *(out++) = '\0'; + *(out++) = '\0'; + assert ( out < ( close + strlen ( buf ) ) ); + + return start; +} + +/** + * Parse discovery reply + * + * @v data Reply data (not NUL-terminated, will be modified) + * @v len Length of reply data + * @v reply Discovery reply to fill in + * @ret rc Return status code + * + * The discovery reply includes pointers to strings within the + * modified reply data. + */ +int peerdist_discovery_reply ( char *data, size_t len, + struct peerdist_discovery_reply *reply ) { + static const struct peerdist_discovery_block_count zcount = { + .hex = "00000000", + }; + struct peerdist_discovery_block_count *count; + unsigned int max; + unsigned int i; + char *scopes; + char *xaddrs; + char *blockcount; + char *in; + char *out; + size_t skip; + + /* Find tag */ + scopes = peerdist_discovery_reply_values ( data, len, "wsd:Scopes" ); + if ( ! scopes ) { + DBGC ( reply, "PCCRD %p missing tag\n", reply ); + return -ENOENT; + } + + /* Find tag */ + xaddrs = peerdist_discovery_reply_values ( data, len, "wsd:XAddrs" ); + if ( ! xaddrs ) { + DBGC ( reply, "PCCRD %p missing tag\n", reply ); + return -ENOENT; + } + + /* Find tag */ + blockcount = peerdist_discovery_reply_values ( data, len, + "PeerDist:BlockCount" ); + if ( ! 
blockcount ) { + DBGC ( reply, "PCCRD %p missing tag\n", + reply ); + return -ENOENT; + } + + /* Determine maximum number of segments (according to number + * of entries in the block count list). + */ + max = ( strlen ( blockcount ) / sizeof ( *count ) ); + count = container_of ( blockcount, + struct peerdist_discovery_block_count, hex[0] ); + + /* Eliminate any segments with a zero block count */ + for ( i = 0, in = scopes, out = scopes ; *in ; i++, in += skip ) { + + /* Fail if we have overrun the maximum number of segments */ + if ( i >= max ) { + DBGC ( reply, "PCCRD %p too many segment IDs\n", + reply ); + return -EPROTO; + } + + /* Delete segment if block count is zero */ + skip = ( strlen ( in ) + 1 /* NUL */ ); + if ( memcmp ( count[i].hex, zcount.hex, + sizeof ( zcount.hex ) ) == 0 ) + continue; + strcpy ( out, in ); + out += skip; + } + out[0] = '\0'; /* Ensure list is terminated with a zero-length string */ + + /* Fill in discovery reply */ + reply->ids = scopes; + reply->locations = xaddrs; + + return 0; +} diff --git a/src/net/peerblk.c b/src/net/peerblk.c new file mode 100644 index 00000000..f8994f42 --- /dev/null +++ b/src/net/peerblk.c @@ -0,0 +1,1507 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol block downloads + * + */ + +/** PeerDist decryption chunksize + * + * This is a policy decision. + */ +#define PEERBLK_DECRYPT_CHUNKSIZE 2048 + +/** PeerDist maximum number of concurrent raw block downloads + * + * Raw block downloads are expensive if the origin server uses HTTPS, + * since each concurrent download will require local TLS resources + * (including potentially large received encrypted data buffers). + * + * Raw block downloads may also be prohibitively slow to initiate when + * the origin server is using HTTPS and client certificates. Origin + * servers for PeerDist downloads are likely to be running IIS, which + * has a bug that breaks session resumption and requires each + * connection to go through the full client certificate verification. + * + * Limit the total number of concurrent raw block downloads to + * ameliorate these problems. + * + * This is a policy decision. + */ +#define PEERBLK_RAW_MAX 2 + +/** PeerDist raw block download attempt initial progress timeout + * + * This is a policy decision. + */ +#define PEERBLK_RAW_OPEN_TIMEOUT ( 10 * TICKS_PER_SEC ) + +/** PeerDist raw block download attempt ongoing progress timeout + * + * This is a policy decision. + */ +#define PEERBLK_RAW_RX_TIMEOUT ( 15 * TICKS_PER_SEC ) + +/** PeerDist retrieval protocol block download attempt initial progress timeout + * + * This is a policy decision. 
+ */ +#define PEERBLK_RETRIEVAL_OPEN_TIMEOUT ( 3 * TICKS_PER_SEC ) + +/** PeerDist retrieval protocol block download attempt ongoing progress timeout + * + * This is a policy decision. + */ +#define PEERBLK_RETRIEVAL_RX_TIMEOUT ( 5 * TICKS_PER_SEC ) + +/** PeerDist maximum number of full download attempt cycles + * + * This is the maximum number of times that we will try a full cycle + * of download attempts (i.e. a retrieval protocol download attempt + * from each discovered peer plus a raw download attempt from the + * origin server). + * + * This is a policy decision. + */ +#define PEERBLK_MAX_ATTEMPT_CYCLES 4 + +/** PeerDist block download profiler */ +static struct profiler peerblk_download_profiler __profiler = + { .name = "peerblk.download" }; + +/** PeerDist block download attempt success profiler */ +static struct profiler peerblk_attempt_success_profiler __profiler = + { .name = "peerblk.attempt.success" }; + +/** PeerDist block download attempt failure profiler */ +static struct profiler peerblk_attempt_failure_profiler __profiler = + { .name = "peerblk.attempt.failure" }; + +/** PeerDist block download attempt timeout profiler */ +static struct profiler peerblk_attempt_timeout_profiler __profiler = + { .name = "peerblk.attempt.timeout" }; + +/** PeerDist block download discovery success profiler */ +static struct profiler peerblk_discovery_success_profiler __profiler = + { .name = "peerblk.discovery.success" }; + +/** PeerDist block download discovery timeout profiler */ +static struct profiler peerblk_discovery_timeout_profiler __profiler = + { .name = "peerblk.discovery.timeout" }; + +static void peerblk_dequeue ( struct peerdist_block *peerblk ); + +/** + * Get profiling timestamp + * + * @ret timestamp Timestamp + */ +static inline __attribute__ (( always_inline )) unsigned long +peerblk_timestamp ( void ) { + + if ( PROFILING ) { + return currticks(); + } else { + return 0; + } +} + +/** + * Free PeerDist block download + * + * @v refcnt Reference 
count + */ +static void peerblk_free ( struct refcnt *refcnt ) { + struct peerdist_block *peerblk = + container_of ( refcnt, struct peerdist_block, refcnt ); + + uri_put ( peerblk->uri ); + free ( peerblk->cipherctx ); + free ( peerblk ); +} + +/** + * Reset PeerDist block download attempt + * + * @v peerblk PeerDist block download + * @v rc Reason for reset + */ +static void peerblk_reset ( struct peerdist_block *peerblk, int rc ) { + + /* Stop decryption process */ + process_del ( &peerblk->process ); + + /* Stop timer */ + stop_timer ( &peerblk->timer ); + + /* Abort any current download attempt */ + intf_restart ( &peerblk->raw, rc ); + intf_restart ( &peerblk->retrieval, rc ); + + /* Remove from download queue, if applicable */ + if ( peerblk->queue ) + peerblk_dequeue ( peerblk ); + + /* Empty received data buffer */ + xferbuf_free ( &peerblk->buffer ); + peerblk->pos = 0; + + /* Reset digest and free cipher context */ + digest_init ( peerblk->digest, peerblk->digestctx ); + free ( peerblk->cipherctx ); + peerblk->cipherctx = NULL; + peerblk->cipher = NULL; + + /* Reset trim thresholds */ + peerblk->start = ( peerblk->trim.start - peerblk->range.start ); + peerblk->end = ( peerblk->trim.end - peerblk->range.start ); + assert ( peerblk->start <= peerblk->end ); +} + +/** + * Close PeerDist block download + * + * @v peerblk PeerDist block download + * @v rc Reason for close + */ +static void peerblk_close ( struct peerdist_block *peerblk, int rc ) { + unsigned long now = peerblk_timestamp(); + + /* Profile overall block download */ + profile_custom ( &peerblk_download_profiler, + ( now - peerblk->started ) ); + + /* Reset download attempt */ + peerblk_reset ( peerblk, rc ); + + /* Close discovery */ + peerdisc_close ( &peerblk->discovery ); + + /* Shut down all interfaces */ + intf_shutdown ( &peerblk->retrieval, rc ); + intf_shutdown ( &peerblk->raw, rc ); + intf_shutdown ( &peerblk->xfer, rc ); +} + +/** + * Calculate offset within overall download + * + * @v 
peerblk PeerDist block download + * @v pos Position within incoming data stream + * @ret offset Offset within overall download + */ +static inline __attribute__ (( always_inline )) size_t +peerblk_offset ( struct peerdist_block *peerblk, size_t pos ) { + + return ( ( pos - peerblk->start ) + peerblk->offset ); +} + +/** + * Deliver download attempt data block + * + * @v peerblk PeerDist block download + * @v iobuf I/O buffer + * @v meta Original data transfer metadata + * @v pos Position within incoming data stream + * @ret rc Return status code + */ +static int peerblk_deliver ( struct peerdist_block *peerblk, + struct io_buffer *iobuf, + struct xfer_metadata *meta, size_t pos ) { + struct xfer_metadata xfer_meta; + size_t len = iob_len ( iobuf ); + size_t start = pos; + size_t end = ( pos + len ); + int rc; + + /* Discard zero-length packets and packets which lie entirely + * outside the trimmed range. + */ + if ( ( start >= peerblk->end ) || ( end <= peerblk->start ) || + ( len == 0 ) ) { + free_iob ( iobuf ); + return 0; + } + + /* Truncate data to within trimmed range */ + if ( start < peerblk->start ) { + iob_pull ( iobuf, ( peerblk->start - start ) ); + start = peerblk->start; + } + if ( end > peerblk->end ) { + iob_unput ( iobuf, ( end - peerblk->end ) ); + end = peerblk->end; + } + + /* Construct metadata */ + memcpy ( &xfer_meta, meta, sizeof ( xfer_meta ) ); + xfer_meta.flags |= XFER_FL_ABS_OFFSET; + xfer_meta.offset = peerblk_offset ( peerblk, start ); + + /* Deliver data */ + if ( ( rc = xfer_deliver ( &peerblk->xfer, iob_disown ( iobuf ), + &xfer_meta ) ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d could not deliver data: %s\n", + peerblk, peerblk->segment, peerblk->block, + strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Finish PeerDist block download attempt + * + * @v peerblk PeerDist block download + * @v rc Reason for close + */ +static void peerblk_done ( struct peerdist_block *peerblk, int rc ) { + struct digest_algorithm 
*digest = peerblk->digest; + struct peerdisc_segment *segment = peerblk->discovery.segment; + struct peerdisc_peer *head; + struct peerdisc_peer *peer; + uint8_t hash[digest->digestsize]; + unsigned long now = peerblk_timestamp(); + + /* Check for errors on completion */ + if ( rc != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d attempt failed: %s\n", + peerblk, peerblk->segment, peerblk->block, + strerror ( rc ) ); + goto err; + } + + /* Check digest */ + digest_final ( digest, peerblk->digestctx, hash ); + if ( memcmp ( hash, peerblk->hash, peerblk->digestsize ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d digest mismatch:\n", + peerblk, peerblk->segment, peerblk->block ); + DBGC_HDA ( peerblk, 0, hash, peerblk->digestsize ); + DBGC_HDA ( peerblk, 0, peerblk->hash, peerblk->digestsize ); + rc = -EIO; + goto err; + } + + /* Profile successful attempt */ + profile_custom ( &peerblk_attempt_success_profiler, + ( now - peerblk->attempted ) ); + + /* Report peer statistics */ + head = list_entry ( &segment->peers, struct peerdisc_peer, list ); + peer = ( ( peerblk->peer == head ) ? 
NULL : peerblk->peer ); + peerdisc_stat ( &peerblk->xfer, peer, &segment->peers ); + + /* Close download */ + peerblk_close ( peerblk, 0 ); + return; + + err: + /* Record failure reason and schedule a retry attempt */ + profile_custom ( &peerblk_attempt_failure_profiler, + ( now - peerblk->attempted ) ); + peerblk_reset ( peerblk, rc ); + peerblk->rc = rc; + start_timer_nodelay ( &peerblk->timer ); +} + +/****************************************************************************** + * + * Raw block download attempts (using an HTTP range request) + * + ****************************************************************************** + */ + +/** + * Open PeerDist raw block download attempt + * + * @v peerblk PeerDist block download + * @ret rc Return status code + */ +static int peerblk_raw_open ( struct peerdist_block *peerblk ) { + struct http_request_range range; + int rc; + + DBGC2 ( peerblk, "PEERBLK %p %d.%d attempting raw range request\n", + peerblk, peerblk->segment, peerblk->block ); + + /* Construct HTTP range */ + memset ( &range, 0, sizeof ( range ) ); + range.start = peerblk->range.start; + range.len = ( peerblk->range.end - peerblk->range.start ); + + /* Initiate range request to retrieve block */ + if ( ( rc = http_open ( &peerblk->raw, &http_get, peerblk->uri, + &range, NULL ) ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d could not create range " + "request: %s\n", peerblk, peerblk->segment, + peerblk->block, strerror ( rc ) ); + return rc; + } + + /* Annul HTTP connection (for testing) if applicable. Do not + * report as an immediate error, in order to test our ability + * to recover from a totally unresponsive HTTP server. 
+ */ + if ( inject_fault ( PEERBLK_ANNUL_RATE ) ) + intf_restart ( &peerblk->raw, 0 ); + + /* Start download attempt timer */ + peerblk->rc = -ETIMEDOUT; + start_timer_fixed ( &peerblk->timer, PEERBLK_RAW_OPEN_TIMEOUT ); + + return 0; +} + +/** + * Receive PeerDist raw data + * + * @v peerblk PeerDist block download + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int peerblk_raw_rx ( struct peerdist_block *peerblk, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + size_t len = iob_len ( iobuf ); + size_t pos = peerblk->pos; + size_t mid = ( ( peerblk->range.end - peerblk->range.start ) / 2 ); + int rc; + + /* Corrupt received data (for testing) if applicable */ + inject_corruption ( PEERBLK_CORRUPT_RATE, iobuf->data, len ); + + /* Fail if data is delivered out of order, since the streaming + * digest requires strict ordering. + */ + if ( ( rc = xfer_check_order ( meta, &peerblk->pos, len ) ) != 0 ) + goto err; + + /* Add data to digest */ + digest_update ( peerblk->digest, peerblk->digestctx, iobuf->data, len ); + + /* Deliver data */ + if ( ( rc = peerblk_deliver ( peerblk, iob_disown ( iobuf ), meta, + pos ) ) != 0 ) + goto err; + + /* Extend download attempt timer */ + start_timer_fixed ( &peerblk->timer, PEERBLK_RAW_RX_TIMEOUT ); + + /* Stall download attempt (for testing) if applicable */ + if ( ( pos < mid ) && ( ( pos + len ) >= mid ) && + ( ( rc = inject_fault ( PEERBLK_STALL_RATE ) ) != 0 ) ) { + intf_restart ( &peerblk->raw, rc ); + } + + return 0; + + err: + free_iob ( iobuf ); + peerblk_done ( peerblk, rc ); + return rc; +} + +/** + * Close PeerDist raw block download attempt + * + * @v peerblk PeerDist block download + * @v rc Reason for close + */ +static void peerblk_raw_close ( struct peerdist_block *peerblk, int rc ) { + + /* Restart interface */ + intf_restart ( &peerblk->raw, rc ); + + /* Fail immediately if we have an error */ + if ( rc != 0 ) + goto done; + + /* Abort download 
attempt (for testing) if applicable */ + if ( ( rc = inject_fault ( PEERBLK_ABORT_RATE ) ) != 0 ) + goto done; + + done: + /* Complete download attempt */ + peerblk_done ( peerblk, rc ); +} + +/****************************************************************************** + * + * Block download queue + * + ****************************************************************************** + */ + +/** + * PeerDist block download queue process + * + * @v queue Block download queue + */ +static void peerblk_step ( struct peerdist_block_queue *queue ) { + struct peerdist_block *peerblk; + int rc; + + /* Do nothing yet if we have too many open block downloads */ + if ( queue->count >= queue->max ) + return; + + /* Do nothing unless there are queued block downloads */ + peerblk = list_first_entry ( &queue->list, struct peerdist_block, + queued ); + if ( ! peerblk ) + return; + + /* Reschedule queue process */ + process_add ( &queue->process ); + + /* Remove block from queue */ + list_del ( &peerblk->queued ); + INIT_LIST_HEAD ( &peerblk->queued ); + + /* Attempt download */ + if ( ( rc = queue->open ( peerblk ) ) != 0 ) { + peerblk_close ( peerblk, rc ); + return; + } + + /* Increment open block download count */ + queue->count++; +} + +/** + * Add block to download queue + * + * @v peerblk PeerDist block download + * @v queue Block download queue + */ +static void peerblk_enqueue ( struct peerdist_block *peerblk, + struct peerdist_block_queue *queue ) { + + /* Sanity checks */ + assert ( peerblk->queue == NULL ); + assert ( list_empty ( &peerblk->queued ) ); + + /* Add block to queue */ + peerblk->queue = queue; + list_add_tail ( &peerblk->queued, &queue->list ); + + /* Schedule queue process */ + process_add ( &queue->process ); +} + +/** + * Remove block from download queue + * + * @v peerblk PeerDist block download + */ +static void peerblk_dequeue ( struct peerdist_block *peerblk ) { + struct peerdist_block_queue *queue = peerblk->queue; + + /* Sanity checks */ + assert 
( queue != NULL ); + + /* Remove block from queue */ + peerblk->queue = NULL; + if ( list_empty ( &peerblk->queued ) ) { + + /* Open download: decrement count and reschedule queue */ + queue->count--; + process_add ( &queue->process ); + + } else { + + /* Queued download: remove from queue */ + list_del ( &peerblk->queued ); + INIT_LIST_HEAD ( &peerblk->queued ); + } +} + +/** PeerDist block download queue process descriptor */ +static struct process_descriptor peerblk_queue_desc = + PROC_DESC_ONCE ( struct peerdist_block_queue, process, peerblk_step ); + +/** Raw block download queue */ +static struct peerdist_block_queue peerblk_raw_queue = { + .process = PROC_INIT ( peerblk_raw_queue.process, &peerblk_queue_desc ), + .list = LIST_HEAD_INIT ( peerblk_raw_queue.list ), + .max = PEERBLK_RAW_MAX, + .open = peerblk_raw_open, +}; + +/****************************************************************************** + * + * Retrieval protocol block download attempts (using HTTP POST) + * + ****************************************************************************** + */ + +/** + * Construct PeerDist retrieval protocol URI + * + * @v location Peer location + * @ret uri Retrieval URI, or NULL on error + */ +static struct uri * peerblk_retrieval_uri ( const char *location ) { + char uri_string[ 7 /* "http://" */ + strlen ( location ) + + sizeof ( PEERDIST_MAGIC_PATH /* includes NUL */ ) ]; + + /* Construct URI string */ + snprintf ( uri_string, sizeof ( uri_string ), + ( "http://%s" PEERDIST_MAGIC_PATH ), location ); + + /* Parse URI string */ + return parse_uri ( uri_string ); +} + +/** + * Open PeerDist retrieval protocol block download attempt + * + * @v peerblk PeerDist block download + * @v location Peer location + * @ret rc Return status code + */ +static int peerblk_retrieval_open ( struct peerdist_block *peerblk, + const char *location ) { + size_t digestsize = peerblk->digestsize; + peerdist_msg_getblks_t ( digestsize, 1, 0 ) req; + peerblk_msg_blk_t ( digestsize, 
0, 0, 0 ) *rsp; + struct http_request_content content; + struct uri *uri; + int rc; + + DBGC2 ( peerblk, "PEERBLK %p %d.%d attempting retrieval from %s\n", + peerblk, peerblk->segment, peerblk->block, location ); + + /* Construct block fetch request */ + memset ( &req, 0, sizeof ( req ) ); + req.getblks.hdr.version.raw = htonl ( PEERDIST_MSG_GETBLKS_VERSION ); + req.getblks.hdr.type = htonl ( PEERDIST_MSG_GETBLKS_TYPE ); + req.getblks.hdr.len = htonl ( sizeof ( req ) ); + req.getblks.hdr.algorithm = htonl ( PEERDIST_MSG_AES_128_CBC ); + req.segment.segment.digestsize = htonl ( digestsize ); + memcpy ( req.segment.id, peerblk->id, digestsize ); + req.ranges.ranges.count = htonl ( 1 ); + req.ranges.range[0].first = htonl ( peerblk->block ); + req.ranges.range[0].count = htonl ( 1 ); + + /* Construct POST request content */ + memset ( &content, 0, sizeof ( content ) ); + content.data = &req; + content.len = sizeof ( req ); + + /* Construct URI */ + if ( ( uri = peerblk_retrieval_uri ( location ) ) == NULL ) { + rc = -ENOMEM; + goto err_uri; + } + + /* Update trim thresholds */ + peerblk->start += offsetof ( typeof ( *rsp ), msg.vrf ); + peerblk->end += offsetof ( typeof ( *rsp ), msg.vrf ); + + /* Initiate HTTP POST to retrieve block */ + if ( ( rc = http_open ( &peerblk->retrieval, &http_post, uri, + NULL, &content ) ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d could not create retrieval " + "request: %s\n", peerblk, peerblk->segment, + peerblk->block, strerror ( rc ) ); + goto err_open; + } + + /* Annul HTTP connection (for testing) if applicable. Do not + * report as an immediate error, in order to test our ability + * to recover from a totally unresponsive HTTP server. 
+ */ + if ( inject_fault ( PEERBLK_ANNUL_RATE ) ) + intf_restart ( &peerblk->retrieval, 0 ); + + /* Start download attempt timer */ + peerblk->rc = -ETIMEDOUT; + start_timer_fixed ( &peerblk->timer, PEERBLK_RETRIEVAL_OPEN_TIMEOUT ); + + err_open: + uri_put ( uri ); + err_uri: + return rc; +} + +/** + * Receive PeerDist retrieval protocol data + * + * @v peerblk PeerDist block download + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int peerblk_retrieval_rx ( struct peerdist_block *peerblk, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + size_t len = iob_len ( iobuf ); + size_t start; + size_t end; + size_t before; + size_t after; + size_t cut; + int rc; + + /* Some genius at Microsoft thought it would be a great idea + * to place the AES-CBC initialisation vector *after* the + * encrypted data, thereby making it logically impossible to + * decrypt each packet as it arrives. + * + * To work around this mindless stupidity, we deliver the + * ciphertext as-is and later use xfer_buffer() to obtain + * access to the underlying data transfer buffer in order to + * perform the decryption. + * + * There will be some data both before and after the bytes + * corresponding to the trimmed plaintext: a MSG_BLK + * header/footer, some block padding for the AES-CBC cipher, + * and a possibly large quantity of unwanted ciphertext which + * is excluded from the trimmed content range. We store this + * data in a local data transfer buffer. If the amount of + * data to be stored is too large, we will fail allocation and + * so eventually fall back to using a range request (which + * does not require this kind of temporary storage + * allocation). 
+ */ + + /* Corrupt received data (for testing) if applicable */ + inject_corruption ( PEERBLK_CORRUPT_RATE, iobuf->data, len ); + + /* Calculate start and end positions of this buffer */ + start = peerblk->pos; + if ( meta->flags & XFER_FL_ABS_OFFSET ) + start = 0; + start += meta->offset; + end = ( start + len ); + + /* Buffer any data before the trimmed content */ + if ( ( start < peerblk->start ) && ( len > 0 ) ) { + + /* Calculate length of data before the trimmed content */ + before = ( peerblk->start - start ); + if ( before > len ) + before = len; + + /* Buffer data before the trimmed content */ + if ( ( rc = xferbuf_write ( &peerblk->buffer, start, + iobuf->data, before ) ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d could not buffer " + "data: %s\n", peerblk, peerblk->segment, + peerblk->block, strerror ( rc ) ); + goto err; + } + } + + /* Buffer any data after the trimmed content */ + if ( ( end > peerblk->end ) && ( len > 0 ) ) { + + /* Calculate length of data after the trimmed content */ + after = ( end - peerblk->end ); + if ( after > len ) + after = len; + + /* Buffer data after the trimmed content */ + cut = ( peerblk->end - peerblk->start ); + if ( ( rc = xferbuf_write ( &peerblk->buffer, + ( end - after - cut ), + ( iobuf->data + len - after ), + after ) ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d could not buffer " + "data: %s\n", peerblk, peerblk->segment, + peerblk->block, strerror ( rc ) ); + goto err; + } + } + + /* Deliver any remaining data */ + if ( ( rc = peerblk_deliver ( peerblk, iob_disown ( iobuf ), meta, + start ) ) != 0 ) + goto err; + + /* Update position */ + peerblk->pos = end; + + /* Extend download attempt timer */ + start_timer_fixed ( &peerblk->timer, PEERBLK_RETRIEVAL_RX_TIMEOUT ); + + /* Stall download attempt (for testing) if applicable */ + if ( ( start < peerblk->end ) && ( end >= peerblk->end ) && + ( ( rc = inject_fault ( PEERBLK_STALL_RATE ) ) != 0 ) ) { + intf_restart ( &peerblk->retrieval, rc ); + } + + return 
0; + + err: + free_iob ( iobuf ); + peerblk_done ( peerblk, rc ); + return rc; +} + +/** + * Parse retrieval protocol message header + * + * @v peerblk PeerDist block download + * @ret rc Return status code + */ +static int peerblk_parse_header ( struct peerdist_block *peerblk ) { + struct { + struct peerdist_msg_transport_header hdr; + struct peerdist_msg_header msg; + } __attribute__ (( packed )) *msg = peerblk->buffer.data; + struct cipher_algorithm *cipher; + size_t len = peerblk->buffer.len; + size_t keylen = 0; + int rc; + + /* Check message length */ + if ( len < sizeof ( *msg ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d message too short for header " + "(%zd bytes)\n", peerblk, peerblk->segment, + peerblk->block, len ); + return -ERANGE; + } + + /* Check message type */ + if ( msg->msg.type != htonl ( PEERDIST_MSG_BLK_TYPE ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d unexpected message type " + "%#08x\n", peerblk, peerblk->segment, peerblk->block, + ntohl ( msg->msg.type ) ); + return -EPROTO; + } + + /* Determine cipher algorithm and key length */ + cipher = &aes_cbc_algorithm; + switch ( msg->msg.algorithm ) { + case htonl ( PEERDIST_MSG_PLAINTEXT ) : + cipher = NULL; + break; + case htonl ( PEERDIST_MSG_AES_128_CBC ) : + keylen = ( 128 / 8 ); + break; + case htonl ( PEERDIST_MSG_AES_192_CBC ) : + keylen = ( 192 / 8 ); + break; + case htonl ( PEERDIST_MSG_AES_256_CBC ) : + keylen = ( 256 / 8 ); + break; + default: + DBGC ( peerblk, "PEERBLK %p %d.%d unrecognised algorithm " + "%#08x\n", peerblk, peerblk->segment, peerblk->block, + ntohl ( msg->msg.algorithm ) ); + return -ENOTSUP; + } + DBGC2 ( peerblk, "PEERBLK %p %d.%d using %s with %zd-bit key\n", + peerblk, peerblk->segment, peerblk->block, + ( cipher ? 
cipher->name : "plaintext" ), ( 8 * keylen ) ); + + /* Sanity check key length against maximum secret length */ + if ( keylen > peerblk->digestsize ) { + DBGC ( peerblk, "PEERBLK %p %d.%d %zd-byte secret too short " + "for %zd-bit key\n", peerblk, peerblk->segment, + peerblk->block, peerblk->digestsize, ( 8 * keylen ) ); + return -EPROTO; + } + + /* Allocate cipher context, if applicable. Freeing the cipher + * context (on error or otherwise) is handled by peerblk_reset(). + */ + peerblk->cipher = cipher; + assert ( peerblk->cipherctx == NULL ); + if ( cipher ) { + peerblk->cipherctx = malloc ( cipher->ctxsize ); + if ( ! peerblk->cipherctx ) + return -ENOMEM; + } + + /* Initialise cipher, if applicable */ + if ( cipher && + ( rc = cipher_setkey ( cipher, peerblk->cipherctx, peerblk->secret, + keylen ) ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d could not set key: %s\n", + peerblk, peerblk->segment, peerblk->block, + strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Parse retrieval protocol message segment and block details + * + * @v peerblk PeerDist block download + * @v buf_len Length of buffered data to fill in + * @ret rc Return status code + */ +static int peerblk_parse_block ( struct peerdist_block *peerblk, + size_t *buf_len ) { + size_t digestsize = peerblk->digestsize; + peerblk_msg_blk_t ( digestsize, 0, 0, 0 ) *msg = peerblk->buffer.data; + size_t len = peerblk->buffer.len; + size_t data_len; + size_t total; + + /* Check message length */ + if ( len < offsetof ( typeof ( *msg ), msg.block.data ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d message too short for " + "zero-length data (%zd bytes)\n", peerblk, + peerblk->segment, peerblk->block, len ); + return -ERANGE; + } + + /* Check digest size */ + if ( ntohl ( msg->msg.segment.segment.digestsize ) != digestsize ) { + DBGC ( peerblk, "PEERBLK %p %d.%d incorrect digest size %d\n", + peerblk, peerblk->segment, peerblk->block, + ntohl ( msg->msg.segment.segment.digestsize ) ); + return 
-EPROTO; + } + + /* Check segment ID */ + if ( memcmp ( msg->msg.segment.id, peerblk->id, digestsize ) != 0 ) { + DBGC ( peerblk, "PEERBLK %p %d.%d segment ID mismatch\n", + peerblk, peerblk->segment, peerblk->block ); + return -EPROTO; + } + + /* Check block ID */ + if ( ntohl ( msg->msg.index ) != peerblk->block ) { + DBGC ( peerblk, "PEERBLK %p %d.%d block ID mismatch (got %d)\n", + peerblk, peerblk->segment, peerblk->block, + ntohl ( msg->msg.index ) ); + return -EPROTO; + } + + /* Check for missing blocks */ + data_len = be32_to_cpu ( msg->msg.block.block.len ); + if ( ! data_len ) { + DBGC ( peerblk, "PEERBLK %p %d.%d block not found\n", + peerblk, peerblk->segment, peerblk->block ); + return -ENOENT; + } + + /* Check for underlength blocks */ + if ( data_len < ( peerblk->range.end - peerblk->range.start ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d underlength block (%zd " + "bytes)\n", peerblk, peerblk->segment, peerblk->block, + data_len ); + return -ERANGE; + } + + /* Calculate buffered data length (i.e. excluding data which + * was delivered to the final data transfer buffer). 
+ */ + *buf_len = ( data_len - ( peerblk->end - peerblk->start ) ); + + /* Describe data before the trimmed content */ + peerblk->decrypt[PEERBLK_BEFORE].xferbuf = &peerblk->buffer; + peerblk->decrypt[PEERBLK_BEFORE].offset = + offsetof ( typeof ( *msg ), msg.block.data ); + peerblk->decrypt[PEERBLK_BEFORE].len = + ( peerblk->start - + offsetof ( typeof ( *msg ), msg.block.data ) ); + total = peerblk->decrypt[PEERBLK_BEFORE].len; + + /* Describe data within the trimmed content */ + peerblk->decrypt[PEERBLK_DURING].offset = + peerblk_offset ( peerblk, peerblk->start ); + peerblk->decrypt[PEERBLK_DURING].len = + ( peerblk->end - peerblk->start ); + total += peerblk->decrypt[PEERBLK_DURING].len; + + /* Describe data after the trimmed content */ + peerblk->decrypt[PEERBLK_AFTER].xferbuf = &peerblk->buffer; + peerblk->decrypt[PEERBLK_AFTER].offset = peerblk->start; + peerblk->decrypt[PEERBLK_AFTER].len = + ( offsetof ( typeof ( *msg ), msg.block.data ) + + *buf_len - peerblk->start ); + total += peerblk->decrypt[PEERBLK_AFTER].len; + + /* Sanity check */ + assert ( total == be32_to_cpu ( msg->msg.block.block.len ) ); + + /* Initialise cipher and digest lengths */ + peerblk->cipher_remaining = total; + peerblk->digest_remaining = + ( peerblk->range.end - peerblk->range.start ); + assert ( peerblk->cipher_remaining >= peerblk->digest_remaining ); + + return 0; +} + +/** + * Parse retrieval protocol message useless details + * + * @v peerblk PeerDist block download + * @v buf_len Length of buffered data + * @v vrf_len Length of uselessness to fill in + * @ret rc Return status code + */ +static int peerblk_parse_useless ( struct peerdist_block *peerblk, + size_t buf_len, size_t *vrf_len ) { + size_t digestsize = peerblk->digestsize; + peerblk_msg_blk_t ( digestsize, buf_len, 0, 0 ) *msg = + peerblk->buffer.data; + size_t len = peerblk->buffer.len; + + /* Check message length */ + if ( len < offsetof ( typeof ( *msg ), msg.vrf.data ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d 
message too short for " + "zero-length uselessness (%zd bytes)\n", peerblk, + peerblk->segment, peerblk->block, len ); + return -ERANGE; + } + + /* Extract length of uselessness */ + *vrf_len = be32_to_cpu ( msg->msg.vrf.vrf.len ); + + return 0; +} + +/** + * Parse retrieval protocol message initialisation vector details + * + * @v peerblk PeerDist block download + * @v buf_len Length of buffered data + * @v vrf_len Length of uselessness + * @ret rc Return status code + */ +static int peerblk_parse_iv ( struct peerdist_block *peerblk, size_t buf_len, + size_t vrf_len ) { + size_t digestsize = peerblk->digestsize; + size_t blksize = peerblk->cipher->blocksize; + peerblk_msg_blk_t ( digestsize, buf_len, vrf_len, blksize ) *msg = + peerblk->buffer.data; + size_t len = peerblk->buffer.len; + + /* Check message length */ + if ( len < sizeof ( *msg ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d message too short for " + "initialisation vector (%zd bytes)\n", peerblk, + peerblk->segment, peerblk->block, len ); + return -ERANGE; + } + + /* Check initialisation vector size */ + if ( ntohl ( msg->msg.iv.iv.blksize ) != blksize ) { + DBGC ( peerblk, "PEERBLK %p %d.%d incorrect IV size %d\n", + peerblk, peerblk->segment, peerblk->block, + ntohl ( msg->msg.iv.iv.blksize ) ); + return -EPROTO; + } + + /* Set initialisation vector */ + cipher_setiv ( peerblk->cipher, peerblk->cipherctx, msg->msg.iv.data ); + + return 0; +} + +/** + * Read from decryption buffers + * + * @v peerblk PeerDist block download + * @v data Data buffer + * @v len Length to read + * @ret rc Return status code + */ +static int peerblk_decrypt_read ( struct peerdist_block *peerblk, + void *data, size_t len ) { + struct peerdist_block_decrypt *decrypt = peerblk->decrypt; + size_t frag_len; + int rc; + + /* Read from each decryption buffer in turn */ + for ( ; len ; decrypt++, data += frag_len, len -= frag_len ) { + + /* Calculate length to use from this buffer */ + frag_len = decrypt->len; + if ( frag_len > len ) 
+ frag_len = len; + if ( ! frag_len ) + continue; + + /* Read from this buffer */ + if ( ( rc = xferbuf_read ( decrypt->xferbuf, decrypt->offset, + data, frag_len ) ) != 0 ) + return rc; + } + + return 0; +} + +/** + * Write to decryption buffers and update offsets and lengths + * + * @v peerblk PeerDist block download + * @v data Data buffer + * @v len Length to read + * @ret rc Return status code + */ +static int peerblk_decrypt_write ( struct peerdist_block *peerblk, + const void *data, size_t len ) { + struct peerdist_block_decrypt *decrypt = peerblk->decrypt; + size_t frag_len; + int rc; + + /* Write to each decryption buffer in turn */ + for ( ; len ; decrypt++, data += frag_len, len -= frag_len ) { + + /* Calculate length to use from this buffer */ + frag_len = decrypt->len; + if ( frag_len > len ) + frag_len = len; + if ( ! frag_len ) + continue; + + /* Write to this buffer */ + if ( ( rc = xferbuf_write ( decrypt->xferbuf, decrypt->offset, + data, frag_len ) ) != 0 ) + return rc; + + /* Update offset and length */ + decrypt->offset += frag_len; + decrypt->len -= frag_len; + } + + return 0; +} + +/** + * Decrypt one chunk of PeerDist retrieval protocol data + * + * @v peerblk PeerDist block download + */ +static void peerblk_decrypt ( struct peerdist_block *peerblk ) { + struct cipher_algorithm *cipher = peerblk->cipher; + struct digest_algorithm *digest = peerblk->digest; + struct xfer_buffer *xferbuf; + size_t cipher_len; + size_t digest_len; + void *data; + int rc; + + /* Sanity check */ + assert ( ( PEERBLK_DECRYPT_CHUNKSIZE % cipher->blocksize ) == 0 ); + + /* Get the underlying data transfer buffer */ + xferbuf = xfer_buffer ( &peerblk->xfer ); + if ( ! 
xferbuf ) { + DBGC ( peerblk, "PEERBLK %p %d.%d has no underlying data " + "transfer buffer\n", peerblk, peerblk->segment, + peerblk->block ); + rc = -ENOTSUP; + goto err_xfer_buffer; + } + peerblk->decrypt[PEERBLK_DURING].xferbuf = xferbuf; + + /* Calculate cipher and digest lengths */ + cipher_len = PEERBLK_DECRYPT_CHUNKSIZE; + if ( cipher_len > peerblk->cipher_remaining ) + cipher_len = peerblk->cipher_remaining; + digest_len = cipher_len; + if ( digest_len > peerblk->digest_remaining ) + digest_len = peerblk->digest_remaining; + assert ( ( cipher_len & ( cipher->blocksize - 1 ) ) == 0 ); + + /* Allocate temporary data buffer */ + data = malloc ( cipher_len ); + if ( ! data ) { + rc = -ENOMEM; + goto err_alloc_data; + } + + /* Read ciphertext */ + if ( ( rc = peerblk_decrypt_read ( peerblk, data, cipher_len ) ) != 0 ){ + DBGC ( peerblk, "PEERBLK %p %d.%d could not read ciphertext: " + "%s\n", peerblk, peerblk->segment, peerblk->block, + strerror ( rc ) ); + goto err_read; + } + + /* Decrypt data */ + cipher_decrypt ( cipher, peerblk->cipherctx, data, data, cipher_len ); + + /* Add data to digest */ + digest_update ( digest, peerblk->digestctx, data, digest_len ); + + /* Write plaintext */ + if ( ( rc = peerblk_decrypt_write ( peerblk, data, cipher_len ) ) != 0){ + DBGC ( peerblk, "PEERBLK %p %d.%d could not write plaintext: " + "%s\n", peerblk, peerblk->segment, peerblk->block, + strerror ( rc ) ); + goto err_write; + } + + /* Consume input */ + peerblk->cipher_remaining -= cipher_len; + peerblk->digest_remaining -= digest_len; + + /* Free temporary data buffer */ + free ( data ); + + /* Continue processing until all input is consumed */ + if ( peerblk->cipher_remaining ) + return; + + /* Complete download attempt */ + peerblk_done ( peerblk, 0 ); + return; + + err_write: + err_read: + free ( data ); + err_alloc_data: + err_xfer_buffer: + peerblk_done ( peerblk, rc ); +} + +/** + * Close PeerDist retrieval protocol block download attempt + * + * @v peerblk 
PeerDist block download + * @v rc Reason for close + */ +static void peerblk_retrieval_close ( struct peerdist_block *peerblk, int rc ) { + size_t buf_len; + size_t vrf_len; + + /* Restart interface */ + intf_restart ( &peerblk->retrieval, rc ); + + /* Fail immediately if we have an error */ + if ( rc != 0 ) + goto done; + + /* Abort download attempt (for testing) if applicable */ + if ( ( rc = inject_fault ( PEERBLK_ABORT_RATE ) ) != 0 ) + goto done; + + /* Parse message header */ + if ( ( rc = peerblk_parse_header ( peerblk ) ) != 0 ) + goto done; + + /* Parse message segment and block details */ + if ( ( rc = peerblk_parse_block ( peerblk, &buf_len ) ) != 0 ) + goto done; + + /* If the block was plaintext, then there is nothing more to do */ + if ( ! peerblk->cipher ) + goto done; + + /* Parse message useless details */ + if ( ( rc = peerblk_parse_useless ( peerblk, buf_len, &vrf_len ) ) != 0) + goto done; + + /* Parse message initialisation vector details */ + if ( ( rc = peerblk_parse_iv ( peerblk, buf_len, vrf_len ) ) != 0 ) + goto done; + + /* Fail if decryption length is not aligned to the cipher block size */ + if ( peerblk->cipher_remaining & ( peerblk->cipher->blocksize - 1 ) ) { + DBGC ( peerblk, "PEERBLK %p %d.%d unaligned data length %zd\n", + peerblk, peerblk->segment, peerblk->block, + peerblk->cipher_remaining ); + rc = -EPROTO; + goto done; + } + + /* Stop the download attempt timer: there is no point in + * timing out while decrypting. 
+ */ + stop_timer ( &peerblk->timer ); + + /* Start decryption process */ + process_add ( &peerblk->process ); + return; + + done: + /* Complete download attempt */ + peerblk_done ( peerblk, rc ); +} + +/****************************************************************************** + * + * Retry policy + * + ****************************************************************************** + */ + +/** + * Handle PeerDist retry timer expiry + * + * @v timer Retry timer + * @v over Failure indicator + */ +static void peerblk_expired ( struct retry_timer *timer, int over __unused ) { + struct peerdist_block *peerblk = + container_of ( timer, struct peerdist_block, timer ); + struct peerdisc_segment *segment = peerblk->discovery.segment; + struct peerdisc_peer *head; + unsigned long now = peerblk_timestamp(); + const char *location; + int rc; + + /* Profile discovery timeout, if applicable */ + if ( ( peerblk->peer == NULL ) && ( timer->timeout != 0 ) ) { + profile_custom ( &peerblk_discovery_timeout_profiler, + ( now - peerblk->started ) ); + DBGC ( peerblk, "PEERBLK %p %d.%d discovery timed out after " + "%ld ticks\n", peerblk, peerblk->segment, + peerblk->block, timer->timeout ); + } + + /* Profile download timeout, if applicable */ + if ( ( peerblk->peer != NULL ) && ( timer->timeout != 0 ) ) { + profile_custom ( &peerblk_attempt_timeout_profiler, + ( now - peerblk->attempted ) ); + DBGC ( peerblk, "PEERBLK %p %d.%d timed out after %ld ticks\n", + peerblk, peerblk->segment, peerblk->block, + timer->timeout ); + } + + /* Abort any current download attempt */ + peerblk_reset ( peerblk, -ETIMEDOUT ); + + /* Record attempt start time */ + peerblk->attempted = now; + + /* If we have exceeded our maximum number of attempt cycles + * (each cycle comprising a retrieval protocol download from + * each peer in the list followed by a raw download from the + * origin server), then abort the overall download. 
+ */ + head = list_entry ( &segment->peers, struct peerdisc_peer, list ); + if ( ( peerblk->peer == head ) && + ( ++peerblk->cycles >= PEERBLK_MAX_ATTEMPT_CYCLES ) ) { + rc = peerblk->rc; + assert ( rc != 0 ); + goto err; + } + + /* If we have not yet made any download attempts, then move to + * the start of the peer list. + */ + if ( peerblk->peer == NULL ) + peerblk->peer = head; + + /* Attempt retrieval protocol download from next usable peer */ + list_for_each_entry_continue ( peerblk->peer, &segment->peers, list ) { + + /* Attempt retrieval protocol download from this peer */ + location = peerblk->peer->location; + if ( ( rc = peerblk_retrieval_open ( peerblk, + location ) ) != 0 ) { + /* Non-fatal: continue to try next peer */ + continue; + } + + /* Peer download started */ + return; + } + + /* Add to raw download queue */ + peerblk_enqueue ( peerblk, &peerblk_raw_queue ); + + return; + + err: + peerblk_close ( peerblk, rc ); +} + +/** + * Handle PeerDist peer discovery + * + * @v discovery PeerDist discovery client + */ +static void peerblk_discovered ( struct peerdisc_client *discovery ) { + struct peerdist_block *peerblk = + container_of ( discovery, struct peerdist_block, discovery ); + unsigned long now = peerblk_timestamp(); + + /* Do nothing unless we are still waiting for the initial + * discovery timeout. 
+ */ + if ( ( peerblk->peer != NULL ) || ( peerblk->timer.timeout == 0 ) ) + return; + + /* Schedule an immediate retry */ + start_timer_nodelay ( &peerblk->timer ); + + /* Profile discovery success */ + profile_custom ( &peerblk_discovery_success_profiler, + ( now - peerblk->started ) ); +} + +/****************************************************************************** + * + * Opener + * + ****************************************************************************** + */ + +/** PeerDist block download data transfer interface operations */ +static struct interface_operation peerblk_xfer_operations[] = { + INTF_OP ( intf_close, struct peerdist_block *, peerblk_close ), +}; + +/** PeerDist block download data transfer interface descriptor */ +static struct interface_descriptor peerblk_xfer_desc = + INTF_DESC ( struct peerdist_block, xfer, peerblk_xfer_operations ); + +/** PeerDist block download raw data interface operations */ +static struct interface_operation peerblk_raw_operations[] = { + INTF_OP ( xfer_deliver, struct peerdist_block *, peerblk_raw_rx ), + INTF_OP ( intf_close, struct peerdist_block *, peerblk_raw_close ), +}; + +/** PeerDist block download raw data interface descriptor */ +static struct interface_descriptor peerblk_raw_desc = + INTF_DESC ( struct peerdist_block, raw, peerblk_raw_operations ); + +/** PeerDist block download retrieval protocol interface operations */ +static struct interface_operation peerblk_retrieval_operations[] = { + INTF_OP ( xfer_deliver, struct peerdist_block *, peerblk_retrieval_rx ), + INTF_OP ( intf_close, struct peerdist_block *, peerblk_retrieval_close), +}; + +/** PeerDist block download retrieval protocol interface descriptor */ +static struct interface_descriptor peerblk_retrieval_desc = + INTF_DESC ( struct peerdist_block, retrieval, + peerblk_retrieval_operations ); + +/** PeerDist block download decryption process descriptor */ +static struct process_descriptor peerblk_process_desc = + PROC_DESC ( struct 
peerdist_block, process, peerblk_decrypt ); + +/** PeerDist block download discovery operations */ +static struct peerdisc_client_operations peerblk_discovery_operations = { + .discovered = peerblk_discovered, +}; + +/** + * Open PeerDist block download + * + * @v xfer Data transfer interface + * @v uri Original URI + * @v info Content information block + * @ret rc Return status code + */ +int peerblk_open ( struct interface *xfer, struct uri *uri, + struct peerdist_info_block *block ) { + const struct peerdist_info_segment *segment = block->segment; + const struct peerdist_info *info = segment->info; + struct digest_algorithm *digest = info->digest; + struct peerdist_block *peerblk; + unsigned long timeout; + size_t digestsize; + int rc; + + /* Allocate and initialise structure */ + peerblk = zalloc ( sizeof ( *peerblk ) + digest->ctxsize ); + if ( ! peerblk ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &peerblk->refcnt, peerblk_free ); + intf_init ( &peerblk->xfer, &peerblk_xfer_desc, &peerblk->refcnt ); + intf_init ( &peerblk->raw, &peerblk_raw_desc, &peerblk->refcnt ); + intf_init ( &peerblk->retrieval, &peerblk_retrieval_desc, + &peerblk->refcnt ); + peerblk->uri = uri_get ( uri ); + memcpy ( &peerblk->range, &block->range, sizeof ( peerblk->range ) ); + memcpy ( &peerblk->trim, &block->trim, sizeof ( peerblk->trim ) ); + peerblk->offset = ( block->trim.start - info->trim.start ); + peerblk->digest = info->digest; + peerblk->digestsize = digestsize = info->digestsize; + peerblk->digestctx = ( ( ( void * ) peerblk ) + sizeof ( *peerblk ) ); + peerblk->segment = segment->index; + memcpy ( peerblk->id, segment->id, sizeof ( peerblk->id ) ); + memcpy ( peerblk->secret, segment->secret, sizeof ( peerblk->secret ) ); + peerblk->block = block->index; + memcpy ( peerblk->hash, block->hash, sizeof ( peerblk->hash ) ); + xferbuf_malloc_init ( &peerblk->buffer ); + process_init_stopped ( &peerblk->process, &peerblk_process_desc, + &peerblk->refcnt ); + 
peerdisc_init ( &peerblk->discovery, &peerblk_discovery_operations ); + INIT_LIST_HEAD ( &peerblk->queued ); + timer_init ( &peerblk->timer, peerblk_expired, &peerblk->refcnt ); + DBGC2 ( peerblk, "PEERBLK %p %d.%d id %02x%02x%02x%02x%02x..." + "%02x%02x%02x [%08zx,%08zx)", peerblk, peerblk->segment, + peerblk->block, peerblk->id[0], peerblk->id[1], peerblk->id[2], + peerblk->id[3], peerblk->id[4], peerblk->id[ digestsize - 3 ], + peerblk->id[ digestsize - 2 ], peerblk->id[ digestsize - 1 ], + peerblk->range.start, peerblk->range.end ); + if ( ( peerblk->trim.start != peerblk->range.start ) || + ( peerblk->trim.end != peerblk->range.end ) ) { + DBGC2 ( peerblk, " covers [%08zx,%08zx)", + peerblk->trim.start, peerblk->trim.end ); + } + DBGC2 ( peerblk, "\n" ); + + /* Open discovery */ + if ( ( rc = peerdisc_open ( &peerblk->discovery, peerblk->id, + peerblk->digestsize ) ) != 0 ) + goto err_open_discovery; + + /* Schedule a retry attempt either immediately (if we already + * have some peers) or after the discovery timeout. + */ + timeout = ( list_empty ( &peerblk->discovery.segment->peers ) ? + ( peerdisc_timeout_secs * TICKS_PER_SEC ) : 0 ); + start_timer_fixed ( &peerblk->timer, timeout ); + + /* Record start time */ + peerblk->started = peerblk_timestamp(); + + /* Attach to parent interface, mortalise self, and return */ + intf_plug_plug ( xfer, &peerblk->xfer ); + ref_put ( &peerblk->refcnt ); + return 0; + + err_open_discovery: + peerblk_close ( peerblk, rc ); + err_alloc: + return rc; +} diff --git a/src/net/peerdisc.c b/src/net/peerdisc.c new file mode 100644 index 00000000..55e3f7fa --- /dev/null +++ b/src/net/peerdisc.c @@ -0,0 +1,642 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol peer discovery + * + */ + +/** List of discovery segments */ +static LIST_HEAD ( peerdisc_segments ); + +/** Number of repeated discovery attempts */ +#define PEERDISC_REPEAT_COUNT 2 + +/** Time between repeated discovery attempts */ +#define PEERDISC_REPEAT_TIMEOUT ( 1 * TICKS_PER_SEC ) + +/** Default discovery timeout (in seconds) */ +#define PEERDISC_DEFAULT_TIMEOUT_SECS 2 + +/** Recommended discovery timeout (in seconds) + * + * We reduce the recommended discovery timeout whenever a segment + * fails to discover any peers, and restore the default value whenever + * a valid discovery reply is received. We continue to send discovery + * requests even if the recommended timeout is reduced to zero. + * + * This strategy is intended to minimise discovery delays when no + * peers are available on the network, while allowing downloads to + * quickly switch back to using PeerDist acceleration if new peers + * become available. 
+ */ +unsigned int peerdisc_timeout_secs = PEERDISC_DEFAULT_TIMEOUT_SECS; + +/** Hosted cache server */ +static char *peerhost; + +static struct peerdisc_segment * peerdisc_find ( const char *id ); +static int peerdisc_discovered ( struct peerdisc_segment *segment, + const char *location ); + +/****************************************************************************** + * + * Statistics reporting + * + ****************************************************************************** + */ + +/** + * Report peer discovery statistics + * + * @v intf Interface + * @v peer Selected peer (or NULL) + * @v peers List of available peers + */ +void peerdisc_stat ( struct interface *intf, struct peerdisc_peer *peer, + struct list_head *peers ) { + struct interface *dest; + peerdisc_stat_TYPE ( void * ) *op = + intf_get_dest_op ( intf, peerdisc_stat, &dest ); + void *object = intf_object ( dest ); + + if ( op ) { + op ( object, peer, peers ); + } else { + /* Default is to do nothing */ + } + + intf_put ( dest ); +} + +/****************************************************************************** + * + * Discovery sockets + * + ****************************************************************************** + */ + +/** + * Open all PeerDist discovery sockets + * + * @ret rc Return status code + */ +static int peerdisc_socket_open ( void ) { + struct peerdisc_socket *socket; + int rc; + + /* Open each socket */ + for_each_table_entry ( socket, PEERDISC_SOCKETS ) { + if ( ( rc = xfer_open_socket ( &socket->xfer, SOCK_DGRAM, + &socket->address.sa, + NULL ) ) != 0 ) { + DBGC ( socket, "PEERDISC %s could not open socket: " + "%s\n", socket->name, strerror ( rc ) ); + goto err; + } + } + + return 0; + + err: + for_each_table_entry_continue_reverse ( socket, PEERDISC_SOCKETS ) + intf_restart ( &socket->xfer, rc ); + return rc; +} + +/** + * Attempt to transmit PeerDist discovery requests on all sockets + * + * @v uuid Message UUID string + * @v id Segment identifier string + */ 
+static void peerdisc_socket_tx ( const char *uuid, const char *id ) {
+	struct peerdisc_socket *socket;
+	struct net_device *netdev;
+	struct xfer_metadata meta;
+	union {
+		struct sockaddr sa;
+		struct sockaddr_tcpip st;
+	} address;
+	char *request;
+	size_t len;
+	int rc;
+
+	/* Construct discovery request */
+	request = peerdist_discovery_request ( uuid, id );
+	if ( ! request )
+		goto err_request;
+	len = strlen ( request );
+
+	/* Initialise data transfer metadata */
+	memset ( &meta, 0, sizeof ( meta ) );
+	meta.dest = &address.sa;
+
+	/* Send message on each socket */
+	for_each_table_entry ( socket, PEERDISC_SOCKETS ) {
+
+		/* Initialise socket address */
+		memcpy ( &address.sa, &socket->address.sa,
+			 sizeof ( address.sa ) );
+
+		/* Send message on each open network device */
+		for_each_netdev ( netdev ) {
+
+			/* Skip unopened network devices */
+			if ( ! netdev_is_open ( netdev ) )
+				continue;
+			address.st.st_scope_id = netdev->index;
+
+			/* Discard request (for test purposes) if applicable */
+			if ( inject_fault ( PEERDISC_DISCARD_RATE ) )
+				continue;
+
+			/* Transmit request */
+			if ( ( rc = xfer_deliver_raw_meta ( &socket->xfer,
+							    request, len,
+							    &meta ) ) != 0 ) {
+				DBGC ( socket, "PEERDISC %s could not transmit "
+				       "via %s: %s\n", socket->name,
+				       netdev->name, strerror ( rc ) );
+				/* Continue to try other net devices/sockets */
+				continue;
+			}
+		}
+	}
+
+	free ( request );
+ err_request:
+	return;
+}
+
+/**
+ * Handle received PeerDist discovery reply
+ *
+ * @v socket		PeerDist discovery socket
+ * @v iobuf		I/O buffer
+ * @v meta		Data transfer metadata
+ * @ret rc		Return status code
+ */
+static int peerdisc_socket_rx ( struct peerdisc_socket *socket,
+				struct io_buffer *iobuf,
+				struct xfer_metadata *meta __unused ) {
+	struct peerdist_discovery_reply reply;
+	struct peerdisc_segment *segment;
+	char *id;
+	char *location;
+	int rc;
+
+	/* Discard reply (for test purposes) if applicable */
+	if ( ( rc = inject_fault ( PEERDISC_DISCARD_RATE ) ) != 0 )
+ goto err; + + /* Parse reply */ + if ( ( rc = peerdist_discovery_reply ( iobuf->data, iob_len ( iobuf ), + &reply ) ) != 0 ) { + DBGC ( socket, "PEERDISC %s could not parse reply: %s\n", + socket->name, strerror ( rc ) ); + DBGC_HDA ( socket, 0, iobuf->data, iob_len ( iobuf ) ); + goto err; + } + + /* Any kind of discovery reply indicates that there are active + * peers on a local network, so restore the recommended + * discovery timeout to its default value for future requests. + */ + if ( peerdisc_timeout_secs != PEERDISC_DEFAULT_TIMEOUT_SECS ) { + DBGC ( socket, "PEERDISC %s restoring timeout to %d seconds\n", + socket->name, PEERDISC_DEFAULT_TIMEOUT_SECS ); + } + peerdisc_timeout_secs = PEERDISC_DEFAULT_TIMEOUT_SECS; + + /* Iterate over segment IDs */ + for ( id = reply.ids ; *id ; id += ( strlen ( id ) + 1 /* NUL */ ) ) { + + /* Find corresponding segment */ + segment = peerdisc_find ( id ); + if ( ! segment ) { + DBGC ( socket, "PEERDISC %s ignoring reply for %s\n", + socket->name, id ); + continue; + } + + /* Report all discovered peer locations */ + for ( location = reply.locations ; *location ; + location += ( strlen ( location ) + 1 /* NUL */ ) ) { + + /* Report discovered peer location */ + if ( ( rc = peerdisc_discovered ( segment, + location ) ) != 0 ) + goto err; + } + } + + err: + free_iob ( iobuf ); + return rc; +} + +/** + * Close all PeerDist discovery sockets + * + * @v rc Reason for close + */ +static void peerdisc_socket_close ( int rc ) { + struct peerdisc_socket *socket; + + /* Close all sockets */ + for_each_table_entry ( socket, PEERDISC_SOCKETS ) + intf_restart ( &socket->xfer, rc ); +} + +/** PeerDist discovery socket interface operations */ +static struct interface_operation peerdisc_socket_operations[] = { + INTF_OP ( xfer_deliver, struct peerdisc_socket *, peerdisc_socket_rx ), +}; + +/** PeerDist discovery socket interface descriptor */ +static struct interface_descriptor peerdisc_socket_desc = + INTF_DESC ( struct peerdisc_socket, 
xfer, peerdisc_socket_operations ); + +/** PeerDist discovery IPv4 socket */ +struct peerdisc_socket peerdisc_socket_ipv4 __peerdisc_socket = { + .name = "IPv4", + .address = { + .sin = { + .sin_family = AF_INET, + .sin_port = htons ( PEERDIST_DISCOVERY_PORT ), + .sin_addr.s_addr = htonl ( PEERDIST_DISCOVERY_IPV4 ), + }, + }, + .xfer = INTF_INIT ( peerdisc_socket_desc ), +}; + +/** PeerDist discovery IPv6 socket */ +struct peerdisc_socket peerdisc_socket_ipv6 __peerdisc_socket = { + .name = "IPv6", + .address = { + .sin6 = { + .sin6_family = AF_INET6, + .sin6_port = htons ( PEERDIST_DISCOVERY_PORT ), + .sin6_addr.s6_addr = PEERDIST_DISCOVERY_IPV6, + }, + }, + .xfer = INTF_INIT ( peerdisc_socket_desc ), +}; + +/****************************************************************************** + * + * Discovery segments + * + ****************************************************************************** + */ + +/** + * Free PeerDist discovery segment + * + * @v refcnt Reference count + */ +static void peerdisc_free ( struct refcnt *refcnt ) { + struct peerdisc_segment *segment = + container_of ( refcnt, struct peerdisc_segment, refcnt ); + struct peerdisc_peer *peer; + struct peerdisc_peer *tmp; + + /* Free all discovered peers */ + list_for_each_entry_safe ( peer, tmp, &segment->peers, list ) { + list_del ( &peer->list ); + free ( peer ); + } + + /* Free segment */ + free ( segment ); +} + +/** + * Find PeerDist discovery segment + * + * @v id Segment ID + * @ret segment PeerDist discovery segment, or NULL if not found + */ +static struct peerdisc_segment * peerdisc_find ( const char *id ) { + struct peerdisc_segment *segment; + + /* Look for a matching segment */ + list_for_each_entry ( segment, &peerdisc_segments, list ) { + if ( strcmp ( id, segment->id ) == 0 ) + return segment; + } + + return NULL; +} + +/** + * Add discovered PeerDist peer + * + * @v segment PeerDist discovery segment + * @v location Peer location + * @ret rc Return status code + */ +static int 
peerdisc_discovered ( struct peerdisc_segment *segment, + const char *location ) { + struct peerdisc_peer *peer; + struct peerdisc_client *peerdisc; + struct peerdisc_client *tmp; + + /* Ignore duplicate peers */ + list_for_each_entry ( peer, &segment->peers, list ) { + if ( strcmp ( peer->location, location ) == 0 ) { + DBGC2 ( segment, "PEERDISC %p duplicate %s\n", + segment, location ); + return 0; + } + } + DBGC2 ( segment, "PEERDISC %p discovered %s\n", segment, location ); + + /* Allocate and initialise structure */ + peer = zalloc ( sizeof ( *peer ) + strlen ( location ) + 1 /* NUL */ ); + if ( ! peer ) + return -ENOMEM; + strcpy ( peer->location, location ); + + /* Add to end of list of peers */ + list_add_tail ( &peer->list, &segment->peers ); + + /* Notify all clients */ + list_for_each_entry_safe ( peerdisc, tmp, &segment->clients, list ) + peerdisc->op->discovered ( peerdisc ); + + return 0; +} + +/** + * Handle discovery timer expiry + * + * @v timer Discovery timer + * @v over Failure indicator + */ +static void peerdisc_expired ( struct retry_timer *timer, int over __unused ) { + struct peerdisc_segment *segment = + container_of ( timer, struct peerdisc_segment, timer ); + + /* Attempt to transmit discovery requests */ + peerdisc_socket_tx ( segment->uuid, segment->id ); + + /* Schedule next transmission, if applicable */ + if ( timer->count < PEERDISC_REPEAT_COUNT ) + start_timer_fixed ( &segment->timer, PEERDISC_REPEAT_TIMEOUT ); +} + +/** + * Create PeerDist discovery segment + * + * @v id Segment ID + * @ret segment PeerDist discovery segment, or NULL on error + */ +static struct peerdisc_segment * peerdisc_create ( const char *id ) { + struct peerdisc_segment *segment; + union { + union uuid uuid; + uint32_t dword[ sizeof ( union uuid ) / sizeof ( uint32_t ) ]; + } random_uuid; + size_t uuid_len; + size_t id_len; + const char *uuid; + char *uuid_copy; + char *id_copy; + unsigned int i; + int rc; + + /* Generate a random message UUID. 
This does not require high + * quality randomness. + */ + for ( i = 0 ; i < ( sizeof ( random_uuid.dword ) / + sizeof ( random_uuid.dword[0] ) ) ; i++ ) + random_uuid.dword[i] = random(); + uuid = uuid_ntoa ( &random_uuid.uuid ); + + /* Calculate string lengths */ + id_len = ( strlen ( id ) + 1 /* NUL */ ); + uuid_len = ( strlen ( uuid ) + 1 /* NUL */ ); + + /* Allocate and initialise structure */ + segment = zalloc ( sizeof ( *segment ) + id_len + uuid_len ); + if ( ! segment ) + goto err_alloc; + id_copy = ( ( ( void * ) segment ) + sizeof ( *segment ) ); + memcpy ( id_copy, id, id_len ); + uuid_copy = ( ( ( void * ) id_copy ) + id_len ); + memcpy ( uuid_copy, uuid, uuid_len ); + ref_init ( &segment->refcnt, peerdisc_free ); + segment->id = id_copy; + segment->uuid = uuid_copy; + INIT_LIST_HEAD ( &segment->peers ); + INIT_LIST_HEAD ( &segment->clients ); + timer_init ( &segment->timer, peerdisc_expired, &segment->refcnt ); + + /* Add hosted cache server or initiate discovery */ + if ( peerhost ) { + + /* Add hosted cache server to list of peers */ + if ( ( rc = peerdisc_discovered ( segment, peerhost ) ) != 0 ) + goto err_peerhost; + + } else { + + /* Start discovery timer */ + start_timer_nodelay ( &segment->timer ); + DBGC2 ( segment, "PEERDISC %p discovering %s\n", + segment, segment->id ); + } + + /* Add to list of segments, transfer reference to list, and return */ + list_add_tail ( &segment->list, &peerdisc_segments ); + return segment; + + err_peerhost: + ref_put ( &segment->refcnt ); + err_alloc: + return NULL; +} + +/** + * Destroy PeerDist discovery segment + * + * @v segment PeerDist discovery segment + */ +static void peerdisc_destroy ( struct peerdisc_segment *segment ) { + + /* Sanity check */ + assert ( list_empty ( &segment->clients ) ); + + /* Stop timer */ + stop_timer ( &segment->timer ); + + /* Remove from list of segments and drop list's reference */ + list_del ( &segment->list ); + ref_put ( &segment->refcnt ); +} + 
+/****************************************************************************** + * + * Discovery clients + * + ****************************************************************************** + */ + +/** + * Open PeerDist discovery client + * + * @v peerdisc PeerDist discovery client + * @v id Segment ID + * @v len Length of segment ID + * @ret rc Return status code + */ +int peerdisc_open ( struct peerdisc_client *peerdisc, const void *id, + size_t len ) { + struct peerdisc_segment *segment; + char id_string[ base16_encoded_len ( len ) + 1 /* NUL */ ]; + char *id_chr; + int rc; + + /* Construct ID string */ + base16_encode ( id, len, id_string, sizeof ( id_string ) ); + for ( id_chr = id_string ; *id_chr ; id_chr++ ) + *id_chr = toupper ( *id_chr ); + + /* Sanity check */ + assert ( peerdisc->segment == NULL ); + + /* Open socket if this is the first segment */ + if ( list_empty ( &peerdisc_segments ) && + ( ( rc = peerdisc_socket_open() ) != 0 ) ) + return rc; + + /* Find or create segment */ + if ( ! ( ( segment = peerdisc_find ( id_string ) ) || + ( segment = peerdisc_create ( id_string ) ) ) ) + return -ENOMEM; + + /* Add to list of clients */ + ref_get ( &segment->refcnt ); + peerdisc->segment = segment; + list_add_tail ( &peerdisc->list, &segment->clients ); + + return 0; +} + +/** + * Close PeerDist discovery client + * + * @v peerdisc PeerDist discovery client + */ +void peerdisc_close ( struct peerdisc_client *peerdisc ) { + struct peerdisc_segment *segment = peerdisc->segment; + + /* Ignore if discovery is already closed */ + if ( ! segment ) + return; + + /* If no peers were discovered, reduce the recommended + * discovery timeout to minimise delays on future requests. 
+ */ + if ( list_empty ( &segment->peers ) && peerdisc_timeout_secs ) { + peerdisc_timeout_secs--; + DBGC ( segment, "PEERDISC %p reducing timeout to %d " + "seconds\n", peerdisc, peerdisc_timeout_secs ); + } + + /* Remove from list of clients */ + peerdisc->segment = NULL; + list_del ( &peerdisc->list ); + ref_put ( &segment->refcnt ); + + /* If this was the last client, destroy the segment */ + if ( list_empty ( &segment->clients ) ) + peerdisc_destroy ( segment ); + + /* If there are no more segments, close the socket */ + if ( list_empty ( &peerdisc_segments ) ) + peerdisc_socket_close ( 0 ); +} + +/****************************************************************************** + * + * Settings + * + ****************************************************************************** + */ + +/** PeerDist hosted cache server setting */ +const struct setting peerhost_setting __setting ( SETTING_MISC, peerhost ) = { + .name = "peerhost", + .description = "PeerDist hosted cache", + .type = &setting_type_string, +}; + +/** + * Apply PeerDist discovery settings + * + * @ret rc Return status code + */ +static int apply_peerdisc_settings ( void ) { + + /* Free any existing hosted cache server */ + free ( peerhost ); + peerhost = NULL; + + /* Fetch hosted cache server */ + fetch_string_setting_copy ( NULL, &peerhost_setting, &peerhost ); + if ( peerhost ) { + DBGC ( &peerhost, "PEERDISC using hosted cache %s\n", + peerhost ); + } + + return 0; +} + +/** PeerDist discovery settings applicator */ +struct settings_applicator peerdisc_applicator __settings_applicator = { + .apply = apply_peerdisc_settings, +}; diff --git a/src/net/peerdist.c b/src/net/peerdist.c new file mode 100644 index 00000000..3210ac0e --- /dev/null +++ b/src/net/peerdist.c @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol + * + * This is quite possibly the ugliest protocol I have ever had the + * misfortune to encounter, and I've encountered multicast TFTP. + */ + +/** PeerDist is globally enabled */ +static long peerdist_enabled = 1; + +/** + * Check whether or not to support PeerDist encoding for this request + * + * @v http HTTP transaction + * @ret supported PeerDist encoding is supported for this request + */ +static int http_peerdist_supported ( struct http_transaction *http ) { + + /* Allow PeerDist to be globally enabled/disabled */ + if ( ! peerdist_enabled ) + return 0; + + /* Support PeerDist encoding only if we can directly access an + * underlying data transfer buffer. 
Direct access is required + * in order to support decryption of data received via the + * retrieval protocol (which provides the AES initialisation + * vector only after all of the encrypted data has been + * received). + * + * This test simultaneously ensures that we do not attempt to + * use PeerDist encoding on a request which is itself a + * PeerDist individual block download, since the individual + * block downloads do not themselves provide direct access to + * an underlying data transfer buffer. + */ + return ( xfer_buffer ( &http->xfer ) != NULL ); +} + +/** + * Format HTTP "X-P2P-PeerDist" header + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_p2p_peerdist ( struct http_transaction *http, + char *buf, size_t len ) { + int supported = http_peerdist_supported ( http ); + int missing; + + /* PeerDist wants us to inform the server whenever we make a + * request for data that was missing from local peers + * (presumably for statistical purposes only). We use the + * heuristic of assuming that the combination of "this request + * may not itself use PeerDist content encoding" and "this is + * a range request" probably indicates that we are making a + * PeerDist block raw range request for missing data. + */ + missing = ( http->request.range.len && ( ! supported ) ); + + /* Omit header if PeerDist encoding is not supported and we + * are not reporting a missing data request. + */ + if ( ! ( supported || missing ) ) + return 0; + + /* Construct header */ + return snprintf ( buf, len, "Version=1.1%s", + ( missing ? 
", MissingDataRequest=true" : "" ) ); +} + +/** HTTP "X-P2P-PeerDist" header */ +struct http_request_header http_request_p2p_peerdist __http_request_header = { + .name = "X-P2P-PeerDist", + .format = http_format_p2p_peerdist, +}; + +/** + * Format HTTP "X-P2P-PeerDistEx" header + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_p2p_peerdistex ( struct http_transaction *http, + char *buf, size_t len ) { + int supported = http_peerdist_supported ( http ); + + /* Omit header if PeerDist encoding is not supported */ + if ( ! supported ) + return 0; + + /* Construct header */ + return snprintf ( buf, len, ( "MinContentInformation=1.0, " + "MaxContentInformation=2.0" ) ); +} + +/** HTTP "X-P2P-PeerDist" header */ +struct http_request_header http_request_p2p_peerdistex __http_request_header = { + .name = "X-P2P-PeerDistEx", + .format = http_format_p2p_peerdistex, +}; + +/** + * Initialise PeerDist content encoding + * + * @v http HTTP transaction + * @ret rc Return status code + */ +static int http_peerdist_init ( struct http_transaction *http ) { + + return peermux_filter ( &http->content, &http->transfer, http->uri ); +} + +/** PeerDist HTTP content encoding */ +struct http_content_encoding peerdist_encoding __http_content_encoding = { + .name = "peerdist", + .supported = http_peerdist_supported, + .init = http_peerdist_init, +}; + +/** PeerDist enabled setting */ +const struct setting peerdist_setting __setting ( SETTING_MISC, peerdist ) = { + .name = "peerdist", + .description = "PeerDist enabled", + .type = &setting_type_int8, +}; + +/** + * Apply PeerDist settings + * + * @ret rc Return status code + */ +static int apply_peerdist_settings ( void ) { + + /* Fetch global PeerDist enabled setting */ + if ( fetch_int_setting ( NULL, &peerdist_setting, + &peerdist_enabled ) < 0 ) { + peerdist_enabled = 1; + } + DBGC ( &peerdist_enabled, "PEERDIST is %s\n", + ( 
peerdist_enabled ? "enabled" : "disabled" ) ); + + return 0; +} + +/** PeerDist settings applicator */ +struct settings_applicator peerdist_applicator __settings_applicator = { + .apply = apply_peerdist_settings, +}; diff --git a/src/net/peermux.c b/src/net/peermux.c new file mode 100644 index 00000000..a391ed37 --- /dev/null +++ b/src/net/peermux.c @@ -0,0 +1,444 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Peer Content Caching and Retrieval (PeerDist) protocol multiplexer + * + */ + +/** + * Free PeerDist download multiplexer + * + * @v refcnt Reference count + */ +static void peermux_free ( struct refcnt *refcnt ) { + struct peerdist_multiplexer *peermux = + container_of ( refcnt, struct peerdist_multiplexer, refcnt ); + + uri_put ( peermux->uri ); + xferbuf_free ( &peermux->buffer ); + free ( peermux ); +} + +/** + * Close PeerDist download multiplexer + * + * @v peermux PeerDist download multiplexer + * @v rc Reason for close + */ +static void peermux_close ( struct peerdist_multiplexer *peermux, int rc ) { + unsigned int i; + + /* Stop block download initiation process */ + process_del ( &peermux->process ); + + /* Shut down all block downloads */ + for ( i = 0 ; i < PEERMUX_MAX_BLOCKS ; i++ ) + intf_shutdown ( &peermux->block[i].xfer, rc ); + + /* Shut down all other interfaces (which may be connected to + * the same object). 
+ */ + intf_nullify ( &peermux->info ); /* avoid potential loops */ + intf_shutdown ( &peermux->xfer, rc ); + intf_shutdown ( &peermux->info, rc ); +} + +/** + * Report progress of PeerDist download + * + * @v peermux PeerDist download multiplexer + * @v progress Progress report to fill in + * @ret ongoing_rc Ongoing job status code (if known) + */ +static int peermux_progress ( struct peerdist_multiplexer *peermux, + struct job_progress *progress ) { + struct peerdist_statistics *stats = &peermux->stats; + unsigned int percentage; + + /* Construct PeerDist status message */ + if ( stats->total ) { + percentage = ( ( 100 * stats->local ) / stats->total ); + snprintf ( progress->message, sizeof ( progress->message ), + "%3d%% from %d peers", percentage, stats->peers ); + } + + return 0; +} + +/** + * Receive content information + * + * @v peermux PeerDist download multiplexer + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int peermux_info_deliver ( struct peerdist_multiplexer *peermux, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + int rc; + + /* Add data to buffer */ + if ( ( rc = xferbuf_deliver ( &peermux->buffer, iobuf, meta ) ) != 0 ) + goto err; + + return 0; + + err: + peermux_close ( peermux, rc ); + return rc; +} + +/** + * Close content information interface + * + * @v peermux PeerDist download multiplexer + * @v rc Reason for close + */ +static void peermux_info_close ( struct peerdist_multiplexer *peermux, int rc ){ + struct peerdist_info *info = &peermux->cache.info; + size_t len; + + /* Terminate download on error */ + if ( rc != 0 ) + goto err; + + /* Successfully closing the content information interface + * indicates that the content information has been fully + * received, and initiates the actual PeerDist download. 
+ */ + + /* Shut down content information interface */ + intf_shutdown ( &peermux->info, rc ); + + /* Parse content information */ + if ( ( rc = peerdist_info ( info->raw.data, peermux->buffer.len, + info ) ) != 0 ) { + DBGC ( peermux, "PEERMUX %p could not parse content info: %s\n", + peermux, strerror ( rc ) ); + goto err; + } + + /* Notify recipient of total download size */ + len = ( info->trim.end - info->trim.start ); + if ( ( rc = xfer_seek ( &peermux->xfer, len ) ) != 0 ) { + DBGC ( peermux, "PEERMUX %p could not presize buffer: %s\n", + peermux, strerror ( rc ) ); + goto err; + } + xfer_seek ( &peermux->xfer, 0 ); + + /* Start block download process */ + process_add ( &peermux->process ); + + return; + + err: + peermux_close ( peermux, rc ); +} + +/** + * Initiate multiplexed block download + * + * @v peermux PeerDist download multiplexer + */ +static void peermux_step ( struct peerdist_multiplexer *peermux ) { + struct peerdist_info *info = &peermux->cache.info; + struct peerdist_info_segment *segment = &peermux->cache.segment; + struct peerdist_info_block *block = &peermux->cache.block; + struct peerdist_multiplexed_block *peermblk; + unsigned int next_segment; + unsigned int next_block; + int rc; + + /* Stop initiation process if all block downloads are busy */ + peermblk = list_first_entry ( &peermux->idle, + struct peerdist_multiplexed_block, list ); + if ( ! peermblk ) { + process_del ( &peermux->process ); + return; + } + + /* Increment block index */ + next_block = ( block->index + 1 ); + + /* Move to first/next segment, if applicable */ + if ( next_block >= segment->blocks ) { + + /* Reset block index */ + next_block = 0; + + /* Calculate segment index */ + next_segment = ( segment->info ? ( segment->index + 1 ) : 0 ); + + /* If we have finished all segments and have no + * remaining block downloads, then we are finished. 
+ */ + if ( next_segment >= info->segments ) { + process_del ( &peermux->process ); + if ( list_empty ( &peermux->busy ) ) + peermux_close ( peermux, 0 ); + return; + } + + /* Get content information segment */ + if ( ( rc = peerdist_info_segment ( info, segment, + next_segment ) ) != 0 ) { + DBGC ( peermux, "PEERMUX %p could not get segment %d " + "information: %s\n", peermux, next_segment, + strerror ( rc ) ); + goto err; + } + } + + /* Get content information block */ + if ( ( rc = peerdist_info_block ( segment, block, next_block ) ) != 0 ){ + DBGC ( peermux, "PEERMUX %p could not get segment %d block " + "%d information: %s\n", peermux, segment->index, + next_block, strerror ( rc ) ); + goto err; + } + + /* Ignore block if it lies entirely outside the trimmed range */ + if ( block->trim.start == block->trim.end ) { + DBGC ( peermux, "PEERMUX %p skipping segment %d block %d\n", + peermux, segment->index, block->index ); + return; + } + + /* Start downloading this block */ + if ( ( rc = peerblk_open ( &peermblk->xfer, peermux->uri, + block ) ) != 0 ) { + DBGC ( peermux, "PEERMUX %p could not start download for " + "segment %d block %d: %s\n", peermux, segment->index, + block->index, strerror ( rc ) ); + goto err; + } + + /* Move to list of busy block downloads */ + list_del ( &peermblk->list ); + list_add_tail ( &peermblk->list, &peermux->busy ); + + return; + + err: + peermux_close ( peermux, rc ); +} + +/** + * Receive data from multiplexed block download + * + * @v peermblk PeerDist multiplexed block download + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int peermux_block_deliver ( struct peerdist_multiplexed_block *peermblk, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + struct peerdist_multiplexer *peermux = peermblk->peermux; + + /* Sanity check: all block downloads must use absolute + * positions for all deliveries, since they run concurrently. 
+ */ + assert ( meta->flags & XFER_FL_ABS_OFFSET ); + + /* We can't use a simple passthrough interface descriptor, + * since there are multiple block download interfaces. + */ + return xfer_deliver ( &peermux->xfer, iob_disown ( iobuf ), meta ); +} + +/** + * Get multiplexed block download underlying data transfer buffer + * + * @v peermblk PeerDist multiplexed download block + * @ret xferbuf Data transfer buffer, or NULL on error + */ +static struct xfer_buffer * +peermux_block_buffer ( struct peerdist_multiplexed_block *peermblk ) { + struct peerdist_multiplexer *peermux = peermblk->peermux; + + /* We can't use a simple passthrough interface descriptor, + * since there are multiple block download interfaces. + */ + return xfer_buffer ( &peermux->xfer ); +} + +/** + * Record peer discovery statistics + * + * @v peermblk PeerDist multiplexed block download + * @v peer Selected peer (or NULL) + * @v peers List of available peers + */ +static void peermux_block_stat ( struct peerdist_multiplexed_block *peermblk, + struct peerdisc_peer *peer, + struct list_head *peers ) { + struct peerdist_multiplexer *peermux = peermblk->peermux; + struct peerdist_statistics *stats = &peermux->stats; + struct peerdisc_peer *tmp; + unsigned int count = 0; + + /* Record maximum number of available peers */ + list_for_each_entry ( tmp, peers, list ) + count++; + if ( count > stats->peers ) + stats->peers = count; + + /* Update block counts */ + if ( peer ) + stats->local++; + stats->total++; + DBGC2 ( peermux, "PEERMUX %p downloaded %d/%d from %d peers\n", + peermux, stats->local, stats->total, stats->peers ); +} + +/** + * Close multiplexed block download + * + * @v peermblk PeerDist multiplexed block download + * @v rc Reason for close + */ +static void peermux_block_close ( struct peerdist_multiplexed_block *peermblk, + int rc ) { + struct peerdist_multiplexer *peermux = peermblk->peermux; + + /* Move to list of idle downloads */ + list_del ( &peermblk->list ); + list_add_tail ( 
&peermblk->list, &peermux->idle ); + + /* If any error occurred, terminate the whole multiplexer */ + if ( rc != 0 ) { + peermux_close ( peermux, rc ); + return; + } + + /* Restart data transfer interface */ + intf_restart ( &peermblk->xfer, rc ); + + /* Restart block download initiation process */ + process_add ( &peermux->process ); +} + +/** Data transfer interface operations */ +static struct interface_operation peermux_xfer_operations[] = { + INTF_OP ( job_progress, struct peerdist_multiplexer *, + peermux_progress ), + INTF_OP ( intf_close, struct peerdist_multiplexer *, peermux_close ), +}; + +/** Data transfer interface descriptor */ +static struct interface_descriptor peermux_xfer_desc = + INTF_DESC_PASSTHRU ( struct peerdist_multiplexer, xfer, + peermux_xfer_operations, info ); + +/** Content information interface operations */ +static struct interface_operation peermux_info_operations[] = { + INTF_OP ( xfer_deliver, struct peerdist_multiplexer *, + peermux_info_deliver ), + INTF_OP ( intf_close, struct peerdist_multiplexer *, + peermux_info_close ), +}; + +/** Content information interface descriptor */ +static struct interface_descriptor peermux_info_desc = + INTF_DESC_PASSTHRU ( struct peerdist_multiplexer, info, + peermux_info_operations, xfer ); + +/** Block download data transfer interface operations */ +static struct interface_operation peermux_block_operations[] = { + INTF_OP ( xfer_deliver, struct peerdist_multiplexed_block *, + peermux_block_deliver ), + INTF_OP ( xfer_buffer, struct peerdist_multiplexed_block *, + peermux_block_buffer ), + INTF_OP ( peerdisc_stat, struct peerdist_multiplexed_block *, + peermux_block_stat ), + INTF_OP ( intf_close, struct peerdist_multiplexed_block *, + peermux_block_close ), +}; + +/** Block download data transfer interface descriptor */ +static struct interface_descriptor peermux_block_desc = + INTF_DESC ( struct peerdist_multiplexed_block, xfer, + peermux_block_operations ); + +/** Block download initiation 
process descriptor */ +static struct process_descriptor peermux_process_desc = + PROC_DESC ( struct peerdist_multiplexer, process, peermux_step ); + +/** + * Add PeerDist content-encoding filter + * + * @v xfer Data transfer interface + * @v info Content information interface + * @v uri Original URI + * @ret rc Return status code + */ +int peermux_filter ( struct interface *xfer, struct interface *info, + struct uri *uri ) { + struct peerdist_multiplexer *peermux; + struct peerdist_multiplexed_block *peermblk; + unsigned int i; + + /* Allocate and initialise structure */ + peermux = zalloc ( sizeof ( *peermux ) ); + if ( ! peermux ) + return -ENOMEM; + ref_init ( &peermux->refcnt, peermux_free ); + intf_init ( &peermux->xfer, &peermux_xfer_desc, &peermux->refcnt ); + intf_init ( &peermux->info, &peermux_info_desc, &peermux->refcnt ); + peermux->uri = uri_get ( uri ); + xferbuf_umalloc_init ( &peermux->buffer, + &peermux->cache.info.raw.data ); + process_init_stopped ( &peermux->process, &peermux_process_desc, + &peermux->refcnt ); + INIT_LIST_HEAD ( &peermux->busy ); + INIT_LIST_HEAD ( &peermux->idle ); + for ( i = 0 ; i < PEERMUX_MAX_BLOCKS ; i++ ) { + peermblk = &peermux->block[i]; + peermblk->peermux = peermux; + list_add_tail ( &peermblk->list, &peermux->idle ); + intf_init ( &peermblk->xfer, &peermux_block_desc, + &peermux->refcnt ); + } + + /* Attach to parent interfaces, mortalise self, and return */ + intf_plug_plug ( &peermux->xfer, xfer ); + intf_plug_plug ( &peermux->info, info ); + ref_put ( &peermux->refcnt ); + return 0; +} diff --git a/src/net/rndis.c b/src/net/rndis.c new file mode 100644 index 00000000..a3b562bc --- /dev/null +++ b/src/net/rndis.c @@ -0,0 +1,1072 @@ +/* + * Copyright (C) 2014 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Remote Network Driver Interface Specification + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * Allocate I/O buffer + * + * @v len Length + * @ret iobuf I/O buffer, or NULL + */ +static struct io_buffer * rndis_alloc_iob ( size_t len ) { + struct rndis_header *header; + struct io_buffer *iobuf; + + /* Allocate I/O buffer and reserve space */ + iobuf = alloc_iob ( sizeof ( *header ) + len ); + if ( iobuf ) + iob_reserve ( iobuf, sizeof ( *header ) ); + + return iobuf; +} + +/** + * Wait for completion + * + * @v rndis RNDIS device + * @v wait_id Request ID + * @ret rc Return status code + */ +static int rndis_wait ( struct rndis_device *rndis, unsigned int wait_id ) { + unsigned int i; + + /* Record query ID */ + rndis->wait_id = wait_id; + + /* Wait for operation to complete */ + for ( i = 0 ; i < RNDIS_MAX_WAIT_MS ; i++ ) { + + /* Check for completion */ + if ( ! 
rndis->wait_id ) + return rndis->wait_rc; + + /* Poll RNDIS device */ + rndis->op->poll ( rndis ); + + /* Delay for 1ms */ + mdelay ( 1 ); + } + + DBGC ( rndis, "RNDIS %s timed out waiting for ID %#08x\n", + rndis->name, wait_id ); + return -ETIMEDOUT; +} + +/** + * Transmit message + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @v type Message type + * @ret rc Return status code + */ +static int rndis_tx_message ( struct rndis_device *rndis, + struct io_buffer *iobuf, unsigned int type ) { + struct rndis_header *header; + int rc; + + /* Prepend RNDIS header */ + header = iob_push ( iobuf, sizeof ( *header ) ); + header->type = cpu_to_le32 ( type ); + header->len = cpu_to_le32 ( iob_len ( iobuf ) ); + + /* Transmit message */ + if ( ( rc = rndis->op->transmit ( rndis, iobuf ) ) != 0 ) { + DBGC ( rndis, "RNDIS %s could not transmit: %s\n", + rndis->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Complete message transmission + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @v rc Packet status code + */ +void rndis_tx_complete_err ( struct rndis_device *rndis, + struct io_buffer *iobuf, int rc ) { + struct net_device *netdev = rndis->netdev; + struct rndis_header *header; + size_t len = iob_len ( iobuf ); + + /* Sanity check */ + if ( len < sizeof ( *header ) ) { + DBGC ( rndis, "RNDIS %s completed underlength transmission:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + netdev_tx_err ( netdev, NULL, -EINVAL ); + return; + } + header = iobuf->data; + + /* Complete buffer */ + if ( header->type == cpu_to_le32 ( RNDIS_PACKET_MSG ) ) { + netdev_tx_complete_err ( netdev, iobuf, rc ); + } else { + free_iob ( iobuf ); + } +} + +/** + * Transmit data packet + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int rndis_tx_data ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct rndis_packet_message *msg; + size_t len = iob_len ( iobuf ); + int rc; + + /* 
Prepend packet message header */ + msg = iob_push ( iobuf, sizeof ( *msg ) ); + memset ( msg, 0, sizeof ( *msg ) ); + msg->data.offset = cpu_to_le32 ( sizeof ( *msg ) ); + msg->data.len = cpu_to_le32 ( len ); + + /* Transmit message */ + if ( ( rc = rndis_tx_message ( rndis, iobuf, RNDIS_PACKET_MSG ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Defer transmitted packet + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @ret rc Return status code + * + * As with netdev_tx_defer(), the caller must ensure that space in the + * transmit descriptor ring is freed up before calling + * rndis_tx_complete(). + * + * Unlike netdev_tx_defer(), this call may fail. + */ +int rndis_tx_defer ( struct rndis_device *rndis, struct io_buffer *iobuf ) { + struct net_device *netdev = rndis->netdev; + struct rndis_header *header; + struct rndis_packet_message *msg; + + /* Fail unless this was a packet message. Only packet + * messages correspond to I/O buffers in the network device's + * TX queue; other messages cannot be deferred in this way. + */ + assert ( iob_len ( iobuf ) >= sizeof ( *header ) ); + header = iobuf->data; + if ( header->type != cpu_to_le32 ( RNDIS_PACKET_MSG ) ) + return -ENOTSUP; + + /* Strip RNDIS header and packet message header, to return + * this packet to the state in which we received it. 
+ */ + iob_pull ( iobuf, ( sizeof ( *header ) + sizeof ( *msg ) ) ); + + /* Defer packet */ + netdev_tx_defer ( netdev, iobuf ); + + return 0; +} + +/** + * Receive data packet + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + */ +static void rndis_rx_data ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct net_device *netdev = rndis->netdev; + struct rndis_packet_message *msg; + size_t len = iob_len ( iobuf ); + size_t data_offset; + size_t data_len; + int rc; + + /* Sanity check */ + if ( len < sizeof ( *msg ) ) { + DBGC ( rndis, "RNDIS %s received underlength data packet:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_len; + } + msg = iobuf->data; + + /* Locate and sanity check data buffer */ + data_offset = le32_to_cpu ( msg->data.offset ); + data_len = le32_to_cpu ( msg->data.len ); + if ( ( data_offset > len ) || ( data_len > ( len - data_offset ) ) ) { + DBGC ( rndis, "RNDIS %s data packet data exceeds packet:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_data; + } + + /* Strip non-data portions */ + iob_pull ( iobuf, data_offset ); + iob_unput ( iobuf, ( iob_len ( iobuf ) - data_len ) ); + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + + return; + + err_data: + err_len: + /* Report error to network stack */ + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); +} + +/** + * Transmit initialisation message + * + * @v rndis RNDIS device + * @v id Request ID + * @ret rc Return status code + */ +static int rndis_tx_initialise ( struct rndis_device *rndis, unsigned int id ) { + struct io_buffer *iobuf; + struct rndis_initialise_message *msg; + int rc; + + /* Allocate I/O buffer */ + iobuf = rndis_alloc_iob ( sizeof ( *msg ) ); + if ( ! 
iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Construct message */ + msg = iob_put ( iobuf, sizeof ( *msg ) ); + memset ( msg, 0, sizeof ( *msg ) ); + msg->id = id; /* Non-endian */ + msg->major = cpu_to_le32 ( RNDIS_VERSION_MAJOR ); + msg->minor = cpu_to_le32 ( RNDIS_VERSION_MINOR ); + msg->mtu = cpu_to_le32 ( RNDIS_MTU ); + + /* Transmit message */ + if ( ( rc = rndis_tx_message ( rndis, iobuf, + RNDIS_INITIALISE_MSG ) ) != 0 ) + goto err_tx; + + return 0; + + err_tx: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Receive initialisation completion + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + */ +static void rndis_rx_initialise ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct rndis_initialise_completion *cmplt; + size_t len = iob_len ( iobuf ); + unsigned int id; + int rc; + + /* Sanity check */ + if ( len < sizeof ( *cmplt ) ) { + DBGC ( rndis, "RNDIS %s received underlength initialisation " + "completion:\n", rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_len; + } + cmplt = iobuf->data; + + /* Extract request ID */ + id = cmplt->id; /* Non-endian */ + + /* Check status */ + if ( cmplt->status ) { + DBGC ( rndis, "RNDIS %s received initialisation completion " + "failure %#08x\n", rndis->name, + le32_to_cpu ( cmplt->status ) ); + rc = -EIO; + goto err_status; + } + + /* Success */ + rc = 0; + + err_status: + /* Record completion result if applicable */ + if ( id == rndis->wait_id ) { + rndis->wait_id = 0; + rndis->wait_rc = rc; + } + err_len: + free_iob ( iobuf ); +} + +/** + * Initialise RNDIS + * + * @v rndis RNDIS device + * @ret rc Return status code + */ +static int rndis_initialise ( struct rndis_device *rndis ) { + int rc; + + /* Transmit initialisation message */ + if ( ( rc = rndis_tx_initialise ( rndis, RNDIS_INIT_ID ) ) != 0 ) + return rc; + + /* Wait for response */ + if ( ( rc = rndis_wait ( rndis, RNDIS_INIT_ID ) ) != 0 ) + return rc; + + return 0; +} + +/** 
+ * Transmit halt message + * + * @v rndis RNDIS device + * @ret rc Return status code + */ +static int rndis_tx_halt ( struct rndis_device *rndis ) { + struct io_buffer *iobuf; + struct rndis_halt_message *msg; + int rc; + + /* Allocate I/O buffer */ + iobuf = rndis_alloc_iob ( sizeof ( *msg ) ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Construct message */ + msg = iob_put ( iobuf, sizeof ( *msg ) ); + memset ( msg, 0, sizeof ( *msg ) ); + + /* Transmit message */ + if ( ( rc = rndis_tx_message ( rndis, iobuf, RNDIS_HALT_MSG ) ) != 0 ) + goto err_tx; + + return 0; + + err_tx: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Halt RNDIS + * + * @v rndis RNDIS device + * @ret rc Return status code + */ +static int rndis_halt ( struct rndis_device *rndis ) { + int rc; + + /* Transmit halt message */ + if ( ( rc = rndis_tx_halt ( rndis ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Transmit OID message + * + * @v rndis RNDIS device + * @v oid Object ID + * @v data New OID value (or NULL to query current value) + * @v len Length of new OID value + * @ret rc Return status code + */ +static int rndis_tx_oid ( struct rndis_device *rndis, unsigned int oid, + const void *data, size_t len ) { + struct io_buffer *iobuf; + struct rndis_oid_message *msg; + unsigned int type; + int rc; + + /* Allocate I/O buffer */ + iobuf = rndis_alloc_iob ( sizeof ( *msg ) + len ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Construct message. We use the OID as the request ID. */ + msg = iob_put ( iobuf, sizeof ( *msg ) ); + memset ( msg, 0, sizeof ( *msg ) ); + msg->id = oid; /* Non-endian */ + msg->oid = cpu_to_le32 ( oid ); + msg->offset = cpu_to_le32 ( sizeof ( *msg ) ); + msg->len = cpu_to_le32 ( len ); + memcpy ( iob_put ( iobuf, len ), data, len ); + + /* Transmit message */ + type = ( data ? 
RNDIS_SET_MSG : RNDIS_QUERY_MSG ); + if ( ( rc = rndis_tx_message ( rndis, iobuf, type ) ) != 0 ) + goto err_tx; + + return 0; + + err_tx: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Receive query OID completion + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + */ +static void rndis_rx_query_oid ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct net_device *netdev = rndis->netdev; + struct rndis_query_completion *cmplt; + size_t len = iob_len ( iobuf ); + size_t info_offset; + size_t info_len; + unsigned int id; + void *info; + uint32_t *link_status; + int rc; + + /* Sanity check */ + if ( len < sizeof ( *cmplt ) ) { + DBGC ( rndis, "RNDIS %s received underlength query " + "completion:\n", rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_len; + } + cmplt = iobuf->data; + + /* Extract request ID */ + id = cmplt->id; /* Non-endian */ + + /* Check status */ + if ( cmplt->status ) { + DBGC ( rndis, "RNDIS %s received query completion failure " + "%#08x\n", rndis->name, le32_to_cpu ( cmplt->status ) ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EIO; + goto err_status; + } + + /* Locate and sanity check information buffer */ + info_offset = le32_to_cpu ( cmplt->offset ); + info_len = le32_to_cpu ( cmplt->len ); + if ( ( info_offset > len ) || ( info_len > ( len - info_offset ) ) ) { + DBGC ( rndis, "RNDIS %s query completion information exceeds " + "packet:\n", rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_info; + } + info = ( ( ( void * ) cmplt ) + info_offset ); + + /* Handle OID */ + switch ( id ) { + + case RNDIS_OID_802_3_PERMANENT_ADDRESS: + if ( info_len > sizeof ( netdev->hw_addr ) ) + info_len = sizeof ( netdev->hw_addr ); + memcpy ( netdev->hw_addr, info, info_len ); + break; + + case RNDIS_OID_802_3_CURRENT_ADDRESS: + if ( info_len > sizeof ( netdev->ll_addr ) ) + info_len = sizeof ( netdev->ll_addr ); + memcpy ( netdev->ll_addr, info, 
info_len ); + break; + + case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS: + if ( info_len != sizeof ( *link_status ) ) { + DBGC ( rndis, "RNDIS %s invalid link status:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EPROTO; + goto err_link_status; + } + link_status = info; + if ( *link_status == 0 ) { + DBGC ( rndis, "RNDIS %s link is up\n", rndis->name ); + netdev_link_up ( netdev ); + } else { + DBGC ( rndis, "RNDIS %s link is down: %#08x\n", + rndis->name, le32_to_cpu ( *link_status ) ); + netdev_link_down ( netdev ); + } + break; + + default: + DBGC ( rndis, "RNDIS %s unexpected query completion ID %#08x\n", + rndis->name, id ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EPROTO; + goto err_id; + } + + /* Success */ + rc = 0; + + err_id: + err_link_status: + err_info: + err_status: + /* Record completion result if applicable */ + if ( id == rndis->wait_id ) { + rndis->wait_id = 0; + rndis->wait_rc = rc; + } + err_len: + /* Free I/O buffer */ + free_iob ( iobuf ); +} + +/** + * Receive set OID completion + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + */ +static void rndis_rx_set_oid ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct rndis_set_completion *cmplt; + size_t len = iob_len ( iobuf ); + unsigned int id; + int rc; + + /* Sanity check */ + if ( len < sizeof ( *cmplt ) ) { + DBGC ( rndis, "RNDIS %s received underlength set completion:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_len; + } + cmplt = iobuf->data; + + /* Extract request ID */ + id = cmplt->id; /* Non-endian */ + + /* Check status */ + if ( cmplt->status ) { + DBGC ( rndis, "RNDIS %s received set completion failure " + "%#08x\n", rndis->name, le32_to_cpu ( cmplt->status ) ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EIO; + goto err_status; + } + + /* Success */ + rc = 0; + + err_status: + /* Record completion result if applicable */ + if ( id == rndis->wait_id ) { + rndis->wait_id = 0; + 
rndis->wait_rc = rc; + } + err_len: + /* Free I/O buffer */ + free_iob ( iobuf ); +} + +/** + * Query or set OID + * + * @v rndis RNDIS device + * @v oid Object ID + * @v data New OID value (or NULL to query current value) + * @v len Length of new OID value + * @ret rc Return status code + */ +static int rndis_oid ( struct rndis_device *rndis, unsigned int oid, + const void *data, size_t len ) { + int rc; + + /* Transmit query */ + if ( ( rc = rndis_tx_oid ( rndis, oid, data, len ) ) != 0 ) + return rc; + + /* Wait for response */ + if ( ( rc = rndis_wait ( rndis, oid ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Describe RNDIS device + * + * @v rndis RNDIS device + * @ret rc Return status code + */ +static int rndis_describe ( struct rndis_device *rndis ) { + struct net_device *netdev = rndis->netdev; + int rc; + + /* Assign device name (for debugging) */ + rndis->name = netdev->dev->name; + + /* Open RNDIS device to read MAC addresses */ + if ( ( rc = rndis->op->open ( rndis ) ) != 0 ) { + DBGC ( rndis, "RNDIS %s could not open: %s\n", + rndis->name, strerror ( rc ) ); + goto err_open; + } + + /* Initialise RNDIS */ + if ( ( rc = rndis_initialise ( rndis ) ) != 0 ) + goto err_initialise; + + /* Query permanent MAC address */ + if ( ( rc = rndis_oid ( rndis, RNDIS_OID_802_3_PERMANENT_ADDRESS, + NULL, 0 ) ) != 0 ) + goto err_query_permanent; + + /* Query current MAC address */ + if ( ( rc = rndis_oid ( rndis, RNDIS_OID_802_3_CURRENT_ADDRESS, + NULL, 0 ) ) != 0 ) + goto err_query_current; + + /* Get link status */ + if ( ( rc = rndis_oid ( rndis, RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + NULL, 0 ) ) != 0 ) + goto err_query_link; + + /* Halt RNDIS device */ + rndis_halt ( rndis ); + + /* Close RNDIS device */ + rndis->op->close ( rndis ); + + return 0; + + err_query_link: + err_query_current: + err_query_permanent: + rndis_halt ( rndis ); + err_initialise: + rndis->op->close ( rndis ); + err_open: + return rc; +} + +/** + * Receive indicate status message + * + * 
@v rndis RNDIS device + * @v iobuf I/O buffer + */ +static void rndis_rx_status ( struct rndis_device *rndis, + struct io_buffer *iobuf ) { + struct net_device *netdev = rndis->netdev; + struct rndis_indicate_status_message *msg; + size_t len = iob_len ( iobuf ); + unsigned int status; + int rc; + + /* Sanity check */ + if ( len < sizeof ( *msg ) ) { + DBGC ( rndis, "RNDIS %s received underlength status message:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -EINVAL; + goto err_len; + } + msg = iobuf->data; + + /* Extract status */ + status = le32_to_cpu ( msg->status ); + + /* Handle status */ + switch ( msg->status ) { + + case RNDIS_STATUS_MEDIA_CONNECT: + DBGC ( rndis, "RNDIS %s link is up\n", rndis->name ); + netdev_link_up ( netdev ); + break; + + case RNDIS_STATUS_MEDIA_DISCONNECT: + DBGC ( rndis, "RNDIS %s link is down\n", rndis->name ); + netdev_link_down ( netdev ); + break; + + case RNDIS_STATUS_WTF_WORLD: + /* Ignore */ + break; + + default: + DBGC ( rndis, "RNDIS %s unexpected status %#08x:\n", + rndis->name, status ); + DBGC_HDA ( rndis, 0, iobuf->data, len ); + rc = -ENOTSUP; + goto err_status; + } + + /* Free I/O buffer */ + free_iob ( iobuf ); + + return; + + err_status: + err_len: + /* Report error via network device statistics */ + netdev_rx_err ( netdev, iobuf, rc ); +} + +/** + * Receive RNDIS message + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @v type Message type + */ +static void rndis_rx_message ( struct rndis_device *rndis, + struct io_buffer *iobuf, unsigned int type ) { + struct net_device *netdev = rndis->netdev; + int rc; + + /* Handle packet */ + switch ( type ) { + + case RNDIS_PACKET_MSG: + rndis_rx_data ( rndis, iob_disown ( iobuf ) ); + break; + + case RNDIS_INITIALISE_CMPLT: + rndis_rx_initialise ( rndis, iob_disown ( iobuf ) ); + break; + + case RNDIS_QUERY_CMPLT: + rndis_rx_query_oid ( rndis, iob_disown ( iobuf ) ); + break; + + case RNDIS_SET_CMPLT: + rndis_rx_set_oid ( rndis, iob_disown ( 
iobuf ) ); + break; + + case RNDIS_INDICATE_STATUS_MSG: + rndis_rx_status ( rndis, iob_disown ( iobuf ) ); + break; + + default: + DBGC ( rndis, "RNDIS %s received unexpected type %#08x\n", + rndis->name, type ); + DBGC_HDA ( rndis, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EPROTO; + goto err_type; + } + + return; + + err_type: + /* Report error via network device statistics */ + netdev_rx_err ( netdev, iobuf, rc ); +} + +/** + * Receive packet from underlying transport layer + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + */ +void rndis_rx ( struct rndis_device *rndis, struct io_buffer *iobuf ) { + struct net_device *netdev = rndis->netdev; + struct rndis_header *header; + unsigned int type; + int rc; + + /* Sanity check */ + if ( iob_len ( iobuf ) < sizeof ( *header ) ) { + DBGC ( rndis, "RNDIS %s received underlength packet:\n", + rndis->name ); + DBGC_HDA ( rndis, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto drop; + } + header = iobuf->data; + + /* Parse and strip header */ + type = le32_to_cpu ( header->type ); + iob_pull ( iobuf, sizeof ( *header ) ); + + /* Handle message */ + rndis_rx_message ( rndis, iob_disown ( iobuf ), type ); + + return; + + drop: + /* Record error */ + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); +} + +/** + * Discard packet from underlying transport layer + * + * @v rndis RNDIS device + * @v iobuf I/O buffer + * @v rc Packet status code + */ +void rndis_rx_err ( struct rndis_device *rndis, struct io_buffer *iobuf, + int rc ) { + struct net_device *netdev = rndis->netdev; + + /* Record error */ + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); +} + +/** + * Set receive filter + * + * @v rndis RNDIS device + * @v filter Receive filter + * @ret rc Return status code + */ +static int rndis_filter ( struct rndis_device *rndis, unsigned int filter ) { + uint32_t value = cpu_to_le32 ( filter ); + int rc; + + /* Set receive filter */ + if ( ( rc = rndis_oid ( rndis, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, + 
&value, sizeof ( value ) ) ) != 0 ) { + DBGC ( rndis, "RNDIS %s could not set receive filter to %#08x: " + "%s\n", rndis->name, filter, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int rndis_open ( struct net_device *netdev ) { + struct rndis_device *rndis = netdev->priv; + int rc; + + /* Open RNDIS device */ + if ( ( rc = rndis->op->open ( rndis ) ) != 0 ) { + DBGC ( rndis, "RNDIS %s could not open: %s\n", + rndis->name, strerror ( rc ) ); + goto err_open; + } + + /* Initialise RNDIS */ + if ( ( rc = rndis_initialise ( rndis ) ) != 0 ) + goto err_initialise; + + /* Set receive filter */ + if ( ( rc = rndis_filter ( rndis, ( RNDIS_FILTER_UNICAST | + RNDIS_FILTER_MULTICAST | + RNDIS_FILTER_ALL_MULTICAST | + RNDIS_FILTER_BROADCAST | + RNDIS_FILTER_PROMISCUOUS ) ) ) != 0) + goto err_set_filter; + + /* Update link status */ + if ( ( rc = rndis_oid ( rndis, RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + NULL, 0 ) ) != 0 ) + goto err_query_link; + + return 0; + + err_query_link: + err_set_filter: + rndis_halt ( rndis ); + err_initialise: + rndis->op->close ( rndis ); + err_open: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void rndis_close ( struct net_device *netdev ) { + struct rndis_device *rndis = netdev->priv; + + /* Clear receive filter */ + rndis_filter ( rndis, 0 ); + + /* Halt RNDIS device */ + rndis_halt ( rndis ); + + /* Close RNDIS device */ + rndis->op->close ( rndis ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int rndis_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct rndis_device *rndis = netdev->priv; + + /* Transmit data packet */ + return rndis_tx_data ( rndis, iobuf ); +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void rndis_poll ( 
struct net_device *netdev ) { + struct rndis_device *rndis = netdev->priv; + + /* Poll RNDIS device */ + rndis->op->poll ( rndis ); +} + +/** Network device operations */ +static struct net_device_operations rndis_operations = { + .open = rndis_open, + .close = rndis_close, + .transmit = rndis_transmit, + .poll = rndis_poll, +}; + +/** + * Allocate RNDIS device + * + * @v priv_len Length of private data + * @ret rndis RNDIS device, or NULL on allocation failure + */ +struct rndis_device * alloc_rndis ( size_t priv_len ) { + struct net_device *netdev; + struct rndis_device *rndis; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *rndis ) + priv_len ); + if ( ! netdev ) + return NULL; + netdev_init ( netdev, &rndis_operations ); + rndis = netdev->priv; + rndis->netdev = netdev; + rndis->priv = ( ( ( void * ) rndis ) + sizeof ( *rndis ) ); + + return rndis; +} + +/** + * Register RNDIS device + * + * @v rndis RNDIS device + * @ret rc Return status code + * + * Note that this routine will open and use the RNDIS device in order + * to query the MAC address. The device must be immediately ready for + * use prior to registration. 
+ */ +int register_rndis ( struct rndis_device *rndis ) { + struct net_device *netdev = rndis->netdev; + int rc; + + /* Describe RNDIS device */ + if ( ( rc = rndis_describe ( rndis ) ) != 0 ) + goto err_describe; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) { + DBGC ( rndis, "RNDIS %s could not register: %s\n", + rndis->name, strerror ( rc ) ); + goto err_register; + } + + return 0; + + unregister_netdev ( netdev ); + err_register: + err_describe: + return rc; +} + +/** + * Unregister RNDIS device + * + * @v rndis RNDIS device + */ +void unregister_rndis ( struct rndis_device *rndis ) { + struct net_device *netdev = rndis->netdev; + + /* Unregister network device */ + unregister_netdev ( netdev ); +} + +/** + * Free RNDIS device + * + * @v rndis RNDIS device + */ +void free_rndis ( struct rndis_device *rndis ) { + struct net_device *netdev = rndis->netdev; + + /* Free network device */ + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} diff --git a/src/net/stp.c b/src/net/stp.c new file mode 100644 index 00000000..3d78400a --- /dev/null +++ b/src/net/stp.c @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Spanning Tree Protocol (STP) + * + */ + +/* Disambiguate the various error causes */ +#define ENOTSUP_PROTOCOL __einfo_error ( EINFO_ENOTSUP_PROTOCOL ) +#define EINFO_ENOTSUP_PROTOCOL \ + __einfo_uniqify ( EINFO_ENOTSUP, 0x02, \ + "Non-STP packet received" ) +#define ENOTSUP_VERSION __einfo_error ( EINFO_ENOTSUP_VERSION ) +#define EINFO_ENOTSUP_VERSION \ + __einfo_uniqify ( EINFO_ENOTSUP, 0x03, \ + "Legacy STP packet received" ) +#define ENOTSUP_TYPE __einfo_error ( EINFO_ENOTSUP_TYPE ) +#define EINFO_ENOTSUP_TYPE \ + __einfo_uniqify ( EINFO_ENOTSUP, 0x04, \ + "Non-RSTP packet received" ) + +/** + * Process incoming STP packets + * + * @v iobuf I/O buffer + * @v netdev Network device + * @v ll_source Link-layer source address + * @v flags Packet flags + * @ret rc Return status code + */ +static int stp_rx ( struct io_buffer *iobuf, struct net_device *netdev, + const void *ll_dest __unused, + const void *ll_source __unused, + unsigned int flags __unused ) { + struct stp_bpdu *stp; + unsigned int hello; + int rc; + + /* Sanity check */ + if ( iob_len ( iobuf ) < sizeof ( *stp ) ) { + DBGC ( netdev, "STP %s received underlength packet (%zd " + "bytes):\n", netdev->name, iob_len ( iobuf ) ); + DBGC_HDA ( netdev, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto done; + } + stp = iobuf->data; + + /* Ignore non-RSTP packets */ + if ( stp->protocol != htons ( STP_PROTOCOL ) ) { + DBGC ( netdev, "STP %s ignoring non-STP packet (protocol " + "%#04x)\n", netdev->name, ntohs ( stp->protocol ) ); + rc = -ENOTSUP_PROTOCOL; + goto done; + } + if ( stp->version < STP_VERSION_RSTP ) { + DBGC ( netdev, "STP %s received 
legacy STP packet (version " + "%#02x)\n", netdev->name, stp->version ); + rc = -ENOTSUP_VERSION; + goto done; + } + if ( stp->type != STP_TYPE_RSTP ) { + DBGC ( netdev, "STP %s received non-RSTP packet (type %#02x)\n", + netdev->name, stp->type ); + rc = -ENOTSUP_TYPE; + goto done; + } + + /* Dump information */ + DBGC2 ( netdev, "STP %s %s port %#04x flags %#02x hello %d delay %d\n", + netdev->name, eth_ntoa ( stp->sender.mac ), ntohs ( stp->port ), + stp->flags, ntohs ( stp->hello ), ntohs ( stp->delay ) ); + + /* Check if port is forwarding */ + if ( ! ( stp->flags & STP_FL_FORWARDING ) ) { + /* Port is not forwarding: block link for two hello times */ + DBGC ( netdev, "STP %s %s port %#04x flags %#02x is not " + "forwarding\n", + netdev->name, eth_ntoa ( stp->sender.mac ), + ntohs ( stp->port ), stp->flags ); + hello = ( ntohs ( stp->hello ) * ( TICKS_PER_SEC / 256 ) ); + netdev_link_block ( netdev, ( hello * 2 ) ); + rc = -ENETUNREACH; + goto done; + } + + /* Success */ + if ( netdev_link_blocked ( netdev ) ) { + DBGC ( netdev, "STP %s %s port %#04x flags %#02x is " + "forwarding\n", + netdev->name, eth_ntoa ( stp->sender.mac ), + ntohs ( stp->port ), stp->flags ); + } + netdev_link_unblock ( netdev ); + rc = 0; + + done: + free_iob ( iobuf ); + return rc; +} + +/** + * Transcribe STP address + * + * @v net_addr STP address + * @ret string "" + * + * This operation is meaningless for the STP protocol. + */ +static const char * stp_ntoa ( const void *net_addr __unused ) { + return ""; +} + +/** STP network protocol */ +struct net_protocol stp_protocol __net_protocol = { + .name = "STP", + .net_proto = htons ( ETH_P_STP ), + .rx = stp_rx, + .ntoa = stp_ntoa, +}; diff --git a/src/net/tcp/httpauth.c b/src/net/tcp/httpauth.c new file mode 100644 index 00000000..2c57e3d4 --- /dev/null +++ b/src/net/tcp/httpauth.c @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Hyper Text Transfer Protocol (HTTP) authentication + * + */ + +#include +#include +#include +#include + +/** + * Identify authentication scheme + * + * @v http HTTP transaction + * @v name Scheme name + * @ret auth Authentication scheme, or NULL + */ +static struct http_authentication * http_authentication ( const char *name ) { + struct http_authentication *auth; + + /* Identify authentication scheme */ + for_each_table_entry ( auth, HTTP_AUTHENTICATIONS ) { + if ( strcasecmp ( name, auth->name ) == 0 ) + return auth; + } + + return NULL; +} + +/** + * Parse HTTP "WWW-Authenticate" header + * + * @v http HTTP transaction + * @v line Remaining header line + * @ret rc Return status code + */ +static int http_parse_www_authenticate ( struct http_transaction *http, + char *line ) { + struct http_authentication *auth; + char *name; + int rc; + + /* Get scheme name */ + name = http_token ( &line, NULL ); + if ( ! 
name ) { + DBGC ( http, "HTTP %p malformed WWW-Authenticate \"%s\"\n", + http, line ); + return -EPROTO; + } + + /* Identify scheme */ + auth = http_authentication ( name ); + if ( ! auth ) { + DBGC ( http, "HTTP %p unrecognised authentication scheme " + "\"%s\"\n", http, name ); + /* Ignore; the server may offer other schemes */ + return 0; + } + + /* Use first supported scheme */ + if ( http->response.auth.auth ) + return 0; + http->response.auth.auth = auth; + + /* Parse remaining header line */ + if ( ( rc = auth->parse ( http, line ) ) != 0 ) { + DBGC ( http, "HTTP %p could not parse %s WWW-Authenticate " + "\"%s\": %s\n", http, name, line, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** HTTP "WWW-Authenticate" header */ +struct http_response_header +http_response_www_authenticate __http_response_header = { + .name = "WWW-Authenticate", + .parse = http_parse_www_authenticate, +}; + +/** + * Construct HTTP "Authorization" header + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_authorization ( struct http_transaction *http, + char *buf, size_t len ) { + struct http_authentication *auth = http->request.auth.auth; + size_t used; + int auth_len; + int rc; + + /* Do nothing unless we have an authentication scheme */ + if ( ! auth ) + return 0; + + /* Construct header */ + used = snprintf ( buf, len, "%s ", auth->name ); + auth_len = auth->format ( http, ( buf + used ), + ( ( used < len ) ? 
( len - used ) : 0 ) ); + if ( auth_len < 0 ) { + rc = auth_len; + return rc; + } + used += auth_len; + + return used; +} + +/** HTTP "Authorization" header */ +struct http_request_header http_request_authorization __http_request_header = { + .name = "Authorization", + .format = http_format_authorization, +}; diff --git a/src/net/tcp/httpbasic.c b/src/net/tcp/httpbasic.c new file mode 100644 index 00000000..52a67063 --- /dev/null +++ b/src/net/tcp/httpbasic.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Hyper Text Transfer Protocol (HTTP) Basic authentication + * + */ + +#include +#include +#include +#include +#include + +/* Disambiguate the various error causes */ +#define EACCES_USERNAME __einfo_error ( EINFO_EACCES_USERNAME ) +#define EINFO_EACCES_USERNAME \ + __einfo_uniqify ( EINFO_EACCES, 0x01, \ + "No username available for Basic authentication" ) + +/** + * Parse HTTP "WWW-Authenticate" header for Basic authentication + * + * @v http HTTP transaction + * @v line Remaining header line + * @ret rc Return status code + */ +static int http_parse_basic_auth ( struct http_transaction *http, + char *line __unused ) { + + /* Allow HTTP request to be retried if the request had not + * already tried authentication. + */ + if ( ! http->request.auth.auth ) + http->response.flags |= HTTP_RESPONSE_RETRY; + + return 0; +} + +/** + * Perform HTTP Basic authentication + * + * @v http HTTP transaction + * @ret rc Return status code + */ +static int http_basic_authenticate ( struct http_transaction *http ) { + struct http_request_auth_basic *req = &http->request.auth.basic; + + /* Record username and password */ + if ( ! http->uri->user ) { + DBGC ( http, "HTTP %p has no username for Basic " + "authentication\n", http ); + return -EACCES_USERNAME; + } + req->username = http->uri->user; + req->password = ( http->uri->password ? 
http->uri->password : "" ); + + return 0; +} + +/** + * Construct HTTP "Authorization" header for Basic authentication + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_basic_auth ( struct http_transaction *http, + char *buf, size_t len ) { + struct http_request_auth_basic *req = &http->request.auth.basic; + size_t user_pw_len = ( strlen ( req->username ) + 1 /* ":" */ + + strlen ( req->password ) ); + char user_pw[ user_pw_len + 1 /* NUL */ ]; + + /* Sanity checks */ + assert ( req->username != NULL ); + assert ( req->password != NULL ); + + /* Construct "user:password" string */ + snprintf ( user_pw, sizeof ( user_pw ), "%s:%s", + req->username, req->password ); + + /* Construct response */ + return base64_encode ( user_pw, user_pw_len, buf, len ); +} + +/** HTTP Basic authentication scheme */ +struct http_authentication http_basic_auth __http_authentication = { + .name = "Basic", + .parse = http_parse_basic_auth, + .authenticate = http_basic_authenticate, + .format = http_format_basic_auth, +}; + +/* Drag in HTTP authentication support */ +REQUIRING_SYMBOL ( http_basic_auth ); +REQUIRE_OBJECT ( httpauth ); diff --git a/src/net/tcp/httpblock.c b/src/net/tcp/httpblock.c new file mode 100644 index 00000000..1abd6b34 --- /dev/null +++ b/src/net/tcp/httpblock.c @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Hyper Text Transfer Protocol (HTTP) block device + * + */ + +#include +#include +#include +#include +#include +#include + +/** Block size used for HTTP block device requests */ +#define HTTP_BLKSIZE 512 + +/** + * Read from block device + * + * @v http HTTP transaction + * @v data Data interface + * @v lba Starting logical block address + * @v count Number of logical blocks + * @v buffer Data buffer + * @v len Length of data buffer + * @ret rc Return status code + */ +int http_block_read ( struct http_transaction *http, struct interface *data, + uint64_t lba, unsigned int count, userptr_t buffer, + size_t len ) { + struct http_request_range range; + int rc; + + /* Sanity check */ + assert ( len == ( count * HTTP_BLKSIZE ) ); + + /* Construct request range descriptor */ + range.start = ( lba * HTTP_BLKSIZE ); + range.len = len; + + /* Start a range request to retrieve the block(s) */ + if ( ( rc = http_open ( data, &http_get, http->uri, &range, + NULL ) ) != 0 ) + goto err_open; + + /* Insert block device translator */ + if ( ( rc = block_translate ( data, buffer, len ) ) != 0 ) { + DBGC ( http, "HTTP %p could not insert block translator: %s\n", + http, strerror ( rc ) ); + goto err_translate; + } + + return 0; + + err_translate: + intf_restart ( data, rc ); + err_open: + return rc; +} + +/** + * Read block device capacity + * + * @v control Control interface + * @v data Data interface + * @ret rc Return status code + */ +int http_block_read_capacity ( struct 
http_transaction *http, + struct interface *data ) { + int rc; + + /* Start a HEAD request to retrieve the capacity */ + if ( ( rc = http_open ( data, &http_head, http->uri, NULL, + NULL ) ) != 0 ) + goto err_open; + + /* Insert block device translator */ + if ( ( rc = block_translate ( data, UNULL, HTTP_BLKSIZE ) ) != 0 ) { + DBGC ( http, "HTTP %p could not insert block translator: %s\n", + http, strerror ( rc ) ); + goto err_translate; + } + + return 0; + + err_translate: + intf_restart ( data, rc ); + err_open: + return rc; +} diff --git a/src/net/tcp/httpconn.c b/src/net/tcp/httpconn.c new file mode 100644 index 00000000..5121ff6c --- /dev/null +++ b/src/net/tcp/httpconn.c @@ -0,0 +1,324 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Hyper Text Transfer Protocol (HTTP) connection management + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** HTTP pooled connection expiry time */ +#define HTTP_CONN_EXPIRY ( 10 * TICKS_PER_SEC ) + +/** HTTP connection pool */ +static LIST_HEAD ( http_connection_pool ); + +/** + * Identify HTTP scheme + * + * @v uri URI + * @ret scheme HTTP scheme, or NULL + */ +static struct http_scheme * http_scheme ( struct uri *uri ) { + struct http_scheme *scheme; + + /* Sanity check */ + if ( ! uri->scheme ) + return NULL; + + /* Identify scheme */ + for_each_table_entry ( scheme, HTTP_SCHEMES ) { + if ( strcmp ( uri->scheme, scheme->name ) == 0 ) + return scheme; + } + + return NULL; +} + +/** + * Free HTTP connection + * + * @v refcnt Reference count + */ +static void http_conn_free ( struct refcnt *refcnt ) { + struct http_connection *conn = + container_of ( refcnt, struct http_connection, refcnt ); + + /* Free connection */ + uri_put ( conn->uri ); + free ( conn ); +} + +/** + * Close HTTP connection + * + * @v conn HTTP connection + * @v rc Reason for close + */ +static void http_conn_close ( struct http_connection *conn, int rc ) { + + /* Remove from connection pool, if applicable */ + pool_del ( &conn->pool ); + + /* Shut down interfaces */ + intf_shutdown ( &conn->socket, rc ); + intf_shutdown ( &conn->xfer, rc ); + if ( rc == 0 ) { + DBGC2 ( conn, "HTTPCONN %p closed %s://%s\n", + conn, conn->scheme->name, conn->uri->host ); + } else { + DBGC ( conn, "HTTPCONN %p closed %s://%s: %s\n", + conn, conn->scheme->name, conn->uri->host, + strerror ( rc ) ); + } +} + +/** + * Disconnect idle HTTP connection + * + * @v pool Pooled connection + */ +static void http_conn_expired ( struct pooled_connection *pool ) { + struct http_connection *conn = + container_of ( pool, struct http_connection, pool ); + + /* Close connection */ + 
http_conn_close ( conn, 0 /* Not an error to close idle connection */ ); +} + +/** + * Receive data from transport layer interface + * + * @v http HTTP connection + * @v iobuf I/O buffer + * @v meta Transfer metadata + * @ret rc Return status code + */ +static int http_conn_socket_deliver ( struct http_connection *conn, + struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + + /* Mark connection as alive */ + pool_alive ( &conn->pool ); + + /* Pass on to data transfer interface */ + return xfer_deliver ( &conn->xfer, iobuf, meta ); +} + +/** + * Close HTTP connection transport layer interface + * + * @v http HTTP connection + * @v rc Reason for close + */ +static void http_conn_socket_close ( struct http_connection *conn, int rc ) { + + /* If we are reopenable (i.e. we are a recycled connection + * from the connection pool, and we have received no data from + * the underlying socket since we were pooled), then suggest + * that the client should reopen the connection. + */ + if ( pool_is_reopenable ( &conn->pool ) ) + pool_reopen ( &conn->xfer ); + + /* Close the connection */ + http_conn_close ( conn, rc ); +} + +/** + * Recycle this connection after closing + * + * @v http HTTP connection + */ +static void http_conn_xfer_recycle ( struct http_connection *conn ) { + + /* Mark connection as recyclable */ + pool_recyclable ( &conn->pool ); + DBGC2 ( conn, "HTTPCONN %p keepalive enabled\n", conn ); +} + +/** + * Close HTTP connection data transfer interface + * + * @v conn HTTP connection + * @v rc Reason for close + */ +static void http_conn_xfer_close ( struct http_connection *conn, int rc ) { + + /* Add to the connection pool if keepalive is enabled and no + * error occurred. 
+ */ + if ( ( rc == 0 ) && pool_is_recyclable ( &conn->pool ) ) { + intf_restart ( &conn->xfer, rc ); + pool_add ( &conn->pool, &http_connection_pool, + HTTP_CONN_EXPIRY ); + DBGC2 ( conn, "HTTPCONN %p pooled %s://%s\n", + conn, conn->scheme->name, conn->uri->host ); + return; + } + + /* Otherwise, close the connection */ + http_conn_close ( conn, rc ); +} + +/** HTTP connection socket interface operations */ +static struct interface_operation http_conn_socket_operations[] = { + INTF_OP ( xfer_deliver, struct http_connection *, + http_conn_socket_deliver ), + INTF_OP ( intf_close, struct http_connection *, + http_conn_socket_close ), +}; + +/** HTTP connection socket interface descriptor */ +static struct interface_descriptor http_conn_socket_desc = + INTF_DESC_PASSTHRU ( struct http_connection, socket, + http_conn_socket_operations, xfer ); + +/** HTTP connection data transfer interface operations */ +static struct interface_operation http_conn_xfer_operations[] = { + INTF_OP ( pool_recycle, struct http_connection *, + http_conn_xfer_recycle ), + INTF_OP ( intf_close, struct http_connection *, + http_conn_xfer_close ), +}; + +/** HTTP connection data transfer interface descriptor */ +static struct interface_descriptor http_conn_xfer_desc = + INTF_DESC_PASSTHRU ( struct http_connection, xfer, + http_conn_xfer_operations, socket ); + +/** + * Connect to an HTTP server + * + * @v xfer Data transfer interface + * @v uri Connection URI + * @ret rc Return status code + * + * HTTP connections are pooled. The caller should be prepared to + * receive a pool_reopen() message. + */ +int http_connect ( struct interface *xfer, struct uri *uri ) { + struct http_connection *conn; + struct http_scheme *scheme; + struct sockaddr_tcpip server; + struct interface *socket; + unsigned int port; + int rc; + + /* Identify scheme */ + scheme = http_scheme ( uri ); + if ( ! scheme ) + return -ENOTSUP; + + /* Sanity check */ + if ( ! 
uri->host ) + return -EINVAL; + + /* Identify port */ + port = uri_port ( uri, scheme->port ); + + /* Look for a reusable connection in the pool. Reuse the most + * recent connection in order to accommodate authentication + * schemes that break the stateless nature of HTTP and rely on + * the same connection being reused for authentication + * responses. + */ + list_for_each_entry_reverse ( conn, &http_connection_pool, pool.list ) { + + /* Sanity checks */ + assert ( conn->uri != NULL ); + assert ( conn->uri->host != NULL ); + + /* Reuse connection, if possible */ + if ( ( scheme == conn->scheme ) && + ( strcmp ( uri->host, conn->uri->host ) == 0 ) && + ( port == uri_port ( conn->uri, scheme->port ) ) ) { + + /* Remove from connection pool, stop timer, + * attach to parent interface, and return. + */ + pool_del ( &conn->pool ); + intf_plug_plug ( &conn->xfer, xfer ); + DBGC2 ( conn, "HTTPCONN %p reused %s://%s:%d\n", conn, + conn->scheme->name, conn->uri->host, port ); + return 0; + } + } + + /* Allocate and initialise structure */ + conn = zalloc ( sizeof ( *conn ) ); + if ( ! 
conn ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &conn->refcnt, http_conn_free ); + conn->uri = uri_get ( uri ); + conn->scheme = scheme; + intf_init ( &conn->socket, &http_conn_socket_desc, &conn->refcnt ); + intf_init ( &conn->xfer, &http_conn_xfer_desc, &conn->refcnt ); + pool_init ( &conn->pool, http_conn_expired, &conn->refcnt ); + + /* Open socket */ + memset ( &server, 0, sizeof ( server ) ); + server.st_port = htons ( port ); + socket = &conn->socket; + if ( scheme->filter && + ( ( rc = scheme->filter ( socket, uri->host, &socket ) ) != 0 ) ) + goto err_filter; + if ( ( rc = xfer_open_named_socket ( socket, SOCK_STREAM, + ( struct sockaddr * ) &server, + uri->host, NULL ) ) != 0 ) + goto err_open; + + /* Attach to parent interface, mortalise self, and return */ + intf_plug_plug ( &conn->xfer, xfer ); + ref_put ( &conn->refcnt ); + + DBGC2 ( conn, "HTTPCONN %p created %s://%s:%d\n", conn, + conn->scheme->name, conn->uri->host, port ); + return 0; + + err_open: + err_filter: + DBGC2 ( conn, "HTTPCONN %p could not create %s://%s:%d: %s\n", conn, + conn->scheme->name, conn->uri->host, port, strerror ( rc ) ); + http_conn_close ( conn, rc ); + ref_put ( &conn->refcnt ); + err_alloc: + return rc; +} diff --git a/src/net/tcp/httpdigest.c b/src/net/tcp/httpdigest.c new file mode 100644 index 00000000..4074078c --- /dev/null +++ b/src/net/tcp/httpdigest.c @@ -0,0 +1,309 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Hyper Text Transfer Protocol (HTTP) Digest authentication + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Disambiguate the various error causes */ +#define EACCES_USERNAME __einfo_error ( EINFO_EACCES_USERNAME ) +#define EINFO_EACCES_USERNAME \ + __einfo_uniqify ( EINFO_EACCES, 0x01, \ + "No username available for Digest authentication" ) + +/** An HTTP Digest "WWW-Authenticate" response field */ +struct http_digest_field { + /** Name */ + const char *name; + /** Offset */ + size_t offset; +}; + +/** Define an HTTP Digest "WWW-Authenticate" response field */ +#define HTTP_DIGEST_FIELD( _name ) { \ + .name = #_name, \ + .offset = offsetof ( struct http_transaction, \ + response.auth.digest._name ), \ + } + +/** + * Set HTTP Digest "WWW-Authenticate" response field value + * + * @v http HTTP transaction + * @v field Response field + * @v value Field value + */ +static inline void +http_digest_field ( struct http_transaction *http, + struct http_digest_field *field, char *value ) { + char **ptr; + + ptr = ( ( ( void * ) http ) + field->offset ); + *ptr = value; +} + +/** HTTP Digest "WWW-Authenticate" fields */ +static struct http_digest_field http_digest_fields[] = { + HTTP_DIGEST_FIELD ( realm ), + HTTP_DIGEST_FIELD ( qop ), + HTTP_DIGEST_FIELD ( algorithm ), + HTTP_DIGEST_FIELD ( nonce ), + HTTP_DIGEST_FIELD ( opaque ), +}; + +/** + * Parse HTTP "WWW-Authenticate" header for Digest authentication + * + * @v http 
HTTP transaction + * @v line Remaining header line + * @ret rc Return status code + */ +static int http_parse_digest_auth ( struct http_transaction *http, + char *line ) { + struct http_digest_field *field; + char *key; + char *value; + unsigned int i; + + /* Process fields */ + while ( ( key = http_token ( &line, &value ) ) ) { + for ( i = 0 ; i < ( sizeof ( http_digest_fields ) / + sizeof ( http_digest_fields[0] ) ) ; i++){ + field = &http_digest_fields[i]; + if ( strcasecmp ( key, field->name ) == 0 ) + http_digest_field ( http, field, value ); + } + } + + /* Allow HTTP request to be retried if the request had not + * already tried authentication. + */ + if ( ! http->request.auth.auth ) + http->response.flags |= HTTP_RESPONSE_RETRY; + + return 0; +} + +/** + * Initialise HTTP Digest + * + * @v ctx Digest context + * @v string Initial string + */ +static void http_digest_init ( struct md5_context *ctx ) { + + /* Initialise MD5 digest */ + digest_init ( &md5_algorithm, ctx ); +} + +/** + * Update HTTP Digest with new data + * + * @v ctx Digest context + * @v string String to append + */ +static void http_digest_update ( struct md5_context *ctx, const char *string ) { + static const char colon = ':'; + + /* Add (possibly colon-separated) field to MD5 digest */ + if ( ctx->len ) + digest_update ( &md5_algorithm, ctx, &colon, sizeof ( colon ) ); + digest_update ( &md5_algorithm, ctx, string, strlen ( string ) ); +} + +/** + * Finalise HTTP Digest + * + * @v ctx Digest context + * @v out Buffer for digest output + * @v len Buffer length + */ +static void http_digest_final ( struct md5_context *ctx, char *out, + size_t len ) { + uint8_t digest[MD5_DIGEST_SIZE]; + + /* Finalise and base16-encode MD5 digest */ + digest_final ( &md5_algorithm, ctx, digest ); + base16_encode ( digest, sizeof ( digest ), out, len ); +} + +/** + * Perform HTTP Digest authentication + * + * @v http HTTP transaction + * @ret rc Return status code + */ +static int http_digest_authenticate ( 
struct http_transaction *http ) { + struct http_request_auth_digest *req = &http->request.auth.digest; + struct http_response_auth_digest *rsp = &http->response.auth.digest; + char ha1[ base16_encoded_len ( MD5_DIGEST_SIZE ) + 1 /* NUL */ ]; + char ha2[ base16_encoded_len ( MD5_DIGEST_SIZE ) + 1 /* NUL */ ]; + static const char md5sess[] = "MD5-sess"; + static const char md5[] = "MD5"; + struct md5_context ctx; + const char *password; + + /* Check for required response parameters */ + if ( ! rsp->realm ) { + DBGC ( http, "HTTP %p has no realm for Digest authentication\n", + http ); + return -EINVAL; + } + if ( ! rsp->nonce ) { + DBGC ( http, "HTTP %p has no nonce for Digest authentication\n", + http ); + return -EINVAL; + } + + /* Record username and password */ + if ( ! http->uri->user ) { + DBGC ( http, "HTTP %p has no username for Digest " + "authentication\n", http ); + return -EACCES_USERNAME; + } + req->username = http->uri->user; + password = ( http->uri->password ? http->uri->password : "" ); + + /* Handle quality of protection */ + if ( rsp->qop ) { + + /* Use "auth" in subsequent request */ + req->qop = "auth"; + + /* Generate a client nonce */ + snprintf ( req->cnonce, sizeof ( req->cnonce ), + "%08lx", random() ); + + /* Determine algorithm */ + req->algorithm = md5; + if ( rsp->algorithm && + ( strcasecmp ( rsp->algorithm, md5sess ) == 0 ) ) { + req->algorithm = md5sess; + } + } + + /* Generate HA1 */ + http_digest_init ( &ctx ); + http_digest_update ( &ctx, req->username ); + http_digest_update ( &ctx, rsp->realm ); + http_digest_update ( &ctx, password ); + http_digest_final ( &ctx, ha1, sizeof ( ha1 ) ); + if ( req->algorithm == md5sess ) { + http_digest_init ( &ctx ); + http_digest_update ( &ctx, ha1 ); + http_digest_update ( &ctx, rsp->nonce ); + http_digest_update ( &ctx, req->cnonce ); + http_digest_final ( &ctx, ha1, sizeof ( ha1 ) ); + } + + /* Generate HA2 */ + http_digest_init ( &ctx ); + http_digest_update ( &ctx, http->request.method->name 
); + http_digest_update ( &ctx, http->request.uri ); + http_digest_final ( &ctx, ha2, sizeof ( ha2 ) ); + + /* Generate response */ + http_digest_init ( &ctx ); + http_digest_update ( &ctx, ha1 ); + http_digest_update ( &ctx, rsp->nonce ); + if ( req->qop ) { + http_digest_update ( &ctx, HTTP_DIGEST_NC ); + http_digest_update ( &ctx, req->cnonce ); + http_digest_update ( &ctx, req->qop ); + } + http_digest_update ( &ctx, ha2 ); + http_digest_final ( &ctx, req->response, sizeof ( req->response ) ); + + return 0; +} + +/** + * Construct HTTP "Authorization" header for Digest authentication + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_digest_auth ( struct http_transaction *http, + char *buf, size_t len ) { + struct http_request_auth_digest *req = &http->request.auth.digest; + struct http_response_auth_digest *rsp = &http->response.auth.digest; + size_t used = 0; + + /* Sanity checks */ + assert ( rsp->realm != NULL ); + assert ( rsp->nonce != NULL ); + assert ( req->username != NULL ); + if ( req->qop ) { + assert ( req->algorithm != NULL ); + assert ( req->cnonce[0] != '\0' ); + } + assert ( req->response[0] != '\0' ); + + /* Construct response */ + used += ssnprintf ( ( buf + used ), ( len - used ), + "realm=\"%s\", nonce=\"%s\", uri=\"%s\", " + "username=\"%s\"", rsp->realm, rsp->nonce, + http->request.uri, req->username ); + if ( rsp->opaque ) { + used += ssnprintf ( ( buf + used ), ( len - used ), + ", opaque=\"%s\"", rsp->opaque ); + } + if ( req->qop ) { + used += ssnprintf ( ( buf + used ), ( len - used ), + ", qop=%s, algorithm=%s, cnonce=\"%s\", " + "nc=" HTTP_DIGEST_NC, req->qop, + req->algorithm, req->cnonce ); + } + used += ssnprintf ( ( buf + used ), ( len - used ), + ", response=\"%s\"", req->response ); + + return used; +} + +/** HTTP Digest authentication scheme */ +struct http_authentication http_digest_auth __http_authentication = { 
+ .name = "Digest", + .parse = http_parse_digest_auth, + .authenticate = http_digest_authenticate, + .format = http_format_digest_auth, +}; + +/* Drag in HTTP authentication support */ +REQUIRING_SYMBOL ( http_digest_auth ); +REQUIRE_OBJECT ( httpauth ); diff --git a/src/net/tcp/httpgce.c b/src/net/tcp/httpgce.c new file mode 100644 index 00000000..c5d87902 --- /dev/null +++ b/src/net/tcp/httpgce.c @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Google Compute Engine (GCE) metadata retrieval + * + * For some unspecified "security" reason, the Google Compute Engine + * metadata server will refuse any requests that do not include the + * non-standard HTTP header "Metadata-Flavor: Google". + */ + +#include +#include +#include + +/** Metadata host name + * + * This is used to identify metadata requests, in the absence of any + * more robust mechanism. 
+ */ +#define GCE_METADATA_HOST_NAME "metadata.google.internal" + +/** + * Construct HTTP "Metadata-Flavor" header + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_metadata_flavor ( struct http_transaction *http, + char *buf, size_t len ) { + + /* Do nothing unless this appears to be a Google Compute + * Engine metadata request. + */ + if ( strcasecmp ( http->request.host, GCE_METADATA_HOST_NAME ) != 0 ) + return 0; + + /* Construct host URI */ + return snprintf ( buf, len, "Google" ); +} + +/** HTTP "Metadata-Flavor" header */ +struct http_request_header http_request_metadata_flavor __http_request_header ={ + .name = "Metadata-Flavor", + .format = http_format_metadata_flavor, +}; diff --git a/src/net/tcp/httpntlm.c b/src/net/tcp/httpntlm.c new file mode 100644 index 00000000..25187bd1 --- /dev/null +++ b/src/net/tcp/httpntlm.c @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** + * @file + * + * Hyper Text Transfer Protocol (HTTP) NTLM authentication + * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct http_authentication http_ntlm_auth __http_authentication; + +/** Workstation name used for NTLM authentication */ +static const char http_ntlm_workstation[] = "iPXE"; + +/** + * Parse HTTP "WWW-Authenticate" header for NTLM authentication + * + * @v http HTTP transaction + * @v line Remaining header line + * @ret rc Return status code + */ +static int http_parse_ntlm_auth ( struct http_transaction *http, char *line ) { + struct http_response_auth_ntlm *rsp = &http->response.auth.ntlm; + char *copy; + int len; + int rc; + + /* Create temporary copy of Base64-encoded challenge message */ + copy = strdup ( line ); + if ( ! copy ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Decode challenge message, overwriting the original */ + len = base64_decode ( copy, line, strlen ( line ) ); + if ( len < 0 ) { + rc = len; + DBGC ( http, "HTTP %p could not decode NTLM challenge " + "\"%s\": %s\n", http, copy, strerror ( rc ) ); + goto err_decode; + } + + /* Parse challenge, if present */ + if ( len ) { + rsp->challenge = ( ( void * ) line ); + if ( ( rc = ntlm_challenge ( rsp->challenge, len, + &rsp->info ) ) != 0 ) { + DBGC ( http, "HTTP %p could not parse NTLM challenge: " + "%s\n", http, strerror ( rc ) ); + goto err_challenge; + } + } + + /* Allow HTTP request to be retried if the request had not + * already tried authentication. Note that NTLM requires an + * additional round trip to obtain the challenge message, + * which is not present in the initial WWW-Authenticate. 
+ */ + if ( ( http->request.auth.auth == NULL ) || + ( ( http->request.auth.auth == &http_ntlm_auth ) && + ( http->request.auth.ntlm.len == 0 ) && len ) ) { + http->response.flags |= HTTP_RESPONSE_RETRY; + } + + /* Success */ + rc = 0; + + err_challenge: + err_decode: + free ( copy ); + err_alloc: + return rc; +} + +/** + * Perform HTTP NTLM authentication + * + * @v http HTTP transaction + * @ret rc Return status code + */ +static int http_ntlm_authenticate ( struct http_transaction *http ) { + struct http_request_auth_ntlm *req = &http->request.auth.ntlm; + struct http_response_auth_ntlm *rsp = &http->response.auth.ntlm; + struct ntlm_key key; + const char *domain; + char *username; + const char *password; + + /* If we have no challenge yet, then just send a Negotiate message */ + if ( ! rsp->challenge ) { + DBGC ( http, "HTTP %p sending NTLM Negotiate\n", http ); + return 0; + } + + /* Record username */ + if ( ! http->uri->user ) { + DBGC ( http, "HTTP %p has no username for NTLM " + "authentication\n", http ); + return -EACCES; + } + req->username = http->uri->user; + password = ( http->uri->password ? 
http->uri->password : "" ); + + /* Split NetBIOS [domain\]username */ + username = ( ( char * ) req->username ); + domain = netbios_domain ( &username ); + + /* Generate key */ + ntlm_key ( domain, username, password, &key ); + + /* Generate responses */ + ntlm_response ( &rsp->info, &key, NULL, &req->lm, &req->nt ); + + /* Calculate Authenticate message length */ + req->len = ntlm_authenticate_len ( &rsp->info, domain, username, + http_ntlm_workstation ); + + /* Restore NetBIOS [domain\]username */ + netbios_domain_undo ( domain, username ); + + return 0; +} + +/** + * Construct HTTP "Authorization" header for NTLM authentication + * + * @v http HTTP transaction + * @v buf Buffer + * @v len Length of buffer + * @ret len Length of header value, or negative error + */ +static int http_format_ntlm_auth ( struct http_transaction *http, + char *buf, size_t len ) { + struct http_request_auth_ntlm *req = &http->request.auth.ntlm; + struct http_response_auth_ntlm *rsp = &http->response.auth.ntlm; + struct ntlm_authenticate *auth; + const char *domain; + char *username; + size_t check; + + /* If we have no challenge yet, then just send a Negotiate message */ + if ( ! rsp->challenge ) { + return base64_encode ( &ntlm_negotiate, + sizeof ( ntlm_negotiate ), buf, len ); + } + + /* Skip allocation if just calculating length */ + if ( ! len ) + return base64_encoded_len ( req->len ); + + /* Allocate temporary buffer for Authenticate message */ + auth = malloc ( req->len ); + if ( ! 
auth ) + return -ENOMEM; + + /* Split NetBIOS [domain\]username */ + username = ( ( char * ) req->username ); + domain = netbios_domain ( &username ); + + /* Construct raw Authenticate message */ + check = ntlm_authenticate ( &rsp->info, domain, username, + http_ntlm_workstation, &req->lm, + &req->nt, auth ); + assert ( check == req->len ); + + /* Restore NetBIOS [domain\]username */ + netbios_domain_undo ( domain, username ); + + /* Base64-encode Authenticate message */ + len = base64_encode ( auth, req->len, buf, len ); + + /* Free raw Authenticate message */ + free ( auth ); + + return len; +} + +/** HTTP NTLM authentication scheme */ +struct http_authentication http_ntlm_auth __http_authentication = { + .name = "NTLM", + .parse = http_parse_ntlm_auth, + .authenticate = http_ntlm_authenticate, + .format = http_format_ntlm_auth, +}; + +/* Drag in HTTP authentication support */ +REQUIRING_SYMBOL ( http_ntlm_auth ); +REQUIRE_OBJECT ( httpauth ); diff --git a/src/net/udp/ntp.c b/src/net/udp/ntp.c new file mode 100644 index 00000000..11f8ccc0 --- /dev/null +++ b/src/net/udp/ntp.c @@ -0,0 +1,275 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Network Time Protocol + * + */ + +/** An NTP client */ +struct ntp_client { + /** Reference count */ + struct refcnt refcnt; + /** Job control interface */ + struct interface job; + /** Data transfer interface */ + struct interface xfer; + /** Retransmission timer */ + struct retry_timer timer; +}; + +/** + * Close NTP client + * + * @v ntp NTP client + * @v rc Reason for close + */ +static void ntp_close ( struct ntp_client *ntp, int rc ) { + + /* Stop timer */ + stop_timer ( &ntp->timer ); + + /* Shut down interfaces */ + intf_shutdown ( &ntp->xfer, rc ); + intf_shutdown ( &ntp->job, rc ); +} + +/** + * Send NTP request + * + * @v ntp NTP client + * @ret rc Return status code + */ +static int ntp_request ( struct ntp_client *ntp ) { + struct ntp_header hdr; + int rc; + + DBGC ( ntp, "NTP %p sending request\n", ntp ); + + /* Construct header */ + memset ( &hdr, 0, sizeof ( hdr ) ); + hdr.flags = ( NTP_FL_LI_UNKNOWN | NTP_FL_VN_1 | NTP_FL_MODE_CLIENT ); + hdr.transmit.seconds = htonl ( time ( NULL ) + NTP_EPOCH ); + hdr.transmit.fraction = htonl ( NTP_FRACTION_MAGIC ); + + /* Send request */ + if ( ( rc = xfer_deliver_raw ( &ntp->xfer, &hdr, + sizeof ( hdr ) ) ) != 0 ) { + DBGC ( ntp, "NTP %p could not send request: %s\n", + ntp, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Handle NTP response + * + * @v ntp NTP client + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int ntp_deliver ( struct ntp_client *ntp, struct io_buffer *iobuf, + struct xfer_metadata *meta ) { + struct 
ntp_header *hdr; + struct sockaddr_tcpip *st_src; + int32_t delta; + int rc; + + /* Check source port */ + st_src = ( ( struct sockaddr_tcpip * ) meta->src ); + if ( st_src->st_port != htons ( NTP_PORT ) ) { + DBGC ( ntp, "NTP %p received non-NTP packet:\n", ntp ); + DBGC_HDA ( ntp, 0, iobuf->data, iob_len ( iobuf ) ); + goto ignore; + } + + /* Check packet length */ + if ( iob_len ( iobuf ) < sizeof ( *hdr ) ) { + DBGC ( ntp, "NTP %p received malformed packet:\n", ntp ); + DBGC_HDA ( ntp, 0, iobuf->data, iob_len ( iobuf ) ); + goto ignore; + } + hdr = iobuf->data; + + /* Check mode */ + if ( ( hdr->flags & NTP_FL_MODE_MASK ) != NTP_FL_MODE_SERVER ) { + DBGC ( ntp, "NTP %p received non-server packet:\n", ntp ); + DBGC_HDA ( ntp, 0, iobuf->data, iob_len ( iobuf ) ); + goto ignore; + } + + /* Check magic value */ + if ( hdr->originate.fraction != htonl ( NTP_FRACTION_MAGIC ) ) { + DBGC ( ntp, "NTP %p received unrecognised packet:\n", ntp ); + DBGC_HDA ( ntp, 0, iobuf->data, iob_len ( iobuf ) ); + goto ignore; + } + + /* Check for Kiss-o'-Death packets */ + if ( ! 
hdr->stratum ) { + DBGC ( ntp, "NTP %p received kiss-o'-death:\n", ntp ); + DBGC_HDA ( ntp, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EPROTO; + goto close; + } + + /* Calculate clock delta */ + delta = ( ntohl ( hdr->receive.seconds ) - + ntohl ( hdr->originate.seconds ) ); + DBGC ( ntp, "NTP %p delta %d seconds\n", ntp, delta ); + + /* Adjust system clock */ + time_adjust ( delta ); + + /* Success */ + rc = 0; + + close: + ntp_close ( ntp, rc ); + ignore: + free_iob ( iobuf ); + return 0; +} + +/** + * Handle data transfer window change + * + * @v ntp NTP client + */ +static void ntp_window_changed ( struct ntp_client *ntp ) { + + /* Start timer to send initial request */ + start_timer_nodelay ( &ntp->timer ); +} + +/** Data transfer interface operations */ +static struct interface_operation ntp_xfer_op[] = { + INTF_OP ( xfer_deliver, struct ntp_client *, ntp_deliver ), + INTF_OP ( xfer_window_changed, struct ntp_client *, + ntp_window_changed ), + INTF_OP ( intf_close, struct ntp_client *, ntp_close ), +}; + +/** Data transfer interface descriptor */ +static struct interface_descriptor ntp_xfer_desc = + INTF_DESC_PASSTHRU ( struct ntp_client, xfer, ntp_xfer_op, job ); + +/** Job control interface operations */ +static struct interface_operation ntp_job_op[] = { + INTF_OP ( intf_close, struct ntp_client *, ntp_close ), +}; + +/** Job control interface descriptor */ +static struct interface_descriptor ntp_job_desc = + INTF_DESC_PASSTHRU ( struct ntp_client, job, ntp_job_op, xfer ); + +/** + * Handle NTP timer expiry + * + * @v timer Retransmission timer + * @v fail Failure indicator + */ +static void ntp_expired ( struct retry_timer *timer, int fail ) { + struct ntp_client *ntp = + container_of ( timer, struct ntp_client, timer ); + + /* Shut down client if we have failed */ + if ( fail ) { + ntp_close ( ntp, -ETIMEDOUT ); + return; + } + + /* Otherwise, restart timer and (re)transmit request */ + start_timer ( &ntp->timer ); + ntp_request ( ntp ); +} + +/** + * 
Start NTP client + * + * @v job Job control interface + * @v hostname NTP server + * @ret rc Return status code + */ +int start_ntp ( struct interface *job, const char *hostname ) { + struct ntp_client *ntp; + union { + struct sockaddr_tcpip st; + struct sockaddr sa; + } server; + int rc; + + /* Allocate and initialise structure*/ + ntp = zalloc ( sizeof ( *ntp ) ); + if ( ! ntp ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &ntp->refcnt, NULL ); + intf_init ( &ntp->job, &ntp_job_desc, &ntp->refcnt ); + intf_init ( &ntp->xfer, &ntp_xfer_desc, &ntp->refcnt ); + timer_init ( &ntp->timer, ntp_expired, &ntp->refcnt ); + set_timer_limits ( &ntp->timer, NTP_MIN_TIMEOUT, NTP_MAX_TIMEOUT ); + + /* Open socket */ + memset ( &server, 0, sizeof ( server ) ); + server.st.st_port = htons ( NTP_PORT ); + if ( ( rc = xfer_open_named_socket ( &ntp->xfer, SOCK_DGRAM, &server.sa, + hostname, NULL ) ) != 0 ) { + DBGC ( ntp, "NTP %p could not open socket: %s\n", + ntp, strerror ( rc ) ); + goto err_open; + } + + /* Attach parent interface, mortalise self, and return */ + intf_plug_plug ( &ntp->job, job ); + ref_put ( &ntp->refcnt ); + return 0; + + err_open: + ntp_close ( ntp, rc ); + ref_put ( &ntp->refcnt ); + err_alloc: + return rc; +} diff --git a/src/scripts/efi.lds b/src/scripts/efi.lds new file mode 100644 index 00000000..f1049f24 --- /dev/null +++ b/src/scripts/efi.lds @@ -0,0 +1,110 @@ +/* -*- sh -*- */ + +/* + * Linker script for EFI images + * + */ + +SECTIONS { + + /* The file starts at a virtual address of zero, and sections are + * contiguous. Each section is aligned to at least _max_align, + * which defaults to 32. Load addresses are equal to virtual + * addresses. + */ + + _max_align = 32; + + /* Allow plenty of space for file headers */ + . = 0x1000; + + /* + * The text section + * + */ + + . = ALIGN ( _max_align ); + .text : { + _text = .; + *(.text) + *(.text.*) + _etext = .; + } + + /* + * The rodata section + * + */ + + . 
= ALIGN ( _max_align ); + .rodata : { + _rodata = .; + *(.rodata) + *(.rodata.*) + _erodata = .; + } + + /* + * The data section + * + */ + + . = ALIGN ( _max_align ); + .data : { + _data = .; + *(.data) + *(.data.*) + KEEP(*(SORT(.tbl.*))) /* Various tables. See include/tables.h */ + KEEP(*(.provided)) + KEEP(*(.provided.*)) + _edata = .; + } + + /* + * The bss section + * + */ + + . = ALIGN ( _max_align ); + .bss : { + _bss = .; + *(.bss) + *(.bss.*) + *(COMMON) + _ebss = .; + } + + /* + * Weak symbols that need zero values if not otherwise defined + * + */ + + .weak 0x0 : { + _weak = .; + *(.weak) + *(.weak.*) + _eweak = .; + } + _assert = ASSERT ( ( _weak == _eweak ), ".weak is non-zero length" ); + + /* + * Dispose of the comment and note sections to make the link map + * easier to read + * + */ + + /DISCARD/ : { + *(.comment) + *(.comment.*) + *(.note) + *(.note.*) + *(.eh_frame) + *(.eh_frame.*) + *(.rel) + *(.rel.*) + *(.einfo) + *(.einfo.*) + *(.discard) + *(.discard.*) + } +} diff --git a/src/tests/aes_test.c b/src/tests/aes_test.c new file mode 100644 index 00000000..ad66c734 --- /dev/null +++ b/src/tests/aes_test.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * AES tests + * + * These test vectors are provided by NIST as part of the + * Cryptographic Toolkit Examples, downloadable from: + * + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/AES_Core_All.pdf + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/AES_ECB.pdf + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/AES_CBC.pdf + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include "cipher_test.h" + +/** Key used for NIST 128-bit test vectors */ +#define AES_KEY_NIST_128 \ + KEY ( 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, \ + 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c ) + +/** Key used for NIST 192-bit test vectors */ +#define AES_KEY_NIST_192 \ + KEY ( 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, \ + 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, 0x62, 0xf8, \ + 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b ) + +/** Key used for NIST 256-bit test vectors */ +#define AES_KEY_NIST_256 \ + KEY ( 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, \ + 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, 0x1f, 0x35, \ + 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, \ + 0xa3, 0x09, 0x14, 0xdf, 0xf4 ) + +/** Dummy initialisation vector used for NIST ECB-mode test vectors */ +#define AES_IV_NIST_DUMMY \ + IV ( 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ) + +/** Initialisation vector used for NIST CBC-mode test vectors */ +#define AES_IV_NIST_CBC \ + IV ( 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, \ + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f ) + +/** Plaintext used for NIST test vectors */ +#define AES_PLAINTEXT_NIST \ + PLAINTEXT ( 0x6b, 0xc1, 
0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, \ + 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, \ + 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, \ + 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, \ + 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, \ + 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, \ + 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, \ + 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 ) + +/** AES-128-ECB (same test as AES-128-Core) */ +CIPHER_TEST ( aes_128_ecb, &aes_ecb_algorithm, + AES_KEY_NIST_128, AES_IV_NIST_DUMMY, AES_PLAINTEXT_NIST, + CIPHERTEXT ( 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60, + 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97, + 0xf5, 0xd3, 0xd5, 0x85, 0x03, 0xb9, 0x69, 0x9d, + 0xe7, 0x85, 0x89, 0x5a, 0x96, 0xfd, 0xba, 0xaf, + 0x43, 0xb1, 0xcd, 0x7f, 0x59, 0x8e, 0xce, 0x23, + 0x88, 0x1b, 0x00, 0xe3, 0xed, 0x03, 0x06, 0x88, + 0x7b, 0x0c, 0x78, 0x5e, 0x27, 0xe8, 0xad, 0x3f, + 0x82, 0x23, 0x20, 0x71, 0x04, 0x72, 0x5d, 0xd4 ) ); + +/** AES-128-CBC */ +CIPHER_TEST ( aes_128_cbc, &aes_cbc_algorithm, + AES_KEY_NIST_128, AES_IV_NIST_CBC, AES_PLAINTEXT_NIST, + CIPHERTEXT ( 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, + 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d, + 0x50, 0x86, 0xcb, 0x9b, 0x50, 0x72, 0x19, 0xee, + 0x95, 0xdb, 0x11, 0x3a, 0x91, 0x76, 0x78, 0xb2, + 0x73, 0xbe, 0xd6, 0xb8, 0xe3, 0xc1, 0x74, 0x3b, + 0x71, 0x16, 0xe6, 0x9e, 0x22, 0x22, 0x95, 0x16, + 0x3f, 0xf1, 0xca, 0xa1, 0x68, 0x1f, 0xac, 0x09, + 0x12, 0x0e, 0xca, 0x30, 0x75, 0x86, 0xe1, 0xa7 ) ); + +/** AES-192-ECB (same test as AES-192-Core) */ +CIPHER_TEST ( aes_192_ecb, &aes_ecb_algorithm, + AES_KEY_NIST_192, AES_IV_NIST_DUMMY, AES_PLAINTEXT_NIST, + CIPHERTEXT ( 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f, + 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc, + 0x97, 0x41, 0x04, 0x84, 0x6d, 0x0a, 0xd3, 0xad, + 0x77, 0x34, 0xec, 0xb3, 0xec, 0xee, 0x4e, 0xef, + 0xef, 0x7a, 0xfd, 0x22, 0x70, 0xe2, 0xe6, 0x0a, + 0xdc, 0xe0, 0xba, 0x2f, 0xac, 0xe6, 0x44, 0x4e, + 0x9a, 
0x4b, 0x41, 0xba, 0x73, 0x8d, 0x6c, 0x72, + 0xfb, 0x16, 0x69, 0x16, 0x03, 0xc1, 0x8e, 0x0e ) ); + +/** AES-192-CBC */ +CIPHER_TEST ( aes_192_cbc, &aes_cbc_algorithm, + AES_KEY_NIST_192, AES_IV_NIST_CBC, AES_PLAINTEXT_NIST, + CIPHERTEXT ( 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, + 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8, + 0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4, + 0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a, + 0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0, + 0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0, + 0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81, + 0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd ) ); + +/** AES-256-ECB (same test as AES-256-Core) */ +CIPHER_TEST ( aes_256_ecb, &aes_ecb_algorithm, + AES_KEY_NIST_256, AES_IV_NIST_DUMMY, AES_PLAINTEXT_NIST, + CIPHERTEXT ( 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c, + 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8, + 0x59, 0x1c, 0xcb, 0x10, 0xd4, 0x10, 0xed, 0x26, + 0xdc, 0x5b, 0xa7, 0x4a, 0x31, 0x36, 0x28, 0x70, + 0xb6, 0xed, 0x21, 0xb9, 0x9c, 0xa6, 0xf4, 0xf9, + 0xf1, 0x53, 0xe7, 0xb1, 0xbe, 0xaf, 0xed, 0x1d, + 0x23, 0x30, 0x4b, 0x7a, 0x39, 0xf9, 0xf3, 0xff, + 0x06, 0x7d, 0x8d, 0x8f, 0x9e, 0x24, 0xec, 0xc7 ) ); + +/** AES-256-CBC */ +CIPHER_TEST ( aes_256_cbc, &aes_cbc_algorithm, + AES_KEY_NIST_256, AES_IV_NIST_CBC, AES_PLAINTEXT_NIST, + CIPHERTEXT ( 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, + 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6, + 0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d, + 0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d, + 0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf, + 0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61, + 0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc, + 0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b ) ); + +/** + * Perform AES self-test + * + */ +static void aes_test_exec ( void ) { + struct cipher_algorithm *ecb = &aes_ecb_algorithm; + struct cipher_algorithm *cbc = &aes_cbc_algorithm; + unsigned int keylen; + + /* Correctness tests */ + 
cipher_ok ( &aes_128_ecb ); + cipher_ok ( &aes_128_cbc ); + cipher_ok ( &aes_192_ecb ); + cipher_ok ( &aes_192_cbc ); + cipher_ok ( &aes_256_ecb ); + cipher_ok ( &aes_256_cbc ); + + /* Speed tests */ + for ( keylen = 128 ; keylen <= 256 ; keylen += 64 ) { + DBG ( "AES-%d-ECB encryption required %ld cycles per byte\n", + keylen, cipher_cost_encrypt ( ecb, ( keylen / 8 ) ) ); + DBG ( "AES-%d-ECB decryption required %ld cycles per byte\n", + keylen, cipher_cost_decrypt ( ecb, ( keylen / 8 ) ) ); + DBG ( "AES-%d-CBC encryption required %ld cycles per byte\n", + keylen, cipher_cost_encrypt ( cbc, ( keylen / 8 ) ) ); + DBG ( "AES-%d-CBC decryption required %ld cycles per byte\n", + keylen, cipher_cost_decrypt ( cbc, ( keylen / 8 ) ) ); + } +} + +/** AES self-test */ +struct self_test aes_test __self_test = { + .name = "aes", + .exec = aes_test_exec, +}; diff --git a/src/tests/asn1_test.c b/src/tests/asn1_test.c new file mode 100644 index 00000000..df3f01b6 --- /dev/null +++ b/src/tests/asn1_test.c @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * ASN.1 self-tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include +#include "asn1_test.h" + +/** + * Report ASN.1 test result + * + * @v test ASN.1 test + * @v file Test code file + * @v line Test code line + */ +void asn1_okx ( struct asn1_test *test, const char *file, unsigned int line ) { + struct digest_algorithm *digest = &asn1_test_digest_algorithm; + struct asn1_cursor *cursor; + uint8_t ctx[digest->ctxsize]; + uint8_t out[ASN1_TEST_DIGEST_SIZE]; + unsigned int i; + size_t offset; + int next; + + /* Sanity check */ + assert ( sizeof ( out ) == digest->digestsize ); + + /* Correct image data pointer */ + test->image->data = virt_to_user ( ( void * ) test->image->data ); + + /* Check that image is detected as correct type */ + okx ( register_image ( test->image ) == 0, file, line ); + okx ( test->image->type == test->type, file, line ); + + /* Check that all ASN.1 objects can be extracted */ + for ( offset = 0, i = 0 ; i < test->count ; offset = next, i++ ) { + + /* Extract ASN.1 object */ + next = image_asn1 ( test->image, offset, &cursor ); + okx ( next >= 0, file, line ); + okx ( ( ( size_t ) next ) > offset, file, line ); + if ( next > 0 ) { + + /* Calculate digest of ASN.1 object */ + digest_init ( digest, ctx ); + digest_update ( digest, ctx, cursor->data, + cursor->len ); + digest_final ( digest, ctx, out ); + + /* Compare against expected digest */ + okx ( memcmp ( out, test->expected[i].digest, + sizeof ( out ) ) == 0, file, line ); + + /* Free ASN.1 object */ + free ( cursor ); + } + } + + /* Check that we have reached the end of the image */ + okx ( offset == test->image->len, file, line ); + + /* Unregister 
image */ + unregister_image ( test->image ); +} diff --git a/src/tests/asn1_test.h b/src/tests/asn1_test.h new file mode 100644 index 00000000..c8167ed3 --- /dev/null +++ b/src/tests/asn1_test.h @@ -0,0 +1,73 @@ +#ifndef _ASN1_TEST_H +#define _ASN1_TEST_H + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** Digest algorithm used for ASN.1 tests */ +#define asn1_test_digest_algorithm sha1_algorithm + +/** Digest size used for ASN.1 tests */ +#define ASN1_TEST_DIGEST_SIZE SHA1_DIGEST_SIZE + +/** An ASN.1 test digest */ +struct asn1_test_digest { + /** Digest value */ + uint8_t digest[ASN1_TEST_DIGEST_SIZE]; +}; + +/** An ASN.1 test */ +struct asn1_test { + /** Image type */ + struct image_type *type; + /** Source image */ + struct image *image; + /** Expected digests of ASN.1 objects */ + struct asn1_test_digest *expected; + /** Number of ASN.1 objects */ + unsigned int count; +}; + +/** + * Define an ASN.1 test + * + * @v _name Test name + * @v _type Test image file type + * @v _file Test image file data + * @v ... Expected ASN.1 object digests + * @ret test ASN.1 test + */ +#define ASN1( _name, _type, _file, ... 
) \ + static const char _name ## __file[] = _file; \ + static struct image _name ## __image = { \ + .refcnt = REF_INIT ( ref_no_free ), \ + .name = #_name, \ + .data = ( userptr_t ) ( _name ## __file ), \ + .len = sizeof ( _name ## __file ), \ + }; \ + static struct asn1_test_digest _name ## _expected[] = { \ + __VA_ARGS__ \ + }; \ + static struct asn1_test _name = { \ + .type = _type, \ + .image = & _name ## __image, \ + .expected = _name ## _expected, \ + .count = ( sizeof ( _name ## _expected ) / \ + sizeof ( _name ## _expected[0] ) ), \ + }; + +extern void asn1_okx ( struct asn1_test *test, const char *file, + unsigned int line ); + +/** + * Report ASN.1 test result + * + * @v test ASN.1 test + */ +#define asn1_ok( test ) asn1_okx ( test, __FILE__, __LINE__ ) + +#endif /* _ASN1_TEST_H */ diff --git a/src/tests/bitops_test.c b/src/tests/bitops_test.c new file mode 100644 index 00000000..f29fc680 --- /dev/null +++ b/src/tests/bitops_test.c @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Bit operations self-tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include + +/** + * Perform bit operations self-tests + * + */ +static void bitops_test_exec ( void ) { + uint8_t bits[32]; + + /* Initialise bits */ + memset ( bits, 0, sizeof ( bits ) ); + + /* Test set_bit() */ + set_bit ( 0, bits ); + ok ( bits[0] == 0x01 ); + set_bit ( 17, bits ); + ok ( bits[2] == 0x02 ); + set_bit ( 22, bits ); + ok ( bits[2] == 0x42 ); + set_bit ( 22, bits ); + ok ( bits[2] == 0x42 ); + + /* Test clear_bit() */ + clear_bit ( 0, bits ); + ok ( bits[0] == 0x00 ); + bits[5] = 0xff; + clear_bit ( 42, bits ); + ok ( bits[5] == 0xfb ); + clear_bit ( 42, bits ); + ok ( bits[5] == 0xfb ); + clear_bit ( 44, bits ); + ok ( bits[5] == 0xeb ); + + /* Test test_and_set_bit() */ + ok ( test_and_set_bit ( 0, bits ) == 0 ); + ok ( bits[0] == 0x01 ); + ok ( test_and_set_bit ( 0, bits ) != 0 ); + ok ( bits[0] == 0x01 ); + ok ( test_and_set_bit ( 69, bits ) == 0 ); + ok ( bits[8] == 0x20 ); + ok ( test_and_set_bit ( 69, bits ) != 0 ); + ok ( bits[8] == 0x20 ); + ok ( test_and_set_bit ( 69, bits ) != 0 ); + ok ( bits[8] == 0x20 ); + + /* Test test_and_clear_bit() */ + ok ( test_and_clear_bit ( 0, bits ) != 0 ); + ok ( bits[0] == 0x00 ); + ok ( test_and_clear_bit ( 0, bits ) == 0 ); + ok ( bits[0] == 0x00 ); + bits[31] = 0xeb; + ok ( test_and_clear_bit ( 255, bits ) != 0 ); + ok ( bits[31] == 0x6b ); + ok ( test_and_clear_bit ( 255, bits ) == 0 ); + ok ( bits[31] == 0x6b ); + ok ( test_and_clear_bit ( 255, bits ) == 0 ); + ok ( bits[31] == 0x6b ); +} + +/** Bit operations self-test */ +struct self_test bitops_test __self_test = { + .name = "bitops", + .exec = bitops_test_exec, +}; diff --git a/src/tests/cipher_test.c b/src/tests/cipher_test.c new file mode 100644 index 00000000..800d6c13 --- /dev/null +++ b/src/tests/cipher_test.c @@ -0,0 +1,185 @@ +/* + * Copyright 
(C) 2012 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Cipher self-tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include +#include +#include +#include "cipher_test.h" + +/** Number of sample iterations for profiling */ +#define PROFILE_COUNT 16 + +/** + * Report a cipher encryption test result + * + * @v test Cipher test + * @v file Test code file + * @v line Test code line + */ +void cipher_encrypt_okx ( struct cipher_test *test, const char *file, + unsigned int line ) { + struct cipher_algorithm *cipher = test->cipher; + size_t len = test->len; + uint8_t ctx[cipher->ctxsize]; + uint8_t ciphertext[len]; + + /* Initialise cipher */ + okx ( cipher_setkey ( cipher, ctx, test->key, test->key_len ) == 0, + file, line ); + cipher_setiv ( cipher, ctx, test->iv ); + + /* Perform encryption */ + cipher_encrypt ( cipher, ctx, test->plaintext, ciphertext, len ); + + /* Compare against expected ciphertext */ + okx ( memcmp ( ciphertext, test->ciphertext, len 
) == 0, file, line ); +} + +/** + * Report a cipher decryption test result + * + * @v test Cipher test + * @v file Test code file + * @v line Test code line + */ +void cipher_decrypt_okx ( struct cipher_test *test, const char *file, + unsigned int line ) { + struct cipher_algorithm *cipher = test->cipher; + size_t len = test->len; + uint8_t ctx[cipher->ctxsize]; + uint8_t plaintext[len]; + + /* Initialise cipher */ + okx ( cipher_setkey ( cipher, ctx, test->key, test->key_len ) == 0, + file, line ); + cipher_setiv ( cipher, ctx, test->iv ); + + /* Perform encryption */ + cipher_decrypt ( cipher, ctx, test->ciphertext, plaintext, len ); + + /* Compare against expected plaintext */ + okx ( memcmp ( plaintext, test->plaintext, len ) == 0, file, line ); +} + +/** + * Report a cipher encryption and decryption test result + * + * @v test Cipher test + * @v file Test code file + * @v line Test code line + */ +void cipher_okx ( struct cipher_test *test, const char *file, + unsigned int line ) { + + cipher_encrypt_okx ( test, file, line ); + cipher_decrypt_okx ( test, file, line ); +} + +/** + * Calculate cipher encryption or decryption cost + * + * @v cipher Cipher algorithm + * @v key_len Length of key + * @v op Encryption or decryption operation + * @ret cost Cost (in cycles per byte) + */ +static unsigned long +cipher_cost ( struct cipher_algorithm *cipher, size_t key_len, + void ( * op ) ( struct cipher_algorithm *cipher, void *ctx, + const void *src, void *dst, size_t len ) ) { + static uint8_t random[8192]; /* Too large for stack */ + uint8_t key[key_len]; + uint8_t iv[cipher->blocksize]; + uint8_t ctx[cipher->ctxsize]; + struct profiler profiler; + unsigned long cost; + unsigned int i; + int rc; + + /* Fill buffer with pseudo-random data */ + srand ( 0x1234568 ); + for ( i = 0 ; i < sizeof ( random ) ; i++ ) + random[i] = rand(); + for ( i = 0 ; i < sizeof ( key ) ; i++ ) + key[i] = rand(); + for ( i = 0 ; i < sizeof ( iv ) ; i++ ) + iv[i] = rand(); + + /* 
Initialise cipher */ + rc = cipher_setkey ( cipher, ctx, key, key_len ); + assert ( rc == 0 ); + cipher_setiv ( cipher, ctx, iv ); + + /* Profile cipher operation */ + memset ( &profiler, 0, sizeof ( profiler ) ); + for ( i = 0 ; i < PROFILE_COUNT ; i++ ) { + profile_start ( &profiler ); + op ( cipher, ctx, random, random, sizeof ( random ) ); + profile_stop ( &profiler ); + } + + /* Round to nearest whole number of cycles per byte */ + cost = ( ( profile_mean ( &profiler ) + ( sizeof ( random ) / 2 ) ) / + sizeof ( random ) ); + + return cost; +} + +/** + * Calculate cipher encryption cost + * + * @v cipher Cipher algorithm + * @v key_len Length of key + * @ret cost Cost (in cycles per byte) + */ +unsigned long cipher_cost_encrypt ( struct cipher_algorithm *cipher, + size_t key_len ) { + return cipher_cost ( cipher, key_len, cipher_encrypt ); +} + +/** + * Calculate cipher decryption cost + * + * @v cipher Cipher algorithm + * @v key_len Length of key + * @ret cost Cost (in cycles per byte) + */ +unsigned long cipher_cost_decrypt ( struct cipher_algorithm *cipher, + size_t key_len ) { + return cipher_cost ( cipher, key_len, cipher_decrypt ); +} diff --git a/src/tests/cipher_test.h b/src/tests/cipher_test.h new file mode 100644 index 00000000..d7c5aef8 --- /dev/null +++ b/src/tests/cipher_test.h @@ -0,0 +1,111 @@ +#ifndef _CIPHER_TEST_H +#define _CIPHER_TEST_H + +/** @file + * + * Cipher self-tests + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** A cipher test */ +struct cipher_test { + /** Cipher algorithm */ + struct cipher_algorithm *cipher; + /** Key */ + const void *key; + /** Length of key */ + size_t key_len; + /** Initialisation vector */ + const void *iv; + /** Length of initialisation vector */ + size_t iv_len; + /** Plaintext */ + const void *plaintext; + /** Ciphertext */ + const void *ciphertext; + /** Length of text */ + size_t len; +}; + +/** Define inline key */ +#define KEY(...) 
{ __VA_ARGS__ } + +/** Define inline initialisation vector */ +#define IV(...) { __VA_ARGS__ } + +/** Define inline plaintext data */ +#define PLAINTEXT(...) { __VA_ARGS__ } + +/** Define inline ciphertext data */ +#define CIPHERTEXT(...) { __VA_ARGS__ } + +/** + * Define a cipher test + * + * @v name Test name + * @v CIPHER Cipher algorithm + * @v KEY Key + * @v IV Initialisation vector + * @v PLAINTEXT Plaintext + * @v CIPHERTEXT Ciphertext + * @ret test Cipher test + */ +#define CIPHER_TEST( name, CIPHER, KEY, IV, PLAINTEXT, CIPHERTEXT ) \ + static const uint8_t name ## _key [] = KEY; \ + static const uint8_t name ## _iv [] = IV; \ + static const uint8_t name ## _plaintext [] = PLAINTEXT; \ + static const uint8_t name ## _ciphertext \ + [ sizeof ( name ## _plaintext ) ] = CIPHERTEXT; \ + static struct cipher_test name = { \ + .cipher = CIPHER, \ + .key = name ## _key, \ + .key_len = sizeof ( name ## _key ), \ + .iv = name ## _iv, \ + .iv_len = sizeof ( name ## _iv ), \ + .plaintext = name ## _plaintext, \ + .ciphertext = name ## _ciphertext, \ + .len = sizeof ( name ## _plaintext ), \ + } + +extern void cipher_encrypt_okx ( struct cipher_test *test, const char *file, + unsigned int line ); +extern void cipher_decrypt_okx ( struct cipher_test *test, const char *file, + unsigned int line ); +extern void cipher_okx ( struct cipher_test *test, const char *file, + unsigned int line ); +extern unsigned long cipher_cost_encrypt ( struct cipher_algorithm *cipher, + size_t key_len ); +extern unsigned long cipher_cost_decrypt ( struct cipher_algorithm *cipher, + size_t key_len ); + +/** + * Report a cipher encryption test result + * + * @v test Cipher test + */ +#define cipher_encrypt_ok( test ) \ + cipher_encrypt_okx ( test, __FILE__, __LINE__ ) + +/** + * Report a cipher decryption test result + * + * @v test Cipher test + */ +#define cipher_decrypt_ok( test ) \ + cipher_decrypt_okx ( test, __FILE__, __LINE__ ) + +/** + * Report a cipher encryption and decryption test 
result + * + * @v test Cipher test + */ +#define cipher_ok( test ) \ + cipher_okx ( test, __FILE__, __LINE__ ) + +#endif /* _CIPHER_TEST_H */ diff --git a/src/tests/der_test.c b/src/tests/der_test.c new file mode 100644 index 00000000..00cc644f --- /dev/null +++ b/src/tests/der_test.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * DER self-tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include "asn1_test.h" + +/** Define inline data */ +#define DATA(...) { __VA_ARGS__ } + +/** Define inline expected digest */ +#define DIGEST(...) 
{ { __VA_ARGS__ } } + +/** 32-bit RSA private key */ +ASN1 ( rsa32, &der_image_type, + DATA ( 0x30, 0x2c, 0x02, 0x01, 0x00, 0x02, 0x05, 0x00, 0xb7, 0x56, + 0x5c, 0xb1, 0x02, 0x03, 0x01, 0x00, 0x01, 0x02, 0x04, 0x66, + 0xa4, 0xc4, 0x35, 0x02, 0x03, 0x00, 0xda, 0x9f, 0x02, 0x03, + 0x00, 0xd6, 0xaf, 0x02, 0x02, 0x01, 0x59, 0x02, 0x02, 0x4e, + 0xe1, 0x02, 0x03, 0x00, 0xa6, 0x5a ), + DIGEST ( 0x82, 0x66, 0x24, 0xd9, 0xc3, 0x98, 0x1e, 0x5e, 0x56, 0xed, + 0xd0, 0xd0, 0x2a, 0x5e, 0x9c, 0x3a, 0x58, 0xdf, 0x76, 0x0d ) ); + +/** 64-bit RSA private key */ +ASN1 ( rsa64, &der_image_type, + DATA ( 0x30, 0x3e, 0x02, 0x01, 0x00, 0x02, 0x09, 0x00, 0xa1, 0xba, + 0xb5, 0x70, 0x00, 0x89, 0xc0, 0x43, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x02, 0x08, 0x43, 0x98, 0xc6, 0x3c, 0x5f, 0xdc, 0x98, + 0x01, 0x02, 0x05, 0x00, 0xcf, 0x91, 0x1c, 0x5d, 0x02, 0x05, + 0x00, 0xc7, 0x77, 0x85, 0x1f, 0x02, 0x05, 0x00, 0xbc, 0xb3, + 0x33, 0x91, 0x02, 0x04, 0x1b, 0xf9, 0x38, 0x13, 0x02, 0x04, + 0x19, 0xf2, 0x58, 0x86 ), + DIGEST ( 0xee, 0x17, 0x32, 0x31, 0xf0, 0x3d, 0xfd, 0xaa, 0x9b, 0x47, + 0xaf, 0x7b, 0x4b, 0x52, 0x0b, 0xb1, 0xab, 0x25, 0x3f, 0x11 ) ); + +/** + * Perform DER self-test + * + */ +static void der_test_exec ( void ) { + + /* Perform tests */ + asn1_ok ( &rsa32 ); + asn1_ok ( &rsa64 ); +} + +/** DER self-test */ +struct self_test der_test __self_test = { + .name = "der", + .exec = der_test_exec, +}; diff --git a/src/tests/iobuf_test.c b/src/tests/iobuf_test.c new file mode 100644 index 00000000..a417c2e8 --- /dev/null +++ b/src/tests/iobuf_test.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * I/O buffer tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include +#include + +/* Forward declaration */ +struct self_test iobuf_test __self_test; + +/** + * Report I/O buffer allocation test result + * + * @v len Required length of buffer + * @v align Physical alignment + * @v offset Offset from physical alignment + * @v file Test code file + * @v line Test code line + */ +static inline void alloc_iob_okx ( size_t len, size_t align, size_t offset, + const char *file, unsigned int line ) { + struct io_buffer *iobuf; + + /* Allocate I/O buffer */ + iobuf = alloc_iob_raw ( len, align, offset ); + okx ( iobuf != NULL, file, line ); + DBGC ( &iobuf_test, "IOBUF %p (%#08lx+%#zx) for %#zx align %#zx " + "offset %#zx\n", iobuf, virt_to_phys ( iobuf->data ), + iob_tailroom ( iobuf ), len, align, offset ); + + /* Validate requested length and alignment */ + okx ( ( ( ( intptr_t ) iobuf ) & ( __alignof__ ( *iobuf ) - 1 ) ) == 0, + file, line ); + okx ( iob_tailroom ( iobuf ) >= len, file, line ); + okx ( ( ( align == 0 ) || + ( ( virt_to_phys ( iobuf->data ) & ( align - 1 ) ) == + ( offset & ( align - 1 ) ) ) ), file, line ); + + /* Overwrite entire content of I/O 
buffer (for Valgrind) */ + memset ( iob_put ( iobuf, len ), 0x55, len ); + + /* Free I/O buffer */ + free_iob ( iobuf ); +} +#define alloc_iob_ok( len, align, offset ) \ + alloc_iob_okx ( len, align, offset, __FILE__, __LINE__ ) + +/** + * Report I/O buffer allocation failure test result + * + * @v len Required length of buffer + * @v align Physical alignment + * @v offset Offset from physical alignment + * @v file Test code file + * @v line Test code line + */ +static inline void alloc_iob_fail_okx ( size_t len, size_t align, size_t offset, + const char *file, unsigned int line ) { + struct io_buffer *iobuf; + + /* Allocate I/O buffer */ + iobuf = alloc_iob_raw ( len, align, offset ); + okx ( iobuf == NULL, file, line ); +} +#define alloc_iob_fail_ok( len, align, offset ) \ + alloc_iob_fail_okx ( len, align, offset, __FILE__, __LINE__ ) + +/** + * Perform I/O buffer self-tests + * + */ +static void iobuf_test_exec ( void ) { + + /* Check zero-length allocations */ + alloc_iob_ok ( 0, 0, 0 ); + alloc_iob_ok ( 0, 0, 1 ); + alloc_iob_ok ( 0, 1, 0 ); + alloc_iob_ok ( 0, 1024, 0 ); + alloc_iob_ok ( 0, 139, -17 ); + + /* Check various sensible allocations */ + alloc_iob_ok ( 1, 0, 0 ); + alloc_iob_ok ( 16, 16, 0 ); + alloc_iob_ok ( 64, 0, 0 ); + alloc_iob_ok ( 65, 0, 0 ); + alloc_iob_ok ( 65, 1024, 19 ); + alloc_iob_ok ( 1536, 1536, 0 ); + alloc_iob_ok ( 2048, 2048, 0 ); + alloc_iob_ok ( 2048, 2048, -10 ); + + /* Excessively large or excessively aligned allocations should fail */ + alloc_iob_fail_ok ( -1UL, 0, 0 ); + alloc_iob_fail_ok ( -1UL, 1024, 0 ); + alloc_iob_fail_ok ( 0, -1UL, 0 ); + alloc_iob_fail_ok ( 1024, -1UL, 0 ); +} + +/** I/O buffer self-test */ +struct self_test iobuf_test __self_test = { + .name = "iobuf", + .exec = iobuf_test_exec, +}; diff --git a/src/tests/ipv4_test.c b/src/tests/ipv4_test.c new file mode 100644 index 00000000..f84a8b81 --- /dev/null +++ b/src/tests/ipv4_test.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * IPv4 tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include + +/** Define inline IPv4 address */ +#define IPV4(a,b,c,d) \ + htonl ( ( (a) << 24 ) | ( (b) << 16 ) | ( (c) << 8 ) | (d) ) + +/** + * Report an inet_ntoa() test result + * + * @v addr IPv4 address + * @v text Expected textual representation + * @v file Test code file + * @v line Test code line + */ +static void inet_ntoa_okx ( uint32_t addr, const char *text, const char *file, + unsigned int line ) { + struct in_addr in = { .s_addr = addr }; + char *actual; + + /* Format address */ + actual = inet_ntoa ( in ); + DBG ( "inet_ntoa ( %d.%d.%d.%d ) = %s\n", + ( ( ntohl ( addr ) >> 24 ) & 0xff ), + ( ( ntohl ( addr ) >> 16 ) & 0xff ), + ( ( ntohl ( addr ) >> 8 ) & 0xff ), + ( ( ntohl ( addr ) >> 0 ) & 0xff ), actual ); + okx ( strcmp ( actual, text ) == 0, file, line ); +} +#define inet_ntoa_ok( addr, text ) \ + inet_ntoa_okx ( addr, text, 
__FILE__, __LINE__ ) + +/** + * Report an inet_aton() test result + * + * @v text Textual representation + * @v addr Expected IPv4 address + * @v file Test code file + * @v line Test code line + */ +static void inet_aton_okx ( const char *text, uint32_t addr, const char *file, + unsigned int line ) { + struct in_addr actual; + + /* Parse address */ + okx ( inet_aton ( text, &actual ) != 0, file, line ); + DBG ( "inet_aton ( \"%s\" ) = %s\n", text, inet_ntoa ( actual ) ); + okx ( actual.s_addr == addr, file, line ); +}; +#define inet_aton_ok( text, addr ) \ + inet_aton_okx ( text, addr, __FILE__, __LINE__ ) + +/** + * Report an inet_aton() failure test result + * + * @v text Textual representation + * @v file Test code file + * @v line Test code line + */ +static void inet_aton_fail_okx ( const char *text, const char *file, + unsigned int line ) { + struct in_addr actual; + + /* Attempt to parse address */ + okx ( inet_aton ( text, &actual ) == 0, file, line ); +} +#define inet_aton_fail_ok( text ) \ + inet_aton_fail_okx ( text, __FILE__, __LINE__ ) + +/** + * Perform IPv4 self-tests + * + */ +static void ipv4_test_exec ( void ) { + + /* Address testing macros */ + ok ( IN_IS_CLASSA ( IPV4 ( 10, 0, 0, 1 ) ) ); + ok ( ! IN_IS_CLASSB ( IPV4 ( 10, 0, 0, 1 ) ) ); + ok ( ! IN_IS_CLASSC ( IPV4 ( 10, 0, 0, 1 ) ) ); + ok ( ! IN_IS_CLASSA ( IPV4 ( 172, 16, 0, 1 ) ) ); + ok ( IN_IS_CLASSB ( IPV4 ( 172, 16, 0, 1 ) ) ); + ok ( ! IN_IS_CLASSC ( IPV4 ( 172, 16, 0, 1 ) ) ); + ok ( ! IN_IS_CLASSA ( IPV4 ( 192, 168, 0, 1 ) ) ); + ok ( ! IN_IS_CLASSB ( IPV4 ( 192, 168, 0, 1 ) ) ); + ok ( IN_IS_CLASSC ( IPV4 ( 192, 168, 0, 1 ) ) ); + ok ( ! IN_IS_MULTICAST ( IPV4 ( 127, 0, 0, 1 ) ) ); + ok ( ! IN_IS_MULTICAST ( IPV4 ( 8, 8, 8, 8 ) ) ); + ok ( ! IN_IS_MULTICAST ( IPV4 ( 0, 0, 0, 0 ) ) ); + ok ( ! IN_IS_MULTICAST ( IPV4 ( 223, 0, 0, 1 ) ) ); + ok ( ! 
IN_IS_MULTICAST ( IPV4 ( 240, 0, 0, 1 ) ) ); + ok ( IN_IS_MULTICAST ( IPV4 ( 224, 0, 0, 1 ) ) ); + ok ( IN_IS_MULTICAST ( IPV4 ( 231, 89, 0, 2 ) ) ); + ok ( IN_IS_MULTICAST ( IPV4 ( 239, 6, 1, 17 ) ) ); + + /* inet_ntoa() tests */ + inet_ntoa_ok ( IPV4 ( 127, 0, 0, 1 ), "127.0.0.1" ); + inet_ntoa_ok ( IPV4 ( 0, 0, 0, 0 ), "0.0.0.0" ); + inet_ntoa_ok ( IPV4 ( 255, 255, 255, 255 ), "255.255.255.255" ); + inet_ntoa_ok ( IPV4 ( 212, 13, 204, 60 ), "212.13.204.60" ); + + /* inet_aton() tests */ + inet_aton_ok ( "212.13.204.60", IPV4 ( 212, 13, 204, 60 ) ); + inet_aton_ok ( "127.0.0.1", IPV4 ( 127, 0, 0, 1 ) ); + + /* inet_aton() failure tests */ + inet_aton_fail_ok ( "256.0.0.1" ); /* Byte out of range */ + inet_aton_fail_ok ( "212.13.204.60.1" ); /* Too long */ + inet_aton_fail_ok ( "127.0.0" ); /* Too short */ + inet_aton_fail_ok ( "1.2.3.a" ); /* Invalid characters */ + inet_aton_fail_ok ( "127.0..1" ); /* Missing bytes */ +} + +/** IPv4 self-test */ +struct self_test ipv4_test __self_test = { + .name = "ipv4", + .exec = ipv4_test_exec, +}; diff --git a/src/tests/md4_test.c b/src/tests/md4_test.c new file mode 100644 index 00000000..b6528c6e --- /dev/null +++ b/src/tests/md4_test.c @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2017 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * MD4 tests + * + * Test inputs borrowed from NIST SHA-1 tests, with results calculated + * using "openssl dgst -md4" + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include "digest_test.h" + +/* Empty test vector */ +DIGEST_TEST ( md4_empty, &md4_algorithm, DIGEST_EMPTY, + DIGEST ( 0x31, 0xd6, 0xcf, 0xe0, 0xd1, 0x6a, 0xe9, 0x31, 0xb7, + 0x3c, 0x59, 0xd7, 0xe0, 0xc0, 0x89, 0xc0 ) ); + +/* NIST test vector "abc" */ +DIGEST_TEST ( md4_nist_abc, &md4_algorithm, DIGEST_NIST_ABC, + DIGEST ( 0xa4, 0x48, 0x01, 0x7a, 0xaf, 0x21, 0xd8, 0x52, 0x5f, + 0xc1, 0x0a, 0xe8, 0x7a, 0xa6, 0x72, 0x9d ) ); + +/* NIST test vector "abc...opq" */ +DIGEST_TEST ( md4_nist_abc_opq, &md4_algorithm, DIGEST_NIST_ABC_OPQ, + DIGEST ( 0x46, 0x91, 0xa9, 0xec, 0x81, 0xb1, 0xa6, 0xbd, 0x1a, + 0xb8, 0x55, 0x72, 0x40, 0xb2, 0x45, 0xc5 ) ); + +/** + * Perform MD4 self-test + * + */ +static void md4_test_exec ( void ) { + + /* Correctness tests */ + digest_ok ( &md4_empty ); + digest_ok ( &md4_nist_abc ); + digest_ok ( &md4_nist_abc_opq ); + + /* Speed tests */ + DBG ( "MD4 required %ld cycles per byte\n", + digest_cost ( &md4_algorithm ) ); +} + +/** MD4 self-test */ +struct self_test md4_test __self_test = { + .name = "md4", + .exec = md4_test_exec, +}; diff --git a/src/tests/memset_test.c b/src/tests/memset_test.c new file mode 100644 index 00000000..d96f83fa --- /dev/null +++ b/src/tests/memset_test.c @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2015 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * memset() self-tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include + +/* Provide global functions to allow inspection of generated code */ + +void memset_zero_0 ( void *dest ) { memset ( dest, 0, 0 ); } +void memset_zero_1 ( void *dest ) { memset ( dest, 0, 1 ); } +void memset_zero_2 ( void *dest ) { memset ( dest, 0, 2 ); } +void memset_zero_3 ( void *dest ) { memset ( dest, 0, 3 ); } +void memset_zero_4 ( void *dest ) { memset ( dest, 0, 4 ); } +void memset_zero_5 ( void *dest ) { memset ( dest, 0, 5 ); } +void memset_zero_6 ( void *dest ) { memset ( dest, 0, 6 ); } +void memset_zero_7 ( void *dest ) { memset ( dest, 0, 7 ); } +void memset_zero_8 ( void *dest ) { memset ( dest, 0, 8 ); } +void memset_zero_9 ( void *dest ) { memset ( dest, 0, 9 ); } +void memset_zero_10 ( void *dest ) { memset ( dest, 0, 10 ); } +void memset_zero_11 ( void *dest ) { memset ( dest, 0, 11 ); } +void memset_zero_12 ( void *dest ) { memset ( dest, 0, 
12 ); } +void memset_zero_13 ( void *dest ) { memset ( dest, 0, 13 ); } +void memset_zero_14 ( void *dest ) { memset ( dest, 0, 14 ); } +void memset_zero_15 ( void *dest ) { memset ( dest, 0, 15 ); } +void memset_zero_16 ( void *dest ) { memset ( dest, 0, 16 ); } +void memset_zero_17 ( void *dest ) { memset ( dest, 0, 17 ); } +void memset_zero_18 ( void *dest ) { memset ( dest, 0, 18 ); } +void memset_zero_19 ( void *dest ) { memset ( dest, 0, 19 ); } +void memset_zero_20 ( void *dest ) { memset ( dest, 0, 20 ); } +void memset_zero_21 ( void *dest ) { memset ( dest, 0, 21 ); } +void memset_zero_22 ( void *dest ) { memset ( dest, 0, 22 ); } +void memset_zero_23 ( void *dest ) { memset ( dest, 0, 23 ); } +void memset_zero_24 ( void *dest ) { memset ( dest, 0, 24 ); } +void memset_zero_25 ( void *dest ) { memset ( dest, 0, 25 ); } +void memset_zero_26 ( void *dest ) { memset ( dest, 0, 26 ); } +void memset_zero_27 ( void *dest ) { memset ( dest, 0, 27 ); } +void memset_zero_28 ( void *dest ) { memset ( dest, 0, 28 ); } +void memset_zero_29 ( void *dest ) { memset ( dest, 0, 29 ); } +void memset_zero_30 ( void *dest ) { memset ( dest, 0, 30 ); } +void memset_zero_31 ( void *dest ) { memset ( dest, 0, 31 ); } + +/** + * Force a call to the variable-length implementation of memset() + * + * @v dest Destination address + * @v fill Fill pattern + * @v len Length of data + * @ret dest Destination address + */ +__attribute__ (( noinline )) void * memset_var ( void *dest, unsigned int fill, + size_t len ) { + return memset ( dest, fill, len ); +} + +/** + * Perform a constant-length memset() test + * + * @v len Length of data + */ +#define MEMSET_TEST_CONSTANT( len ) do { \ + uint8_t dest_const[ 1 + len + 1 ]; \ + uint8_t dest_var[ 1 + len + 1 ]; \ + static uint8_t zero[len]; \ + unsigned int i; \ + \ + for ( i = 0 ; i < sizeof ( dest_const ) ; i++ ) \ + dest_const[i] = 0xaa; \ + memset ( ( dest_const + 1 ), 0, len ); \ + ok ( dest_const[0] == 0xaa ); \ + ok ( dest_const[ 
sizeof ( dest_const ) - 1 ] == 0xaa ); \ + ok ( memcmp ( ( dest_const + 1 ), zero, len ) == 0 ); \ + \ + for ( i = 0 ; i < sizeof ( dest_var ) ; i++ ) \ + dest_var[i] = 0xbb; \ + memset_var ( ( dest_var + 1 ), 0, len ); \ + ok ( dest_var[0] == 0xbb ); \ + ok ( dest_var[ sizeof ( dest_var ) - 1 ] == 0xbb ); \ + ok ( memcmp ( ( dest_var + 1 ), zero, len ) == 0 ); \ + } while ( 0 ) + +/** + * Perform memset() self-tests + * + */ +static void memset_test_exec ( void ) { + + /* Constant-length tests */ + MEMSET_TEST_CONSTANT ( 0 ); + MEMSET_TEST_CONSTANT ( 1 ); + MEMSET_TEST_CONSTANT ( 2 ); + MEMSET_TEST_CONSTANT ( 3 ); + MEMSET_TEST_CONSTANT ( 4 ); + MEMSET_TEST_CONSTANT ( 5 ); + MEMSET_TEST_CONSTANT ( 6 ); + MEMSET_TEST_CONSTANT ( 7 ); + MEMSET_TEST_CONSTANT ( 8 ); + MEMSET_TEST_CONSTANT ( 9 ); + MEMSET_TEST_CONSTANT ( 10 ); + MEMSET_TEST_CONSTANT ( 11 ); + MEMSET_TEST_CONSTANT ( 12 ); + MEMSET_TEST_CONSTANT ( 13 ); + MEMSET_TEST_CONSTANT ( 14 ); + MEMSET_TEST_CONSTANT ( 15 ); + MEMSET_TEST_CONSTANT ( 16 ); + MEMSET_TEST_CONSTANT ( 17 ); + MEMSET_TEST_CONSTANT ( 18 ); + MEMSET_TEST_CONSTANT ( 19 ); + MEMSET_TEST_CONSTANT ( 20 ); + MEMSET_TEST_CONSTANT ( 21 ); + MEMSET_TEST_CONSTANT ( 22 ); + MEMSET_TEST_CONSTANT ( 23 ); + MEMSET_TEST_CONSTANT ( 24 ); + MEMSET_TEST_CONSTANT ( 25 ); + MEMSET_TEST_CONSTANT ( 26 ); + MEMSET_TEST_CONSTANT ( 27 ); + MEMSET_TEST_CONSTANT ( 28 ); + MEMSET_TEST_CONSTANT ( 29 ); + MEMSET_TEST_CONSTANT ( 30 ); + MEMSET_TEST_CONSTANT ( 31 ); +} + +/** memset() self-test */ +struct self_test memset_test __self_test = { + .name = "memset", + .exec = memset_test_exec, +}; diff --git a/src/tests/ntlm_test.c b/src/tests/ntlm_test.c new file mode 100644 index 00000000..65a8b8c6 --- /dev/null +++ b/src/tests/ntlm_test.c @@ -0,0 +1,312 @@ +/* + * Copyright (C) 2017 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * NTLM authentication self-tests + * + * The test vectors are taken from the MS-NLMP specification document. 
+ * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include + +/** A key generation test */ +struct ntlm_key_test { + /** Domain name (or NULL) */ + const char *domain; + /** User name (or NULL) */ + const char *username; + /** Password (or NULL) */ + const char *password; + /** Expected key */ + struct ntlm_key expected; +}; + +/** An authentication test */ +struct ntlm_authenticate_test { + /** Domain name (or NULL) */ + const char *domain; + /** User name (or NULL) */ + const char *username; + /** Password (or NULL) */ + const char *password; + /** Workstation (or NULL) */ + const char *workstation; + /** Nonce */ + struct ntlm_nonce nonce; + /** Challenge message */ + struct ntlm_challenge *challenge; + /** Length of Challenge message */ + size_t challenge_len; + /** Expected Authenticate message */ + struct ntlm_authenticate *expected; + /** Expected length of Authenticate message */ + size_t expected_len; +}; + +/** Define inline message data */ +#define DATA(...) 
{ __VA_ARGS__ } + +/** Define a key generation digest test */ +#define KEY_TEST( name, DOMAIN, USERNAME, PASSWORD, EXPECTED ) \ + static struct ntlm_key_test name = { \ + .domain = DOMAIN, \ + .username = USERNAME, \ + .password = PASSWORD, \ + .expected = { \ + .raw = EXPECTED, \ + }, \ + }; + +/** Define an authentication test */ +#define AUTHENTICATE_TEST( name, DOMAIN, USERNAME, PASSWORD, \ + WORKSTATION, NONCE, CHALLENGE, EXPECTED ) \ + static const uint8_t name ## _challenge[] = CHALLENGE; \ + static const uint8_t name ## _expected[] = EXPECTED; \ + static struct ntlm_authenticate_test name = { \ + .domain = DOMAIN, \ + .username = USERNAME, \ + .password = PASSWORD, \ + .workstation = WORKSTATION, \ + .nonce = { \ + .raw = NONCE, \ + }, \ + .challenge = ( ( void * ) name ## _challenge ), \ + .challenge_len = sizeof ( name ## _challenge ), \ + .expected = ( ( void * ) name ## _expected ), \ + .expected_len = sizeof ( name ## _expected ), \ + }; + +/** NTOWFv2() test from MS-NLMP specification */ +KEY_TEST ( msnlmp_ntowfv2, "Domain", "User", "Password", + DATA ( 0x0c, 0x86, 0x8a, 0x40, 0x3b, 0xfd, 0x7a, 0x93, 0xa3, 0x00, + 0x1e, 0xf2, 0x2e, 0xf0, 0x2e, 0x3f ) ); + +/** Authentication test from MS-NLMP specification */ +AUTHENTICATE_TEST ( msnlmp_authenticate, + "Domain", "User", "Password", "COMPUTER", + DATA ( 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa ), + DATA ( 0x4e, 0x54, 0x4c, 0x4d, 0x53, 0x53, 0x50, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x38, 0x00, 0x00, 0x00, + 0x33, 0x82, 0x8a, 0xe2, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, + 0xcd, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x24, 0x00, 0x24, 0x00, 0x44, 0x00, 0x00, 0x00, 0x06, 0x00, + 0x70, 0x17, 0x00, 0x00, 0x00, 0x0f, 0x53, 0x00, 0x65, 0x00, + 0x72, 0x00, 0x76, 0x00, 0x65, 0x00, 0x72, 0x00, 0x02, 0x00, + 0x0c, 0x00, 0x44, 0x00, 0x6f, 0x00, 0x6d, 0x00, 0x61, 0x00, + 0x69, 0x00, 0x6e, 0x00, 0x01, 0x00, 0x0c, 0x00, 0x53, 0x00, + 0x65, 0x00, 0x72, 0x00, 0x76, 0x00, 0x65, 
0x00, 0x72, 0x00, + 0x00, 0x00, 0x00, 0x00 ), + DATA ( 0x4e, 0x54, 0x4c, 0x4d, 0x53, 0x53, 0x50, 0x00, 0x03, 0x00, + 0x00, 0x00, 0x18, 0x00, 0x18, 0x00, 0x6c, 0x00, 0x00, 0x00, + 0x54, 0x00, 0x54, 0x00, 0x84, 0x00, 0x00, 0x00, 0x0c, 0x00, + 0x0c, 0x00, 0x48, 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, + 0x54, 0x00, 0x00, 0x00, 0x10, 0x00, 0x10, 0x00, 0x5c, 0x00, + 0x00, 0x00, 0x10, 0x00, 0x10, 0x00, 0xd8, 0x00, 0x00, 0x00, + 0x35, 0x82, 0x88, 0xe2, 0x05, 0x01, 0x28, 0x0a, 0x00, 0x00, + 0x00, 0x0f, 0x44, 0x00, 0x6f, 0x00, 0x6d, 0x00, 0x61, 0x00, + 0x69, 0x00, 0x6e, 0x00, 0x55, 0x00, 0x73, 0x00, 0x65, 0x00, + 0x72, 0x00, 0x43, 0x00, 0x4f, 0x00, 0x4d, 0x00, 0x50, 0x00, + 0x55, 0x00, 0x54, 0x00, 0x45, 0x00, 0x52, 0x00, 0x86, 0xc3, + 0x50, 0x97, 0xac, 0x9c, 0xec, 0x10, 0x25, 0x54, 0x76, 0x4a, + 0x57, 0xcc, 0xcc, 0x19, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0x68, 0xcd, 0x0a, 0xb8, 0x51, 0xe5, 0x1c, 0x96, + 0xaa, 0xbc, 0x92, 0x7b, 0xeb, 0xef, 0x6a, 0x1c, 0x01, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x0c, 0x00, + 0x44, 0x00, 0x6f, 0x00, 0x6d, 0x00, 0x61, 0x00, 0x69, 0x00, + 0x6e, 0x00, 0x01, 0x00, 0x0c, 0x00, 0x53, 0x00, 0x65, 0x00, + 0x72, 0x00, 0x76, 0x00, 0x65, 0x00, 0x72, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0xda, 0xd2, 0x54, + 0x4f, 0xc9, 0x79, 0x90, 0x94, 0xce, 0x1c, 0xe9, 0x0b, 0xc9, + 0xd0, 0x3e ) ); + +/** + * Report key generation test result + * + * @v test Key generation test + * @v file Test code file + * @v line Test code line + */ +static void ntlm_key_okx ( struct ntlm_key_test *test, + const char *file, unsigned int line ) { + struct ntlm_key key; + + ntlm_key ( test->domain, test->username, test->password, &key ); + okx ( memcmp ( &key, &test->expected, sizeof ( key ) ) == 0, + file, line ); +} +#define ntlm_key_ok( test ) \ + ntlm_key_okx ( test, __FILE__, __LINE__ ) + +/** + * 
Report NTLM variable-length data test result + * + * @v msg Message header + * @v msg_len Length of message + * @v data Variable-length data descriptor + * @v expected Expected message header + * @v expected_data Expected variable-length data descriptor + * @v field Field name + * @v file Test code file + * @v line Test code line + */ +static void ntlm_data_okx ( struct ntlm_header *msg, size_t msg_len, + struct ntlm_data *data, + struct ntlm_header *expected, + struct ntlm_data *expected_data, + const char *field, const char *file, + unsigned int line ) { + size_t offset; + size_t len; + void *raw; + void *expected_raw; + + /* Verify data lies within message */ + okx ( data->len == data->max_len, file, line ); + offset = le32_to_cpu ( data->offset ); + len = le16_to_cpu ( data->len ); + okx ( offset <= msg_len, file, line ); + okx ( len <= ( msg_len - offset ), file, line ); + + /* Verify content matches expected content */ + raw = ( ( ( void * ) msg ) + offset ); + expected_raw = ( ( ( void * ) expected ) + + le32_to_cpu ( expected_data->offset ) ); + DBGC ( msg, "NTLM %s expected:\n", field ); + DBGC_HDA ( msg, 0, expected_raw, le16_to_cpu ( expected_data->len ) ); + DBGC ( msg, "NTLM %s actual:\n", field ); + DBGC_HDA ( msg, 0, raw, len ); + okx ( data->len == expected_data->len, file, line ); + okx ( memcmp ( raw, expected_raw, len ) == 0, file, line ); +} +#define ntlm_data_ok( msg, msg_len, data, expected, expected_data ) \ + ntlm_data_okx ( msg, msg_len, data, expected, expected_data, \ + __FILE__, __LINE__ ) + +/** + * Report NTLM authentication test result + * + * @v test Authentication test + * @v file Test code file + * @v line Test code line + */ +static void ntlm_authenticate_okx ( struct ntlm_authenticate_test *test, + const char *file, unsigned int line ) { + struct ntlm_authenticate *expected = test->expected; + struct ntlm_challenge_info info; + struct ntlm_authenticate *auth; + struct ntlm_key key; + struct ntlm_lm_response lm; + struct 
ntlm_nt_response nt; + size_t len; + + /* Parse Challenge message */ + okx ( ntlm_challenge ( test->challenge, test->challenge_len, + &info ) == 0, file, line ); + + /* Generate key */ + ntlm_key ( test->domain, test->username, test->password, &key ); + + /* Generate responses */ + ntlm_response ( &info, &key, &test->nonce, &lm, &nt ); + + /* Allocate buffer for Authenticate message */ + len = ntlm_authenticate_len ( &info, test->domain, test->username, + test->workstation ); + okx ( len >= sizeof ( *auth ), file, line ); + auth = malloc ( len ); + okx ( auth != NULL, file, line ); + + /* Construct Authenticate message */ + okx ( ntlm_authenticate ( &info, test->domain, test->username, + test->workstation, &lm, &nt, auth ) == len, + file, line ); + + /* Verify header */ + okx ( memcmp ( &auth->header, &expected->header, + sizeof ( auth->header ) ) == 0, file, line ); + + /* Verify LAN Manager response */ + ntlm_data_okx ( &auth->header, len, &auth->lm, &expected->header, + &expected->lm, "LM", file, line ); + + /* Verify NT response */ + ntlm_data_okx ( &auth->header, len, &auth->nt, &expected->header, + &expected->nt, "NT", file, line ); + + /* Verify domain name */ + ntlm_data_okx ( &auth->header, len, &auth->domain, &expected->header, + &expected->domain, "domain", file, line ); + + /* Verify user name */ + ntlm_data_okx ( &auth->header, len, &auth->user, &expected->header, + &expected->user, "user", file, line ); + + /* Verify workstation name */ + ntlm_data_okx ( &auth->header, len, &auth->workstation, + &expected->header, &expected->workstation, + "workstation",file, line ); + + /* Verify session key */ + if ( auth->flags & NTLM_NEGOTIATE_KEY_EXCH ) { + ntlm_data_okx ( &auth->header, len, &auth->session, + &expected->header, &expected->session, + "session", file, line ); + } + + /* Free Authenticate message */ + free ( auth ); +} +#define ntlm_authenticate_ok( test ) \ + ntlm_authenticate_okx ( test, __FILE__, __LINE__ ) + +/** + * Perform NTLM self-test + * 
+ */ +static void ntlm_test_exec ( void ) { + + /* Verify key generation */ + ntlm_key_ok ( &msnlmp_ntowfv2 ); + + /* Verify authentication response */ + ntlm_authenticate_ok ( &msnlmp_authenticate ); +} + +/** NTLM self-test */ +struct self_test ntlm_test __self_test = { + .name = "ntlm", + .exec = ntlm_test_exec, +}; diff --git a/src/tests/pccrc_test.c b/src/tests/pccrc_test.c new file mode 100644 index 00000000..f4ab573a --- /dev/null +++ b/src/tests/pccrc_test.c @@ -0,0 +1,529 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * Peer Content Caching and Retrieval: Content Identification [MS-PCCRC] tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** Define inline raw data */ +#define DATA(...) 
{ __VA_ARGS__ } + +/** + * Define an inline content range + * + * @v START Start offset + * @v END End offset + * @ret range Content range + */ +#define RANGE( START, END ) { .start = START, .end = END } + +/** + * Define an inline trimmed content range + * + * @v START Start offset + * @v END End offset + * @ret trim Trimmed content range + */ +#define TRIM( START, END ) { .start = START, .end = END } + +/** A content information test */ +struct peerdist_info_test { + /** Raw content information */ + const void *data; + /** Length of raw content information */ + size_t len; + /** Expected digest algorithm */ + struct digest_algorithm *expected_digest; + /** Expected digest size */ + size_t expected_digestsize; + /** Expected content range */ + struct peerdist_range expected_range; + /** Expected trimmed content range */ + struct peerdist_range expected_trim; + /** Expected number of segments */ + unsigned int expected_segments; +}; + +/** + * Define a content information test + * + * @v name Test name + * @v DATA Raw content information + * @v DIGEST Expected digest algorithm + * @v DIGESTSIZE Expected digest size + * @v RANGE Expected content range + * @v TRIM Expected trimmer content range + * @v SEGMENTS Expected number of segments + * @ret test Content information test + * + * Raw content information can be obtained from PeerDist-capable web + * servers using wget's "--header" option to inject the relevant + * PeerDist headers. 
For example: + * + * wget --header "Accept-Encoding: peerdist" \ + * --header "X-P2P-PeerDist: Version=1.0" \ + * http://peerdist.server.address/test.url -O - | xxd -i -c 11 + * + * Version 1 content information can be retrieved using the headers: + * + * Accept-Encoding: peerdist + * X-P2P-PeerDist: Version=1.0 + * + * Version 2 content information can be retrieved (from compatible + * servers) using the headers: + * + * Accept-Encoding: peerdist + * X-P2P-PeerDist: Version=1.1 + * X-P2P-PeerDistEx: MinContentInformation=2.0, MaxContentInformation=2.0 + */ +#define PEERDIST_INFO_TEST( name, DATA, DIGEST, DIGESTSIZE, RANGE, \ + TRIM, SEGMENTS ) \ + static const uint8_t name ## _data[] = DATA; \ + static struct peerdist_info_test name = { \ + .data = name ## _data, \ + .len = sizeof ( name ## _data ), \ + .expected_digest = DIGEST, \ + .expected_digestsize = DIGESTSIZE, \ + .expected_range = RANGE, \ + .expected_trim = TRIM, \ + .expected_segments = SEGMENTS, \ + } + +/** A content information segment test */ +struct peerdist_info_segment_test { + /** Segment index */ + unsigned int index; + /** Expected content range */ + struct peerdist_range expected_range; + /** Expected number of blocks */ + unsigned int expected_blocks; + /** Expected block size */ + size_t expected_blksize; + /** Expected segment hash of data */ + uint8_t expected_hash[PEERDIST_DIGEST_MAX_SIZE]; + /** Expected segment secret */ + uint8_t expected_secret[PEERDIST_DIGEST_MAX_SIZE]; + /** Expected segment identifier */ + uint8_t expected_id[PEERDIST_DIGEST_MAX_SIZE]; +}; + +/** + * Define a content information segment test + * + * @v name Test name + * @v INDEX Segment index + * @v RANGE Expected content range + * @v BLOCKS Expected number of blocks + * @v BLKSIZE Expected block size + * @v HASH Expected segment hash of data + * @v SECRET Expected segment secret + * @v ID Expected segment identifier + * @ret test Content information segment test + */ +#define PEERDIST_INFO_SEGMENT_TEST( name, 
INDEX, RANGE, BLOCKS, \ + BLKSIZE, HASH, SECRET, ID ) \ + static struct peerdist_info_segment_test name = { \ + .index = INDEX, \ + .expected_range = RANGE, \ + .expected_blocks = BLOCKS, \ + .expected_blksize = BLKSIZE, \ + .expected_hash = HASH, \ + .expected_secret = SECRET, \ + .expected_id = ID, \ + } + +/** A content information block test */ +struct peerdist_info_block_test { + /** Block index */ + unsigned int index; + /** Expected content range */ + struct peerdist_range expected_range; + /** Expected trimmed content range */ + struct peerdist_range expected_trim; + /** Expected hash of data */ + uint8_t expected_hash[PEERDIST_DIGEST_MAX_SIZE]; +}; + +/** + * Define a content information block test + * + * @v name Test name + * @v INDEX Block index + * @v RANGE Expected content range + * @v TRIM Expected trimmed content range + * @v HASH Expected hash of data + * @ret test Content information block test + */ +#define PEERDIST_INFO_BLOCK_TEST( name, INDEX, RANGE, TRIM, HASH ) \ + static struct peerdist_info_block_test name = { \ + .index = INDEX, \ + .expected_range = RANGE, \ + .expected_trim = TRIM, \ + .expected_hash = HASH, \ + } + +/** + * Define a server passphrase + * + * @v name Server passphrase name + * @v DATA Raw server passphrase + * + * The server passphrase can be exported from a Windows BranchCache + * server using the command: + * + * netsh branchcache exportkey exported.key somepassword + * + * and this encrypted exported key can be decrypted using the + * oSSL_key_dx or mcrypt_key_dx utilities found in the (prototype) + * Prequel project at https://fedorahosted.org/prequel/ : + * + * oSSL_key_dx exported.key somepassword + * or + * mcrypt_key_dx exported.key somepassword + * + * Either command will display both the server passphrase and the + * "Server Secret". Note that this latter is the version 1 server + * secret (i.e. 
the SHA-256 of the server passphrase); the + * corresponding version 2 server secret can be obtained by + * calculating the truncated SHA-512 of the server passphrase. + * + * We do not know the server passphrase during normal operation. We + * use it in the self-tests only to check for typos and other errors + * in the test vectors, by checking that the segment secret defined in + * a content information segment test is as expected. + */ +#define SERVER_PASSPHRASE( name, DATA ) \ + static uint8_t name[] = DATA + +/** Server passphrase used for these test vectors */ +SERVER_PASSPHRASE ( passphrase, + DATA ( 0x2a, 0x3d, 0x73, 0xeb, 0x43, 0x5e, 0x9f, 0x2b, 0x8a, 0x34, 0x42, + 0x67, 0xe7, 0x46, 0x7a, 0x3c, 0x73, 0x85, 0xc6, 0xe0, 0x55, 0xe2, + 0xb4, 0xd3, 0x0d, 0xfe, 0xc7, 0xc3, 0x8b, 0x0e, 0xd7, 0x2c ) ); + +/** IIS logo (iis-85.png) content information version 1 */ +PEERDIST_INFO_TEST ( iis_85_png_v1, + DATA ( 0x00, 0x01, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7e, 0x85, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x00, 0xd8, 0xd9, 0x76, 0x35, 0x4a, 0x48, 0x72, 0xe9, 0x25, 0x76, + 0x18, 0x03, 0xf4, 0x58, 0xd9, 0xda, 0xaa, 0x67, 0xf8, 0xe3, 0x1c, + 0x63, 0x0f, 0xb7, 0x4e, 0x6a, 0x31, 0x2e, 0xf8, 0xa2, 0x5a, 0xba, + 0x11, 0xaf, 0xc0, 0xd7, 0x94, 0x92, 0x43, 0xf9, 0x4f, 0x9c, 0x1f, + 0xab, 0x35, 0xd9, 0xfd, 0x1e, 0x33, 0x1f, 0xcf, 0x78, 0x11, 0xa2, + 0xe0, 0x1d, 0x35, 0x87, 0xb3, 0x8d, 0x77, 0x0a, 0x29, 0xe2, 0x02, + 0x00, 0x00, 0x00, 0x73, 0xc1, 0x8a, 0xb8, 0x54, 0x91, 0x10, 0xf8, + 0xe9, 0x0e, 0x71, 0xbb, 0xc3, 0xab, 0x2a, 0xa8, 0xc4, 0x4d, 0x13, + 0xf4, 0x92, 0x94, 0x99, 0x25, 0x5b, 0x66, 0x0f, 0x24, 0xec, 0x77, + 0x80, 0x0b, 0x97, 0x4b, 0xdd, 0x65, 0x56, 0x7f, 0xde, 0xec, 0xcd, + 0xaf, 0xe4, 0x57, 0xa9, 0x50, 0x3b, 0x45, 0x48, 0xf6, 0x6e, 0xd3, + 0xb1, 0x88, 0xdc, 0xfd, 0xa0, 0xac, 0x38, 0x2b, 0x09, 0x71, 0x1a, + 0xcc ), + &sha256_algorithm, 32, RANGE ( 0, 99710 ), 
TRIM ( 0, 99710 ), 1 ); + +/** IIS logo (iis-85.png) content information version 1 segment 0 */ +PEERDIST_INFO_SEGMENT_TEST ( iis_85_png_v1_s0, 0, + RANGE ( 0, 99710 ), 2, 65536, + DATA ( 0xd8, 0xd9, 0x76, 0x35, 0x4a, 0x48, 0x72, 0xe9, 0x25, 0x76, 0x18, + 0x03, 0xf4, 0x58, 0xd9, 0xda, 0xaa, 0x67, 0xf8, 0xe3, 0x1c, 0x63, + 0x0f, 0xb7, 0x4e, 0x6a, 0x31, 0x2e, 0xf8, 0xa2, 0x5a, 0xba ), + DATA ( 0x11, 0xaf, 0xc0, 0xd7, 0x94, 0x92, 0x43, 0xf9, 0x4f, 0x9c, 0x1f, + 0xab, 0x35, 0xd9, 0xfd, 0x1e, 0x33, 0x1f, 0xcf, 0x78, 0x11, 0xa2, + 0xe0, 0x1d, 0x35, 0x87, 0xb3, 0x8d, 0x77, 0x0a, 0x29, 0xe2 ), + DATA ( 0x49, 0x1b, 0x21, 0x7d, 0xbe, 0xe2, 0xb5, 0xf1, 0x2c, 0xa7, 0x9b, + 0x01, 0x5e, 0x06, 0xf4, 0xbb, 0xe6, 0x4f, 0x97, 0x45, 0xba, 0xd7, + 0x86, 0x7a, 0xef, 0x17, 0xde, 0x59, 0x92, 0x7e, 0xdc, 0xe9 ) ); + +/** IIS logo (iis-85.png) content information version 1 segment 0 block 0 */ +PEERDIST_INFO_BLOCK_TEST ( iis_85_png_v1_s0_b0, 0, + RANGE ( 0, 65536 ), + TRIM ( 0, 65536 ), + DATA ( 0x73, 0xc1, 0x8a, 0xb8, 0x54, 0x91, 0x10, 0xf8, 0xe9, 0x0e, 0x71, + 0xbb, 0xc3, 0xab, 0x2a, 0xa8, 0xc4, 0x4d, 0x13, 0xf4, 0x92, 0x94, + 0x99, 0x25, 0x5b, 0x66, 0x0f, 0x24, 0xec, 0x77, 0x80, 0x0b ) ); + +/** IIS logo (iis-85.png) content information version 1 segment 0 block 1 */ +PEERDIST_INFO_BLOCK_TEST ( iis_85_png_v1_s0_b1, 1, + RANGE ( 65536, 99710 ), + TRIM ( 65536, 99710 ), + DATA ( 0x97, 0x4b, 0xdd, 0x65, 0x56, 0x7f, 0xde, 0xec, 0xcd, 0xaf, 0xe4, + 0x57, 0xa9, 0x50, 0x3b, 0x45, 0x48, 0xf6, 0x6e, 0xd3, 0xb1, 0x88, + 0xdc, 0xfd, 0xa0, 0xac, 0x38, 0x2b, 0x09, 0x71, 0x1a, 0xcc ) ); + +/** IIS logo (iis-85.png) content information version 2 */ +PEERDIST_INFO_TEST ( iis_85_png_v2, + DATA ( 0x00, 0x02, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x88, 0x00, 0x00, 0x99, 0xde, 0xe0, 0xd0, 0xc3, 0x58, + 0xe2, 0x68, 0x4b, 0x62, 0x33, 0x0d, 0x32, 
0xb5, 0xf1, 0x97, 0x87, + 0x24, 0xa0, 0xd0, 0xa5, 0x2b, 0xdc, 0x5e, 0x78, 0x1f, 0xae, 0x71, + 0xff, 0x57, 0xa8, 0xbe, 0x3d, 0xd4, 0x58, 0x03, 0x7e, 0xd4, 0x04, + 0x11, 0x6b, 0xb6, 0x16, 0xd9, 0xb1, 0x41, 0x16, 0x08, 0x85, 0x20, + 0xc4, 0x7c, 0xdc, 0x50, 0xab, 0xce, 0xa3, 0xfa, 0xe1, 0x88, 0xa9, + 0x8e, 0xa2, 0x2d, 0xf3, 0xc0, 0x00, 0x00, 0xeb, 0xa0, 0x33, 0x81, + 0xd0, 0xd0, 0xcb, 0x74, 0xf4, 0xb6, 0x13, 0xd8, 0x21, 0x0f, 0x37, + 0xf0, 0x02, 0xa0, 0x6f, 0x39, 0x10, 0x58, 0x60, 0x96, 0xa1, 0x30, + 0xd3, 0x43, 0x98, 0xc0, 0x8e, 0x66, 0xd7, 0xbc, 0xb8, 0xb6, 0xeb, + 0x77, 0x83, 0xe4, 0xf8, 0x07, 0x64, 0x7b, 0x63, 0xf1, 0x46, 0xb5, + 0x2f, 0x4a, 0xc8, 0x9c, 0xcc, 0x7a, 0xbf, 0x5f, 0xa1, 0x1a, 0xca, + 0xfc, 0x2a, 0xcf, 0x50, 0x28, 0x58, 0x6c ), + &sha512_algorithm, 32, RANGE ( 0, 99710 ), TRIM ( 0, 99710 ), 2 ); + +/** IIS logo (iis-85.png) content information version 2 segment 0 */ +PEERDIST_INFO_SEGMENT_TEST ( iis_85_png_v2_s0, 0, + RANGE ( 0, 39390 ), 1, 39390, + DATA ( 0xe0, 0xd0, 0xc3, 0x58, 0xe2, 0x68, 0x4b, 0x62, 0x33, 0x0d, 0x32, + 0xb5, 0xf1, 0x97, 0x87, 0x24, 0xa0, 0xd0, 0xa5, 0x2b, 0xdc, 0x5e, + 0x78, 0x1f, 0xae, 0x71, 0xff, 0x57, 0xa8, 0xbe, 0x3d, 0xd4 ), + DATA ( 0x58, 0x03, 0x7e, 0xd4, 0x04, 0x11, 0x6b, 0xb6, 0x16, 0xd9, 0xb1, + 0x41, 0x16, 0x08, 0x85, 0x20, 0xc4, 0x7c, 0xdc, 0x50, 0xab, 0xce, + 0xa3, 0xfa, 0xe1, 0x88, 0xa9, 0x8e, 0xa2, 0x2d, 0xf3, 0xc0 ), + DATA ( 0x33, 0x71, 0xbb, 0xea, 0xdd, 0xb6, 0x23, 0x53, 0xad, 0xce, 0xf9, + 0x70, 0xa0, 0x6f, 0xdf, 0x65, 0x00, 0x1e, 0x04, 0x21, 0xf4, 0xc7, + 0x10, 0x82, 0x76, 0xb0, 0xc3, 0x7a, 0x9f, 0x9e, 0xc1, 0x0f ) ); + +/** IIS logo (iis-85.png) content information version 2 segment 0 block 0 */ +PEERDIST_INFO_BLOCK_TEST ( iis_85_png_v2_s0_b0, 0, + RANGE ( 0, 39390 ), + TRIM ( 0, 39390 ), + DATA ( 0xe0, 0xd0, 0xc3, 0x58, 0xe2, 0x68, 0x4b, 0x62, 0x33, 0x0d, 0x32, + 0xb5, 0xf1, 0x97, 0x87, 0x24, 0xa0, 0xd0, 0xa5, 0x2b, 0xdc, 0x5e, + 0x78, 0x1f, 0xae, 0x71, 0xff, 0x57, 0xa8, 0xbe, 0x3d, 0xd4 ) ); + +/** IIS logo 
(iis-85.png) content information version 2 segment 1 */ +PEERDIST_INFO_SEGMENT_TEST ( iis_85_png_v2_s1, 1, + RANGE ( 39390, 99710 ), 1, 60320, + DATA ( 0x33, 0x81, 0xd0, 0xd0, 0xcb, 0x74, 0xf4, 0xb6, 0x13, 0xd8, 0x21, + 0x0f, 0x37, 0xf0, 0x02, 0xa0, 0x6f, 0x39, 0x10, 0x58, 0x60, 0x96, + 0xa1, 0x30, 0xd3, 0x43, 0x98, 0xc0, 0x8e, 0x66, 0xd7, 0xbc ), + DATA ( 0xb8, 0xb6, 0xeb, 0x77, 0x83, 0xe4, 0xf8, 0x07, 0x64, 0x7b, 0x63, + 0xf1, 0x46, 0xb5, 0x2f, 0x4a, 0xc8, 0x9c, 0xcc, 0x7a, 0xbf, 0x5f, + 0xa1, 0x1a, 0xca, 0xfc, 0x2a, 0xcf, 0x50, 0x28, 0x58, 0x6c ), + DATA ( 0xd7, 0xe9, 0x24, 0x42, 0x5e, 0x8f, 0x4f, 0x88, 0xf0, 0x1d, 0xc6, + 0xa9, 0xbb, 0x1b, 0xc3, 0x7b, 0xe1, 0x13, 0xec, 0x79, 0x17, 0xc7, + 0x45, 0xd4, 0x96, 0x5c, 0x2b, 0x55, 0xfa, 0x16, 0x3a, 0x6e ) ); + +/** IIS logo (iis-85.png) content information version 2 segment 1 block 0 */ +PEERDIST_INFO_BLOCK_TEST ( iis_85_png_v2_s1_b0, 0, + RANGE ( 39390, 99710 ), + TRIM ( 39390, 99710 ), + DATA ( 0x33, 0x81, 0xd0, 0xd0, 0xcb, 0x74, 0xf4, 0xb6, 0x13, 0xd8, 0x21, + 0x0f, 0x37, 0xf0, 0x02, 0xa0, 0x6f, 0x39, 0x10, 0x58, 0x60, 0x96, + 0xa1, 0x30, 0xd3, 0x43, 0x98, 0xc0, 0x8e, 0x66, 0xd7, 0xbc ) ); + +/** + * Report content information test result + * + * @v test Content information test + * @v info Content information to fill in + * @v file Test code file + * @v line Test code line + */ +static void peerdist_info_okx ( struct peerdist_info_test *test, + struct peerdist_info *info, + const char *file, unsigned int line ) { + + /* Parse content information */ + okx ( peerdist_info ( virt_to_user ( test->data ), test->len, + info ) == 0, file, line ); + + /* Verify content information */ + okx ( info->raw.data == virt_to_user ( test->data ), file, line ); + okx ( info->raw.len == test->len, file, line ); + okx ( info->digest == test->expected_digest, file, line ); + okx ( info->digestsize == test->expected_digestsize, file, line ); + okx ( info->range.start == test->expected_range.start, file, line ); + okx ( info->range.end 
== test->expected_range.end, file, line ); + okx ( info->trim.start == test->expected_trim.start, file, line ); + okx ( info->trim.end == test->expected_trim.end, file, line ); + okx ( info->trim.start >= info->range.start, file, line ); + okx ( info->trim.end <= info->range.end, file, line ); + okx ( info->segments == test->expected_segments, file, line ); +} +#define peerdist_info_ok( test, info ) \ + peerdist_info_okx ( test, info, __FILE__, __LINE__ ) + +/** + * Report content information segment test result + * + * @v test Content information segment test + * @v info Content information + * @v segment Segment information to fill in + * @v file Test code file + * @v line Test code line + */ +static void peerdist_info_segment_okx ( struct peerdist_info_segment_test *test, + const struct peerdist_info *info, + struct peerdist_info_segment *segment, + const char *file, unsigned int line ) { + size_t digestsize = info->digestsize; + + /* Parse content information segment */ + okx ( peerdist_info_segment ( info, segment, test->index ) == 0, + file, line ); + + /* Verify content information segment */ + okx ( segment->info == info, file, line ); + okx ( segment->index == test->index, file, line ); + okx ( segment->range.start == test->expected_range.start, file, line ); + okx ( segment->range.end == test->expected_range.end, file, line ); + okx ( segment->blocks == test->expected_blocks, file, line ); + okx ( segment->blksize == test->expected_blksize, file, line ); + okx ( memcmp ( segment->hash, test->expected_hash, + digestsize ) == 0, file, line ); + okx ( memcmp ( segment->secret, test->expected_secret, + digestsize ) == 0, file, line ); + okx ( memcmp ( segment->id, test->expected_id, + digestsize ) == 0, file, line ); +} +#define peerdist_info_segment_ok( test, info, segment ) \ + peerdist_info_segment_okx ( test, info, segment, __FILE__, __LINE__ ) + +/** + * Report content information block test result + * + * @v test Content information block test + * @v 
segment Segment information + * @v block Block information to fill in + * @v file Test code file + * @v line Test code line + */ +static void +peerdist_info_block_okx ( struct peerdist_info_block_test *test, + const struct peerdist_info_segment *segment, + struct peerdist_info_block *block, + const char *file, unsigned int line ) { + const struct peerdist_info *info = segment->info; + size_t digestsize = info->digestsize; + + /* Parse content information block */ + okx ( peerdist_info_block ( segment, block, test->index ) == 0, + file, line ); + + /* Verify content information block */ + okx ( block->segment == segment, file, line ); + okx ( block->index == test->index, file, line ); + okx ( block->range.start == test->expected_range.start, file, line ); + okx ( block->range.end == test->expected_range.end, file, line ); + okx ( block->trim.start == test->expected_trim.start, file, line ); + okx ( block->trim.end == test->expected_trim.end, file, line ); + okx ( memcmp ( block->hash, test->expected_hash, + digestsize ) == 0, file, line ); +} +#define peerdist_info_block_ok( test, segment, block ) \ + peerdist_info_block_okx ( test, segment, block, __FILE__, __LINE__ ) + +/** + * Report server passphrase test result + * + * @v test Content information segment test + * @v info Content information + * @v pass Server passphrase + * @v pass_len Length of server passphrase + * @v file Test code file + * @v line Test code line + */ +static void +peerdist_info_passphrase_okx ( struct peerdist_info_segment_test *test, + const struct peerdist_info *info, + uint8_t *pass, size_t pass_len, + const char *file, unsigned int line ) { + struct digest_algorithm *digest = info->digest; + uint8_t ctx[digest->ctxsize]; + uint8_t secret[digest->digestsize]; + uint8_t expected[digest->digestsize]; + size_t digestsize = info->digestsize; + size_t secretsize = digestsize; + + /* Calculate server secret */ + digest_init ( digest, ctx ); + digest_update ( digest, ctx, pass, pass_len ); + 
digest_final ( digest, ctx, secret ); + + /* Calculate expected segment secret */ + hmac_init ( digest, ctx, secret, &secretsize ); + assert ( secretsize == digestsize ); + hmac_update ( digest, ctx, test->expected_hash, digestsize ); + hmac_final ( digest, ctx, secret, &secretsize, expected ); + assert ( secretsize == digestsize ); + + /* Verify segment secret */ + okx ( memcmp ( test->expected_secret, expected, digestsize ) == 0, + file, line ); +} +#define peerdist_info_passphrase_ok( test, info, pass, pass_len ) \ + peerdist_info_passphrase_okx ( test, info, pass, pass_len, \ + __FILE__, __LINE__ ) + +/** + * Perform content information self-tests + * + */ +static void peerdist_info_test_exec ( void ) { + struct peerdist_info info; + struct peerdist_info_segment segment; + struct peerdist_info_block block; + + /* IIS logo (iis-85.png) content information version 1 */ + peerdist_info_ok ( &iis_85_png_v1, &info ); + peerdist_info_passphrase_ok ( &iis_85_png_v1_s0, &info, + passphrase, sizeof ( passphrase ) ); + peerdist_info_segment_ok ( &iis_85_png_v1_s0, &info, &segment ); + peerdist_info_block_ok ( &iis_85_png_v1_s0_b0, &segment, &block ); + peerdist_info_block_ok ( &iis_85_png_v1_s0_b1, &segment, &block ); + + /* IIS logo (iis-85.png) content information version 2 */ + peerdist_info_ok ( &iis_85_png_v2, &info ); + peerdist_info_passphrase_ok ( &iis_85_png_v2_s0, &info, + passphrase, sizeof ( passphrase ) ); + peerdist_info_segment_ok ( &iis_85_png_v2_s0, &info, &segment ); + peerdist_info_block_ok ( &iis_85_png_v2_s0_b0, &segment, &block ); + peerdist_info_passphrase_ok ( &iis_85_png_v2_s1, &info, + passphrase, sizeof ( passphrase ) ); + peerdist_info_segment_ok ( &iis_85_png_v2_s1, &info, &segment ); + peerdist_info_block_ok ( &iis_85_png_v2_s1_b0, &segment, &block ); +} + +/** Content information self-test */ +struct self_test peerdist_info_test __self_test = { + .name = "pccrc", + .exec = peerdist_info_test_exec, +}; diff --git a/src/tests/pem_test.c 
b/src/tests/pem_test.c new file mode 100644 index 00000000..df47ad50 --- /dev/null +++ b/src/tests/pem_test.c @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * PEM self-tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include +#include "asn1_test.h" + +/** Define inline expected digest */ +#define DIGEST(...) 
{ { __VA_ARGS__ } } + +/** Single RSA private key */ +ASN1 ( single, &pem_image_type, + "-----BEGIN RSA PRIVATE KEY-----\n" + "MCwCAQACBQC6loItAgMBAAECBCqhYIkCAwDyVwIDAMUbAgMAr9kCAmr9AgIaWQ==\n" + "-----END RSA PRIVATE KEY-----\n", + DIGEST ( 0xb9, 0x38, 0x83, 0xcd, 0xf4, 0x58, 0xa9, 0xa2, 0x84, 0x11, + 0xfa, 0x0b, 0x6f, 0xdc, 0x3e, 0xa3, 0x7c, 0x90, 0x7c, 0x2d ) ); + +/** Three concatenated RSA private keys */ +ASN1 ( multiple, &pem_image_type, + "-----BEGIN RSA PRIVATE KEY-----\n" + "MCwCAQACBQDtbjyVAgMBAAECBQCEOtJxAgMA+xsCAwDyDwICLGsCAgqTAgIxVQ==\n" + "-----END RSA PRIVATE KEY-----\n" + "-----BEGIN RSA PRIVATE KEY-----\n" + "MCwCAQACBQC3VlyxAgMBAAECBGakxDUCAwDanwIDANavAgIBWQICTuECAwCmWg==\n" + "-----END RSA PRIVATE KEY-----\n" + "-----BEGIN RSA PRIVATE KEY-----\n" + "MCwCAQACBQC89dS1AgMBAAECBQCxjnLBAgMA3qcCAwDZQwICP3cCAgpRAgI57A==\n" + "-----END RSA PRIVATE KEY-----\n", + DIGEST ( 0x9c, 0xb2, 0xc1, 0xa0, 0x9c, 0xcb, 0x11, 0xbf, 0x80, 0xd0, + 0x8c, 0xe5, 0xda, 0xf2, 0x3b, 0x2c, 0xca, 0x64, 0x25, 0x8a ), + DIGEST ( 0x82, 0x66, 0x24, 0xd9, 0xc3, 0x98, 0x1e, 0x5e, 0x56, 0xed, + 0xd0, 0xd0, 0x2a, 0x5e, 0x9c, 0x3a, 0x58, 0xdf, 0x76, 0x0d ), + DIGEST ( 0x01, 0xd2, 0x8a, 0x74, 0x42, 0x08, 0x0f, 0xb0, 0x03, 0x82, + 0xcd, 0xa3, 0xdc, 0x78, 0xfe, 0xd7, 0xa3, 0x28, 0xfc, 0x29 ) ); + +/** Two RSA private keys with various bits of noise added */ +ASN1 ( noisy, &pem_image_type, + "Hello world! 
This is uninteresting stuff before the actual data.\n" + "-----BEGIN RSA PRIVATE KEY-----\n" + "MCwCAQACBQC3VlyxAgMBAAECBGakxDUCAwDanwIDANavAgIBWQICTuECAwCmWg==\n" + "-----END RSA PRIVATE KEY-----\n" + "Here is some more uninteresting stuff.\n" + "Followed by what is actually another RSA private key, but with " + "extra whitespace added, and the description change to pretend " + "it's a certificate\n" + "-----BEGIN CERTIFICATE-----\n" + " MCwCAQACBQC6loItAgMBAAECBCqhYIkCAwD\r\n" + " yVwIDAMUbAgMAr9kCAmr9AgIaWQ== \r\n" + "-----END CERTIFICATE-----\n" + "and some trailing garbage as well\n" + "and more garbage with no final newline", + DIGEST ( 0x82, 0x66, 0x24, 0xd9, 0xc3, 0x98, 0x1e, 0x5e, 0x56, 0xed, + 0xd0, 0xd0, 0x2a, 0x5e, 0x9c, 0x3a, 0x58, 0xdf, 0x76, 0x0d ), + DIGEST ( 0xb9, 0x38, 0x83, 0xcd, 0xf4, 0x58, 0xa9, 0xa2, 0x84, 0x11, + 0xfa, 0x0b, 0x6f, 0xdc, 0x3e, 0xa3, 0x7c, 0x90, 0x7c, 0x2d ) ); + +/** + * Perform PEM self-test + * + */ +static void pem_test_exec ( void ) { + + /* Perform tests */ + asn1_ok ( &single ); + asn1_ok ( &multiple ); + asn1_ok ( &noisy ); +} + +/** PEM self-test */ +struct self_test pem_test __self_test = { + .name = "pem", + .exec = pem_test_exec, +}; diff --git a/src/tests/setjmp_test.c b/src/tests/setjmp_test.c new file mode 100644 index 00000000..deafcee0 --- /dev/null +++ b/src/tests/setjmp_test.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * setjmp()/longjmp() tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include + +/** A setjmp()/longjmp() test */ +struct setjmp_test { + /** Jump buffer */ + jmp_buf env; + /** Expected value */ + int expected; + /** Test code file */ + const char *file; + /** Test code line */ + unsigned int line; +}; + +/** Expected jump */ +static struct setjmp_test *jumped; + +/** + * Report a setjmp() test result + * + * @v test setjmp()/longjmp() test + * + * This has to be implemented as a macro since if it were a function + * then the context saved by setjmp() would be invalidated when the + * function returned. + */ +#define setjmp_ok( test ) do { \ + int value; \ + /* Sanity check */ \ + assert ( jumped == NULL ); \ + /* Initialise test */ \ + (test)->expected = 0; \ + (test)->file = __FILE__; \ + (test)->line = __LINE__; \ + /* Perform setjmp() */ \ + value = setjmp ( (test)->env ); \ + /* Report setjmp()/longjmp() result */ \ + setjmp_return_ok ( (test), value ); \ + } while ( 0 ) + +/** + * Report a setjmp()/longjmp() test result + * + * @v test setjmp()/longjmp() test + * @v value Value returned from setjmp() + * + * This function ends up reporting results from either setjmp() or + * longjmp() tests (since calls to longjmp() will return via the + * corresponding setjmp()). 
It therefore uses the test code file and + * line stored in the test structure, which will represent the line + * from which either setjmp() or longjmp() was called. + */ +static void setjmp_return_ok ( struct setjmp_test *test, int value ) { + + /* Determine whether this was reached via setjmp() or longjmp() */ + if ( value == 0 ) { + /* This is the initial call to setjmp() */ + okx ( test->expected == 0, test->file, test->line ); + okx ( jumped == NULL, test->file, test->line ); + } else { + /* This is reached via a call to longjmp() */ + okx ( value == test->expected, test->file, test->line ); + okx ( jumped == test, test->file, test->line ); + } + + /* Clear expected jump */ + jumped = NULL; +} + +/** + * Report a longjmp() test result + * + * @v test setjmp()/longjmp() test + * @v file Test code file + * @v line Test code line + */ +static void __attribute__ (( noreturn )) +longjmp_okx ( struct setjmp_test *test, int value, + const char *file, unsigned int line ) { + + /* Record expected value. A zero passed to longjmp() should + * result in setjmp() returning a value of one. + */ + test->expected = ( value ? value : 1 ); + + /* Record test code file and line */ + test->file = file; + test->line = line; + + /* Record expected jump */ + jumped = test; + + /* Perform longjmp(). Should return via setjmp_okx() */ + longjmp ( test->env, value ); + + /* longjmp() should never return */ + assert ( 0 ); +} +#define longjmp_ok( test, value ) \ + longjmp_okx ( test, value, __FILE__, __LINE__ ) + +/** + * Perform setjmp()/longjmp() self-tests + * + */ +static void setjmp_test_exec ( void ) { + static struct setjmp_test alpha; + static struct setjmp_test beta; + static int iteration; + + /* This is one of the very few situations in which the + * "for-case" pattern is justified. 
+ */ + for ( iteration = 0 ; iteration < 10 ; iteration++ ) { + DBGC ( jumped, "SETJMP test iteration %d\n", iteration ); + switch ( iteration ) { + case 0: setjmp_ok ( &alpha ); break; + case 1: setjmp_ok ( &beta ); break; + case 2: longjmp_ok ( &alpha, 0 ); + case 3: longjmp_ok ( &alpha, 1 ); + case 4: longjmp_ok ( &alpha, 2 ); + case 5: longjmp_ok ( &beta, 17 ); + case 6: longjmp_ok ( &beta, 29 ); + case 7: longjmp_ok ( &alpha, -1 ); + case 8: longjmp_ok ( &beta, 0 ); + case 9: longjmp_ok ( &beta, 42 ); + } + } +} + +/** setjmp()/longjmp() self-test */ +struct self_test setjmp_test __self_test = { + .name = "setjmp", + .exec = setjmp_test_exec, +}; diff --git a/src/tests/sha512_test.c b/src/tests/sha512_test.c new file mode 100644 index 00000000..be530eba --- /dev/null +++ b/src/tests/sha512_test.c @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2015 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * SHA-512 family tests + * + * NIST test vectors are taken from + * + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA512.pdf + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA384.pdf + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA512_256.pdf + * http://csrc.nist.gov/groups/ST/toolkit/documents/Examples/SHA512_224.pdf + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include "digest_test.h" + +/* Empty test vector (digest obtained from "sha512sum /dev/null") */ +DIGEST_TEST ( sha512_empty, &sha512_algorithm, DIGEST_EMPTY, + DIGEST ( 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd, 0xf1, + 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07, 0xd6, 0x20, + 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc, 0x83, 0xf4, 0xa9, + 0x21, 0xd3, 0x6c, 0xe9, 0xce, 0x47, 0xd0, 0xd1, 0x3c, + 0x5d, 0x85, 0xf2, 0xb0, 0xff, 0x83, 0x18, 0xd2, 0x87, + 0x7e, 0xec, 0x2f, 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, + 0x7a, 0x81, 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, + 0x3e ) ); + +/* NIST test vector "abc" */ +DIGEST_TEST ( sha512_nist_abc, &sha512_algorithm, DIGEST_NIST_ABC, + DIGEST ( 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, + 0x41, 0x73, 0x49, 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, + 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, 0x0a, 0x9e, 0xee, + 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a, + 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, + 0xfe, 0xeb, 0xbd, 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, + 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f, 0xa5, 0x4c, 0xa4, + 0x9f ) ); + +/* NIST test vector "abc...stu" */ +DIGEST_TEST ( sha512_nist_abc_stu, &sha512_algorithm, DIGEST_NIST_ABC_STU, + DIGEST ( 0x8e, 0x95, 0x9b, 0x75, 0xda, 0xe3, 0x13, 0xda, 0x8c, + 0xf4, 0xf7, 0x28, 0x14, 0xfc, 0x14, 0x3f, 0x8f, 0x77, + 0x79, 0xc6, 0xeb, 0x9f, 0x7f, 0xa1, 0x72, 0x99, 0xae, + 0xad, 0xb6, 0x88, 0x90, 0x18, 0x50, 0x1d, 0x28, 0x9e, + 0x49, 0x00, 0xf7, 0xe4, 0x33, 0x1b, 0x99, 0xde, 
0xc4, + 0xb5, 0x43, 0x3a, 0xc7, 0xd3, 0x29, 0xee, 0xb6, 0xdd, + 0x26, 0x54, 0x5e, 0x96, 0xe5, 0x5b, 0x87, 0x4b, 0xe9, + 0x09 ) ); + +/* Empty test vector (digest obtained from "sha384sum /dev/null") */ +DIGEST_TEST ( sha384_empty, &sha384_algorithm, DIGEST_EMPTY, + DIGEST ( 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, 0x4c, + 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a, 0x21, 0xfd, + 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43, 0x4c, 0x0c, 0xc7, + 0xbf, 0x63, 0xf6, 0xe1, 0xda, 0x27, 0x4e, 0xde, 0xbf, + 0xe7, 0x6f, 0x65, 0xfb, 0xd5, 0x1a, 0xd2, 0xf1, 0x48, + 0x98, 0xb9, 0x5b ) ); + +/* NIST test vector "abc" */ +DIGEST_TEST ( sha384_nist_abc, &sha384_algorithm, DIGEST_NIST_ABC, + DIGEST ( 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b, 0xb5, + 0xa0, 0x3d, 0x69, 0x9a, 0xc6, 0x50, 0x07, 0x27, 0x2c, + 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63, 0x1a, 0x8b, 0x60, + 0x5a, 0x43, 0xff, 0x5b, 0xed, 0x80, 0x86, 0x07, 0x2b, + 0xa1, 0xe7, 0xcc, 0x23, 0x58, 0xba, 0xec, 0xa1, 0x34, + 0xc8, 0x25, 0xa7 ) ); + +/* NIST test vector "abc...stu" */ +DIGEST_TEST ( sha384_nist_abc_stu, &sha384_algorithm, DIGEST_NIST_ABC_STU, + DIGEST ( 0x09, 0x33, 0x0c, 0x33, 0xf7, 0x11, 0x47, 0xe8, 0x3d, + 0x19, 0x2f, 0xc7, 0x82, 0xcd, 0x1b, 0x47, 0x53, 0x11, + 0x1b, 0x17, 0x3b, 0x3b, 0x05, 0xd2, 0x2f, 0xa0, 0x80, + 0x86, 0xe3, 0xb0, 0xf7, 0x12, 0xfc, 0xc7, 0xc7, 0x1a, + 0x55, 0x7e, 0x2d, 0xb9, 0x66, 0xc3, 0xe9, 0xfa, 0x91, + 0x74, 0x60, 0x39 ) ); + +/* Empty test vector (digest obtained from "shasum -a 512256 /dev/null") */ +DIGEST_TEST ( sha512_256_empty, &sha512_256_algorithm, DIGEST_EMPTY, + DIGEST ( 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, + 0x87, 0xc3, 0x62, 0x2c, 0x51, 0x14, 0x06, 0x9b, 0xdd, + 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, + 0x1e, 0xce, 0xf0, 0x96, 0x7a ) ); + +/* NIST test vector "abc" */ +DIGEST_TEST ( sha512_256_nist_abc, &sha512_256_algorithm, DIGEST_NIST_ABC, + DIGEST ( 0x53, 0x04, 0x8e, 0x26, 0x81, 0x94, 0x1e, 0xf9, 0x9b, + 0x2e, 0x29, 0xb7, 0x6b, 0x4c, 0x7d, 0xab, 
0xe4, 0xc2, + 0xd0, 0xc6, 0x34, 0xfc, 0x6d, 0x46, 0xe0, 0xe2, 0xf1, + 0x31, 0x07, 0xe7, 0xaf, 0x23 ) ); + +/* NIST test vector "abc...stu" */ +DIGEST_TEST ( sha512_256_nist_abc_stu, &sha512_256_algorithm, + DIGEST_NIST_ABC_STU, + DIGEST ( 0x39, 0x28, 0xe1, 0x84, 0xfb, 0x86, 0x90, 0xf8, 0x40, + 0xda, 0x39, 0x88, 0x12, 0x1d, 0x31, 0xbe, 0x65, 0xcb, + 0x9d, 0x3e, 0xf8, 0x3e, 0xe6, 0x14, 0x6f, 0xea, 0xc8, + 0x61, 0xe1, 0x9b, 0x56, 0x3a ) ); + +/* Empty test vector (digest obtained from "shasum -a 512224 /dev/null") */ +DIGEST_TEST ( sha512_224_empty, &sha512_224_algorithm, DIGEST_EMPTY, + DIGEST ( 0x6e, 0xd0, 0xdd, 0x02, 0x80, 0x6f, 0xa8, 0x9e, 0x25, + 0xde, 0x06, 0x0c, 0x19, 0xd3, 0xac, 0x86, 0xca, 0xbb, + 0x87, 0xd6, 0xa0, 0xdd, 0xd0, 0x5c, 0x33, 0x3b, 0x84, + 0xf4 ) ); + +/* NIST test vector "abc" */ +DIGEST_TEST ( sha512_224_nist_abc, &sha512_224_algorithm, DIGEST_NIST_ABC, + DIGEST ( 0x46, 0x34, 0x27, 0x0f, 0x70, 0x7b, 0x6a, 0x54, 0xda, + 0xae, 0x75, 0x30, 0x46, 0x08, 0x42, 0xe2, 0x0e, 0x37, + 0xed, 0x26, 0x5c, 0xee, 0xe9, 0xa4, 0x3e, 0x89, 0x24, + 0xaa ) ); + +/* NIST test vector "abc...stu" */ +DIGEST_TEST ( sha512_224_nist_abc_stu, &sha512_224_algorithm, + DIGEST_NIST_ABC_STU, + DIGEST ( 0x23, 0xfe, 0xc5, 0xbb, 0x94, 0xd6, 0x0b, 0x23, 0x30, + 0x81, 0x92, 0x64, 0x0b, 0x0c, 0x45, 0x33, 0x35, 0xd6, + 0x64, 0x73, 0x4f, 0xe4, 0x0e, 0x72, 0x68, 0x67, 0x4a, + 0xf9 ) ); + +/** + * Perform SHA-512 family self-test + * + */ +static void sha512_test_exec ( void ) { + + /* Correctness tests */ + digest_ok ( &sha512_empty ); + digest_ok ( &sha512_nist_abc ); + digest_ok ( &sha512_nist_abc_stu ); + digest_ok ( &sha384_empty ); + digest_ok ( &sha384_nist_abc ); + digest_ok ( &sha384_nist_abc_stu ); + digest_ok ( &sha512_256_empty ); + digest_ok ( &sha512_256_nist_abc ); + digest_ok ( &sha512_256_nist_abc_stu ); + digest_ok ( &sha512_224_empty ); + digest_ok ( &sha512_224_nist_abc ); + digest_ok ( &sha512_224_nist_abc_stu ); + + /* Speed tests */ + DBG ( "SHA512 required %ld 
cycles per byte\n", + digest_cost ( &sha512_algorithm ) ); + DBG ( "SHA384 required %ld cycles per byte\n", + digest_cost ( &sha384_algorithm ) ); + DBG ( "SHA512/256 required %ld cycles per byte\n", + digest_cost ( &sha512_256_algorithm ) ); + DBG ( "SHA512/224 required %ld cycles per byte\n", + digest_cost ( &sha512_224_algorithm ) ); +} + +/** SHA-512 family self-test */ +struct self_test sha512_test __self_test = { + .name = "sha512", + .exec = sha512_test_exec, +}; diff --git a/src/usr/certmgmt.c b/src/usr/certmgmt.c new file mode 100644 index 00000000..2f233fe4 --- /dev/null +++ b/src/usr/certmgmt.c @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * Certificate management + * + */ + +/** + * Display status of a certificate + * + * @v cert X.509 certificate + */ +void certstat ( struct x509_certificate *cert ) { + struct digest_algorithm *digest = &sha1_algorithm; + uint8_t fingerprint[ digest->digestsize ]; + char buf[ base16_encoded_len ( sizeof ( fingerprint ) ) + 1 /* NUL */ ]; + + /* Generate fingerprint */ + x509_fingerprint ( cert, digest, fingerprint ); + base16_encode ( fingerprint, sizeof ( fingerprint ), + buf, sizeof ( buf ) ); + + /* Print certificate status */ + printf ( "%s : %s", x509_name ( cert ), buf ); + if ( cert->flags & X509_FL_PERMANENT ) + printf ( " [PERMANENT]" ); + if ( cert->flags & X509_FL_EXPLICIT ) + printf ( " [EXPLICIT]" ); + if ( x509_is_valid ( cert ) ) + printf ( " [VALIDATED]" ); + printf ( "\n" ); +} diff --git a/src/usr/ibmgmt.c b/src/usr/ibmgmt.c new file mode 100644 index 00000000..7857664d --- /dev/null +++ b/src/usr/ibmgmt.c @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * Infiniband device management + * + */ + +/** + * Print status of Infiniband device + * + * @v ibdev Infiniband device + */ +void ibstat ( struct ib_device *ibdev ) { + struct ib_queue_pair *qp; + + printf ( "%s: " IB_GUID_FMT " using %s on %s port %d (%s)\n", + ibdev->name, IB_GUID_ARGS ( &ibdev->gid.s.guid ), + ibdev->dev->driver_name, ibdev->dev->name, ibdev->port, + ( ib_is_open ( ibdev ) ? "open" : "closed" ) ); + if ( ib_link_ok ( ibdev ) ) { + printf ( " [Link:up LID %d prefix " IB_GUID_FMT "]\n", + ibdev->lid, IB_GUID_ARGS ( &ibdev->gid.s.prefix ) ); + } else { + printf ( " [Link:down, port state %d]\n", ibdev->port_state ); + } + list_for_each_entry ( qp, &ibdev->qps, list ) { + printf ( " QPN %#lx send %d/%d recv %d/%d %s\n", + qp->qpn, qp->send.fill, qp->send.num_wqes, + qp->recv.fill, qp->recv.num_wqes, qp->name ); + } +} diff --git a/src/usr/ntpmgmt.c b/src/usr/ntpmgmt.c new file mode 100644 index 00000000..765c6dc9 --- /dev/null +++ b/src/usr/ntpmgmt.c @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2016 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * NTP management + * + */ + +/** + * Get time and date via NTP + * + * @v hostname Hostname + * @ret rc Return status code + */ +int ntp ( const char *hostname ) { + int rc; + + /* Start NTP client */ + if ( ( rc = start_ntp ( &monojob, hostname ) ) != 0 ) + return rc; + + /* Wait for NTP to complete */ + if ( ( rc = monojob_wait ( NULL, 0 ) ) != 0 ) + return rc; + + return 0; +} diff --git a/src/util/efifatbin.c b/src/util/efifatbin.c new file mode 100644 index 00000000..918e7a3c --- /dev/null +++ b/src/util/efifatbin.c @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2014 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#define FILE_LICENCE(...) 
extern void __file_licence ( void ) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define eprintf(...) fprintf ( stderr, __VA_ARGS__ ) + +/** Command-line options */ +struct options { +}; + +/** EFI fat binary file header */ +struct efifatbin_file_header { + /** Signature */ + uint32_t signature; + /** Count */ + uint32_t count; +} __attribute__ (( packed )); + +/** EFI fat binary signature */ +#define EFIFATBIN_SIGNATURE 0x0ef1fab9 + +/** EFI fat binary image header */ +struct efifatbin_image_header { + /** Flags */ + uint64_t flags; + /** Offset */ + uint32_t offset; + /** Length */ + uint32_t len; + /** Padding */ + uint32_t pad; +} __attribute__ (( packed )); + +/** EFI fat binary default flags */ +#define EFIFATBIN_FLAGS 0x0000000300000007ULL + +/** EFI fat binary 64-bit flag */ +#define EFIFATBIN_64BIT 0x0000000001000000ULL + +/** + * Allocate memory + * + * @v len Length of memory to allocate + * @ret ptr Pointer to allocated memory + */ +static void * xmalloc ( size_t len ) { + void *ptr; + + ptr = malloc ( len ); + if ( ! 
ptr ) { + eprintf ( "Could not allocate %zd bytes\n", len ); + exit ( 1 ); + } + + return ptr; +} + +/** + * Generate EFI fat binary + * + * @v count Number of input files + * @v infile_names Input filenames + * @v outfile_name Output filename + */ +static void make_efifatbin ( unsigned int count, char **infile_names, + const char *outfile_name ) { + FILE *infile[count]; + FILE *outfile; + struct stat stat[count]; + void *buf[count]; + struct efifatbin_file_header file_header; + struct efifatbin_image_header header[count]; + size_t offset; + EFI_IMAGE_DOS_HEADER *dos; + union { + EFI_IMAGE_NT_HEADERS32 nt32; + EFI_IMAGE_NT_HEADERS64 nt64; + } *nt; + unsigned int i; + + /* Generate file header */ + file_header.signature = EFIFATBIN_SIGNATURE; + file_header.count = count; + offset = ( sizeof ( file_header ) + sizeof ( header ) ); + + /* Process input files */ + for ( i = 0 ; i < count ; i++ ) { + + /* Open input file */ + infile[i] = fopen ( infile_names[i], "r" ); + if ( ! infile[i] ) { + eprintf ( "Could not open %s for reading: %s\n", + infile_names[i], strerror ( errno ) ); + exit ( 1 ); + } + + /* Determine PE file size */ + if ( fstat ( fileno ( infile[i] ), &stat[i] ) != 0 ) { + eprintf ( "Could not stat %s: %s\n", + infile_names[i], strerror ( errno ) ); + exit ( 1 ); + } + + /* Allocate buffer and read in PE file */ + buf[i] = xmalloc ( stat[i].st_size ); + if ( fread ( buf[i], stat[i].st_size, 1, infile[i] ) != 1 ) { + eprintf ( "Could not read %s: %s\n", + infile_names[i], strerror ( errno ) ); + exit ( 1 ); + } + + /* Close input file */ + fclose ( infile[i] ); + + /* Generate image header */ + header[i].flags = EFIFATBIN_FLAGS; + header[i].offset = offset; + header[i].len = stat[i].st_size; + header[i].pad = 0; + + /* Determine architecture */ + dos = buf[i]; + nt = ( buf[i] + dos->e_lfanew ); + if ( nt->nt32.FileHeader.Machine == EFI_IMAGE_MACHINE_X64 ) + header[i].flags |= EFIFATBIN_64BIT; + + /* Allow space for this image */ + offset += 
stat[i].st_size; + } + + /* Open output file */ + outfile = fopen ( outfile_name, "w" ); + if ( ! outfile ) { + eprintf ( "Could not open %s for writing: %s\n", + outfile_name, strerror ( errno ) ); + exit ( 1 ); + } + + /* Write fat binary header */ + if ( fwrite ( &file_header, sizeof ( file_header ), 1, outfile ) != 1 ){ + eprintf ( "Could not write %s: %s\n", + outfile_name, strerror ( errno ) ); + exit ( 1 ); + } + for ( i = 0 ; i < count ; i++ ) { + if ( fwrite ( &header[i], sizeof ( header[i] ), 1, + outfile ) != 1 ) { + eprintf ( "Could not write %s: %s\n", + outfile_name, strerror ( errno ) ); + exit ( 1 ); + } + } + + /* Write images */ + for ( i = 0 ; i < count ; i++ ) { + if ( fwrite ( buf[i], stat[i].st_size, 1, outfile ) != 1 ) { + eprintf ( "Could not write %s: %s\n", + outfile_name, strerror ( errno ) ); + exit ( 1 ); + } + } + + /* Close output file */ + fclose ( outfile ); +} + +/** + * Print help + * + * @v program_name Program name + */ +static void print_help ( const char *program_name ) { + eprintf ( "Syntax: %s infile [infile...] 
outfile\n", program_name ); +} + +/** + * Parse command-line options + * + * @v argc Argument count + * @v argv Argument list + * @v opts Options structure to populate + */ +static int parse_options ( const int argc, char **argv, + struct options *opts __attribute__ (( unused )) ) { + int c; + + while (1) { + int option_index = 0; + static struct option long_options[] = { + { "help", 0, NULL, 'h' }, + { 0, 0, 0, 0 } + }; + + if ( ( c = getopt_long ( argc, argv, "h", + long_options, + &option_index ) ) == -1 ) { + break; + } + + switch ( c ) { + case 'h': + print_help ( argv[0] ); + exit ( 0 ); + case '?': + default: + exit ( 2 ); + } + } + return optind; +} + +int main ( int argc, char **argv ) { + struct options opts; + int infile_index; + int outfile_index; + int count; + + /* Parse command-line arguments */ + memset ( &opts, 0, sizeof ( opts ) ); + infile_index = parse_options ( argc, argv, &opts ); + outfile_index = ( argc - 1 ); + count = ( outfile_index - infile_index ); + if ( count <= 0 ) { + print_help ( argv[0] ); + exit ( 2 ); + } + + /* Generate fat binary */ + make_efifatbin ( count, &argv[infile_index], argv[outfile_index] ); + + return 0; +} diff --git a/src/util/genefidsk b/src/util/genefidsk new file mode 100755 index 00000000..7064f99b --- /dev/null +++ b/src/util/genefidsk @@ -0,0 +1,60 @@ +#!/bin/sh +# +# Generate an EFI bootable disk image + +set -e + +function help() { + echo "Usage: ${0} [OPTIONS] " + echo + echo "where OPTIONS are:" + echo " -h Show this help" + echo " -b Specify boot file name (e.g. 
bootx64.efi)"
    echo " -o FILE Save disk image to file"
}

# Default boot file name within the EFI system partition layout
BOOT=bootx64.efi

while getopts "hb:o:" opt; do
    case ${opt} in
        h)
            help
            exit 0
            ;;
        b)
            BOOT="${OPTARG}"
            ;;
        o)
            OUT="${OPTARG}"
            ;;
    esac
done

shift $((OPTIND - 1))
IN=$1

if [ -z "${IN}" ]; then
    echo "${0}: no input file given" >&2
    help
    exit 1
fi

if [ -z "${OUT}" ]; then
    echo "${0}: no output file given" >&2
    help
    exit 1
fi

# Create sparse output file (1440K = standard 1.44MB floppy image)
# Fix: all expansions below are quoted so paths containing whitespace
# or glob characters are passed through intact.
rm -f "${OUT}"
truncate -s 1440K "${OUT}"

# Format disk
mformat -i "${OUT}" -f 1440 ::

# Create directory structure
mmd -i "${OUT}" ::efi
mmd -i "${OUT}" ::efi/boot

# Copy bootable image
mcopy -i "${OUT}" "${IN}" "::efi/boot/${BOOT}"
diff --git a/src/util/relicense.pl b/src/util/relicense.pl
new file mode 100755
index 00000000..41954c1b
--- /dev/null
+++ b/src/util/relicense.pl
@@ -0,0 +1,169 @@
#!/usr/bin/perl -w

=head1 NAME

relicense.pl

=head1 SYNOPSIS

relicense.pl [options] -p [...]

Option:

    -p,--permitted=FILE Specify file of emails with relicensing permission
    -f,--force          Manually force relicensing
    -h,--help           Display brief help message
    -v,--verbose        Increase verbosity
    -q,--quiet          Decrease verbosity

=cut

use File::Slurp;
use IPC::Run qw ( run );
use Getopt::Long;
use Pod::Usage;
use strict;
use warnings;

# Parse command-line options
my $verbosity = 0;
my $permfile;
my $force;
Getopt::Long::Configure ( "bundling", "auto_abbrev" );
GetOptions (
    'permitted|p=s' => \$permfile,
    'force|f' => \$force,
    'verbose|v+' => sub { $verbosity++; },
    'quiet|q+' => sub { $verbosity--; },
    'help|h' => sub { pod2usage ( 1 ); },
) or die "Could not parse command-line options";
pod2usage ( 1 ) unless @ARGV;

# Read permitted emails file
my @emails = ( $permfile ?
read_file ( $permfile ) : () );
chomp @emails;
# Map "Name <email>" lines to a lookup hash keyed by bare email
my $permitted = { map { /^.*<(\S+)>$/; ( $1 || $_ ) => 1 } @emails };

# Define list of relicensable licences
my $relicensable = {
    GPL2_OR_LATER => 1,
};

# Define blurb to be added to copyright notice
my $blurb = '
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.';

# Process files
my @succeeded;
my @failed;
while ( my $filename = shift @ARGV ) {

    # Read file to determine existing licence
    my $file = read_file ( $filename );
    my @licences = ( $file =~ /^\s*FILE_LICENCE\s*\(\s*(\S+)\s*\)\s*;?$/mg );
    die "No licence declaration in $filename\n" unless @licences;
    die "Multiple licence declarations in $filename\n" if @licences > 1;
    my $licence = $licences[0];

    # Skip if file is already UBDL-licensed
    next if $licence =~ /_OR_UBDL$/;

    # Fail immediately if file is not a candidate for relicensing
    if ( ! exists $relicensable->{$licence} ) {
	print "Non-relicensable licence $licence in $filename\n";
	push @failed, $filename;
	next;
    }

    # Run git-blame in porcelain mode to attribute every line
    my $stdout;
    my $stderr;
    run [ "git", "blame", "-M", "-C", "-p", "-w", $filename ],
	\undef, \$stdout, \$stderr
	or die "git-blame $filename: $?";
    die $stderr if $stderr;

    # Process output
    my @stdout = split ( /\n/, $stdout );
    chomp @stdout;
    my $details = {};
    my $failures = 0;
    while ( @stdout ) {

	# Parse porcelain commit line: <sha1> <orig-line> <line> [<count>]
	my $commit_line = shift @stdout;
	( my $commit, undef, my $lineno, undef, my $count ) =
	    ( $commit_line =~
	      /^([0-9a-f]{40})\s+([0-9]+)\s+([0-9]+)(\s+([0-9]+))?$/ )
	    or die "Malformed commit line \"$commit_line\"\n";

	# A count marks the first line of a group: header key/value
	# detail lines follow until the tab-prefixed code line
	if ( $count ) {
	    $details->{$commit} ||= {};
	    while ( ! ( $stdout[0] =~ /^\t/ ) ) {
		my $detail_line = shift @stdout;
		( my $key, undef, my $value ) =
		    ( $detail_line =~ /^([a-z-]+)(\s+(.+))?$/ )
		    or die "Malformed detail line \"$detail_line\" for $commit_line\n";
		$details->{$commit}->{$key} = $value;
	    }
	}
	die "Missing commit details for $commit_line\n"
	    unless %{$details->{$commit}};
	my $code_line = shift @stdout;
	( my $line ) = ( $code_line =~ /^\t(.*)$/ )
	    or die "Malformed code line \"$code_line\" for $commit_line\n";

	# Skip trivial lines and lines so common that they are likely to
	# be misattributed by git-blame
	next if $line =~ /^\s*$/;		# Empty lines
	next if $line =~ /^\s*\/\*/;		# Start of comments
	next if $line =~ /^\s*\*/;		# Middle (or end) of comments
	next if $line =~ /^\s*\{\s*$/;		# Standalone opening braces
	next if $line =~ /^\s*\};?\s*$/;	# Standalone closing braces
	next if $line =~ /^\#include/;		# Header inclusions
	next if $line =~ /^\s*return\s+0;/;	# return 0;
	next if $line =~ /^\s*return\s+rc;/;	# return rc;
	next if $line =~ /^\s*PCI_ROM\s*\(.*\)\s*,\s*$/; # PCI IDs
	next if $line =~ /^\s*FILE_LICENCE\s*\(.*\)\s*;$/; # Licence declarations

	# Identify author; boundary commits have unknown true authorship
	my $author_mail = $details->{$commit}->{"author-mail"}
	    or die "Missing author email for $commit_line\n";
	( my $email ) = ( $author_mail =~ /^<(\S+)>$/ )
	    or die "Malformed author email \"$author_mail\" for $commit_line\n";
	undef $email if exists $details->{$commit}->{boundary};

	# Check for relicensing permission
	next if defined $email && exists $permitted->{$email};

	# Print out lines lacking permission
	# Fix: was "printf $filename" — using the filename as a printf
	# FORMAT string misrenders any filename containing '%'
	print $filename."\n" unless $failures;
	printf "%4d %-30s %s\n", $lineno, ( $email || "" ), $line;
	$failures++;
    }

    # Fail if there are any non-trivial lines lacking relicensing permission
    if ( $failures && ! $force ) {
	push @failed, $filename;
	next;
    }

    # Modify FILE_LICENCE() line
    $file =~ s/(^\s*FILE_LICENCE\s*\(\s*${licence})(\s*\)\s*;?$)/$1_OR_UBDL$2/m
	or die "Could not modify FILE_LICENCE() in $filename\n";

    # Modify copyright notice, if present
    if ( $file =~ /GNU General Public License/i ) {
	$file =~ s/(02110-1301, USA.$)/$1${blurb}/m
	    or die "Could not modify copyright notice in $filename\n";
    }

    # Write out modified file
    write_file ( $filename, { atomic => 1 }, $file );
    push @succeeded, $filename;
}

print "Relicensed: ".join ( " ", @succeeded )."\n" if @succeeded;
die "Cannot relicense: ".join ( " ", @failed )."\n" if @failed;