diff --git a/sys/mips/rmi/dev/sec/desc.h b/sys/mips/rmi/dev/sec/desc.h new file mode 100755 index 000000000000..5757e13a7654 --- /dev/null +++ b/sys/mips/rmi/dev/sec/desc.h @@ -0,0 +1,3067 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * RMI_BSD */ +#ifndef _DESC_H_ +#define _DESC_H_ + + +#define ONE_BIT 0x0000000000000001ULL +#define TWO_BITS 0x0000000000000003ULL +#define THREE_BITS 0x0000000000000007ULL +#define FOUR_BITS 0x000000000000000fULL +#define FIVE_BITS 0x000000000000001fULL +#define SIX_BITS 0x000000000000003fULL +#define SEVEN_BITS 0x000000000000007fULL +#define EIGHT_BITS 0x00000000000000ffULL +#define NINE_BITS 0x00000000000001ffULL +#define ELEVEN_BITS 0x00000000000007ffULL +#define TWELVE_BITS 0x0000000000000fffULL +#define FOURTEEN_BITS 0x0000000000003fffULL +#define TWENTYFOUR_BITS 0x0000000000ffffffULL +#define THIRTY_TWO_BITS 0x00000000ffffffffULL +#define THIRTY_FIVE_BITS 0x00000007ffffffffULL +#define FOURTY_BITS 0x000000ffffffffffULL + +#define MSG_IN_CTL_LEN_BASE 40 +#define MSG_IN_CTL_ADDR_BASE 0 + +#define GET_FIELD(word,field) \ + ((word) & (field ## _MASK)) >> (field ## _LSB) + +#define FIELD_VALUE(field,value) (((value) & (field ## _BITS)) << (field ## _LSB)) + +/* + * NOTE: this macro expects 'word' to be uninitialized (i.e. zeroed) + */ +#define SET_FIELD(word,field,value) \ + { (word) |= (((value) & (field ## _BITS)) << (field ## _LSB)); } + +/* + * This macro clears 'word', then sets the value + */ +#define CLEAR_SET_FIELD(word,field,value) \ + { (word) &= ~((field ## _BITS) << (field ## _LSB)); \ + (word) |= (((value) & (field ## _BITS)) << (field ## _LSB)); } + +/* + * NOTE: May be used to build value specific mask + * (e.g. 
GEN_MASK(CTL_DSC_CPHR_3DES,CTL_DSC_CPHR_LSB) + */ +#define GEN_MASK(bits,lsb) ((bits) << (lsb)) + + + + +/* + * Security block data and control exchange + * + * A 2-word message ring descriptor is used to pass a pointer to the control descriptor data structure + * and a pointer to the packet descriptor data structure: + * + * 63 61 60 54 53 52 49 48 45 44 40 + * 39 5 4 0 + * --------------------------------------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | Resp Dest Id Entry0 | IF_L2ALLOC | UNUSED | Control Length | UNUSED + * | 35 MSB of address of control descriptor data structure | Software Scratch0 + * | + * --------------------------------------------------------------------------------------------------------------------------------------------------------- + * 3 7 1 4 4 5 + * 35 5 + * + * 63 61 60 54 53 52 51 50 46 45 44 40 39 5 4 0 + * --------------------------------------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | UNUSED | WRB_COH | WRB_L2ALLOC | DF_PTR_L2ALLOC | UNUSED | Data Length | UNUSED | 35 MSB of address of packet descriptor data structure | UNUSED | + * --------------------------------------------------------------------------------------------------------------------------------------------------------- + * 3 7 1 1 1 5 1 5 35 5 + * + * Addresses assumed to be cache-line aligned, i.e., Address[4:0] ignored (using 5'h00 instead) + * + * Control length is the number of control cachelines to be read so user needs + * to round up + * the control length to closest integer multiple of 32 bytes. Note that at + * present (08/12/04) + * the longest (sensical) ctrl structure is <= 416 bytes, i.e., 13 cachelines. + * + * The packet descriptor data structure size is fixed at 1 cacheline (32 bytes). + * This effectively makes "Data Length" a Load/NoLoad bit. NoLoad causes an abort. + * + * + * Upon completion of operation, the security block returns a 2-word free descriptor + * in the following format: + * + * 63 61 60 54 53 52 51 49 48 47 40 39 0 + * ---------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | Destination Id | 2'b00 | Desc Ctrl | 1'b0 | Instruction Error | Address of control descriptor data structure | + * ---------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | Destination Id | 2'b00 | Desc Ctrl | 1'b0 | Data Error | Address of packet descriptor data structure | + * ---------------------------------------------------------------------------------------------------------------------------- + * + * The Instruction and Data Error codes are enumerated in the + * ControlDescriptor and PacketDescriptor sections below + * + */ + + +/* + * Operating assumptions + * ===================== + * + * + * -> For all IpSec ops, I assume that all the IP/IPSec/TCP headers + * and the data are present at the specified source addresses. + * I also assume that all necessary header data already exists + * at the destination. Additionally, in AH I assume that all + * mutable fields (IP.{TOS, Flags, Offset, TTL, Header_Checksum}) + * and the AH.Authentication_Data have been zeroed by the client. 
+ * + * + * -> In principle, the HW can calculate TCP checksums on both + * incoming and outgoing data; however, since the TCP header + * contains the TCP checksum of the plain payload and the header + * is encrypted, two passes would be necessary to do checksum + encryption + * for outgoing messages; + * therefore the checksum engine will likely only be used during decryption + * (incoming). + * + * + * -> For all operations involving TCP checksum, I assume the client has filled + * the TCP checksum field with the appropriate value: + * + * - 0 for generation phase + * - actual value for verification phase (expecting 0 result) + * + * + * -> For ESP tunnel, the original IP header exists between the end of the + * ESP header and the beginning of the TCP header; it is assumed that the + * maximum length of this header is 16 k(32bit)words (used in CkSum_Offset). + * + * + * -> The authentication data is merely written to the destination address; + * the client is left with the task of comparing to the data in packet + * in decrypt. + * + * -> PacketDescriptor_t.dstLLWMask relevant to AES CTR mode only but it will + * affect all AES-related operations. It will not affect DES/3DES/bypass ops. + * The mask is applied to data as it emerges from the AES engine for the sole + * purpose of providing the authenticator and cksum engines with correct data. + * CAVEAT: the HW does not mask the incoming data. It is the user's responsibility + * to set to 0 the corresponding data in memory. If the surplus data is not masked + * in memory, cksum/auth results will be incorrect if those engines receive data + * straight from memory (i.e., not from cipher, as it happens while decoding) + */ + +/* + * Fragmentation and offset related notes + * ====================================== + * + * + * A) Rebuilding packets from fragments on dword boundaries. The discussion + * below is exemplified by tests memcpy_all_off_frags and memcpy_same_off_frags + * + * 1) The Offset before data/iv on first fragment is ALWAYS written back + * Non-zero dst dword or global offsets may cause more data to be + * written than the user-specified length. + * + * + * Example: + * -------- + * + * Below is a source (first fragment) packet (@ ADD0 cache-aligned address). + * Assume we just copy it and relevant data starts on + * dword 3 so Cipher_Offset = IV_Offset = 3 (dwords). + * D0X denotes relevant data and G denotes dont care data. + * Offset data is also copied so Packet_Legth = 9 (dwords) * 8 = 72 (bytes) + * Segment_src_address = ADD0 + * + * If we want to, e.g., copy so that the relevant (i.e., D0X) data + * starts at (cache-aligned address) ADD1, we need to specify + * Dst_dword_offset = 1 so D00 is moved from dword position 3 to 0 on next cache-line + * Cipher_dst_address = ADD1 - 0x20 so D00 is written to ADD1 + * + * Note that the security engine always writes full cachelines + * therefore, data written to dword0 0 of ADD1 (denoted w/ ?) is what the sec pipe + * write back buffer contained from previous op. + * + * + * SOURCE: DESTINATION: + * ------- ------------ + * + * Segment_src_address = ADD0 Cipher_dst_address = ADD1 - 0x20 + * Packet_Legth = 72 Dst_dword_offset = 1 + * Cipher_Offset = 3 + * IV_Offset = 3 + * Use_IV = ANY + * + * + * + * 3 2 1 0 3 2 1 0 + * ----------------------- ----------------------- + * | D00 | G | G | G | <- ADD0 | G | G | G | ? 
| <- ADD1 - 0x20 + * ----------------------- ----------------------- + * | D04 | D03 | D02 | D01 | | D03 | D02 | D01 | D00 | <- ADD1 + * ----------------------- ----------------------- + * | | | | D05 | | | | D05 | D04 | + * ----------------------- ----------------------- + * + * 2) On fragments following the first, IV_Offset is overloaded to mean data offset + * (number of dwords to skip from beginning of cacheline before starting processing) + * and Use_IV is overloaded to mean do writeback the offset (in the clear). + * These fields in combination with Dst_dword_offset allow packet fragments with + * arbitrary boundaries/lengthd to be reasembled. + * + * + * Example: + * -------- + * + * Assume data above was first fragment of a packet we'd like to merge to + * (second) fragment below located at ADD2. The written data should follow + * the previous data without gaps or overwrites. To achieve this, one should + * assert the "Next" field on the previous fragment and use self-explanatory + * set of parameters below + * + * + * SOURCE: DESTINATION: + * ------- ------------ + * + * Segment_src_address = ADD2 Cipher_dst_address = ADD1 + 0x20 + * Packet_Legth = 104 Dst_dword_offset = 1 + * IV_Offset = 1 + * Use_IV = 0 + * + * + * + * 3 2 1 0 3 2 1 0 + * ----------------------- ----------------------- + * | D12 | D11 | D10 | G | <- ADD2 | G | G | G | ? | <- ADD1 - 0x20 + * ----------------------- ----------------------- + * | D16 | D15 | D14 | D13 | | D03 | D02 | D01 | D00 | <- ADD1 + * ----------------------- ----------------------- + * | D1a | D19 | D18 | D17 | | D11 | D10 | D05 | D04 | <- ADD1 + 0x20 + * ----------------------- ----------------------- + * | | | | D1b | | D15 | D14 | D13 | D12 | + * ----------------------- ----------------------- + * | D19 | D18 | D17 | D16 | + * ----------------------- + * | | | D1b | D1a | + * ----------------------- + * + * It is note-worthy that the merging can only be achieved if Use_IV is 0. Indeed, the security + * engine always writes full lines, therefore ADD1 + 0x20 will be re-written. Setting Use_IV to 0 + * will allow the sec pipe write back buffer to preserve D04, D05 from previous frag and only + * receive D10, D11 thereby preserving the integrity of the previous data. + * + * 3) On fragments following the first, !UseIV in combination w/ Dst_dword_offset >= (4 - IV_Offset) + * will cause a wraparound of the write thus achieving all 16 possible (Initial_Location, Final_Location) + * combinations for the data. + * + * + * Example: + * -------- + * + * Contiguously merging 2 data sets above with a third located at ADD3. If this is the last fragment, + * reset its Next bit. + * + * + * SOURCE: DESTINATION: + * ------- ------------ + * + * Segment_src_address = ADD3 Cipher_dst_address = ADD1 + 0x80 + * Packet_Legth = 152 Dst_dword_offset = 3 + * IV_Offset = 3 + * Use_IV = 0 + * + * + * + * 3 2 1 0 3 2 1 0 + * ----------------------- ----------------------- + * | D20 | G | G | G | <- ADD2 | G | G | G | ? 
| <- ADD1 - 0x20 + * ----------------------- ----------------------- + * | D24 | D23 | D22 | D21 | | D03 | D02 | D01 | D00 | <- ADD1 + * ----------------------- ----------------------- + * | D28 | D27 | D26 | D25 | | D11 | D10 | D05 | D04 | <- ADD1 + 0x20 + * ----------------------- ----------------------- + * | D2c | D2b | D2a | D29 | | D15 | D14 | D13 | D12 | + * ----------------------- ----------------------- + * | | D2f | D2e | D2d | | D19 | D18 | D17 | D16 | + * ----------------------- ----------------------- + * | D21 | D20 | D1b | D1a | <- ADD1 + 0x80 + * ----------------------- + * | D25 | D24 | D23 | D22 | + * ----------------------- + * | D29 | D28 | D27 | D26 | + * ----------------------- + * | D2d | D2c | D2b | D2a | + * ----------------------- + * |(D2d)|(D2c)| D2f | D2e | + * ----------------------- + * + * It is worth noticing that always writing full-lines causes the last 2 dwords in the reconstituted + * packet to be unnecessarily written: (D2d) and (D2c) + * + * + * + * B) Implications of fragmentation on AES + * + * 1) AES is a 128 bit block cipher; therefore it requires an even dword total data length + * Data fragments (provided there are more than 1) are allowed to have odd dword + * data lengths provided the total length (cumulated over fragments) is an even dword + * count; an error will be generated otherwise, upon receiving the last fragment descriptor + * (see error conditions below). + * + * 2) While using fragments with AES, a fragment (other than first) starting with a != 0 (IV) offset + * while the subsequent total dword count given to AES is odd may not be required to write + * its offset (UseIV). Doing so will cause an error (see error conditions below). + * + * + * Example: + * -------- + * + * Suppose the first fragment has an odd DATA dword count and USES AES (as seen below) + * + * SOURCE: DESTINATION: + * ------- ------------ + * + * Segment_src_address = ADD0 Cipher_dst_address = ADD1 + * Packet_Legth = 64 Dst_dword_offset = 1 + * Cipher_Offset = 3 + * IV_Offset = 1 + * Use_IV = 1 + * Cipher = Any AES + * Next = 1 + * + * + * + * + * 3 2 1 0 3 2 1 0 + * ----------------------- ----------------------- + * | D00 | IV1 | IV0 | G | <- ADD0 | E00 | IV1 | IV0 | G | <- ADD1 + * ----------------------- ----------------------- + * | D04 | D03 | D02 | D01 | | X | E03 | E02 | E01 | + * ----------------------- ----------------------- + * + * At the end of processing of the previous fragment, the AES engine input buffer has D04 + * and waits for next dword, therefore the writeback buffer cannot finish writing the fragment + * to destination (X instead of E04). + * + * If a second fragment now arrives with a non-0 offset and requires the offset data to be + * written to destination, the previous write (still needing the arrival of the last dword + * required by the AES to complete the previous operation) cannot complete before the present + * should start causing a deadlock. 
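 *
 *
 * Illustrative only (not part of the original RMI header): a minimal sketch of
 * how a client might compose the 2-word message ring command described above,
 * using the SET_FIELD() helper and the MSG_CMD_* field macros defined below.
 * The helper name sec_build_cmd() and its arguments are hypothetical; the Ctrl
 * field (bits 63:61) and the response destination id are left to the caller.
 * ctrl_paddr/pkt_paddr are the 40-bit physical addresses of the (cache-line
 * aligned) control and packet descriptor structures; ctrl_bytes is the control
 * structure size, rounded up here to 32-byte cachelines as required.
 *
 *	static __inline void
 *	sec_build_cmd(uint64_t msg[2], uint64_t ctrl_paddr, unsigned ctrl_bytes,
 *	    uint64_t pkt_paddr)
 *	{
 *		unsigned clines = (ctrl_bytes + 31) / 32;
 *
 *		msg[0] = 0;
 *		SET_FIELD(msg[0], MSG_CMD_CTL_LEN, clines);
 *		SET_FIELD(msg[0], MSG_CMD_CTL_ADDR, ctrl_paddr);
 *		msg[1] = 0;
 *		SET_FIELD(msg[1], MSG_CMD_DATA_LEN, MSG_CMD_DATA_LEN_LOAD);
 *		SET_FIELD(msg[1], MSG_CMD_DATA_ADDR, pkt_paddr);
 *	}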
+ */ + +/* + * Command Control Word for Message Ring Descriptor + */ + +/* #define MSG_CMD_CTL_CTL */ +#define MSG_CMD_CTL_CTL_LSB 61 +#define MSG_CMD_CTL_CTL_BITS THREE_BITS +#define MSG_CMD_CTL_CTL_MASK (MSG_CMD_CTL_CTL_BITS << MSG_CMD_CTL_CTL_LSB) + +/* #define MSG_CMD_CTL_ID */ +#define MSG_CMD_CTL_ID_LSB 54 +#define MSG_CMD_CTL_ID_BITS SEVEN_BITS +#define MSG_CMD_CTL_ID_MASK (MSG_CMD_CTL_ID_BITS << MSG_CMD_CTL_ID_LSB) + +/* #define MSG_CMD_CTL_LEN */ +#define MSG_CMD_CTL_LEN_LSB 45 +#define MSG_CMD_CTL_LEN_BITS FOUR_BITS +#define MSG_CMD_CTL_LEN_MASK (MSG_CMD_CTL_LEN_BITS << MSG_CMD_CTL_LEN_LSB) + + +/* #define MSG_CMD_CTL_ADDR */ +#define MSG_CMD_CTL_ADDR_LSB 0 +#define MSG_CMD_CTL_ADDR_BITS FOURTY_BITS +#define MSG_CMD_CTL_ADDR_MASK (MSG_CMD_CTL_ADDR_BITS << MSG_CMD_CTL_ADDR_LSB) + +#define MSG_CMD_CTL_MASK (MSG_CMD_CTL_CTL_MASK | \ + MSG_CMD_CTL_LEN_MASK | MSG_CMD_CTL_ADDR_MASK) + +/* + * Command Data Word for Message Ring Descriptor + */ + +/* #define MSG_IN_DATA_CTL */ +#define MSG_CMD_DATA_CTL_LSB 61 +#define MSG_CMD_DATA_CTL_BITS THREE_BITS +#define MSG_CMD_DATA_CTL_MASK (MSG_CMD_DATA_CTL_BITS << MSG_CMD_DATA_CTL_LSB) + +/* #define MSG_CMD_DATA_LEN */ +#define MSG_CMD_DATA_LEN_LOAD 1 +#define MSG_CMD_DATA_LEN_LSB 45 +#define MSG_CMD_DATA_LEN_BITS ONE_BIT +#define MSG_CMD_DATA_LEN_MASK (MSG_CMD_DATA_LEN_BITS << MSG_CMD_DATA_LEN_LSB) + +/* #define MSG_CMD_DATA_ADDR */ +#define MSG_CMD_DATA_ADDR_LSB 0 +#define MSG_CMD_DATA_ADDR_BITS FOURTY_BITS +#define MSG_CMD_DATA_ADDR_MASK (MSG_CMD_DATA_ADDR_BITS << MSG_CMD_DATA_ADDR_LSB) + +#define MSG_CMD_DATA_MASK (MSG_CMD_DATA_CTL_MASK | \ + MSG_CMD_DATA_LEN_MASK | MSG_CMD_DATA_ADDR_MASK) + + +/* + * Upon completion of operation, the Sec block returns a 2-word free descriptor + * in the following format: + * + * 63 61 60 54 53 52 51 49 48 40 39 0 + * ---------------------------------------------------------------------------- + * | Ctrl | Destination Id | 2'b00 | Desc Ctrl | Control Error | Source Address | + * ---------------------------------------------------------------------------- + * | Ctrl | Destination Id | 2'b00 | Desc Ctrl | Data Error | Dest Address | + * ---------------------------------------------------------------------------- + * + * The Control and Data Error codes are enumerated below + * + * Error conditions + * ================ + * + * Control Error Code Control Error Condition + * ------------------ --------------------------- + * 9'h000 No Error + * 9'h001 Unknown Cipher Op ( Cipher == 3'h{6,7}) + * 9'h002 Unknown or Illegal Mode ((Mode == 3'h{2,3,4} & !AES) | (Mode == 3'h{5,6,7})) + * 9'h004 Unsupported CkSum Src (CkSum_Src == 2'h{2,3} & CKSUM) + * 9'h008 Forbidden CFB Mask (AES & CFBMode & UseNewKeysCFBMask & CFBMask[7] & (| CFBMask[6:0])) + * 9'h010 Unknown Ctrl Op ((| Ctrl[63:37]) | (| Ctrl[15:14])) + * 9'h020 UNUSED + * 9'h040 UNUSED + * 9'h080 Data Read Error + * 9'h100 Descriptor Ctrl Field Error (D0.Ctrl != SOP || D1.Ctrl != EOP) + * + * Data Error Code Data Error Condition + * --------------- -------------------- + * 9'h000 No Error + * 9'h001 Insufficient Data To Cipher (Packet_Length <= (Cipher_Offset or IV_Offset)) + * 9'h002 Illegal IV Location ((Cipher_Offset < IV_Offset) | (Cipher_Offset <= IV_Offset & AES & ~CTR)) + * 9'h004 Illegal Wordcount To AES (Packet_Length[3] != Cipher_Offset[0] & AES) + * 9'h008 Illegal Pad And ByteCount Spec (Hash_Byte_Count != 0 & !Pad_Hash) + * 9'h010 Insufficient Data To CkSum ({Packet_Length, 1'b0} <= CkSum_Offset) + * 9'h020 Unknown Data Op ((| dstLLWMask[63:60]) | (| 
dstLLWMask[57:40]) | (| authDst[63:40]) | (| ckSumDst[63:40])) + * 9'h040 Insufficient Data To Auth ({Packet_Length} <= Auth_Offset) + * 9'h080 Data Read Error + * 9'h100 UNUSED + */ + +/* + * Result Control Word for Message Ring Descriptor + */ + +/* #define MSG_RSLT_CTL_CTL */ +#define MSG_RSLT_CTL_CTL_LSB 61 +#define MSG_RSLT_CTL_CTL_BITS THREE_BITS +#define MSG_RSLT_CTL_CTL_MASK \ + (MSG_RSLT_CTL_CTL_BITS << MSG_RSLT_CTL_CTL_LSB) + +/* #define MSG_RSLT_CTL_DST_ID */ +#define MSG_RSLT_CTL_DST_ID_LSB 54 +#define MSG_RSLT_CTL_DST_ID_BITS SEVEN_BITS +#define MSG_RSLT_CTL_DST_ID_MASK \ + (MSG_RSLT_CTL_DST_ID_BITS << MSG_RSLT_CTL_DST_ID_LSB) + +/* #define MSG_RSLT_CTL_DSC_CTL */ +#define MSG_RSLT_CTL_DSC_CTL_LSB 49 +#define MSG_RSLT_CTL_DSC_CTL_BITS THREE_BITS +#define MSG_RSLT_CTL_DSC_CTL_MASK \ + (MSG_RSLT_CTL_DSC_CTL_BITS << MSG_RSLT_CTL_DSC_CTL_LSB) + +/* #define MSG_RSLT_CTL_INST_ERR */ +#define MSG_RSLT_CTL_INST_ERR_LSB 40 +#define MSG_RSLT_CTL_INST_ERR_BITS NINE_BITS +#define MSG_RSLT_CTL_INST_ERR_MASK \ + (MSG_RSLT_CTL_INST_ERR_BITS << MSG_RSLT_CTL_INST_ERR_LSB) + +/* #define MSG_RSLT_CTL_DSC_ADDR */ +#define MSG_RSLT_CTL_DSC_ADDR_LSB 0 +#define MSG_RSLT_CTL_DSC_ADDR_BITS FOURTY_BITS +#define MSG_RSLT_CTL_DSC_ADDR_MASK \ + (MSG_RSLT_CTL_DSC_ADDR_BITS << MSG_RSLT_CTL_DSC_ADDR_LSB) + +/* #define MSG_RSLT_CTL_MASK */ +#define MSG_RSLT_CTL_MASK \ + (MSG_RSLT_CTL_CTRL_MASK | MSG_RSLT_CTL_DST_ID_MASK | \ + MSG_RSLT_CTL_DSC_CTL_MASK | MSG_RSLT_CTL_INST_ERR_MASK | \ + MSG_RSLT_CTL_DSC_ADDR_MASK) + +/* + * Result Data Word for Message Ring Descriptor + */ +/* #define MSG_RSLT_DATA_CTL */ +#define MSG_RSLT_DATA_CTL_LSB 61 +#define MSG_RSLT_DATA_CTL_BITS THREE_BITS +#define MSG_RSLT_DATA_CTL_MASK \ + (MSG_RSLT_DATA_CTL_BITS << MSG_RSLT_DATA_CTL_LSB) + +/* #define MSG_RSLT_DATA_DST_ID */ +#define MSG_RSLT_DATA_DST_ID_LSB 54 +#define MSG_RSLT_DATA_DST_ID_BITS SEVEN_BITS +#define MSG_RSLT_DATA_DST_ID_MASK \ + (MSG_RSLT_DATA_DST_ID_BITS << MSG_RSLT_DATA_DST_ID_LSB) + +/* #define MSG_RSLT_DATA_DSC_CTL */ +#define MSG_RSLT_DATA_DSC_CTL_LSB 49 +#define MSG_RSLT_DATA_DSC_CTL_BITS THREE_BITS +#define MSG_RSLT_DATA_DSC_CTL_MASK \ + (MSG_RSLT_DATA_DSC_CTL_BITS << MSG_RSLT_DATA_DSC_CTL_LSB) + +/* #define MSG_RSLT_DATA_INST_ERR */ +#define MSG_RSLT_DATA_INST_ERR_LSB 40 +#define MSG_RSLT_DATA_INST_ERR_BITS NINE_BITS +#define MSG_RSLT_DATA_INST_ERR_MASK \ + (MSG_RSLT_DATA_INST_ERR_BITS << MSG_RSLT_DATA_INST_ERR_LSB) + +/* #define MSG_RSLT_DATA_DSC_ADDR */ +#define MSG_RSLT_DATA_DSC_ADDR_LSB 0 +#define MSG_RSLT_DATA_DSC_ADDR_BITS FOURTY_BITS +#define MSG_RSLT_DATA_DSC_ADDR_MASK \ + (MSG_RSLT_DATA_DSC_ADDR_BITS << MSG_RSLT_DATA_DSC_ADDR_LSB) + +#define MSG_RSLT_DATA_MASK \ + (MSG_RSLT_DATA_CTRL_MASK | MSG_RSLT_DATA_DST_ID_MASK | \ + MSG_RSLT_DATA_DSC_CTL_MASK | MSG_RSLT_DATA_INST_ERR_MASK | \ + MSG_RSLT_DATA_DSC_ADDR_MASK) + + +/* + * Common Message Definitions + * + */ + +/* #define MSG_CTL_OP_ADDR */ +#define MSG_CTL_OP_ADDR_LSB 0 +#define MSG_CTL_OP_ADDR_BITS FOURTY_BITS +#define MSG_CTL_OP_ADDR_MASK (MSG_CTL_OP_ADDR_BITS << MSG_CTL_OP_ADDR_LSB) + +#define MSG_CTL_OP_TYPE +#define MSG_CTL_OP_TYPE_LSB 3 +#define MSG_CTL_OP_TYPE_BITS TWO_BITS +#define MSG_CTL_OP_TYPE_MASK \ + (MSG_CTL_OP_TYPE_BITS << MSG_CTL_OP_TYPE_LSB) + +#define MSG0_CTL_OP_ENGINE_SYMKEY 0x01 +#define MSG0_CTL_OP_ENGINE_PUBKEY 0x02 + +#define MSG1_CTL_OP_SYMKEY_PIPE0 0x00 +#define MSG1_CTL_OP_SYMKEY_PIPE1 0x01 +#define MSG1_CTL_OP_SYMKEY_PIPE2 0x02 +#define MSG1_CTL_OP_SYMKEY_PIPE3 0x03 + +#define MSG1_CTL_OP_PUBKEY_PIPE0 0x00 +#define 
MSG1_CTL_OP_PUBKEY_PIPE1 0x01 +#define MSG1_CTL_OP_PUBKEY_PIPE2 0x02 +#define MSG1_CTL_OP_PUBKEY_PIPE3 0x03 + + +/* /----------------------------------------\ + * | | + * | ControlDescriptor_s datastructure | + * | | + * \----------------------------------------/ + * + * + * ControlDescriptor_t.Instruction + * ------------------------------- + * + * 63 44 43 42 41 40 39 35 34 32 31 29 28 + * -------------------------------------------------------------------------------------------------------------------- + * || UNUSED || OverrideCipher | Arc4Wait4Save | SaveArc4State | LoadArc4State | Arc4KeyLen | Cipher | Mode | InCp_Key || ... CONT ... + * -------------------------------------------------------------------------------------------------------------------- + * 20 1 1 1 1 5 3 3 1 + * <-----------------------------------------------CIPHER---------------------------------------------------> + * + * 27 25 24 23 22 21 20 19 17 16 15 0 + * ----------------------------------------------------------------------------- + * || UNUSED | Hash_Hi | HMAC | Hash_Lo | InHs_Key || UNUSED || CkSum || UNUSED || + * ----------------------------------------------------------------------------- + * 3 1 1 2 1 3 1 16 + * <---------------------HASH---------------------><-----------CKSUM-----------> + * + * X0 CIPHER.Arc4Wait4Save = If op is Arc4 and it requires state saving, then + * setting this bit will cause the current op to + * delay subsequent op loading until saved state data + * becomes visible. + * CIPHER.OverrideCipher = Override encryption if PacketDescriptor_t.dstDataSettings.CipherPrefix + * is set; data will be copied out (and optionally auth/cksum) + * in the clear. This is used in GCM mode if auth only as we + * still need E(K, 0) calculated by cipher. Engine behavior is + * undefined if this bit is set and CipherPrefix is not. + * X0 SaveArc4State = Save Arc4 state at the end of Arc4 operation + * X0 LoadArc4State = Load Arc4 state at the beginning of an Arc4 operation + * This overriden by the InCp_Key setting for Arc4 + * Arc4KeyLen = Length in bytes of Arc4 key (0 is interpreted as 32) + * Ignored for other ciphers + * For ARC4, IFetch/IDecode will always read exactly 4 + * consecutive dwords into its CipherKey{0,3} regardless + * of this quantity; it will however only use the specified + * number of bytes. + * Cipher = 3'b000 Bypass + * 3'b001 DES + * 3'b010 3DES + * 3'b011 AES 128-bit key + * 3'b100 AES 192-bit key + * 3'b101 AES 256-bit key + * 3'b110 ARC4 + * 3'b111 Kasumi f8 + * Remainder UNDEFINED + * Mode = 3'b000 ECB + * 3'b001 CBC + * 3'b010 CFB (AES only, otherwise undefined) + * 3'b011 OFB (AES only, otherwise undefined) + * 3'b100 CTR (AES only, otherwise undefined) + * 3'b101 F8 (AES only, otherwise undefined) + * Remainder UNDEFINED + * InCp_Key = 1'b0 Preserve old Cipher Keys + * 1'b1 Load new Cipher Keys from memory to local registers + * and recalculate the Arc4 Sbox if Arc4 Cipher chosen; + * This overrides LoadArc4State setting. + * HASH.HMAC = 1'b0 Hash without HMAC + * 1'b1 Hash with HMAC + * Needs to be set to 0 for GCM and Kasumi F9 authenticators + * otherwise unpredictable results will be generated + * Hash = 2'b00 Hash NOP + * 2'b01 MD5 + * 2'b10 SHA-1 + * 2'b11 SHA-256 + * 3'b100 SHA-384 + * 3'b101 SHA-512 + * 3'b110 GCM + * 3'b111 Kasumi f9 + * InHs_Key = 1'b0 Preserve old HMAC Keys + * If GCM is selected as authenticator, leaving this bit + * at 0 will cause the engine to use the old H value. 
+ * It will use the old SCI inside the decoder if + * CFBMask[1:0] == 2'b11. + * If Kasumi F9 authenticator, using 0 preserves + * old keys (IK) in decoder. + * 1'b1 Load new HMAC Keys from memory to local registers + * Setting this bit while Cipher=Arc4 and LoadArc4State=1 + * causes the decoder to load the Arc4 state from the + * cacheline following the HMAC keys (Whether HASH.HMAC + * is set or not). + * If GCM is selected as authenticator, setting this bit + * causes both H (16 bytes) and SCI (8 bytes) to be loaded + * from memory to the decoder. H will be loaded to the engine + * but SCI is only loaded to the engine if CFBMask[1:0] == 2'b11. + * If Kasumi F9 authenticator, using 1 loads new keys (IK) + * from memory to decoder. + * CHECKSUM.CkSum = 1'b0 CkSum NOP + * 1'b1 INTERNET_CHECKSUM + * + * + * + */ + + /* #define CTRL_DSC_OVERRIDECIPHER */ +#define CTL_DSC_OVERRIDECIPHER_OFF 0 +#define CTL_DSC_OVERRIDECIPHER_ON 1 +#define CTL_DSC_OVERRIDECIPHER_LSB 43 +#define CTL_DSC_OVERRIDECIPHER_BITS ONE_BIT +#define CTL_DSC_OVERRIDECIPHER_MASK (CTL_DSC_OVERRIDECIPHER_BITS << CTL_DSC_OVERRIDECIPHER_LSB) + +/* #define CTRL_DSC_ARC4_WAIT4SAVE */ +#define CTL_DSC_ARC4_WAIT4SAVE_OFF 0 +#define CTL_DSC_ARC4_WAIT4SAVE_ON 1 +#define CTL_DSC_ARC4_WAIT4SAVE_LSB 42 +#define CTL_DSC_ARC4_WAIT4SAVE_BITS ONE_BIT +#define CTL_DSC_ARC4_WAIT4SAVE_MASK (CTL_DSC_ARC4_WAIT4SAVE_BITS << CTL_DSC_ARC4_WAIT4SAVE_LSB) + +/* #define CTRL_DSC_ARC4_SAVESTATE */ +#define CTL_DSC_ARC4_SAVESTATE_OFF 0 +#define CTL_DSC_ARC4_SAVESTATE_ON 1 +#define CTL_DSC_ARC4_SAVESTATE_LSB 41 +#define CTL_DSC_ARC4_SAVESTATE_BITS ONE_BIT +#define CTL_DSC_ARC4_SAVESTATE_MASK (CTL_DSC_ARC4_SAVESTATE_BITS << CTL_DSC_ARC4_SAVESTATE_LSB) + +/* #define CTRL_DSC_ARC4_LOADSTATE */ +#define CTL_DSC_ARC4_LOADSTATE_OFF 0 +#define CTL_DSC_ARC4_LOADSTATE_ON 1 +#define CTL_DSC_ARC4_LOADSTATE_LSB 40 +#define CTL_DSC_ARC4_LOADSTATE_BITS ONE_BIT +#define CTL_DSC_ARC4_LOADSTATE_MASK (CTL_DSC_ARC4_LOADSTATE_BITS << CTL_DSC_ARC4_LOADSTATE_LSB) + +/* #define CTRL_DSC_ARC4_KEYLEN */ +#define CTL_DSC_ARC4_KEYLEN_LSB 35 +#define CTL_DSC_ARC4_KEYLEN_BITS FIVE_BITS +#define CTL_DSC_ARC4_KEYLEN_MASK (CTL_DSC_ARC4_KEYLEN_BITS << CTL_DSC_ARC4_KEYLEN_LSB) + +/* #define CTL_DSC_CPHR (cipher) */ +#define CTL_DSC_CPHR_BYPASS 0 /* undefined */ +#define CTL_DSC_CPHR_DES 1 +#define CTL_DSC_CPHR_3DES 2 +#define CTL_DSC_CPHR_AES128 3 +#define CTL_DSC_CPHR_AES192 4 +#define CTL_DSC_CPHR_AES256 5 +#define CTL_DSC_CPHR_ARC4 6 +#define CTL_DSC_CPHR_KASUMI_F8 7 +#define CTL_DSC_CPHR_LSB 32 +#define CTL_DSC_CPHR_BITS THREE_BITS +#define CTL_DSC_CPHR_MASK (CTL_DSC_CPHR_BITS << CTL_DSC_CPHR_LSB) + +/* #define CTL_DSC_MODE */ +#define CTL_DSC_MODE_ECB 0 +#define CTL_DSC_MODE_CBC 1 +#define CTL_DSC_MODE_CFB 2 +#define CTL_DSC_MODE_OFB 3 +#define CTL_DSC_MODE_CTR 4 +#define CTL_DSC_MODE_F8 5 +#define CTL_DSC_MODE_LSB 29 +#define CTL_DSC_MODE_BITS THREE_BITS +#define CTL_DSC_MODE_MASK (CTL_DSC_MODE_BITS << CTL_DSC_MODE_LSB) + +/* #define CTL_DSC_ICPHR */ +#define CTL_DSC_ICPHR_OKY 0 /* Old Keys */ +#define CTL_DSC_ICPHR_NKY 1 /* New Keys */ +#define CTL_DSC_ICPHR_LSB 28 +#define CTL_DSC_ICPHR_BITS ONE_BIT +#define CTL_DSC_ICPHR_MASK (CTL_DSC_ICPHR_BITS << CTL_DSC_ICPHR_LSB) + +/* #define CTL_DSC_HASHHI */ +#define CTL_DSC_HASHHI_LSB 24 +#define CTL_DSC_HASHHI_BITS ONE_BIT +#define CTL_DSC_HASHHI_MASK (CTL_DSC_HASHHI_BITS << CTL_DSC_HASHHI_LSB) + +/* #define CTL_DSC_HMAC */ +#define CTL_DSC_HMAC_OFF 0 +#define CTL_DSC_HMAC_ON 1 +#define CTL_DSC_HMAC_LSB 23 +#define CTL_DSC_HMAC_BITS 
ONE_BIT +#define CTL_DSC_HMAC_MASK (CTL_DSC_HMAC_BITS << CTL_DSC_HMAC_LSB) + +/* #define CTL_DSC_HASH */ +#define CTL_DSC_HASH_NOP 0 +#define CTL_DSC_HASH_MD5 1 +#define CTL_DSC_HASH_SHA1 2 +#define CTL_DSC_HASH_SHA256 3 +#define CTL_DSC_HASH_SHA384 4 +#define CTL_DSC_HASH_SHA512 5 +#define CTL_DSC_HASH_GCM 6 +#define CTL_DSC_HASH_KASUMI_F9 7 +#define CTL_DSC_HASH_LSB 21 +#define CTL_DSC_HASH_BITS TWO_BITS +#define CTL_DSC_HASH_MASK (CTL_DSC_HASH_BITS << CTL_DSC_HASH_LSB) + +/* #define CTL_DSC_IHASH */ +#define CTL_DSC_IHASH_OLD 0 +#define CTL_DSC_IHASH_NEW 1 +#define CTL_DSC_IHASH_LSB 20 +#define CTL_DSC_IHASH_BITS ONE_BIT +#define CTL_DSC_IHASH_MASK (CTL_DSC_IHASH_BITS << CTL_DSC_IHASH_LSB) + +/* #define CTL_DSC_CKSUM */ +#define CTL_DSC_CKSUM_NOP 0 +#define CTL_DSC_CKSUM_IP 1 +#define CTL_DSC_CKSUM_LSB 16 +#define CTL_DSC_CKSUM_BITS ONE_BIT +#define CTL_DSC_CKSUM_MASK (CTL_DSC_CKSUM_BITS << CTL_DSC_CKSUM_LSB) + + +/* + * Component strcts and unions defining CipherHashInfo_u + */ + +/* AES256, (ECB, CBC, OFB, CTR, CFB), HMAC (MD5, SHA-1, SHA-256) - 96 bytes */ +typedef struct AES256HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} AES256HMAC_t, *AES256HMAC_pt; + +/* AES256, (ECB, CBC, OFB, CTR, CFB), HMAC (SHA-384, SHA-512) - 160 bytes */ +typedef struct AES256HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} AES256HMAC2_t, *AES256HMAC2_pt; + +/* AES256, (ECB, CBC, OFB, CTR, CFB), GCM - 56 bytes */ +typedef struct AES256GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} AES256GCM_t, *AES256GCM_pt; + +/* AES256, (ECB, CBC, OFB, CTR, CFB), F9 - 56 bytes */ +typedef struct AES256F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t authKey0; + uint64_t authKey1; +} AES256F9_t, *AES256F9_pt; + +/* AES256, (ECB, CBC, OFB, CTR, CFB), Non-HMAC (MD5, SHA-1, SHA-256) - 32 bytes */ +typedef struct AES256_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; +} AES256_t, *AES256_pt; + + +/* All AES192 possibilities */ + +/* AES192, (ECB, CBC, OFB, CTR, CFB), HMAC (MD5, SHA-1, SHA-192) - 88 bytes */ +typedef struct AES192HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} AES192HMAC_t, *AES192HMAC_pt; + +/* AES192, (ECB, CBC, OFB, CTR, CFB), HMAC (SHA-384, SHA-512) - 152 bytes */ +typedef struct AES192HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t 
hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} AES192HMAC2_t, *AES192HMAC2_pt; + +/* AES192, (ECB, CBC, OFB, CTR, CFB), GCM - 48 bytes */ +typedef struct AES192GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} AES192GCM_t, *AES192GCM_pt; + +/* AES192, (ECB, CBC, OFB, CTR, CFB), F9 - 48 bytes */ +typedef struct AES192F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t authKey0; + uint64_t authKey1; +} AES192F9_t, *AES192F9_pt; + +/* AES192, (ECB, CBC, OFB, CTR, CFB), Non-HMAC (MD5, SHA-1, SHA-192) - 24 bytes */ +typedef struct AES192_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; +} AES192_t, *AES192_pt; + + +/* All AES128 possibilities */ + +/* AES128, (ECB, CBC, OFB, CTR, CFB), HMAC (MD5, SHA-1, SHA-128) - 80 bytes */ +typedef struct AES128HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} AES128HMAC_t, *AES128HMAC_pt; + +/* AES128, (ECB, CBC, OFB, CTR, CFB), HMAC (SHA-384, SHA-612) - 144 bytes */ +typedef struct AES128HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} AES128HMAC2_t, *AES128HMAC2_pt; + +/* AES128, (ECB, CBC, OFB, CTR, CFB), GCM - 40 bytes */ +typedef struct AES128GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} AES128GCM_t, *AES128GCM_pt; + +/* AES128, (ECB, CBC, OFB, CTR, CFB), F9 - 48 bytes */ +typedef struct AES128F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t authKey0; + uint64_t authKey1; +} AES128F9_t, *AES128F9_pt; + +/* AES128, (ECB, CBC, OFB, CTR, CFB), Non-HMAC (MD5, SHA-1, SHA-128) - 16 bytes */ +typedef struct AES128_s { + uint64_t cipherKey0; + uint64_t cipherKey1; +} AES128_t, *AES128_pt; + +/* AES128, (OFB F8), Non-HMAC (MD5, SHA-1, SHA-256) - 32 bytes */ +typedef struct AES128F8_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; +} AES128F8_t, *AES128F8_pt; + +/* AES128, (OFB F8), HMAC (MD5, SHA-1, SHA-256) - 96 bytes */ +typedef struct AES128F8HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} AES128F8HMAC_t, *AES128F8HMAC_pt; + +/* AES128, (OFB F8), HMAC (SHA-384, SHA-512) - 160 bytes */ +typedef struct AES128F8HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} 
AES128F8HMAC2_t, *AES128F8HMAC2_pt; + +/* AES192, (OFB F8), Non-HMAC (MD5, SHA-1, SHA-256) - 48 bytes */ +typedef struct AES192F8_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t cipherKeyMask2; +} AES192F8_t, *AES192F8_pt; + +/* AES192, (OFB F8), HMAC (MD5, SHA-1, SHA-256) - 112 bytes */ +typedef struct AES192F8HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t cipherKeyMask2; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} AES192F8HMAC_t, *AES192F8HMAC_pt; + +/* AES192, (OFB F8), HMAC (SHA-384, SHA-512) - 176 bytes */ +typedef struct AES192F8HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t cipherKeyMask2; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} AES192F8HMAC2_t, *AES192F8HMAC2_pt; + +/* AES256, (OFB F8), Non-HMAC (MD5, SHA-1, SHA-256) - 64 bytes */ +typedef struct AES256F8_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t cipherKeyMask2; + uint64_t cipherKeyMask3; +} AES256F8_t, *AES256F8_pt; + +/* AES256, (OFB F8), HMAC (MD5, SHA-1, SHA-256) - 128 bytes */ +typedef struct AES256F8HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t cipherKeyMask2; + uint64_t cipherKeyMask3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} AES256F8HMAC_t, *AES256F8HMAC_pt; + +/* AES256, (OFB F8), HMAC (SHA-384, SHA-512) - 192 bytes */ +typedef struct AES256F8HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t cipherKeyMask0; + uint64_t cipherKeyMask1; + uint64_t cipherKeyMask2; + uint64_t cipherKeyMask3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} AES256F8HMAC2_t, *AES256F8HMAC2_pt; + +/* AES256, (F8), GCM - 40 bytes */ +typedef struct AES128F8GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey2; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} AES128F8GCM_t, *AES128F8GCM_pt; + +/* AES256, (F8), GCM - 48 bytes */ +typedef struct AES192F8GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} AES192F8GCM_t, *AES192F8GCM_pt; + +/* AES256, (F8), GCM - 56 bytes */ +typedef struct AES256F8GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t 
GCMSCI; +} AES256F8GCM_t, *AES256F8GCM_pt; + +/* AES256, (F8), F9 - 40 bytes */ +typedef struct AES128F8F9_s { + uint64_t cipherKey0; + uint64_t cipherKey2; + uint64_t authKey0; + uint64_t authKey1; +} AES128F8F9_t, *AES128F8F9_pt; + +/* AES256, (F8), F9 - 48 bytes */ +typedef struct AES192F8F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t authKey0; + uint64_t authKey1; +} AES192F8F9_t, *AES192F8F9_pt; + +/* AES256F8, (F8), F9 - 56 bytes */ +typedef struct AES256F8F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t authKey0; + uint64_t authKey1; +} AES256F8F9_t, *AES256F8F9_pt; + +/* All DES possibilities */ + +/* DES, (ECB, CBC), HMAC (MD5, SHA-1, SHA-128) - 72 bytes */ +typedef struct DESHMAC_s { + uint64_t cipherKey0; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} DESHMAC_t, *DESHMAC_pt; + +/* DES, (ECB, CBC), HMAC (SHA-384, SHA-512) - 136 bytes */ +typedef struct DESHMAC2_s { + uint64_t cipherKey0; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} DESHMAC2_t, *DESHMAC2_pt; + +/* DES, (ECB, CBC), GCM - 32 bytes */ +typedef struct DESGCM_s { + uint64_t cipherKey0; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} DESGCM_t, *DESGCM_pt; + +/* DES, (ECB, CBC), F9 - 32 bytes */ +typedef struct DESF9_s { + uint64_t cipherKey0; + uint64_t authKey0; + uint64_t authKey1; +} DESF9_t, *DESF9_pt; + +/* DES, (ECB, CBC), Non-HMAC (MD5, SHA-1, SHA-128) - 9 bytes */ +typedef struct DES_s { + uint64_t cipherKey0; +} DES_t, *DES_pt; + + +/* All 3DES possibilities */ + +/* 3DES, (ECB, CBC), HMAC (MD5, SHA-1, SHA-128) - 88 bytes */ +typedef struct DES3HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} DES3HMAC_t, *DES3HMAC_pt; + +/* 3DES, (ECB, CBC), HMAC (SHA-384, SHA-512) - 152 bytes */ +typedef struct DES3HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} DES3HMAC2_t, *DES3HMAC2_pt; + +/* 3DES, (ECB, CBC), GCM - 48 bytes */ +typedef struct DES3GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} DES3GCM_t, *DES3GCM_pt; + +/* 3DES, (ECB, CBC), GCM - 48 bytes */ +typedef struct DES3F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t authKey0; + uint64_t authKey1; +} DES3F9_t, *DES3F9_pt; + +/* 3DES, (ECB, CBC), Non-HMAC (MD5, SHA-1, SHA-128) - 24 bytes */ +typedef struct DES3_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; +} DES3_t, *DES3_pt; + + +/* HMAC only - no cipher */ + +/* HMAC (MD5, 
SHA-1, SHA-128) - 64 bytes */ +typedef struct HMAC_s { + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} HMAC_t, *HMAC_pt; + +/* HMAC (SHA-384, SHA-512) - 128 bytes */ +typedef struct HMAC2_s { + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} HMAC2_t, *HMAC2_pt; + +/* GCM - 24 bytes */ +typedef struct GCM_s { + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} GCM_t, *GCM_pt; + +/* F9 - 24 bytes */ +typedef struct F9_s { + uint64_t authKey0; + uint64_t authKey1; +} F9_t, *F9_pt; + +/* All ARC4 possibilities */ +/* ARC4, HMAC (MD5, SHA-1, SHA-256) - 96 bytes */ +typedef struct ARC4HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} ARC4HMAC_t, *ARC4HMAC_pt; + +/* ARC4, HMAC (SHA-384, SHA-512) - 160 bytes */ +typedef struct ARC4HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} ARC4HMAC2_t, *ARC4HMAC2_pt; + +/* ARC4, GCM - 56 bytes */ +typedef struct ARC4GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} ARC4GCM_t, *ARC4GCM_pt; + +/* ARC4, F9 - 56 bytes */ +typedef struct ARC4F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t authKey0; + uint64_t authKey1; +} ARC4F9_t, *ARC4F9_pt; + +/* ARC4, HMAC (MD5, SHA-1, SHA-256) - 408 bytes (not including 8 bytes from instruction) */ +typedef struct ARC4StateHMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t PAD0; + uint64_t PAD1; + uint64_t PAD2; + uint64_t Arc4SboxData0; + uint64_t Arc4SboxData1; + uint64_t Arc4SboxData2; + uint64_t Arc4SboxData3; + uint64_t Arc4SboxData4; + uint64_t Arc4SboxData5; + uint64_t Arc4SboxData6; + uint64_t Arc4SboxData7; + uint64_t Arc4SboxData8; + uint64_t Arc4SboxData9; + uint64_t Arc4SboxData10; + uint64_t Arc4SboxData11; + uint64_t Arc4SboxData12; + uint64_t Arc4SboxData13; + uint64_t Arc4SboxData14; + uint64_t Arc4SboxData15; + uint64_t Arc4SboxData16; + uint64_t Arc4SboxData17; + uint64_t Arc4SboxData18; + uint64_t Arc4SboxData19; + uint64_t Arc4SboxData20; + uint64_t Arc4SboxData21; + uint64_t Arc4SboxData22; + uint64_t Arc4SboxData23; + uint64_t Arc4SboxData24; + uint64_t Arc4SboxData25; + uint64_t Arc4SboxData26; + uint64_t Arc4SboxData27; + uint64_t Arc4SboxData28; + uint64_t Arc4SboxData29; + uint64_t 
Arc4SboxData30; + uint64_t Arc4SboxData31; + uint64_t Arc4IJData; + uint64_t PAD3; + uint64_t PAD4; + uint64_t PAD5; +} ARC4StateHMAC_t, *ARC4StateHMAC_pt; + +/* ARC4, HMAC (SHA-384, SHA-512) - 480 bytes (not including 8 bytes from instruction) */ +typedef struct ARC4StateHMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; + uint64_t PAD0; + uint64_t PAD1; + uint64_t PAD2; + uint64_t Arc4SboxData0; + uint64_t Arc4SboxData1; + uint64_t Arc4SboxData2; + uint64_t Arc4SboxData3; + uint64_t Arc4SboxData4; + uint64_t Arc4SboxData5; + uint64_t Arc4SboxData6; + uint64_t Arc4SboxData7; + uint64_t Arc4SboxData8; + uint64_t Arc4SboxData9; + uint64_t Arc4SboxData10; + uint64_t Arc4SboxData11; + uint64_t Arc4SboxData12; + uint64_t Arc4SboxData13; + uint64_t Arc4SboxData14; + uint64_t Arc4SboxData15; + uint64_t Arc4SboxData16; + uint64_t Arc4SboxData17; + uint64_t Arc4SboxData18; + uint64_t Arc4SboxData19; + uint64_t Arc4SboxData20; + uint64_t Arc4SboxData21; + uint64_t Arc4SboxData22; + uint64_t Arc4SboxData23; + uint64_t Arc4SboxData24; + uint64_t Arc4SboxData25; + uint64_t Arc4SboxData26; + uint64_t Arc4SboxData27; + uint64_t Arc4SboxData28; + uint64_t Arc4SboxData29; + uint64_t Arc4SboxData30; + uint64_t Arc4SboxData31; + uint64_t Arc4IJData; + uint64_t PAD3; + uint64_t PAD4; + uint64_t PAD5; +} ARC4StateHMAC2_t, *ARC4StateHMAC2_pt; + +/* ARC4, GCM - 408 bytes (not including 8 bytes from instruction) */ +typedef struct ARC4StateGCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; + uint64_t PAD0; + uint64_t PAD1; + uint64_t PAD2; + uint64_t PAD3; + uint64_t PAD4; + uint64_t PAD5; + uint64_t PAD6; + uint64_t PAD7; + uint64_t Arc4SboxData0; + uint64_t Arc4SboxData1; + uint64_t Arc4SboxData2; + uint64_t Arc4SboxData3; + uint64_t Arc4SboxData4; + uint64_t Arc4SboxData5; + uint64_t Arc4SboxData6; + uint64_t Arc4SboxData7; + uint64_t Arc4SboxData8; + uint64_t Arc4SboxData9; + uint64_t Arc4SboxData10; + uint64_t Arc4SboxData11; + uint64_t Arc4SboxData12; + uint64_t Arc4SboxData13; + uint64_t Arc4SboxData14; + uint64_t Arc4SboxData15; + uint64_t Arc4SboxData16; + uint64_t Arc4SboxData17; + uint64_t Arc4SboxData18; + uint64_t Arc4SboxData19; + uint64_t Arc4SboxData20; + uint64_t Arc4SboxData21; + uint64_t Arc4SboxData22; + uint64_t Arc4SboxData23; + uint64_t Arc4SboxData24; + uint64_t Arc4SboxData25; + uint64_t Arc4SboxData26; + uint64_t Arc4SboxData27; + uint64_t Arc4SboxData28; + uint64_t Arc4SboxData29; + uint64_t Arc4SboxData30; + uint64_t Arc4SboxData31; + uint64_t Arc4IJData; + uint64_t PAD8; + uint64_t PAD9; + uint64_t PAD10; +} ARC4StateGCM_t, *ARC4StateGCM_pt; + +/* ARC4, F9 - 408 bytes (not including 8 bytes from instruction) */ +typedef struct ARC4StateF9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t authKey0; + uint64_t authKey1; + uint64_t PAD0; + uint64_t PAD1; + uint64_t PAD2; + uint64_t PAD3; + uint64_t PAD4; + uint64_t PAD5; + uint64_t PAD6; + uint64_t PAD7; + uint64_t PAD8; + uint64_t Arc4SboxData0; + uint64_t Arc4SboxData1; + uint64_t 
Arc4SboxData2; + uint64_t Arc4SboxData3; + uint64_t Arc4SboxData4; + uint64_t Arc4SboxData5; + uint64_t Arc4SboxData6; + uint64_t Arc4SboxData7; + uint64_t Arc4SboxData8; + uint64_t Arc4SboxData9; + uint64_t Arc4SboxData10; + uint64_t Arc4SboxData11; + uint64_t Arc4SboxData12; + uint64_t Arc4SboxData13; + uint64_t Arc4SboxData14; + uint64_t Arc4SboxData15; + uint64_t Arc4SboxData16; + uint64_t Arc4SboxData17; + uint64_t Arc4SboxData18; + uint64_t Arc4SboxData19; + uint64_t Arc4SboxData20; + uint64_t Arc4SboxData21; + uint64_t Arc4SboxData22; + uint64_t Arc4SboxData23; + uint64_t Arc4SboxData24; + uint64_t Arc4SboxData25; + uint64_t Arc4SboxData26; + uint64_t Arc4SboxData27; + uint64_t Arc4SboxData28; + uint64_t Arc4SboxData29; + uint64_t Arc4SboxData30; + uint64_t Arc4SboxData31; + uint64_t Arc4IJData; + uint64_t PAD9; + uint64_t PAD10; + uint64_t PAD11; +} ARC4StateF9_t, *ARC4StateF9_pt; + +/* ARC4, Non-HMAC (MD5, SHA-1, SHA-256) - 32 bytes */ +typedef struct ARC4_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; +} ARC4_t, *ARC4_pt; + +/* ARC4, Non-HMAC (MD5, SHA-1, SHA-256) - 344 bytes (not including 8 bytes from instruction) */ +typedef struct ARC4State_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t cipherKey2; + uint64_t cipherKey3; + uint64_t PAD0; + uint64_t PAD1; + uint64_t PAD2; + uint64_t Arc4SboxData0; + uint64_t Arc4SboxData1; + uint64_t Arc4SboxData2; + uint64_t Arc4SboxData3; + uint64_t Arc4SboxData4; + uint64_t Arc4SboxData5; + uint64_t Arc4SboxData6; + uint64_t Arc4SboxData7; + uint64_t Arc4SboxData8; + uint64_t Arc4SboxData9; + uint64_t Arc4SboxData10; + uint64_t Arc4SboxData11; + uint64_t Arc4SboxData12; + uint64_t Arc4SboxData13; + uint64_t Arc4SboxData14; + uint64_t Arc4SboxData15; + uint64_t Arc4SboxData16; + uint64_t Arc4SboxData17; + uint64_t Arc4SboxData18; + uint64_t Arc4SboxData19; + uint64_t Arc4SboxData20; + uint64_t Arc4SboxData21; + uint64_t Arc4SboxData22; + uint64_t Arc4SboxData23; + uint64_t Arc4SboxData24; + uint64_t Arc4SboxData25; + uint64_t Arc4SboxData26; + uint64_t Arc4SboxData27; + uint64_t Arc4SboxData28; + uint64_t Arc4SboxData29; + uint64_t Arc4SboxData30; + uint64_t Arc4SboxData31; + uint64_t Arc4IJData; + uint64_t PAD3; + uint64_t PAD4; + uint64_t PAD5; +} ARC4State_t, *ARC4State_pt; + +/* Kasumi f8 - 32 bytes */ +typedef struct KASUMIF8_s { + uint64_t cipherKey0; + uint64_t cipherKey1; +} KASUMIF8_t, *KASUMIF8_pt; + +/* Kasumi f8 + HMAC (MD5, SHA-1, SHA-256) - 80 bytes */ +typedef struct KASUMIF8HMAC_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; +} KASUMIF8HMAC_t, *KASUMIF8HMAC_pt; + +/* Kasumi f8 + HMAC (SHA-384, SHA-512) - 144 bytes */ +typedef struct KASUMIF8HMAC2_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t hmacKey0; + uint64_t hmacKey1; + uint64_t hmacKey2; + uint64_t hmacKey3; + uint64_t hmacKey4; + uint64_t hmacKey5; + uint64_t hmacKey6; + uint64_t hmacKey7; + uint64_t hmacKey8; + uint64_t hmacKey9; + uint64_t hmacKey10; + uint64_t hmacKey11; + uint64_t hmacKey12; + uint64_t hmacKey13; + uint64_t hmacKey14; + uint64_t hmacKey15; +} KASUMIF8HMAC2_t, *KASUMIF8HMAC2_pt; + +/* Kasumi f8 + GCM - 144 bytes */ +typedef struct KASUMIF8GCM_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t GCMH0; + uint64_t GCMH1; + uint64_t GCMSCI; +} KASUMIF8GCM_t, *KASUMIF8GCM_pt; + +/* Kasumi f8 + f9 - 32 
bytes */ +typedef struct KASUMIF8F9_s { + uint64_t cipherKey0; + uint64_t cipherKey1; + uint64_t authKey0; + uint64_t authKey1; +} KASUMIF8F9_t, *KASUMIF8F9_pt; + +typedef union CipherHashInfo_u { + AES256HMAC_t infoAES256HMAC; + AES256_t infoAES256; + AES192HMAC_t infoAES192HMAC; + AES192_t infoAES192; + AES128HMAC_t infoAES128HMAC; + AES128_t infoAES128; + DESHMAC_t infoDESHMAC; + DES_t infoDES; + DES3HMAC_t info3DESHMAC; + DES3_t info3DES; + HMAC_t infoHMAC; + /* ARC4 */ + ARC4HMAC_t infoARC4HMAC; + ARC4StateHMAC_t infoARC4StateHMAC; + ARC4_t infoARC4; + ARC4State_t infoARC4State; + /* AES mode F8 */ + AES256F8HMAC_t infoAES256F8HMAC; + AES256F8_t infoAES256F8; + AES192F8HMAC_t infoAES192F8HMAC; + AES192F8_t infoAES192F8; + AES128F8HMAC_t infoAES128F8HMAC; + AES128F8_t infoAES128F8; + /* KASUMI F8 */ + KASUMIF8HMAC_t infoKASUMIF8HMAC; + KASUMIF8_t infoKASUMIF8; + /* GCM */ + GCM_t infoGCM; + AES256F8GCM_t infoAES256F8GCM; + AES192F8GCM_t infoAES192F8GCM; + AES128F8GCM_t infoAES128F8GCM; + AES256GCM_t infoAES256GCM; + AES192GCM_t infoAES192GCM; + AES128GCM_t infoAES128GCM; + DESGCM_t infoDESGCM; + DES3GCM_t info3DESGCM; + ARC4GCM_t infoARC4GCM; + ARC4StateGCM_t infoARC4StateGCM; + KASUMIF8GCM_t infoKASUMIF8GCM; + /* HMAC2 */ + HMAC2_t infoHMAC2; + AES256F8HMAC2_t infoAES256F8HMAC2; + AES192F8HMAC2_t infoAES192F8HMAC2; + AES128F8HMAC2_t infoAES128F8HMAC2; + AES256HMAC2_t infoAES256HMAC2; + AES192HMAC2_t infoAES192HMAC2; + AES128HMAC2_t infoAES128HMAC2; + DESHMAC2_t infoDESHMAC2; + DES3HMAC2_t info3DESHMAC2; + ARC4HMAC2_t infoARC4HMAC2; + ARC4StateHMAC2_t infoARC4StateHMAC2; + KASUMIF8HMAC2_t infoKASUMIF8HMAC2; + /* F9 */ + F9_t infoF9; + AES256F8F9_t infoAES256F8F9; + AES192F8F9_t infoAES192F8F9; + AES128F8F9_t infoAES128F8F9; + AES256F9_t infoAES256F9; + AES192F9_t infoAES192F9; + AES128F9_t infoAES128F9; + DESF9_t infoDESF9; + DES3F9_t info3DESF9; + ARC4F9_t infoARC4F9; + ARC4StateF9_t infoARC4StateF9; + KASUMIF8F9_t infoKASUMIF8F9; +} CipherHashInfo_t, *CipherHashInfo_pt; + + +/* + * + * ControlDescriptor_s datastructure + * + */ + +typedef struct ControlDescriptor_s { + uint64_t instruction; + CipherHashInfo_t cipherHashInfo; +} ControlDescriptor_t, *ControlDescriptor_pt; + + + + +/* ********************************************************************** + * PacketDescriptor_t + * ********************************************************************** + */ + +/* /--------------------------------------------\ + * | | + * | New PacketDescriptor_s datastructure | + * | | + * \--------------------------------------------/ + * + * + * + * PacketDescriptor_t.srcLengthIVOffUseIVNext + * ------------------------------------------ + * + * 63 62 61 59 58 57 56 54 53 43 + * ------------------------------------------------------------------------------------------------ + * || Load HMAC key || Pad Hash || Hash Byte Count || Next || Use IV || IV Offset || Packet length || ... CONT ... 
+ * ------------------------------------------------------------------------------------------------ + * 1 1 3 1 1 3 11 + * + * + * 42 41 40 39 5 4 3 2 + * 0 + * ---------------------------------------------------------------------------------------------------- + * || NLHMAC || Break || Wait || Segment src address || SRTCP || Reserved || Global src data offset || + * ---------------------------------------------------------------------------------------------------- + * 1 1 1 35 1 1 3 + * + * + * + * Load HMAC key = 1'b0 Preserve old HMAC key stored in Auth engine (moot if HASH.HMAC == 0) + * 1'b1 Load HMAC key from ID registers at beginning of op + * If GCM is selected as authenticator, setting this bit + * will cause the H value from ID to be loaded to the engine + * If Kasumi F9 is selected as authenticator, setting this bit + * will cause the IK value from ID to be loaded to the engine. + * Pad Hash = 1'b0 HASH will assume the data was padded to be a multiple + * of 512 bits in length and that the last 64 bit word + * expresses the total datalength in bits seen by HASH engine + * 1'b1 The data was not padded to be a multiple of 512 bits in length; + * The Hash engine will do its own padding to generate the correct digest. + * Ignored by GCM (always does its own padding) + * Hash Byte Count Number of BYTES on last 64-bit data word to use in digest calculation RELEVANT ONLY IF Pad Hash IS SET + * 3'b000 Use all 8 + * 3'b001 Use first (MS) byte only (0-out rest), i.e., 0xddXXXXXXXXXXXXXX + * 3'b010 Use first 2 bytes only (0-out rest), i.e., 0xddddXXXXXXXXXXXX ... etc + * Next = 1'b0 Finish (return msg descriptor) at end of operation + * 1'b1 Grab the next PacketDescriptor (i.e. next cache-line) when the current is complete. + * This allows for fragmentation/defragmentation and processing of large (>16kB) packets. + * The sequence of adjacent PacketDescriptor acts as a contiguous linked list of + * pointers to the actual packets with Next==0 on the last PacketDescriptor to end processing. + * Use IV = 1'b0 On first frag: Use old IV + * On subsequent frags: Do not write out to DST the (dword) offset data + * 1'b1 On first frag: Use data @ Segment_address + IV_Offset as IV + * On subsequent frags: Do write out to DST the (dword) offset data + * IV Offset = On first frag: Offset IN NB OF 8 BYTE WORDS (dwords) from beginning of packet + * (i.e. (Potentially byte-shifted) Segment address) to cipher IV + * On subsequent frags: Offset to beginning of data to process; data to offset won't + * be given to engines and will be written out to dst in the clear. + * ON SUBSEQUENT FRAGS, IV_Offset MAY NOT EXCEED 3; LARGER VALUES WILL CAUSE AN ERROR + * SEE ERROR CONDITIONS BELOW + * Packet length = Nb double words to stream in (Including Segment address->CP/IV/Auth/CkSum offsets) + * This is the total amount of data (x8 in bytes) read (+1 dword if "Global src data offset" != 0) + * This is the total amount of data (x8 in bytes) written (+1 dword if "Global dst data offset" != 0, if Dst dword offset == 0) + * If Packet length == 11'h7ff and (Global src data offset != 0 or Global dst data offset != 0) + * the operation is aborted (no mem writes occur) + * and the "Insufficient Data To Cipher" error flag is raised + * NLHMAC = No last to hmac. Setting this to 1 will prevent the transmission of the last DWORD + * to the authenticator, i.e., the DWORD before last will be designated as last for the purposes of authentication. 
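+ *
+ *                              (Illustrative aside, not part of the original field list: the driver
+ *                              code later in this change, xlr_sec_setup_packet() in rmilib.c, composes
+ *                              this word with the FIELD_VALUE() macros defined below; for the F8/CTR
+ *                              path it is built roughly as
+ *
+ *                                  FIELD_VALUE(PKT_DSC_HASHBYTES, len & 7) |
+ *                                  FIELD_VALUE(PKT_DSC_IVOFF, cipher_offset_dwords) |
+ *                                  FIELD_VALUE(PKT_DSC_PKTLEN, nlhmac + ((len + 7) >> 3)) |
+ *                                  FIELD_VALUE(PKT_DSC_NLHMAC, nlhmac) |
+ *                                  FIELD_VALUE(PKT_DSC_SEGADDR, seg_addr >> PKT_DSC_SEGADDR_LSB) |
+ *                                  FIELD_VALUE(PKT_DSC_SEGOFFSET, global_offset);
+ *
+ *                              where len, nlhmac, seg_addr and global_offset are the byte length,
+ *                              odd-dword flag, cacheline-aligned source address and 0-7 byte shift
+ *                              derived from the caller's source buffer.)
+ *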
+ * Break = Break a wait (see below) state - causes the operation to be flushed and free descriptor to be returned. + * Activated if DFetch blocked by Wait and Wait still active. + * AS OF 02/10/2005 THIS FEATURE IS EXPERIMENTAL + * Wait = Setting that bit causes the operation to block in DFetch stage. + * DFetch will keep polling the memory location until the bit is reset at which time + * the pipe resumes normal operation. This feature is convenient for software dealing with fragmented packets. + * AS OF 02/10/2005 THIS FEATURE IS EXPERIMENTAL + * Segment src address = 35 MSB of pointer to src data (i.e., cache-line aligned) + * SRTCP = Bypass the cipher for the last 4 bytes of data, i.e. the last 4 bytes will be sent to memory + * and the authenticator in the clear. Applicable to last packet descriptor andlast frag only. + * This accommodates a requirement of SRTCP. + * Global src data offset = Nb BYTES to right-shift data by before presenting it to engines + * (0-7); allows realignment of byte-aligned, non-double-word aligned data + * + * PacketDescriptor_t.dstDataSettings + * ---------------------------------- + * + * + * 63 62 60 59 58 56 55 54 53 52 41 40 + * ------------------------------------------------------------------------------------------------------------ + * || CipherPrefix | Arc4ByteCount | E/D | Cipher_Offset || Hash_Offset | Hash_Src || CkSum_Offset | CkSum_Src || ... CONT ... + * ------------------------------------------------------------------------------------------------------------ + * 1 3 1 3 2 1 12 1 + * <-----------------------CIPHER-----------------------><---------HASH-----------><-------CHECKSUM-----------> + * + * + * CipherPrefix = 128'b0 will be sent to the selected cipher + * KEEP VALUE ON ALL FRAGS after the IV is loaded, before the actual data goes in. + * The result of that encryption (aka E(K, 0))will be stored + * locally and XOR-ed with the auth digest to create the final + * digest at the end of the auth OP: + * This is covered by the GCM spec + * AesPrefix = 1'b1 -> Force E=Cipher(K,0) before start of data encr. + * -> Digest ^= E + * AesPrefix = 1'b0 -> Regular digest + * This flag is ignored if no cipher is chosen (Bypass condition) + * X0 Arc4ByteCount = Number of BYTES on last 64-bit data word to encrypt + * 3'b000 Encrypt all 8 + * 3'b001 Encrypt first (MS) byte only i.e., 0xddXXXXXXXXXXXXXX + * 3'b010 Encrypt first 2 bytes only i.e., 0xddddXXXXXXXXXXXX ... etc + * In reality, all are encrypted, however, the SBOX + * is not written past the last byte to encrypt + * E/D = 1'b0 Decrypt + * 1'b1 Encrypt + * Overloaded to also mean IV byte offset for first frag + * Cipher_Offset = Nb of words between the first data segment + * and word on which to start cipher operation + * (64 BIT WORDS !!!) + * Hash_Offset = Nb of words between the first data segment + * and word on which to start hashing + * (64 bit words) + * Hash_Src = 1'b0 DMA channel + * 1'b1 Cipher if word count exceeded Cipher_Offset; + * DMA channel otherwise + * CkSum_Offset = Nb of words between the first data segment + * and word on which to start + * checksum calculation (32 BIT WORDS !!!) 
+ * CkSum_Src = 1'b0 DMA channel + * 1'b1 Cipher if word count exceeded Cipher_Offset + * DMA channel otherwise + * Cipher dst address = 35 MSB of pointer to dst location (i.e., cache-line aligned) + * Dst dword offset = Nb of double-words to left-shift data from spec'ed Cipher dst address before writing it to memory + * Global dst data offset = Nb BYTES to left-shift (double-word boundary aligned) data by before writing it to memory + * + * + * PacketDescriptor_t.authDstNonceLow + * ---------------------------------- + * + * 63 40 39 5 4 0 + * ----------------------------------------------------- + * || Nonce_Low || Auth_dst_address || Cipher_Offset_Hi || + * ----------------------------------------------------- + * 24 35 5 + * + * + * + * Nonce_Low = Nonce[23:0] 24 least significant bits of 32-bit long nonce + * Used by AES in counter mode + * Auth_dst_address = 35 MSB of pointer to authentication dst location (i.e., cache-line aligned) + * X0 Cipher_Offset_Hi = On first frag: 5 MSB of 8-bit Cipher_offset; will be concatenated to + * the top of PacketDescriptor_t.dstDataSettings.Cipher_Offset + * On subsequent frags: Ignored + * + * + * PacketDescriptor_t.ckSumDstNonceHiCFBMaskLLWMask + * ------------------------------------------------ + * + * + * 63 61 60 58 57 56 55 48 47 40 39 5 4 0 + * ------------------------------------------------------------------------------------------------------------------- + * || Hash_Byte_Offset || Packet length bytes || LLWMask || CFB_Mask || Nonce_Hi || CkSum_dst_address || IV_Offset_Hi || + * ------------------------------------------------------------------------------------------------------------------- + * 3 3 2 8 8 35 5 + * + * + * Hash_Byte_Offset = On first frag: Additional offset in bytes to be added to Hash_Offset + * to obtain the full offset applied to the data before + * submitting it to authenticator + * On subsequent frags: Same + * Packet length bytes = On one fragment payloads: Ignored (i.e. assumed to be 0, last dword used in its entirety) + * On fragments before last: Number of bytes on last fragment dword + * On last fragment: Ignored (i.e. assumed to be 0, last dword used in its entirety) + * LLWMask, aka, Last_long_word_mask = 2'b00 Give last 128 bit word from AES engine to auth/cksum/wrbbufer as is - applicable in AES CTR only + * 2'b11 Mask (zero-out) 32 least significant bits + * 2'b10 Mask 64 LSBs + * 2'b01 Mask 96 LSBs + * If the GCM authenticator is used, setting LLWMask to 2'b10 or 2'b01 + * will also prevent the transmission of the last DWORD + * to the authenticator, i.e., the DWORD before last will + * be designated as last for the purposes of authentication. 
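+ *
+ *                              (Illustrative example, not in the original text: a CTR/GCM payload whose
+ *                              final 128-bit block carries only 8 meaningful bytes would set LLWMask to
+ *                              2'b10, so the 64 LSBs are zeroed before the block is handed to the
+ *                              authenticator / checksum / write-back buffer.)
+ *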
+ * CFB_Mask = 8 bit mask used by AES in CFB mode + * In CTR mode: + * CFB_Mask[1:0] = 2'b00 -> Counter[127:0] = {Nonce[31:0], IV0[63:0], 4'h00000001} (only 1 IV exp +ected) regular CTR + * 2'b01 -> Counter[127:0] = {Nonce[31:0], IV0[63:0], IV1[31:0]} (2 IV expected +) CCMP + * 2'b10 -> Counter[127:0] = {IV1[63:0], IV0[31:0], Nonce[31:0]} (2 IV expected +) GCM with SCI + * 2'b11 -> Counter[127:0] = {IDecode.SCI[63:0], IV0[31:0], Nonce[31:0]} (1 IV expected +) GCM w/o SCI + * Nonce_Hi = Nonce[31:24] 8 most significant bits of 32-bit long nonce + * Used by AES in counter mode + * CkSum_dst_address = 35 MSB of pointer to cksum dst location (i.e., cache-line aligned) + * X0 IV_Offset_Hi = On first frag: 5 MSB of 8-bit IV offset; will be concatenated to + * the top of PacketDescriptor_t.srcLengthIVOffUseIVNext.IV_Offset + * On subsequent frags: Ignored + */ + +/* #define PKT_DSC_LOADHMACKEY */ +#define PKT_DSC_LOADHMACKEY_OLD 0 +#define PKT_DSC_LOADHMACKEY_LOAD 1 +#define PKT_DSC_LOADHMACKEY_LSB 63 +#define PKT_DSC_LOADHMACKEY_BITS ONE_BIT +#define PKT_DSC_LOADHMACKEY_MASK \ + (PKT_DSC_LOADHMACKEY_BITS << PKT_DSC_LOADHMACKEY_LSB) + +/* #define PKT_DSC_PADHASH */ +#define PKT_DSC_PADHASH_PADDED 0 +#define PKT_DSC_PADHASH_PAD 1 /* requires padding */ +#define PKT_DSC_PADHASH_LSB 62 +#define PKT_DSC_PADHASH_BITS ONE_BIT +#define PKT_DSC_PADHASH_MASK (PKT_DSC_PADHASH_BITS << PKT_DSC_PADHASH_LSB) + +/* #define PKT_DSC_HASHBYTES */ +#define PKT_DSC_HASHBYTES_ALL8 0 +#define PKT_DSC_HASHBYTES_MSB 1 +#define PKT_DSC_HASHBYTES_MSW 2 +#define PKT_DSC_HASHBYTES_LSB 59 +#define PKT_DSC_HASHBYTES_BITS THREE_BITS +#define PKT_DSC_HASHBYTES_MASK \ + (PKT_DSC_HASHBYTES_BITS << PKT_DSC_HASHBYTES_LSB) + +/* #define PKT_DSC_NEXT */ +#define PKT_DSC_NEXT_FINISH 0 +#define PKT_DSC_NEXT_DO 1 +#define PKT_DSC_NEXT_LSB 58 +#define PKT_DSC_NEXT_BITS ONE_BIT +#define PKT_DSC_NEXT_MASK (PKT_DSC_NEXT_BITS << PKT_DSC_NEXT_LSB) + +/* #define PKT_DSC_IV */ +#define PKT_DSC_IV_OLD 0 +#define PKT_DSC_IV_NEW 1 +#define PKT_DSC_IV_LSB 57 +#define PKT_DSC_IV_BITS ONE_BIT +#define PKT_DSC_IV_MASK (PKT_DSC_IV_BITS << PKT_DSC_IV_LSB) + +/* #define PKT_DSC_IVOFF */ +#define PKT_DSC_IVOFF_LSB 54 +#define PKT_DSC_IVOFF_BITS THREE_BITS +#define PKT_DSC_IVOFF_MASK (PKT_DSC_IVOFF_BITS << PKT_DSC_IVOFF_LSB) + +/* #define PKT_DSC_PKTLEN */ +#define PKT_DSC_PKTLEN_LSB 43 +#define PKT_DSC_PKTLEN_BITS ELEVEN_BITS +#define PKT_DSC_PKTLEN_MASK (PKT_DSC_PKTLEN_BITS << PKT_DSC_PKTLEN_LSB) + +/* #define PKT_DSC_NLHMAC */ +#define PKT_DSC_NLHMAC_LSB 42 +#define PKT_DSC_NLHMAC_BITS ONE_BIT +#define PKT_DSC_NLHMAC_MASK (PKT_DSC_NLHMAC_BITS << PKT_DSC_NLHMAC_LSB) + +/* #define PKT_DSC_BREAK */ +#define PKT_DSC_BREAK_OLD 0 +#define PKT_DSC_BREAK_NEW 1 +#define PKT_DSC_BREAK_LSB 41 +#define PKT_DSC_BREAK_BITS ONE_BIT +#define PKT_DSC_BREAK_MASK (PKT_DSC_BREAK_BITS << PKT_DSC_BREAK_LSB) + +/* #define PKT_DSC_WAIT */ +#define PKT_DSC_WAIT_OLD 0 +#define PKT_DSC_WAIT_NEW 1 +#define PKT_DSC_WAIT_LSB 40 +#define PKT_DSC_WAIT_BITS ONE_BIT +#define PKT_DSC_WAIT_MASK (PKT_DSC_WAIT_BITS << PKT_DSC_WAIT_LSB) + +/* #define PKT_DSC_SEGADDR */ +#define PKT_DSC_SEGADDR_LSB 5 +#define PKT_DSC_SEGADDR_BITS FOURTY_BITS +#define PKT_DSC_SEGADDR_MASK \ + (PKT_DSC_SEGADDR_BITS << PKT_DSC_SEGADDR_LSB) + +/* #define PKT_DSC_SRTCP */ +#define PKT_DSC_SRTCP_OFF 0 +#define PKT_DSC_SRTCP_ON 1 +#define PKT_DSC_SRTCP_LSB 4 +#define PKT_DSC_SRTCP_BITS ONE_BIT +#define PKT_DSC_SRTCP_MASK (PKT_DSC_SRTCP_BITS << PKT_DSC_SRTCP_LSB) + +#define PKT_DSC_SEGOFFSET_LSB 0 +#define 
PKT_DSC_SEGOFFSET_BITS THREE_BITS +#define PKT_DSC_SEGOFFSET_MASK \ + (PKT_DSC_SEGOFFSET_BITS << PKT_DSC_SEGOFFSET_LSB) + +/* ********************************************************************** + * PacketDescriptor_t.dstDataSettings + * ********************************************************************** + */ +/* #define PKT_DSC_ARC4BYTECOUNT */ +#define PKT_DSC_ARC4BYTECOUNT_ALL8 0 +#define PKT_DSC_ARC4BYTECOUNT_MSB 1 +#define PKT_DSC_ARC4BYTECOUNT_MSW 2 +#define PKT_DSC_ARC4BYTECOUNT_LSB 60 +#define PKT_DSC_ARC4BYTECOUNT_BITS THREE_BITS +#define PKT_DSC_ARC4BYTECOUNT_MASK (PKT_DSC_ARC4BYTECOUNT_BITS << PKT_DSC_ARC4BYTECOUNT_LSB) + +/* #define PKT_DSC_SYM_OP (symmetric key operation) */ +#define PKT_DSC_SYM_OP_DECRYPT 0 +#define PKT_DSC_SYM_OP_ENCRYPT 1 +#define PKT_DSC_SYM_OP_LSB 59 +#define PKT_DSC_SYM_OP_BITS ONE_BIT +#define PKT_DSC_SYM_OP_MASK (PKT_DSC_SYM_OP_BITS << PKT_DSC_SYM_OP_LSB) + +/* #define PKT_DSC_CPHROFF */ +#define PKT_DSC_CPHROFF_LSB 56 +#define PKT_DSC_CPHROFF_BITS THREE_BITS +#define PKT_DSC_CPHROFF_MASK (PKT_DSC_CPHROFF_BITS << PKT_DSC_CPHROFF_LSB) + +/* #define PKT_DSC_HASHOFF */ +#define PKT_DSC_HASHOFF_LSB 54 +#define PKT_DSC_HASHOFF_BITS TWO_BITS +#define PKT_DSC_HASHOFF_MASK (PKT_DSC_HASHOFF_BITS << PKT_DSC_HASHOFF_LSB) + +/* #define PKT_DSC_HASHSRC */ +#define PKT_DSC_HASHSRC_DMA 0 +#define PKT_DSC_HASHSRC_CIPHER 1 +#define PKT_DSC_HASHSRC_LSB 53 +#define PKT_DSC_HASHSRC_BITS ONE_BIT +#define PKT_DSC_HASHSRC_MASK (PKT_DSC_HASHSRC_BITS << PKT_DSC_HASHSRC_LSB) + +/* #define PKT_DSC_CKSUMOFF */ +#define PKT_DSC_CKSUMOFF_LSB 41 +#define PKT_DSC_CKSUMOFF_BITS TWELVE_BITS +#define PKT_DSC_CKSUMOFF_MASK (PKT_DSC_CKSUMOFF_BITS << PKT_DSC_CKSUMOFF_LSB) + +/* #define PKT_DSC_CKSUMSRC */ +#define PKT_DSC_CKSUMSRC_DMA 0 +#define PKT_DSC_CKSUMSRC_CIPHER 1 +#define PKT_DSC_CKSUMSRC_LSB 40 +#define PKT_DSC_CKSUMSRC_BITS ONE_BIT +#define PKT_DSC_CKSUMSRC_MASK (PKT_DSC_CKSUMSRC_BITS << PKT_DSC_CKSUMSRC_LSB) + +/* #define PKT_DSC_CPHR_DST_ADDR */ +#define PKT_DSC_CPHR_DST_ADDR_LSB 0 +#define PKT_DSC_CPHR_DST_ADDR_BITS FOURTY_BITS +#define PKT_DSC_CPHR_DST_ADDR_MASK \ + (PKT_DSC_CPHR_DST_ADDR_BITS << PKT_DSC_CPHR_DST_ADDR_LSB) + +/* #define PKT_DSC_CPHR_DST_DWOFFSET */ +#define PKT_DSC_CPHR_DST_DWOFFSET_LSB 3 +#define PKT_DSC_CPHR_DST_DWOFFSET_BITS TWO_BITS +#define PKT_DSC_CPHR_DST_DWOFFSET_MASK \ + (PKT_DSC_CPHR_DST_DWOFFSET_BITS << PKT_DSC_CPHR_DST_DWOFFSET_LSB) + + /* #define PKT_DSC_CPHR_DST_OFFSET */ +#define PKT_DSC_CPHR_DST_OFFSET_LSB 0 +#define PKT_DSC_CPHR_DST_OFFSET_BITS THREE_BITS +#define PKT_DSC_CPHR_DST_OFFSET_MASK \ + (PKT_DSC_CPHR_DST_OFFSET_BITS << PKT_DSC_CPHR_DST_OFFSET_LSB) + +/* ********************************************************************** + * PacketDescriptor_t.authDstNonceLow + * ********************************************************************** + */ +/* #define PKT_DSC_NONCE_LOW */ +#define PKT_DSC_NONCE_LOW_LSB 40 +#define PKT_DSC_NONCE_LOW_BITS TWENTYFOUR_BITS +#define PKT_DSC_NONCE_LOW_MASK \ + (PKT_DSC_NONCE_LOW_BITS << PKT_DSC_NONCE_LOW_LSB) + +/* #define PKT_DSC_AUTH_DST_ADDR */ +#define PKT_DSC_AUTH_DST_ADDR_LSB 0 +#define PKT_DSC_AUTH_DST_ADDR_BITS FOURTY_BITS +#define PKT_DSC_AUTH_DST_ADDR_MASK \ + (PKT_DSC_AUTH_DST_ADDR_BITS << PKT_DSC_AUTH_DST_ADDR_LSB) + +/* #define PKT_DSC_CIPH_OFF_HI */ +#define PKT_DSC_CIPH_OFF_HI_LSB 0 +#define PKT_DSC_CIPH_OFF_HI_BITS FIVE_BITS +#define PKT_DSC_CIPH_OFF_HI_MASK (PKT_DSC_CIPH_OFF_HI_BITS << PKT_DSC_CIPH_OFF_HI_LSB) + +/* 
********************************************************************** + * PacketDescriptor_t.ckSumDstNonceHiCFBMaskLLWMask + * ********************************************************************** + */ +/* #define PKT_DSC_HASH_BYTE_OFF */ +#define PKT_DSC_HASH_BYTE_OFF_LSB 61 +#define PKT_DSC_HASH_BYTE_OFF_BITS THREE_BITS +#define PKT_DSC_HASH_BYTE_OFF_MASK (PKT_DSC_HASH_BYTE_OFF_BITS << PKT_DSC_HASH_BYTE_OFF_LSB) + +/* #define PKT_DSC_PKTLEN_BYTES */ +#define PKT_DSC_PKTLEN_BYTES_LSB 58 +#define PKT_DSC_PKTLEN_BYTES_BITS THREE_BITS +#define PKT_DSC_PKTLEN_BYTES_MASK (PKT_DSC_PKTLEN_BYTES_BITS << PKT_DSC_PKTLEN_BYTES_LSB) + +/* #define PKT_DSC_LASTWORD */ +#define PKT_DSC_LASTWORD_128 0 +#define PKT_DSC_LASTWORD_96MASK 1 +#define PKT_DSC_LASTWORD_64MASK 2 +#define PKT_DSC_LASTWORD_32MASK 3 +#define PKT_DSC_LASTWORD_LSB 56 +#define PKT_DSC_LASTWORD_BITS TWO_BITS +#define PKT_DSC_LASTWORD_MASK (PKT_DSC_LASTWORD_BITS << PKT_DSC_LASTWORD_LSB) + +/* #define PKT_DSC_CFB_MASK */ +#define PKT_DSC_CFB_MASK_LSB 48 +#define PKT_DSC_CFB_MASK_BITS EIGHT_BITS +#define PKT_DSC_CFB_MASK_MASK (PKT_DSC_CFB_MASK_BITS << PKT_DSC_CFB_MASK_LSB) + +/* #define PKT_DSC_NONCE_HI */ +#define PKT_DSC_NONCE_HI_LSB 40 +#define PKT_DSC_NONCE_HI_BITS EIGHT_BITS +#define PKT_DSC_NONCE_HI_MASK (PKT_DSC_NONCE_HI_BITS << PKT_DSC_NONCE_HI_LSB) + +/* #define PKT_DSC_CKSUM_DST_ADDR */ +#define PKT_DSC_CKSUM_DST_ADDR_LSB 5 +#define PKT_DSC_CKSUM_DST_ADDR_BITS THIRTY_FIVE_BITS +#define PKT_DSC_CKSUM_DST_ADDR_MASK (PKT_DSC_CKSUM_DST_ADDR_BITS << PKT_DSC_CKSUM_DST_ADDR_LSB) + +/* #define PKT_DSC_IV_OFF_HI */ +#define PKT_DSC_IV_OFF_HI_LSB 0 +#define PKT_DSC_IV_OFF_HI_BITS FIVE_BITS +#define PKT_DSC_IV_OFF_HI_MASK (PKT_DSC_IV_OFF_HI_BITS << PKT_DSC_IV_OFF_HI_LSB) + + +/* ****************************************************************** + * Control Error Code and Conditions + * ****************************************************************** + */ +#define CTL_ERR_NONE 0x0000 /* No Error */ +#define CTL_ERR_CIPHER_OP 0x0001 /* Unknown Cipher Op */ +#define CTL_ERR_MODE 0x0002 /* Unknown or Not Allowed Mode */ +#define CTL_ERR_CHKSUM_SRC 0x0004 /* Unknown CkSum Src - UNUSED */ +#define CTL_ERR_CFB_MASK 0x0008 /* Forbidden CFB Mask - UNUSED */ +#define CTL_ERR_OP 0x0010 /* Unknown Ctrl Op - UNUSED */ +#define CTL_ERR_UNDEF1 0x0020 /* UNUSED */ +#define CTL_ERR_UNDEF2 0x0040 /* UNUSED */ +#define CTL_ERR_DATA_READ 0x0080 /* Data Read Error */ +#define CTL_ERR_DESC_CTRL 0x0100 /* Descriptor Ctrl Field Error */ + +#define CTL_ERR_TIMEOUT 0x1000 /* Message Response Timeout */ + +/* ****************************************************************** + * Data Error Code and Conditions + * ****************************************************************** + */ +#define DATA_ERR_NONE 0x0000 /* No Error */ +#define DATA_ERR_LEN_CIPHER 0x0001 /* Not Enough Data To Cipher */ +#define DATA_ERR_IV_ADDR 0x0002 /* Illegal IV Loacation */ +#define DATA_ERR_WD_LEN_AES 0x0004 /* Illegal Nb Words To AES */ +#define DATA_ERR_BYTE_COUNT 0x0008 /* Illegal Pad And ByteCount Spec */ +#define DATA_ERR_LEN_CKSUM 0x0010 /* Not Enough Data To CkSum */ +#define DATA_ERR_OP 0x0020 /* Unknown Data Op */ +#define DATA_ERR_UNDEF1 0x0040 /* UNUSED */ +#define DATA_ERR_READ 0x0080 /* Data Read Error */ +#define DATA_ERR_WRITE 0x0100 /* Data Write Error */ + + +/* + * Common Descriptor + * NOTE: Size of struct is size of cacheline. 
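+ *
+ * Illustrative only (not in the original source): the cacheline-size
+ * assumption stated in the NOTE above could be enforced at build time with
+ * FreeBSD's CTASSERT(), e.g.
+ *
+ *	CTASSERT(sizeof(OperationDescriptor_t) == 32);
+ *	CTASSERT(sizeof(PacketDescriptor_t) == 32);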
+ */ + +typedef struct OperationDescriptor_s { + uint64_t phys_self; + uint32_t stn_id; + uint32_t flags; + uint32_t cpu; + uint32_t seq_num; + uint64_t reserved; +} OperationDescriptor_t, *OperationDescriptor_pt; + + +/* + * This defines the security data descriptor format + */ +typedef struct PacketDescriptor_s { + uint64_t srcLengthIVOffUseIVNext; + uint64_t dstDataSettings; + uint64_t authDstNonceLow; + uint64_t ckSumDstNonceHiCFBMaskLLWMask; +} PacketDescriptor_t, *PacketDescriptor_pt; + +typedef struct { + uint8_t *user_auth; + uint8_t *user_src; + uint8_t *user_dest; + uint8_t *user_state; + uint8_t *kern_auth; + uint8_t *kern_src; + uint8_t *kern_dest; + uint8_t *kern_state; + uint8_t *aligned_auth; + uint8_t *aligned_src; + uint8_t *aligned_dest; + uint8_t *aligned_state; +} xlr_sec_drv_user_t, *xlr_sec_drv_user_pt; + +typedef struct symkey_desc { + OperationDescriptor_t op_ctl; /* size is cacheline */ + PacketDescriptor_t pkt_desc[2]; /* size is cacheline */ + ControlDescriptor_t ctl_desc; /* makes this aligned */ + uint64_t control; /* message word0 */ + uint64_t data; /* message word1 */ + uint64_t ctl_result; + uint64_t data_result; + struct symkey_desc *alloc; /* real allocated addr */ + xlr_sec_drv_user_t user; + //volatile atomic_t flag_complete; + //struct semaphore sem_complete; + //wait_queue_t submit_wait; + + uint8_t *next_src_buf; + uint32_t next_src_len; + + uint8_t *next_dest_buf; + uint32_t next_dest_len; + + uint8_t *next_auth_dest; + uint8_t *next_cksum_dest; + + void *ses; +} symkey_desc_t, *symkey_desc_pt; + + +/* + * ************************************************************************** + * RSA Block + * ************************************************************************** + */ + +/* + * RSA and ECC Block + * ================= + * + * A 2-word message ring descriptor is used to pass all information + * pertaining to the RSA or ECC operation: + * + * 63 61 60 54 53 52 40 39 5 4 3 2 0 + * ----------------------------------------------------------------------------------------------------- + * | Ctrl | Op Class | Valid Op | Op Ctrl0 | Source Addr | Software Scratch0 | Global src data offset | + * ----------------------------------------------------------------------------------------------------- + * 3 7 1 13 35 2 3 + * + * + * 63 61 60 54 53 52 51 50 40 39 5 4 3 2 0 + * -------------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | Destination Id | WRB_COH | WRB_L2ALLOC | DF_L2ALLOC | Op Ctrl1 | Dest Addr | Software Scratch1 | Global dst data offset | + * -------------------------------------------------------------------------------------------------------------------------------- + * 3 7 1 1 1 11 35 2 3 + * + * + * Op Class = 7'h0_0 Modular exponentiation + * 7'h0_1 ECC (including prime modular ops and binary GF ops) + * REMAINDER UNDEF + * + * Valid Op = 1'b1 Will cause operation to start; descriptors sent back at end of operation + * 1'b0 No operation performed; descriptors sent back right away + * + * RSA ECC + * === === + * Op Ctrl0 = BlockWidth[1] {TYPE[6:0], FUNCTION[5:0]} + * LoadConstant[1] + * ExponentWidth[10:0] + * RSA Only + * ======== + * Block Width = 1'b1 1024 bit op + * = 1'b0 512 bit op + * Load Constant = 1'b1 Load constant from data structure + * 1'b0 Preserve old constant (this assumes + * Source Addr points to RSAData_pt->Exponent + * or that the length of Constant is 0) + * Exponent Width = 11-bit expression of exponent width EXPRESSED IN NUMBER OF 
BITS + * + * ECC Only + * ======== + * + * TYPE = 7'h0_0 ECC prime 160 + * 7'h0_1 ECC prime 192 + * 7'h0_2 ECC prime 224 + * 7'h0_3 ECC prime 256 + * 7'h0_4 ECC prime 384 + * 7'h0_5 ECC prime 512 + * + * 7'h0_6 through 7'h1_f UNDEF + * + * 7'h2_0 ECC bin 163 + * 7'h2_1 ECC bin 191 + * 7'h2_2 ECC bin 233 + * + * 7'h2_3 through 7'h6_f UNDEF + * + * 7'h7_0 ECC UC load + * + * 7'b7_1 through 7'b7_f UNDEF + * + * Prime field Binary field + * =========== ============ + * FUNCTION = 6'h0_0 Point multiplication R = k.P Point multiplication R = k.P + * 6'h0_1 Point addition R = P + Q Binary GF inversion C(x) = 1 / A(x) mod F(x) + * 6'h0_2 Point double R = 2 x P Binary GF multiplication C(x) = B(x) * A(x) mod F(x) + * 6'h0_3 Point verification R ? Binary GF addition C(x) = B(x) + A(x) mod F(x) + * 6'h0_4 Modular addition c = x + y mod m UNDEF + * 6'h0_5 Modular substraction c = x - y mod m UNDEF + * 6'h0_6 Modular multiplication c = x * y mod m UNDEF + * 6'h0_7 Modular division c = x / y mod m UNDEF + * 6'h0_8 Modular inversion c = 1 / y mod m UNDEF + * 6'h0_9 Modular reduction c = x mod m UNDEF + * + * 6'h0_a + * through UNDEF UNDEF + * 6'h3_f + * + * Source Addr = 35 MSB of pointer to source address (i.e., cache-line aligned) + * + * Software Scratch0 = Two bit field ignored by engine and returned as is in free descriptor + * + * Global src data offset = Nb BYTES to right-shift data by before presenting it to engines + * (0-7); allows realignment of byte-aligned, non-double-word aligned data + * + * RSA ECC + * === === + * OpCtrl1 = ModulusWidth[10:0] Not used + * RSA Only + * ======== + * Modulus Width = 11-bit expression of modulus width EXPRESSED IN NUMBER OF BITS + * + * Dest Addr = 35 MSB of pointer to destination address (i.e., cache-line aligned) + * + * Software Scratch1 = Two bit field ignored by engine and returned as is in free descriptor + * + * Global dst data offset = Nb BYTES to left-shift (double-word boundary aligned) data by before writing it to memory + * + * + */ + +/* + * ECC data formats + */ + +/********************************************************** + * * + * ECC prime data formats * + * * + ********************************************************** + * + * + * The size of all quantities below is that of the precision + * of the chosen op (160, 192, ...) ROUNDED UP to a multiple + * of 8 bytes, i.e., 3 dwords (160, 192), 4 dwords (224, 256) + * 6 dwords (384) and 8 dwords (512) and padded with zeroes + * when necessary. + * + * The only exception to this is the key quantity (k) which + * needs to be rounded up to 32 bytes in all cases and padded + * with zeroes; therefore the key needs to be 4 dwords (160, 192, + * 224, 256) or 8 dwords (384, 512) + * + * The total lengths in dwords that are read and in + * bytes that are written, for each operation and + * length group, are specified at the bottom of each + * datastructure. + * + * In all that follows, m is the modulus and cst is the constant, + * cst = 2 ^ (2*length + 4) mod m . a and b are the curve + * parameters. + * + * 0) UC load + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> Dword_0 N/A + * . + * . + * . + * Dword_331 + * 332 dw + * + * 1) Point multiplication R(x_r, y_r) = k . 
P(x_p, y_p) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x_p dst+glb_off-> x_r + * x_p y_r + * y_p 2x(3/4/6/8)= + * y_p 6/8/12/16 dw + * a + * k + * m + * cst + * 7x(3/4/6/8)+(4/4/8/8)= + * 25/32/50/64 dw + * + * 2) Point addition R(x_r, y_r) = P(x_p, y_p) + Q(x_q, y_q) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x_p dst+glb_off-> x_r + * y_p y_r + * x_q 2x(3/4/6/8)= + * y_q 6/8/12/16 dw + * a + * m + * cst + * 7x(3/4/6/8)= + * 21/28/42/56 dw + * + * 3) Point double R(x_r, y_r) = 2 . P(x_p, y_p) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x_p dst+glb_off-> x_r + * y_p y_r + * a 2x(3/4/6/8)= + * m 6/8/12/16 dw + * cst + * 5x(3/4/6/8)= + * 15/20/30/40 dw + * + * 4) Point verification Is_On_Curve = P(x_p, y_p) on curve ? 1 : 0 + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x_p dst+glb_off-> Is_On_Curve + * y_p 1 dw + * a + * b + * m + * cst + * 6x(3/4/6/8)= + * 18/24/36/48 dw + * + * 5) Modular addition c = x + y mod m + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x dst+glb_off-> c + * y 3/4/6/8 dw + * m + * 3x(3/4/6/8)= + * 9/12/18/24 dw + * + * 6) Modular substraction c = x - y mod m + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x dst+glb_off-> c + * y 3/4/6/8 dw + * m + * 3x(3/4/6/8)= + * 9/12/18/24 dw + * + * 7) Modular multiplication c = x * y mod m + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x dst+glb_off-> c + * y 3/4/6/8 dw + * m + * cst + * 4x(3/4/6/8)= + * 12/16/24/32 dw + * + * 8) Modular division c = x / y mod m + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> y dst+glb_off-> c + * x 3/4/6/8 dw + * m + * 3x(3/4/6/8)= + * 9/12/18/24 dw + * + * 9) Modular inversion c = 1 / y mod m + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> y dst+glb_off-> c + * m 3/4/6/8 dw + * 2x(3/4/6/8)= + * 6/8/12/16 dw + * + * 10) Modular reduction c = x mod m + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> x dst+glb_off-> c + * m 3/4/6/8 dw + * 2x(3/4/6/8)= + * 6/8/12/16 dw + * + */ + +/********************************************************** + * * + * ECC binary data formats * + * * + ********************************************************** + * + * + * The size of all quantities below is that of the precision + * of the chosen op (163, 191, 233) ROUNDED UP to a multiple + * of 8 bytes, i.e. 3 dwords for (163, 191) and 4 dwords for + * (233), padded with zeroes as necessary. + * + * The total lengths in dwords that are read and written, + * for each operation and length group, are specified + * at the bottom of each datastructure. + * In all that follows, b is the curve parameter. + * + * 1) Point multiplication R(x_r, y_r) = k . 
P(x_p, y_p) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> b dst+glb_off-> x_r + * k y_r + * x_p 2x(3/4) + * y_p 6/8 dw + * 4x(3/4)= + * 12/16 dw + * + * 2) Binary GF inversion C(x) = 1 / A(x) mod F(x) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> A dst+glb_off-> C + * 1x(3/4)= 1x(3/4) + * 3/4 dw 3/4 dw + * + * 3) Binary GF multiplication C(x) = B(x) * A(x) mod F(x) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> A dst+glb_off-> C + * B 1x(3/4) + * 2x(3/4)= 3/4 dw + * 6/8 dw + * + * 4) Binary GF addition C(x) = B(x) + A(x) mod F(x) + * + * DATA IN DATA OUT + * ======= ======== + * src+glb_off-> A dst+glb_off-> C + * B 1x(3/4) + * 2x(3/4)= 3/4 dw + * 6/8dw + * + */ + +/* + * RSA data format + */ + +/* + * IMPORTANT NOTE: + * + * As specified in the datastructures below, + * the engine assumes all data (including + * exponent and modulus) to be adjacent on + * dword boundaries, e.g., + * + * Operation length = 512 bits + * Exponent length = 16 bits + * Modulus length = 512 bits + * + * The engine expects to read: + * + * 63 0 + * ----------------------- + * | | Constant0 + * ----------------------- + * | | Constant1 + * ----------------------- + * | | Constant2 + * ----------------------- + * | | Constant3 + * ----------------------- + * | | Constant4 + * ----------------------- + * | | Constant5 + * ----------------------- + * | | Constant6 + * ----------------------- + * | | Constant7 + * ----------------------- + * | IGNORED |B1|B0| Exponent0 (Exponent length = 16 bits = 2 bytes, so only 2 least significant bytes of exponent used) + * ----------------------- + * | | Modulus0 + * ----------------------- + * | | Modulus1 + * ----------------------- + * | | Modulus2 + * ----------------------- + * | | Modulus3 + * ----------------------- + * | | Modulus4 + * ----------------------- + * | | Modulus5 + * ----------------------- + * | | Modulus6 + * ----------------------- + * | | Modulus7 + * ----------------------- + * | | Message0 + * ----------------------- + * | | Message1 + * ----------------------- + * | | Message2 + * ----------------------- + * | | Message3 + * ----------------------- + * | | Message4 + * ----------------------- + * | | Message5 + * ----------------------- + * | | Message6 + * ----------------------- + * | | Message7 + * ----------------------- + * + */ + +/* #define PUBKEY_CTL_CTL */ +#define PUBKEY_CTL_CTL_LSB 61 +#define PUBKEY_CTL_CTL_BITS THREE_BITS +#define PUBKEY_CTL_CTL_MASK (PUBKEY_CTL_CTL_BITS << PUBKEY_CTL_CTL_LSB) + +/* #define PUBKEY_CTL_OP_CLASS */ +#define PUBKEY_CTL_OP_CLASS_RSA 0 +#define PUBKEY_CTL_OP_CLASS_ECC 1 +#define PUBKEY_CTL_OP_CLASS_LSB 54 +#define PUBKEY_CTL_OP_CLASS_BITS SEVEN_BITS +#define PUBKEY_CTL_OP_CLASS_MASK (PUBKEY_CTL_OP_CLASS_BITS << PUBKEY_CTL_OP_CLASS_LSB) + +/* #define PUBKEY_CTL_VALID */ +#define PUBKEY_CTL_VALID_FALSE 0 +#define PUBKEY_CTL_VALID_TRUE 1 +#define PUBKEY_CTL_VALID_LSB 53 +#define PUBKEY_CTL_VALID_BITS ONE_BIT +#define PUBKEY_CTL_VALID_MASK \ + (PUBKEY_CTL_VALID_BITS << PUBKEY_CTL_VALID_LSB) + +/* #define PUBKEY_CTL_ECC_TYPE */ +#define PUBKEY_CTL_ECC_TYPE_PRIME_160 0 +#define PUBKEY_CTL_ECC_TYPE_PRIME_192 1 +#define PUBKEY_CTL_ECC_TYPE_PRIME_224 2 +#define PUBKEY_CTL_ECC_TYPE_PRIME_256 3 +#define PUBKEY_CTL_ECC_TYPE_PRIME_384 4 +#define PUBKEY_CTL_ECC_TYPE_PRIME_512 5 +#define PUBKEY_CTL_ECC_TYPE_BIN_163 0x20 +#define PUBKEY_CTL_ECC_TYPE_BIN_191 0x21 +#define PUBKEY_CTL_ECC_TYPE_BIN_233 0x22 +#define PUBKEY_CTL_ECC_TYPE_UC_LOAD 0x70 +#define 
PUBKEY_CTL_ECC_TYPE_LSB 46 +#define PUBKEY_CTL_ECC_TYPE_BITS SEVEN_BITS +#define PUBKEY_CTL_ECC_TYPE_MASK (PUBKEY_CTL_ECC_TYPE_BITS << PUBKEY_CTL_ECC_TYPE_LSB) + +/* #define PUBKEY_CTL_ECC_FUNCTION */ +#define PUBKEY_CTL_ECC_FUNCTION_NOP 0 +#define PUBKEY_CTL_ECC_FUNCTION_POINT_MUL 0 +#define PUBKEY_CTL_ECC_FUNCTION_POINT_ADD 1 +#define PUBKEY_CTL_ECC_FUNCTION_POINT_DBL 2 +#define PUBKEY_CTL_ECC_FUNCTION_POINT_VFY 3 +#define PUBKEY_CTL_ECC_FUNCTION_MODULAR_ADD 4 +#define PUBKEY_CTL_ECC_FUNCTION_MODULAR_SUB 5 +#define PUBKEY_CTL_ECC_FUNCTION_MODULAR_MUL 6 +#define PUBKEY_CTL_ECC_FUNCTION_MODULAR_DIV 7 +#define PUBKEY_CTL_ECC_FUNCTION_MODULAR_INV 8 +#define PUBKEY_CTL_ECC_FUNCTION_MODULAR_RED 9 +#define PUBKEY_CTL_ECC_FUNCTION_LSB 40 +#define PUBKEY_CTL_ECC_FUNCTION_BITS SIX_BITS +#define PUBKEY_CTL_ECC_FUNCTION_MASK (PUBKEY_CTL_ECC_FUNCTION_BITS << PUBKEY_CTL_ECC_FUNCTION_LSB) + +/* #define PUBKEY_CTL_BLKWIDTH */ +#define PUBKEY_CTL_BLKWIDTH_512 0 +#define PUBKEY_CTL_BLKWIDTH_1024 1 +#define PUBKEY_CTL_BLKWIDTH_LSB 52 +#define PUBKEY_CTL_BLKWIDTH_BITS ONE_BIT +#define PUBKEY_CTL_BLKWIDTH_MASK \ + (PUBKEY_CTL_BLKWIDTH_BITS << PUBKEY_CTL_BLKWIDTH_LSB) + +/* #define PUBKEY_CTL_LD_CONST */ +#define PUBKEY_CTL_LD_CONST_OLD 0 +#define PUBKEY_CTL_LD_CONST_NEW 1 +#define PUBKEY_CTL_LD_CONST_LSB 51 +#define PUBKEY_CTL_LD_CONST_BITS ONE_BIT +#define PUBKEY_CTL_LD_CONST_MASK \ + (PUBKEY_CTL_LD_CONST_BITS << PUBKEY_CTL_LD_CONST_LSB) + +/* #define PUBKEY_CTL_EXPWIDTH */ +#define PUBKEY_CTL_EXPWIDTH_LSB 40 +#define PUBKEY_CTL_EXPWIDTH_BITS ELEVEN_BITS +#define PUBKEY_CTL_EXPWIDTH_MASK \ + (PUBKEY_CTL_EXPWIDTH_BITS << PUBKEY_CTL_EXPWIDTH_LSB) + +/* #define PUBKEY_CTL_SRCADDR */ +#define PUBKEY_CTL_SRCADDR_LSB 0 +#define PUBKEY_CTL_SRCADDR_BITS FOURTY_BITS +#define PUBKEY_CTL_SRCADDR_MASK \ + (PUBKEY_CTL_SRCADDR_BITS << PUBKEY_CTL_SRCADDR_LSB) + +/* #define PUBKEY_CTL_SRC_OFFSET */ +#define PUBKEY_CTL_SRC_OFFSET_LSB 0 +#define PUBKEY_CTL_SRC_OFFSET_BITS THREE_BITS +#define PUBKEY_CTL_SRC_OFFSET_MASK \ + (PUBKEY_CTL_SRC_OFFSET_BITS << PUBKEY_CTL_SRC_OFFSET_LSB) + + +/* #define PUBKEY_CTL1_CTL */ +#define PUBKEY_CTL1_CTL_LSB 61 +#define PUBKEY_CTL1_CTL_BITS THREE_BITS +#define PUBKEY_CTL1_CTL_MASK (PUBKEY_CTL_CTL_BITS << PUBKEY_CTL_CTL_LSB) + +/* #define PUBKEY_CTL1_MODWIDTH */ +#define PUBKEY_CTL1_MODWIDTH_LSB 40 +#define PUBKEY_CTL1_MODWIDTH_BITS ELEVEN_BITS +#define PUBKEY_CTL1_MODWIDTH_MASK \ + (PUBKEY_CTL1_MODWIDTH_BITS << PUBKEY_CTL1_MODWIDTH_LSB) + +/* #define PUBKEY_CTL1_DSTADDR */ +#define PUBKEY_CTL1_DSTADDR_LSB 0 +#define PUBKEY_CTL1_DSTADDR_BITS FOURTY_BITS +#define PUBKEY_CTL1_DSTADDR_MASK \ + (PUBKEY_CTL1_DSTADDR_BITS << PUBKEY_CTL1_DSTADDR_LSB) + +/* #define PUBKEY_CTL1_DST_OFFSET */ +#define PUBKEY_CTL1_DST_OFFSET_LSB 0 +#define PUBKEY_CTL1_DST_OFFSET_BITS THREE_BITS +#define PUBKEY_CTL1_DST_OFFSET_MASK \ + (PUBKEY_CTL1_DST_OFFSET_BITS << PUBKEY_CTL1_DST_OFFSET_LSB) + +/* + * Upon completion of operation, the RSA block returns a 2-word free descriptor + * in the following format: + * + * 63 61 60 54 53 52 51 49 48 40 39 5 4 3 2 0 + * ------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | Destination Id | 2'b00 | Desc Ctrl | Control Error | Source Address | Software Scratch0 | Global src data offset | + * ------------------------------------------------------------------------------------------------------------------------- + * | Ctrl | Destination Id | 2'b00 | Desc Ctrl | Data Error | Dest Address | Software 
Scratch1 | Global dst data offset | + * ------------------------------------------------------------------------------------------------------------------------- + * + * The Control and Data Error codes are enumerated below + * + * Error conditions + * ================ + * + * Control Error Code Control Error Condition + * ------------------ ----------------------- + * 9'h000 No Error + * 9'h001 Undefined Op Class + * 9'h002 Undefined ECC TYPE (ECC only) + * 9'h004 Undefined ECC FUNCTION (ECC only) + * 9'h008 ECC timeout (ECC only) + * 9'h010 UNUSED + * 9'h020 UNUSED + * 9'h040 UNUSED + * 9'h080 Data Read Error + * 9'h100 Descriptor Ctrl Field Error (D0.Ctrl != SOP || D1.Ctrl != EOP) + * + * Data Error Code Data Error Condition + * --------------- -------------------- + * 9'h000 No Error + * 9'h001 Exponent Width > Block Width (RSA Only) + * 9'h002 Modulus Width > Block Width (RSA Only) + * 9'h004 UNUSED + * 9'h008 UNUSED + * 9'h010 UNUSED + * 9'h020 UNUSED + * 9'h040 UNUSED + * 9'h080 Data Read Error + * 9'h100 UNUSED + */ + +/* + * Result Data Word for Message Ring Descriptor + */ + +/* #define PUBKEY_RSLT_CTL_CTL */ +#define PUBKEY_RSLT_CTL_CTL_LSB 61 +#define PUBKEY_RSLT_CTL_CTL_BITS THREE_BITS +#define PUBKEY_RSLT_CTL_CTL_MASK \ + (PUBKEY_RSLT_CTL_CTL_BITS << PUBKEY_RSLT_CTL_CTL_LSB) + +/* #define PUBKEY_RSLT_CTL_DST_ID */ +#define PUBKEY_RSLT_CTL_DST_ID_LSB 54 +#define PUBKEY_RSLT_CTL_DST_ID_BITS SEVEN_BITS +#define PUBKEY_RSLT_CTL_DST_ID_MASK \ + (PUBKEY_RSLT_CTL_DST_ID_BITS << PUBKEY_RSLT_CTL_DST_ID_LSB) + +/* #define PUBKEY_RSLT_CTL_DESC_CTL */ +#define PUBKEY_RSLT_CTL_DESC_CTL_LSB 49 +#define PUBKEY_RSLT_CTL_DESC_CTL_BITS THREE_BITS +#define PUBKEY_RSLT_CTL_DESC_CTL_MASK \ + (PUBKEY_RSLT_CTL_DESC_CTL_BITS << PUBKEY_RSLT_CTL_DESC_CTL_LSB) + + +/* #define PUBKEY_RSLT_CTL_ERROR */ +#define PUBKEY_RSLT_CTL_ERROR_LSB 40 +#define PUBKEY_RSLT_CTL_ERROR_BITS NINE_BITS +#define PUBKEY_RSLT_CTL_ERROR_MASK \ + (PUBKEY_RSLT_CTL_ERROR_BITS << PUBKEY_RSLT_CTL_ERROR_LSB) + +/* #define PUBKEY_RSLT_CTL_SRCADDR */ +#define PUBKEY_RSLT_CTL_SRCADDR_LSB 0 +#define PUBKEY_RSLT_CTL_SRCADDR_BITS FOURTY_BITS +#define PUBKEY_RSLT_CTL_SRCADDR_MASK \ + (PUBKEY_RSLT_CTL_SRCADDR_BITS << PUBKEY_RSLT_CTL_SRCADDR_LSB) + + +/* #define PUBKEY_RSLT_DATA_CTL */ +#define PUBKEY_RSLT_DATA_CTL_LSB 61 +#define PUBKEY_RSLT_DATA_CTL_BITS THREE_BITS +#define PUBKEY_RSLT_DATA_CTL_MASK \ + (PUBKEY_RSLT_DATA_CTL_BITS << PUBKEY_RSLT_DATA_CTL_LSB) + +/* #define PUBKEY_RSLT_DATA_DST_ID */ +#define PUBKEY_RSLT_DATA_DST_ID_LSB 54 +#define PUBKEY_RSLT_DATA_DST_ID_BITS SEVEN_BITS +#define PUBKEY_RSLT_DATA_DST_ID_MASK \ + (PUBKEY_RSLT_DATA_DST_ID_BITS << PUBKEY_RSLT_DATA_DST_ID_LSB) + +/* #define PUBKEY_RSLT_DATA_DESC_CTL */ +#define PUBKEY_RSLT_DATA_DESC_CTL_LSB 49 +#define PUBKEY_RSLT_DATA_DESC_CTL_BITS THREE_BITS +#define PUBKEY_RSLT_DATA_DESC_CTL_MASK \ + (PUBKEY_RSLT_DATA_DESC_CTL_BITS << PUBKEY_RSLT_DATA_DESC_CTL_LSB) + +/* #define PUBKEY_RSLT_DATA_ERROR */ +#define PUBKEY_RSLT_DATA_ERROR_LSB 40 +#define PUBKEY_RSLT_DATA_ERROR_BITS NINE_BITS +#define PUBKEY_RSLT_DATA_ERROR_MASK \ + (PUBKEY_RSLT_DATA_ERROR_BITS << PUBKEY_RSLT_DATA_ERROR_LSB) + +/* #define PUBKEY_RSLT_DATA_DSTADDR */ +#define PUBKEY_RSLT_DATA_DSTADDR_LSB 40 +#define PUBKEY_RSLT_DATA_DSTADDR_BITS FOURTY_BITS +#define PUBKEY_RSLT_DATA_DSTADDR_MASK \ + (PUBKEY_RSLT_DATA_DSTADDR_BITS << PUBKEY_RSLT_DATA_DSTADDR_LSB) + +/* + * ****************************************************************** + * RSA Block - Data Error Code and Conditions + * 
****************************************************************** + */ + +#define PK_CTL_ERR_NONE 0x0000 /* No Error */ +#define PK_CTL_ERR_OP_CLASS 0x0001 /* Undefined Op Class */ +#define PK_CTL_ERR_ECC_TYPE 0x0002 /* Undefined ECC TYPE (ECC only) */ +#define PK_CTL_ERR_ECC_FUNCT 0x0004 /* Undefined ECC FUNCTION (ECC only) */ +#define PK_CTL_ERR_ECC_TIMEOUT 0x0008 /* ECC timeout (ECC only) */ +#define PK_CTL_ERR_READ 0x0080 /* Data Read Error */ +#define PK_CTL_ERR_DESC 0x0100 /* Descriptor Ctrl Field Error + * (D0.Ctrl != SOP || D1.Ctrl != EOP) */ +#define PK_CTL_ERR_TIMEOUT 0x1000 /* Message Responce Timeout */ + +#define PK_DATA_ERR_NONE 0x0000 /* No Error */ +#define PK_DATA_ERR_EXP_WIDTH 0x0001 /* Exponent Width > Block Width */ +#define PK_DATA_ERR_MOD_WIDTH 0x0002 /* Modulus Width > Block Width */ +#define PK_DATA_ERR_READ 0x0080 /* Data Read Error */ + + +/* + * This defines the RSA data format + */ +/* + * typedef struct RSAData_s { + * uint64_t Constant; + * uint64_t Exponent; + * uint64_t Modulus; + * uint64_t Message; + *} RSAData_t, *RSAData_pt; + * + * typedef RSAData_t DHData_t; + * typedef RSAData_pt DHData_pt; + */ + +typedef struct UserPubData_s { + uint8_t *source; + uint8_t *user_result; + uint32_t result_length; +} UserPubData_t, *UserPubData_pt; + +typedef struct pubkey_desc { + OperationDescriptor_t op_ctl; /* size is cacheline */ + uint8_t source[1024]; + uint8_t dest[256]; /* 1024 makes cacheline-aligned */ + uint64_t control0; + uint64_t control1; + uint64_t ctl_result; + uint64_t data_result; + struct pubkey_desc *alloc; + UserPubData_t kern; /* ptrs for temp buffers */ + //volatile atomic_t flag_complete; + //struct semaphore sem_complete; + //wait_queue_t submit_wait; +} pubkey_desc_t, *pubkey_desc_pt; + +/* + * KASUMI F8 and F9 use the IV0/IV1 fields : + * + * 63 41 40 39 37 36 32 31 0 + * ---------------------------------------------------------------------------- + * | |FX/DIRECTION| | F8/BEARER | F8/COUNT | IV0 + * ---------------------------------------------------------------------------- + * 1 5 32 + * + * 63 32 31 0 + * ---------------------------------------------------------------------------- + * | F9/FRESH | F9/COUNT | IV1 + * ---------------------------------------------------------------------------- + * 32 32 + */ +#endif /* _XLR_SEC_DESC_H_ */ diff --git a/sys/mips/rmi/dev/sec/rmilib.c b/sys/mips/rmi/dev/sec/rmilib.c new file mode 100644 index 000000000000..219ef6c9383c --- /dev/null +++ b/sys/mips/rmi/dev/sec/rmilib.c @@ -0,0 +1,3172 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * RMI_BSD */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + + +#include +#include +#include +#include +#include +#include +#include +#include + + +// static int msgrng_stnid_pk0 = MSGRNG_STNID_PK0; + +/*#define RMI_SEC_DEBUG */ + +#define SMP_CACHE_BYTES XLR_CACHELINE_SIZE +#define NUM_CHUNKS(size, bits) ( ((size)>>(bits)) + (((size)&((1<<(bits))-1))?1:0) ) + +static const char nib2hex[] = "0123456789ABCDEF"; +symkey_desc_pt g_desc; +struct xlr_sec_command *g_cmd; + +#ifdef XLR_SEC_CMD_DEBUG +static void + decode_symkey_desc(symkey_desc_pt desc, uint32_t cfg_vector); + +#endif + +void print_buf(char *desc, void *data, int len); + +static int + xlr_sec_cipher_hash_command(xlr_sec_io_pt op, symkey_desc_pt desc, uint8_t); + +static xlr_sec_error_t +xlr_sec_setup_descriptor(xlr_sec_io_pt op, + unsigned int flags, + symkey_desc_pt desc, + uint32_t * cfg_vector); + +static +xlr_sec_error_t +xlr_sec_setup_packet(xlr_sec_io_pt op, + symkey_desc_pt desc, + unsigned int flags, + uint64_t * data, + PacketDescriptor_pt pkt_desc, + ControlDescriptor_pt ctl_desc, + uint32_t vector, + PacketDescriptor_pt next_pkt_desc, + uint8_t multi_frag_flag); + +static int + xlr_sec_submit_message(symkey_desc_pt desc, uint32_t cfg_vector); + +static +xlr_sec_error_t +xlr_sec_setup_cipher(xlr_sec_io_pt op, + ControlDescriptor_pt ctl_desc, + uint32_t * vector); + +static +xlr_sec_error_t +xlr_sec_setup_digest(xlr_sec_io_pt op, + ControlDescriptor_pt ctl_desc, + uint32_t * vector); + +static +xlr_sec_error_t +xlr_sec_setup_cksum(xlr_sec_io_pt op, + ControlDescriptor_pt ctl_desc); + +static +xlr_sec_error_t +xlr_sec_control_setup(xlr_sec_io_pt op, + unsigned int flags, + uint64_t * control, + ControlDescriptor_pt ctl_desc, + xlr_sec_drv_user_t * user, + uint32_t vector); + + +xlr_sec_error_t +xlr_sec_submit_op(symkey_desc_pt desc); + +static void xlr_sec_free_desc(symkey_desc_pt desc); + +void +xlr_sec_msgring_handler(int bucket, int size, int code, int stid, + struct msgrng_msg *msg, void *data); + + +void +xlr_sec_init(struct xlr_sec_softc *sc) +{ + unsigned int i; + xlr_reg_t *mmio; + + + mmio = sc->mmio = xlr_io_mmio(XLR_IO_SECURITY_OFFSET); + + xlr_write_reg(mmio, SEC_DMA_CREDIT, SEC_DMA_CREDIT_CONFIG); + + + xlr_write_reg(mmio, SEC_CONFIG2, SEC_CFG2_ROUND_ROBIN_ON); + + for (i = 0; i < 8; i++) + xlr_write_reg(mmio, + SEC_MSG_BUCKET0_SIZE + i, + xlr_is_xls() ? + xls_bucket_sizes.bucket[MSGRNG_STNID_SEC + i] : + bucket_sizes.bucket[MSGRNG_STNID_SEC + i]); + + for (i = 0; i < 128; i++) + xlr_write_reg(mmio, + SEC_CC_CPU0_0 + i, + xlr_is_xls() ? 
+ xls_cc_table_sec.counters[i >> 3][i & 0x07] : + cc_table_sec.counters[i >> 3][i & 0x07]); + + + /* + * Register a bucket handler with the phoenix messaging subsystem + * For now, register handler for bucket 0->5 in msg stn 0 + */ + if (register_msgring_handler(TX_STN_SAE, xlr_sec_msgring_handler, NULL)) { + panic("Couldn't register msgring handler 0\n"); + } + return; +} + + + +int +xlr_sec_setup(struct xlr_sec_session *ses, + struct xlr_sec_command *cmd, + symkey_desc_pt desc +) +{ + xlr_sec_io_pt op; + int size, ret_val; + int iv_len; + + + desc->ses = ses; + op = &cmd->op; + if (op == NULL) + return (-ENOMEM); + + + + desc->ctl_desc.instruction = 0; + memset(&desc->ctl_desc.cipherHashInfo, 0, sizeof(CipherHashInfo_t)); + desc->control = 0; + + desc->pkt_desc[0].srcLengthIVOffUseIVNext = 0; + desc->pkt_desc[0].dstDataSettings = 0; + desc->pkt_desc[0].authDstNonceLow = 0; + desc->pkt_desc[0].ckSumDstNonceHiCFBMaskLLWMask = 0; + desc->pkt_desc[1].srcLengthIVOffUseIVNext = 0; + desc->pkt_desc[1].dstDataSettings = 0; + desc->pkt_desc[1].authDstNonceLow = 0; + desc->pkt_desc[1].ckSumDstNonceHiCFBMaskLLWMask = 0; + + desc->data = 0; + desc->ctl_result = 0; + desc->data_result = 0; + + + if (op->flags & XLR_SEC_FLAGS_HIGH_PRIORITY) + if (!xlr_is_xls()) + desc->op_ctl.stn_id++; + + desc->user.user_src = (uint8_t *) (unsigned long)op->source_buf; + desc->user.user_dest = (uint8_t *) (unsigned long)op->dest_buf; + desc->user.user_auth = (uint8_t *) (unsigned long)op->auth_dest; + + + if ((op->cipher_type == XLR_SEC_CIPHER_TYPE_ARC4) && + (!op->rc4_state && (op->rc4_loadstate || op->rc4_savestate))) { + printf(" ** Load/Save State and no State **"); + xlr_sec_free_desc(desc); + return (-EINVAL); + } + desc->user.user_state = (uint8_t *) (unsigned long)op->rc4_state; + + + switch (op->cipher_type) { + case XLR_SEC_CIPHER_TYPE_NONE: + iv_len = 0; + break; + case XLR_SEC_CIPHER_TYPE_DES: + case XLR_SEC_CIPHER_TYPE_3DES: + iv_len = XLR_SEC_DES_IV_LENGTH; + break; + case XLR_SEC_CIPHER_TYPE_AES128: + case XLR_SEC_CIPHER_TYPE_AES192: + case XLR_SEC_CIPHER_TYPE_AES256: + iv_len = XLR_SEC_AES_IV_LENGTH; + break; + case XLR_SEC_CIPHER_TYPE_ARC4: + iv_len = XLR_SEC_ARC4_IV_LENGTH; + break; + case XLR_SEC_CIPHER_TYPE_KASUMI_F8: + iv_len = XLR_SEC_KASUMI_F8_IV_LENGTH; + break; + + default: + printf(" ** Undefined Cipher Type **"); + xlr_sec_free_desc(desc); + return (-EINVAL); + } + + + + + size = op->source_buf_size + iv_len; + + /* + * make sure that there are enough bytes for aes based stream + * ciphers + */ + if (op->cipher_mode == XLR_SEC_CIPHER_MODE_F8 || + op->cipher_mode == XLR_SEC_CIPHER_MODE_CTR) + size += XLR_SEC_AES_BLOCK_SIZE - 1; + + if (op->cipher_type == XLR_SEC_CIPHER_TYPE_NONE) { + if (op->source_buf_size != 0) { + memcpy(desc->user.aligned_src, (uint8_t *) (unsigned long)op->source_buf, + op->source_buf_size); + } + } else { + if (ses->multi_frag_flag) { + /* copy IV into temporary kernel source buffer */ + memcpy(desc->user.aligned_src, &op->initial_vector[0], iv_len); + + /* copy input data to temporary kernel source buffer */ + memcpy((uint8_t *) (desc->user.aligned_src + iv_len), + (uint8_t *) (unsigned long)op->source_buf, SEC_MAX_FRAG_LEN); + + desc->next_src_len = op->source_buf_size - SEC_MAX_FRAG_LEN; + memcpy((uint8_t *) (desc->next_src_buf), + (uint8_t *) (unsigned long)(op->source_buf + SEC_MAX_FRAG_LEN), + desc->next_src_len); + + op->source_buf_size = SEC_MAX_FRAG_LEN; + op->source_buf_size += iv_len; + } else { + /* copy IV into temporary kernel source buffer */ + 
memcpy(desc->user.aligned_src, &op->initial_vector[0], iv_len); + + /* copy input data to temporary kernel source buffer */ + memcpy((uint8_t *) (desc->user.aligned_src + iv_len), + (uint8_t *) (unsigned long)op->source_buf, op->source_buf_size); + op->source_buf_size += iv_len; + } + } + + + + /* Set source to new kernel space */ + op->source_buf = (uint64_t) (unsigned long)desc->user.aligned_src; + + + /* + * Build new dest buffer, for Cipher output only + */ + if (op->cipher_type == XLR_SEC_CIPHER_TYPE_NONE) { + /* + * Digest Engine *NEEDS* this, otherwise it will write at + * 0[x] + */ + op->dest_buf = (uint64_t) (unsigned long)desc->user.aligned_src; + } else { + /* DEBUG -dpk */ + XLR_SEC_CMD_DIAG("dest_buf_size = %d \n", op->dest_buf_size); + + size = op->dest_buf_size + iv_len; + + /* + * make sure that there are enough bytes for aes based + * stream ciphers + */ + if (op->cipher_mode == XLR_SEC_CIPHER_MODE_F8 || + op->cipher_mode == XLR_SEC_CIPHER_MODE_CTR) + size += XLR_SEC_AES_BLOCK_SIZE - 1; + op->dest_buf = (uint64_t) (unsigned long)desc->user.aligned_dest; + + } + + ret_val = xlr_sec_cipher_hash_command(op, desc, ses->multi_frag_flag); + + return (ret_val); + +} + +static int +xlr_sec_cipher_hash_command(xlr_sec_io_pt op, symkey_desc_pt desc, + uint8_t multi_frag_flag) +{ + xlr_sec_error_t err; + uint32_t cfg_vector; + unsigned int setup_flags = 0; + + err = XLR_SEC_ERR_NONE; + cfg_vector = 0; + + if ((op->digest_type == XLR_SEC_DIGEST_TYPE_NONE) && + (op->cipher_type != XLR_SEC_CIPHER_TYPE_ARC4) && + (op->cipher_mode != XLR_SEC_CIPHER_MODE_F8) && + (op->cipher_type != XLR_SEC_CIPHER_TYPE_KASUMI_F8) && + (op->source_buf_size & 0x7)) { + printf("Invalid Cipher Block Size, data len=%d\n", + op->source_buf_size); + return (-EINVAL); + } + do { + + if ((op->cipher_type == XLR_SEC_CIPHER_TYPE_3DES) && + (op->cipher_op == XLR_SEC_CIPHER_OP_DECRYPT)) + setup_flags = XLR_SEC_SETUP_OP_FLIP_3DES_KEY; + + err = xlr_sec_setup_descriptor(op, + setup_flags, + desc, &cfg_vector); + if (err != XLR_SEC_ERR_NONE) + break; + + err = xlr_sec_setup_packet(op, + desc, + op->digest_type != XLR_SEC_DIGEST_TYPE_NONE ? 
+ XLR_SEC_SETUP_OP_CIPHER_HMAC : 0, + &desc->data, + &desc->pkt_desc[0], + &desc->ctl_desc, + cfg_vector, + &desc->pkt_desc[1], + multi_frag_flag); + if (err != XLR_SEC_ERR_NONE) + break; + } while (0); + if (err != XLR_SEC_ERR_NONE) { + return (EINVAL); + } + err = xlr_sec_submit_message(desc, cfg_vector); + return err; +} + + +static xlr_sec_error_t +xlr_sec_setup_descriptor(xlr_sec_io_pt op, + unsigned int flags, + symkey_desc_pt desc, + uint32_t * cfg_vector) +{ + xlr_sec_error_t err; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_descriptor: ENTER\n"); + + + if ((err = xlr_sec_setup_cipher(op, &desc->ctl_desc, cfg_vector)) != XLR_SEC_ERR_NONE) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_descriptor: xlr_sec_setup_cipher done err %d\n", + (int)err); + return err; + } + if (op->digest_type != XLR_SEC_DIGEST_TYPE_NONE) { + if ((err = xlr_sec_setup_digest(op, &desc->ctl_desc, cfg_vector)) != XLR_SEC_ERR_NONE) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_descriptor: xlr_sec_setup_digest done err %d\n", + (int)err); + return err; + } + } + if ((err = xlr_sec_setup_cksum(op, &desc->ctl_desc)) != XLR_SEC_ERR_NONE) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_descriptor: xlr_sec_setup_cksum done err %d\n", + (int)err); + return err; + } + if ((err = xlr_sec_control_setup(op, + flags, + &desc->control, + &desc->ctl_desc, + &desc->user, + *cfg_vector)) != XLR_SEC_ERR_NONE) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_descriptor: xlr_sec_control_setup done err %d\n", + (int)err); + return err; + } + XLR_SEC_CMD_DIAG("xlr_sec_setup_descriptor: DONE\n"); + return err; +} + + + +static +xlr_sec_error_t +xlr_sec_setup_packet(xlr_sec_io_pt op, + symkey_desc_pt desc, + unsigned int flags, + uint64_t * data, + PacketDescriptor_pt pkt_desc, + ControlDescriptor_pt ctl_desc, + uint32_t vector, + PacketDescriptor_pt next_pkt_desc, + uint8_t multi_frag_flag) +{ + uint32_t len, next_len = 0, len_dwords, last_u64_bytes; + uint64_t addr; + uint64_t seg_addr, next_seg_addr = 0; + uint64_t byte_offset, global_offset; + uint32_t cipher_offset_dwords; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ENTER vector = %04x\n", vector); + + /* physical address of the source buffer */ + addr = (uint64_t) vtophys((void *)(unsigned long)op->source_buf); + /* cache-aligned base of the source buffer */ + seg_addr = (addr & ~(SMP_CACHE_BYTES - 1)); + /* offset in bytes to the source buffer start from the segment base */ + byte_offset = addr - seg_addr; + /* global offset: 0-7 bytes */ + global_offset = byte_offset & 0x7; + + + /* + * op->source_buf_size is expected to be the Nb double words to + * stream in (Including Segment address->CP/IV/Auth/CkSum offsets) + */ + + /* + * adjusted length of the whole thing, accounting for the added + * head, sans global_offset (per Paul S.) + */ + + len = op->source_buf_size + byte_offset - global_offset; + if (multi_frag_flag) { + next_seg_addr = (uint64_t) vtophys((void *)(unsigned long)(desc->next_src_buf)); + next_seg_addr = (next_seg_addr & ~(SMP_CACHE_BYTES - 1)); + next_len = desc->next_src_len; + } + /* length of the whole thing in dwords */ + len_dwords = NUM_CHUNKS(len, 3); + /* number of bytes in the last chunk (len % 8) */ + last_u64_bytes = len & 0x07; + + if (op->cipher_offset & 0x7) { + printf("** cipher_offset(%d) fails 64-bit word alignment **", + op->cipher_offset); + + return XLR_SEC_ERR_CIPHER_MODE; /* ! fix ! */ + } + /* + * global_offset is only three bits, so work the number of the whole + * 8-byte words into the global offset. 
both offset and + * cipher_offset are byte counts + */ + cipher_offset_dwords = (op->iv_offset + byte_offset) >> 3; + + + if (op->cipher_mode == XLR_SEC_CIPHER_MODE_F8 || + op->cipher_mode == XLR_SEC_CIPHER_MODE_CTR) { + if (multi_frag_flag) { + int nlhmac = ((op->source_buf_size + global_offset + 7 - op->cipher_offset) >> 3) & 1; + + pkt_desc->srcLengthIVOffUseIVNext = + + FIELD_VALUE(PKT_DSC_HASHBYTES, len & 7) | + FIELD_VALUE(PKT_DSC_IVOFF, cipher_offset_dwords) | + FIELD_VALUE(PKT_DSC_PKTLEN, nlhmac + ((len + 7) >> 3)) | + FIELD_VALUE(PKT_DSC_NLHMAC, nlhmac) | + FIELD_VALUE(PKT_DSC_BREAK, 0) | + FIELD_VALUE(PKT_DSC_WAIT, 1) | + FIELD_VALUE(PKT_DSC_NEXT, 1) | + FIELD_VALUE(PKT_DSC_SEGADDR, seg_addr >> (PKT_DSC_SEGADDR_LSB)) | + FIELD_VALUE(PKT_DSC_SEGOFFSET, global_offset); + } else { + int nlhmac = ((op->source_buf_size + global_offset + 7 - op->cipher_offset) >> 3) & 1; + + pkt_desc->srcLengthIVOffUseIVNext = + FIELD_VALUE(PKT_DSC_HASHBYTES, len & 7) | + FIELD_VALUE(PKT_DSC_IVOFF, cipher_offset_dwords) | + FIELD_VALUE(PKT_DSC_PKTLEN, nlhmac + ((len + 7) >> 3)) | + FIELD_VALUE(PKT_DSC_NLHMAC, nlhmac) | + FIELD_VALUE(PKT_DSC_BREAK, 0) | + FIELD_VALUE(PKT_DSC_WAIT, 0) | + FIELD_VALUE(PKT_DSC_SEGADDR, seg_addr >> (PKT_DSC_SEGADDR_LSB)) | + FIELD_VALUE(PKT_DSC_SEGOFFSET, global_offset); + + } + } else { + if (multi_frag_flag) { + pkt_desc->srcLengthIVOffUseIVNext = + + FIELD_VALUE(PKT_DSC_HASHBYTES, len & 7) | + FIELD_VALUE(PKT_DSC_IVOFF, cipher_offset_dwords) | + FIELD_VALUE(PKT_DSC_PKTLEN, (len + 7) >> 3) | + FIELD_VALUE(PKT_DSC_BREAK, 0) | + FIELD_VALUE(PKT_DSC_WAIT, 0) | + FIELD_VALUE(PKT_DSC_NEXT, 1) | + FIELD_VALUE(PKT_DSC_SEGADDR, seg_addr >> (PKT_DSC_SEGADDR_LSB)) | + FIELD_VALUE(PKT_DSC_SEGOFFSET, global_offset); + + + next_pkt_desc->srcLengthIVOffUseIVNext = + FIELD_VALUE(PKT_DSC_HASHBYTES, (next_len & 7)) | + FIELD_VALUE(PKT_DSC_IVOFF, 0) | + FIELD_VALUE(PKT_DSC_PKTLEN, (next_len + 7) >> 3) | + FIELD_VALUE(PKT_DSC_BREAK, 0) | + FIELD_VALUE(PKT_DSC_WAIT, 0) | + FIELD_VALUE(PKT_DSC_NEXT, 0) | + FIELD_VALUE(PKT_DSC_SEGADDR, next_seg_addr >> (PKT_DSC_SEGADDR_LSB)) | + FIELD_VALUE(PKT_DSC_SEGOFFSET, 0); + + + } else { + pkt_desc->srcLengthIVOffUseIVNext = + FIELD_VALUE(PKT_DSC_HASHBYTES, len & 7) | + FIELD_VALUE(PKT_DSC_IVOFF, cipher_offset_dwords) | + FIELD_VALUE(PKT_DSC_PKTLEN, (len + 7) >> 3) | + FIELD_VALUE(PKT_DSC_BREAK, 0) | + FIELD_VALUE(PKT_DSC_WAIT, 0) | + FIELD_VALUE(PKT_DSC_SEGADDR, seg_addr >> (PKT_DSC_SEGADDR_LSB)) | + FIELD_VALUE(PKT_DSC_SEGOFFSET, global_offset); + + + } + } + + switch (op->pkt_hmac) { + case XLR_SEC_LOADHMACKEY_MODE_OLD: + CLEAR_SET_FIELD(pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_LOADHMACKEY, PKT_DSC_LOADHMACKEY_OLD); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_LOADHMACKEY, PKT_DSC_LOADHMACKEY_OLD); + + } + break; + case XLR_SEC_LOADHMACKEY_MODE_LOAD: + CLEAR_SET_FIELD(pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_LOADHMACKEY, PKT_DSC_LOADHMACKEY_LOAD); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_LOADHMACKEY, PKT_DSC_LOADHMACKEY_LOAD); + + } + break; + default: + if (vector & (XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_F9)) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_LOADHMACKEY_MODE EXIT\n"); + return XLR_SEC_ERR_LOADHMACKEY_MODE; + } + break; + } + + switch (op->pkt_hash) { + case XLR_SEC_PADHASH_PADDED: + CLEAR_SET_FIELD(pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_PADHASH, PKT_DSC_PADHASH_PADDED); + if 
(multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_PADHASH, PKT_DSC_PADHASH_PADDED); + } + break; + case XLR_SEC_PADHASH_PAD: + CLEAR_SET_FIELD(pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_PADHASH, PKT_DSC_PADHASH_PAD); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_PADHASH, PKT_DSC_PADHASH_PAD); + } + break; + default: + if (vector & (XLR_SEC_VECTOR_MAC | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_HMAC2)) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_PADHASH_MODE EXIT\n"); + return XLR_SEC_ERR_PADHASH_MODE; + } + break; + } + + switch (op->pkt_iv) { + case XLR_SEC_PKT_IV_OLD: + CLEAR_SET_FIELD(pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_IV, PKT_DSC_IV_OLD); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_IV, PKT_DSC_IV_OLD); + + } + break; + case XLR_SEC_PKT_IV_NEW: + CLEAR_SET_FIELD(pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_IV, PKT_DSC_IV_NEW); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->srcLengthIVOffUseIVNext, + PKT_DSC_IV, PKT_DSC_IV_NEW); + + } + break; + default: + if (vector & XLR_SEC_VECTOR_CIPHER) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_PKT_IV_MODE EXIT\n"); + return XLR_SEC_ERR_PKT_IV_MODE; + } + break; + } + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: src_buf=%llx phys_src_buf=%llx \n", + (unsigned long long)op->source_buf, (unsigned long long)addr); + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: seg_addr=%llx offset=%lld\n", + (unsigned long long)seg_addr, (unsigned long long)byte_offset); + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: global src offset: %d, iv_offset=%d\n", + cipher_offset_dwords, op->iv_offset); + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: src_buf_sz=%d PKT_LEN=%d\n", + op->source_buf_size, len_dwords); + + /* + * same operation with the destination. cipher offset affects this, + * as well + */ + if (multi_frag_flag) { + next_seg_addr = (uint64_t) vtophys((void *)(unsigned long)(desc->next_dest_buf)); + next_seg_addr = (next_seg_addr & ~(SMP_CACHE_BYTES - 1)); + } + addr = (uint64_t) vtophys((void *)(unsigned long)op->dest_buf); + seg_addr = (addr & ~(SMP_CACHE_BYTES - 1)); + byte_offset = addr - seg_addr; + global_offset = byte_offset & 0x7; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: dest_buf=%llx phys_dest_buf=%llx \n", + (unsigned long long)op->dest_buf, (unsigned long long)addr); + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: seg_addr=%llx offset=%lld\n", + (unsigned long long)seg_addr, (unsigned long long)byte_offset); + + /* + * Dest Address = (Cipher Dest Address) + (Cipher Offset) + (Global + * Dest Data Offset) + * + * Cipher Dest Address - Cache-line (0xffffffffe0) Cipher Offset - + * Which (64-bit) Word in Cacheline (0-3) Global Dest Data Offset - + * Number of Bytes in (64-bit) Word before data + * + * It must be set for Digest-only Ops, since the Digest engine will + * write data to this address. 
+ */ + cipher_offset_dwords = (op->cipher_offset + byte_offset) >> 3; + + + pkt_desc->dstDataSettings = + /* SYM_OP, HASHSRC */ + FIELD_VALUE(PKT_DSC_CPHROFF, cipher_offset_dwords) | + FIELD_VALUE(PKT_DSC_HASHOFF, (op->digest_offset + byte_offset) >> 3) | + FIELD_VALUE(PKT_DSC_CPHR_DST_ADDR, seg_addr) | + FIELD_VALUE(PKT_DSC_CPHR_DST_DWOFFSET, 0) | + FIELD_VALUE(PKT_DSC_CPHR_DST_OFFSET, global_offset); + + if (multi_frag_flag) { + next_pkt_desc->dstDataSettings = + /* SYM_OP, HASHSRC */ + FIELD_VALUE(PKT_DSC_CPHROFF, cipher_offset_dwords) | + FIELD_VALUE(PKT_DSC_HASHOFF, (op->digest_offset + byte_offset) >> 3) | + FIELD_VALUE(PKT_DSC_CPHR_DST_ADDR, next_seg_addr) | + FIELD_VALUE(PKT_DSC_CPHR_DST_DWOFFSET, 0) | + FIELD_VALUE(PKT_DSC_CPHR_DST_OFFSET, global_offset); + + } + if (op->cipher_type == XLR_SEC_CIPHER_TYPE_ARC4) + pkt_desc->dstDataSettings |= FIELD_VALUE(PKT_DSC_ARC4BYTECOUNT, last_u64_bytes); + + if (op->cipher_type != XLR_SEC_CIPHER_TYPE_NONE) { + switch (op->cipher_op) { + case XLR_SEC_CIPHER_OP_ENCRYPT: + CLEAR_SET_FIELD(pkt_desc->dstDataSettings, + PKT_DSC_SYM_OP, PKT_DSC_SYM_OP_ENCRYPT); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_SYM_OP, PKT_DSC_SYM_OP_ENCRYPT); + + } + break; + case XLR_SEC_CIPHER_OP_DECRYPT: + CLEAR_SET_FIELD(pkt_desc->dstDataSettings, + PKT_DSC_SYM_OP, PKT_DSC_SYM_OP_DECRYPT); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_SYM_OP, PKT_DSC_SYM_OP_DECRYPT); + + } + break; + default: + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_CIPHER_OP EXIT\n"); + return XLR_SEC_ERR_CIPHER_OP; + } + } + if (flags & XLR_SEC_SETUP_OP_HMAC) { + switch (op->digest_src) { + case XLR_SEC_DIGEST_SRC_DMA: + CLEAR_SET_FIELD(pkt_desc->dstDataSettings, + PKT_DSC_HASHSRC, PKT_DSC_HASHSRC_DMA); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_HASHSRC, PKT_DSC_HASHSRC_DMA); + + } + break; + case XLR_SEC_DIGEST_SRC_CPHR: + CLEAR_SET_FIELD(pkt_desc->dstDataSettings, + PKT_DSC_HASHSRC, PKT_DSC_HASHSRC_CIPHER); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_HASHSRC, PKT_DSC_HASHSRC_CIPHER); + } + break; + default: + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_DIGEST_SRC EXIT\n"); + return XLR_SEC_ERR_DIGEST_SRC; + } + } + if (op->cksum_type != XLR_SEC_CKSUM_TYPE_NOP) { + switch (op->cksum_src) { + case XLR_SEC_CKSUM_SRC_DMA: + CLEAR_SET_FIELD(pkt_desc->dstDataSettings, + PKT_DSC_CKSUMSRC, PKT_DSC_CKSUMSRC_DMA); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_CKSUMSRC, PKT_DSC_CKSUMSRC_DMA); + } + break; + case XLR_SEC_CKSUM_SRC_CIPHER: + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_CKSUMSRC, PKT_DSC_CKSUMSRC_CIPHER); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->dstDataSettings, + PKT_DSC_CKSUMSRC, PKT_DSC_CKSUMSRC_CIPHER); + } + break; + default: + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_CKSUM_SRC EXIT\n"); + return XLR_SEC_ERR_CKSUM_SRC; + } + } + pkt_desc->ckSumDstNonceHiCFBMaskLLWMask = + FIELD_VALUE(PKT_DSC_HASH_BYTE_OFF, (op->digest_offset & 0x7)) | + FIELD_VALUE(PKT_DSC_PKTLEN_BYTES, 0) | + /* NONCE_HI, PKT_DSC_LASTWORD, CFB_MASK, CKSUM_DST_ADDR */ + FIELD_VALUE(PKT_DSC_IV_OFF_HI, 0); + + if (multi_frag_flag) { + next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask = + FIELD_VALUE(PKT_DSC_HASH_BYTE_OFF, (op->digest_offset & 0x7)) | + FIELD_VALUE(PKT_DSC_PKTLEN_BYTES, 0) | + /* NONCE_HI, PKT_DSC_LASTWORD, CFB_MASK, CKSUM_DST_ADDR */ + FIELD_VALUE(PKT_DSC_IV_OFF_HI, 0); + + } + 
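+	/*
+	 * Worked example of the decomposition above (illustrative only,
+	 * assuming the 32-byte cache line implied by the 0xffffffffe0 mask):
+	 * if vtophys(op->dest_buf) = 0x1fc0002c and op->cipher_offset = 0,
+	 * then seg_addr = 0x1fc00020, byte_offset = 0xc, global_offset = 4
+	 * and cipher_offset_dwords = 1, so the engine writes to cache line
+	 * 0x1fc00020, 64-bit word 1, byte 4 -- i.e. back at 0x1fc0002c.
+	 */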
switch (op->pkt_lastword) { + case XLR_SEC_LASTWORD_128: + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_128); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_128); + + } + break; + case XLR_SEC_LASTWORD_96MASK: + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_96MASK); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_96MASK); + } + break; + case XLR_SEC_LASTWORD_64MASK: + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_64MASK); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_64MASK); + } + break; + case XLR_SEC_LASTWORD_32MASK: + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_32MASK); + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_LASTWORD, PKT_DSC_LASTWORD_32MASK); + } + break; + default: + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: ERR_LASTWORD_MODE EXIT\n"); + return XLR_SEC_ERR_LASTWORD_MODE; + } + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_CFB_MASK, op->cfb_mask); + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_NONCE_HI, htonl(op->nonce) >> 24); + CLEAR_SET_FIELD(pkt_desc->authDstNonceLow, + PKT_DSC_NONCE_LOW, htonl(op->nonce) & 0xffffff); + + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_CFB_MASK, op->cfb_mask); + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_NONCE_HI, htonl(op->nonce) >> 24); + CLEAR_SET_FIELD(next_pkt_desc->authDstNonceLow, + PKT_DSC_NONCE_LOW, htonl(op->nonce) & 0xffffff); + + + } + /* Auth Dest Address must be Cacheline aligned on input */ + if (vector & (XLR_SEC_VECTOR_MAC | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_F9)) { + pkt_desc->authDstNonceLow |= + /* NONCE_LOW */ + FIELD_VALUE(PKT_DSC_AUTH_DST_ADDR, + (uint64_t) vtophys((void *)(unsigned long)op->auth_dest)) | + FIELD_VALUE(PKT_DSC_CIPH_OFF_HI, 0); + + + if (multi_frag_flag) { + next_pkt_desc->authDstNonceLow |= + /* NONCE_LOW */ + FIELD_VALUE(PKT_DSC_AUTH_DST_ADDR, + (uint64_t) vtophys((void *)(unsigned long)desc->next_auth_dest)) | + FIELD_VALUE(PKT_DSC_CIPH_OFF_HI, 0); + + + } + } + /* CkSum Dest Address must be Cacheline aligned on input */ + if (op->cksum_type == XLR_SEC_CKSUM_TYPE_IP) { + CLEAR_SET_FIELD(pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_CKSUM_DST_ADDR, + (uint64_t) vtophys((void *)(unsigned long)op->cksum_dest)); + + if (multi_frag_flag) { + CLEAR_SET_FIELD(next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask, + PKT_DSC_CKSUM_DST_ADDR, + (uint64_t) vtophys((void *)(unsigned long)desc->next_cksum_dest)); + + } + } + /* + * XLR_SEC_CMD_DIAG (" xlr_sec_setup_packet(): pkt_desc=%llx + * phys_pkt_desc=%llx \n", (unsigned long long)pkt_desc, (unsigned + * long long)virt_to_phys(pkt_desc)); (unsigned long long)pkt_desc, + * (unsigned long long)vtophys(pkt_desc)); + */ + XLR_SEC_CMD_DIAG(" xlr_sec_setup_packet(): pkt_desc=%p phys_pkt_desc=%llx \n", + pkt_desc, (unsigned long long)vtophys(pkt_desc)); + + + + CLEAR_SET_FIELD(*data, MSG_CMD_DATA_ADDR, ((uint64_t) vtophys(pkt_desc))); + CLEAR_SET_FIELD(*data, MSG_CMD_DATA_CTL, SEC_EOP); + CLEAR_SET_FIELD(*data, MSG_CMD_DATA_LEN, 
MSG_CMD_DATA_LEN_LOAD); + + + XLR_SEC_CMD_DIAG("xlr_sec_setup_packet: DONE\n"); + +#ifdef RMI_SEC_DEBUG + + { + printf("data desc\n"); + printf("srcLengthIVOffUseIVNext = 0x%llx\n", pkt_desc->srcLengthIVOffUseIVNext); + printf("dstDataSettings = 0x%llx\n", pkt_desc->dstDataSettings); + printf("authDstNonceLow = 0x%llx\n", pkt_desc->authDstNonceLow); + printf("ckSumDstNonceHiCFBMaskLLWMask = 0x%llx\n", pkt_desc->ckSumDstNonceHiCFBMaskLLWMask); + } + + if (multi_frag_flag) { + + printf("next data desc\n"); + printf("srcLengthIVOffUseIVNext = 0x%llx\n", next_pkt_desc->srcLengthIVOffUseIVNext); + printf("dstDataSettings = 0x%llx\n", next_pkt_desc->dstDataSettings); + printf("authDstNonceLow = 0x%llx\n", next_pkt_desc->authDstNonceLow); + printf("ckSumDstNonceHiCFBMaskLLWMask = 0x%llx\n", next_pkt_desc->ckSumDstNonceHiCFBMaskLLWMask); + } +#endif + +#ifdef SYMBOL + if (op->cipher_type == XLR_SEC_CIPHER_TYPE_ARC4) { + op->source_buf -= 0; + op->source_buf_size += 0; + op->dest_buf -= 0; + } +#endif + return XLR_SEC_ERR_NONE; +} + + +static int +identify_symkey_ctl_error(uint32_t code, xlr_sec_error_t err) +{ + int ret_val = EINVAL; + + switch (code) { + case CTL_ERR_NONE: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error: No Error\n"); + ret_val = 0; + break; + case CTL_ERR_CIPHER_OP: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_CIPHER_OP) - Unknown Cipher Op \n"); + break; + case CTL_ERR_MODE: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_MODE) - " + "Unknown or Not Allowed Mode \n"); + break; + case CTL_ERR_CHKSUM_SRC: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_CHKSUM_SRC) - Unknown CkSum Src\n"); + break; + case CTL_ERR_CFB_MASK: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_CFB_MASK) - Forbidden CFB Mask \n"); + break; + case CTL_ERR_OP: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_OP) - Unknown Ctrl Op \n"); + break; + case CTL_ERR_DATA_READ: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_DATA_READ) - Data Read Error\n"); + break; + case CTL_ERR_DESC_CTRL: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error(CTL_ERR_DESC_CTRL) - " + "Descriptor Ctrl Field Error \n"); + break; + case CTL_ERR_UNDEF1: + case CTL_ERR_UNDEF2: + default: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: CTL Error: UNKNOWN CODE=%d \n", code); + break; + } + return ret_val; +} + +static +int +identify_symkey_data_error(uint32_t code, xlr_sec_error_t err) +{ + int ret_val = -EINVAL; + + switch (code) { + case DATA_ERR_NONE: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error No Error\n"); + ret_val = 0; + break; + case DATA_ERR_LEN_CIPHER: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Not Enough Data To Cipher\n"); + break; + case DATA_ERR_IV_ADDR: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Illegal IV Loacation\n"); + break; + case DATA_ERR_WD_LEN_AES: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Illegal Nb Words To AES\n"); + break; + case DATA_ERR_BYTE_COUNT: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Illegal Pad And ByteCount Spec\n"); + break; + case DATA_ERR_LEN_CKSUM: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Not Enough Data To CkSum\n"); + break; + case DATA_ERR_OP: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Unknown Data Op \n"); + break; + case DATA_ERR_READ: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Data Read Error \n"); + break; + case DATA_ERR_WRITE: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error() - Data Write Error \n"); + break; + case DATA_ERR_UNDEF1: + default: + XLR_SEC_CMD_DIAG("XLR_SEC_SEC: DATA Error - UNKNOWN CODE=%d \n", code); + break; + } + return ret_val; +} + + +static 
int +xlr_sec_submit_message(symkey_desc_pt desc, uint32_t cfg_vector) +{ + xlr_sec_error_t err; + uint32_t ctl_error, data_error; + int ret_val = 0; + + XLR_SEC_CMD_DIAG("xlr_sec_submit_message: ENTER\n"); + + err = XLR_SEC_ERR_NONE; + + XLR_SEC_CMD_DIAG_SYM_DESC(desc, cfg_vector); + + do { + /* For now, send message and wait for response */ + err = xlr_sec_submit_op(desc); + + XLR_SEC_CMD_DIAG("xlr_sec_submit_message: err = %d \n", (uint32_t) err); + + if (err != XLR_SEC_ERR_NONE) { + ret_val = (EINVAL); + break; + } + ctl_error = desc->ctl_result; + data_error = desc->data_result; + + XLR_SEC_CMD_DIAG("xlr_sec_submit_message: ctl_error = %x data_error = %x\n", + ctl_error, data_error); + + if ((ret_val = identify_symkey_ctl_error(ctl_error, err)) == 0) + ret_val = identify_symkey_data_error(data_error, err); + + XLR_SEC_CMD_DIAG("xlr_sec_submit_message: identify error = %d \n", ret_val); + + } while (0); + + XLR_SEC_CMD_DIAG("xlr_sec_submit_message: DONE\n"); + return (ret_val); +} + + +static +xlr_sec_error_t +xlr_sec_setup_cipher(xlr_sec_io_pt op, + ControlDescriptor_pt ctl_desc, + uint32_t * vector) +{ + uint32_t aes_flag = 0; + uint32_t cipher_vector = 0; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ENTER vector = %04x\n", *vector); + + switch (op->cipher_type) { + case XLR_SEC_CIPHER_TYPE_NONE: + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_BYPASS); + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: CIPHER_TYPE_NONE EXIT\n"); + return XLR_SEC_ERR_NONE; + case XLR_SEC_CIPHER_TYPE_DES: + cipher_vector |= XLR_SEC_VECTOR_CIPHER_DES; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_DES); + break; + case XLR_SEC_CIPHER_TYPE_3DES: + cipher_vector |= XLR_SEC_VECTOR_CIPHER_3DES; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_3DES); + break; + case XLR_SEC_CIPHER_TYPE_AES128: + aes_flag = 1; + cipher_vector |= XLR_SEC_VECTOR_CIPHER_AES128; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_AES128); + break; + case XLR_SEC_CIPHER_TYPE_AES192: + aes_flag = 1; + cipher_vector |= XLR_SEC_VECTOR_CIPHER_AES192; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_AES192); + break; + case XLR_SEC_CIPHER_TYPE_AES256: + aes_flag = 1; + cipher_vector |= XLR_SEC_VECTOR_CIPHER_AES256; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_AES256); + break; + case XLR_SEC_CIPHER_TYPE_ARC4: + cipher_vector |= XLR_SEC_VECTOR_CIPHER_ARC4; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_ARC4); + SET_FIELD(ctl_desc->instruction, CTL_DSC_ARC4_KEYLEN, + op->rc4_key_len); + SET_FIELD(ctl_desc->instruction, CTL_DSC_ARC4_LOADSTATE, + op->rc4_loadstate); + SET_FIELD(ctl_desc->instruction, CTL_DSC_ARC4_SAVESTATE, + op->rc4_savestate); + if (op->rc4_loadstate || op->rc4_savestate) + cipher_vector |= XLR_SEC_VECTOR_STATE; + break; + case XLR_SEC_CIPHER_TYPE_KASUMI_F8: + aes_flag = 1; + cipher_vector |= XLR_SEC_VECTOR_CIPHER_KASUMI_F8; + SET_FIELD(ctl_desc->instruction, CTL_DSC_CPHR, CTL_DSC_CPHR_KASUMI_F8); + break; + default: + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_TYPE EXIT\n"); + return XLR_SEC_ERR_CIPHER_TYPE; + } + + switch (op->cipher_mode) { + case XLR_SEC_CIPHER_MODE_ECB: + if (aes_flag == 1) + cipher_vector |= XLR_SEC_VECTOR_MODE_ECB_CBC_OFB; + else + cipher_vector |= XLR_SEC_VECTOR_MODE_ECB_CBC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_MODE, CTL_DSC_MODE_ECB); + break; + case XLR_SEC_CIPHER_MODE_CBC: + if (aes_flag == 1) + cipher_vector |= XLR_SEC_VECTOR_MODE_ECB_CBC_OFB; + else + cipher_vector |= 
XLR_SEC_VECTOR_MODE_ECB_CBC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_MODE, CTL_DSC_MODE_CBC); + break; + case XLR_SEC_CIPHER_MODE_OFB: + if (aes_flag == 0) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_MODE EXIT\n"); + return XLR_SEC_ERR_CIPHER_MODE; + } + cipher_vector |= XLR_SEC_VECTOR_MODE_ECB_CBC_OFB; + SET_FIELD(ctl_desc->instruction, CTL_DSC_MODE, CTL_DSC_MODE_OFB); + break; + case XLR_SEC_CIPHER_MODE_CTR: + if (aes_flag == 0) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_MODE EXIT\n"); + return XLR_SEC_ERR_CIPHER_MODE; + } + cipher_vector |= XLR_SEC_VECTOR_MODE_CTR_CFB; + SET_FIELD(ctl_desc->instruction, CTL_DSC_MODE, CTL_DSC_MODE_CTR); + break; + case XLR_SEC_CIPHER_MODE_CFB: + if (aes_flag == 0) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_MODE EXIT\n"); + return XLR_SEC_ERR_CIPHER_MODE; + } + cipher_vector |= XLR_SEC_VECTOR_MODE_CTR_CFB; + SET_FIELD(ctl_desc->instruction, CTL_DSC_MODE, CTL_DSC_MODE_CFB); + break; + case XLR_SEC_CIPHER_MODE_F8: + if (aes_flag == 0) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_MODE EXIT\n"); + return XLR_SEC_ERR_CIPHER_MODE; + } + cipher_vector |= XLR_SEC_VECTOR_MODE_F8; + SET_FIELD(ctl_desc->instruction, CTL_DSC_MODE, CTL_DSC_MODE_F8); + break; + default: + if (!(cipher_vector & (XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_CIPHER_KASUMI_F8))) { + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_MODE EXIT\n"); + return XLR_SEC_ERR_CIPHER_MODE; + } + } + + switch (op->cipher_init) { + case XLR_SEC_CIPHER_INIT_OK: + SET_FIELD(ctl_desc->instruction, + CTL_DSC_ICPHR, CTL_DSC_ICPHR_OKY); + break; + + case XLR_SEC_CIPHER_INIT_NK: + SET_FIELD(ctl_desc->instruction, + CTL_DSC_ICPHR, CTL_DSC_ICPHR_NKY); + break; + default: + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: ERR_CIPHER_INIT EXIT\n"); + return XLR_SEC_ERR_CIPHER_INIT; + } + + *vector |= cipher_vector; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_cipher: EXIT vector = %04x\n", *vector); + + return XLR_SEC_ERR_NONE; +} + + +static +xlr_sec_error_t +xlr_sec_setup_digest(xlr_sec_io_pt op, + ControlDescriptor_pt ctl_desc, + uint32_t * vector) +{ + uint32_t hash_flag = 0; + uint32_t hmac_flag = 0; + uint32_t digest_vector = 0; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_digest: ENTER vector = %04x\n", *vector); + + switch (op->digest_type) { + case XLR_SEC_DIGEST_TYPE_MD5: + digest_vector |= XLR_SEC_VECTOR_MAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_MD5); + break; + case XLR_SEC_DIGEST_TYPE_SHA1: + digest_vector |= XLR_SEC_VECTOR_MAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA1); + break; + case XLR_SEC_DIGEST_TYPE_SHA256: + digest_vector |= XLR_SEC_VECTOR_MAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA256); + break; + case XLR_SEC_DIGEST_TYPE_SHA384: + digest_vector |= XLR_SEC_VECTOR_MAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASHHI, CTL_DSC_HASH_SHA384 >> 2); + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA384); + break; + case XLR_SEC_DIGEST_TYPE_SHA512: + digest_vector |= XLR_SEC_VECTOR_MAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASHHI, CTL_DSC_HASH_SHA512 >> 2); + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA512); + break; + case XLR_SEC_DIGEST_TYPE_GCM: + hash_flag = 1; + digest_vector |= XLR_SEC_VECTOR_GCM; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASHHI, CTL_DSC_HASH_GCM >> 2); + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_GCM); + break; + case XLR_SEC_DIGEST_TYPE_KASUMI_F9: + hash_flag = 1; + digest_vector |= XLR_SEC_VECTOR_F9; + 
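+		/*
+		 * For SHA-384/512, GCM and Kasumi F9 the hash algorithm code
+		 * does not fit in the two bits of CTL_DSC_HASH, so the upper
+		 * bits are carried in CTL_DSC_HASHHI (hence the ">> 2" below).
+		 */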
SET_FIELD(ctl_desc->instruction, CTL_DSC_HASHHI, CTL_DSC_HASH_KASUMI_F9 >> 2); + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_KASUMI_F9); + break; + case XLR_SEC_DIGEST_TYPE_HMAC_MD5: + hmac_flag = 1; + digest_vector |= XLR_SEC_VECTOR_HMAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_MD5); + break; + case XLR_SEC_DIGEST_TYPE_HMAC_SHA1: + hmac_flag = 1; + digest_vector |= XLR_SEC_VECTOR_HMAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA1); + break; + case XLR_SEC_DIGEST_TYPE_HMAC_SHA256: + hmac_flag = 1; + digest_vector |= XLR_SEC_VECTOR_HMAC; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA256); + break; + case XLR_SEC_DIGEST_TYPE_HMAC_SHA384: + hmac_flag = 1; + digest_vector |= XLR_SEC_VECTOR_HMAC2; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASHHI, CTL_DSC_HASH_SHA384 >> 2); + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA384); + break; + case XLR_SEC_DIGEST_TYPE_HMAC_SHA512: + hmac_flag = 1; + digest_vector |= XLR_SEC_VECTOR_HMAC2; + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASHHI, CTL_DSC_HASH_SHA512 >> 2); + SET_FIELD(ctl_desc->instruction, CTL_DSC_HASH, CTL_DSC_HASH_SHA512); + break; + default: + return XLR_SEC_ERR_DIGEST_TYPE; + } + + if (hmac_flag == 1) { + SET_FIELD(ctl_desc->instruction, CTL_DSC_HMAC, CTL_DSC_HMAC_ON); + + } + if (hmac_flag || hash_flag) { + switch (op->digest_init) { + case XLR_SEC_DIGEST_INIT_OLDKEY: + SET_FIELD(ctl_desc->instruction, CTL_DSC_IHASH, CTL_DSC_IHASH_OLD); + break; + case XLR_SEC_DIGEST_INIT_NEWKEY: + SET_FIELD(ctl_desc->instruction, CTL_DSC_IHASH, CTL_DSC_IHASH_NEW); + break; + default: + return XLR_SEC_ERR_DIGEST_INIT; + } + } /* hmac_flag */ + *vector |= digest_vector; + + XLR_SEC_CMD_DIAG("xlr_sec_setup_digest: EXIT vector = %04x\n", *vector); + + return XLR_SEC_ERR_NONE; +} + +static +xlr_sec_error_t +xlr_sec_setup_cksum(xlr_sec_io_pt op, + ControlDescriptor_pt ctl_desc) +{ + switch (op->cksum_type) { + case XLR_SEC_CKSUM_TYPE_NOP: + SET_FIELD(ctl_desc->instruction, CTL_DSC_CKSUM, CTL_DSC_CKSUM_NOP); + return XLR_SEC_ERR_NONE; + case XLR_SEC_CKSUM_TYPE_IP: + SET_FIELD(ctl_desc->instruction, CTL_DSC_CKSUM, CTL_DSC_CKSUM_IP); + break; + default: + return XLR_SEC_ERR_CKSUM_TYPE; + } + + return XLR_SEC_ERR_NONE; +} + + +static +xlr_sec_error_t +xlr_sec_control_setup(xlr_sec_io_pt op, + unsigned int flags, + uint64_t * control, + ControlDescriptor_pt ctl_desc, + xlr_sec_drv_user_t * user, + uint32_t vector) +{ + uint64_t *hmac_key = NULL; + uint64_t *cipher_key = NULL; + uint64_t *cipher_state = NULL; + uint32_t ctl_size = 0; + uint64_t ctl_addr = 0; + uint32_t cipher_keylen = 0; + uint32_t hmac_keylen = 0; + uint32_t ctl_len; + +#ifdef SYM_DEBUG + XLR_SEC_CMD_DIAG(" ENTER vector = %04x\n", vector); +#endif + + + switch (vector) { + case XLR_SEC_VECTOR_MAC: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR_MAC \n"); + ctl_size = sizeof(HMAC_t); + break; + case XLR_SEC_VECTOR_HMAC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_HMAC \n"); + hmac_key = &ctl_desc->cipherHashInfo.infoHMAC.hmacKey0; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(HMAC_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4.cipherKey0; + cipher_keylen = op->rc4_key_len; + ctl_size = sizeof(ARC4_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__HMAC\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4HMAC.cipherKey0; + hmac_key = 
&ctl_desc->cipherHashInfo.infoARC4HMAC.hmacKey0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(ARC4HMAC_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__STATE: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__STATE\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4State.cipherKey0; + cipher_state = + &ctl_desc->cipherHashInfo.infoARC4State.Arc4SboxData0; + cipher_keylen = op->rc4_key_len; + ctl_size = sizeof(ARC4State_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC__STATE: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__HMAC__STATE\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4StateHMAC.cipherKey0; + cipher_state = + &ctl_desc->cipherHashInfo.infoARC4StateHMAC.Arc4SboxData0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4StateHMAC.hmacKey0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(ARC4StateHMAC_t); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_KASUMI_F8\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoKASUMIF8.cipherKey0; + cipher_keylen = XLR_SEC_KASUMI_F8_KEY_LENGTH; + ctl_size = sizeof(KASUMIF8_t); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoKASUMIF8HMAC.cipherKey0; + cipher_keylen = XLR_SEC_KASUMI_F8_KEY_LENGTH; + hmac_key = &ctl_desc->cipherHashInfo.infoKASUMIF8HMAC.hmacKey0; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(KASUMIF8HMAC_t); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC2: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC2\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoKASUMIF8HMAC2.cipherKey0; + cipher_keylen = XLR_SEC_KASUMI_F8_KEY_LENGTH; + hmac_key = &ctl_desc->cipherHashInfo.infoKASUMIF8HMAC2.hmacKey0; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(KASUMIF8HMAC2_t); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__GCM: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__GCM\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoKASUMIF8GCM.cipherKey0; + cipher_keylen = XLR_SEC_KASUMI_F8_KEY_LENGTH; + hmac_key = &ctl_desc->cipherHashInfo.infoKASUMIF8GCM.GCMH0; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(KASUMIF8GCM_t); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__F9: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__F9\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoKASUMIF8F9.cipherKey0; + cipher_keylen = XLR_SEC_KASUMI_F8_KEY_LENGTH; + hmac_key = &ctl_desc->cipherHashInfo.infoKASUMIF8F9.authKey0; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(KASUMIF8F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_DES__HMAC__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_DES__HMAC__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoDESHMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoDESHMAC.hmacKey0; + hmac_keylen = sizeof(HMAC_t); + cipher_keylen = XLR_SEC_DES_KEY_LENGTH; + ctl_size = sizeof(DESHMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_DES__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_DES__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoDES.cipherKey0; + cipher_keylen = XLR_SEC_DES_KEY_LENGTH; + ctl_size = sizeof(DES_t); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__HMAC__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_3DES__HMAC__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.info3DESHMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.info3DESHMAC.hmacKey0; + cipher_keylen = 
XLR_SEC_3DES_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(DES3HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_3DES__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.info3DES.cipherKey0; + cipher_keylen = XLR_SEC_3DES_KEY_LENGTH; + ctl_size = sizeof(DES3_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES128HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128.cipherKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + ctl_size = sizeof(AES128_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES128HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128.cipherKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + ctl_size = sizeof(AES128_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F8HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128F8HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES128F8_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES128F8HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F8.cipherKey0; + cipher_keylen = XLR_SEC_AES128F8_KEY_LENGTH; + ctl_size = sizeof(AES128F8_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES192HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192.cipherKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + ctl_size = sizeof(AES192_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES192HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__MODE_ECB_CBC_OFB \n"); + cipher_key = 
&ctl_desc->cipherHashInfo.infoAES192.cipherKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + ctl_size = sizeof(AES192_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F8HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192F8HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES192F8_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES192F8HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F8.cipherKey0; + cipher_keylen = XLR_SEC_AES192F8_KEY_LENGTH; + ctl_size = sizeof(AES192F8_t); + break; + + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES256HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256.cipherKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + ctl_size = sizeof(AES256_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES256HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256.cipherKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + ctl_size = sizeof(AES256_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256F8HMAC.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256F8HMAC.hmacKey0; + cipher_keylen = XLR_SEC_AES256F8_KEY_LENGTH; + hmac_keylen = sizeof(HMAC_t); + ctl_size = sizeof(AES256F8HMAC_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256F8.cipherKey0; + cipher_keylen = XLR_SEC_AES256F8_KEY_LENGTH; + ctl_size = sizeof(AES256F8_t); + break; + case XLR_SEC_VECTOR_HMAC2: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_HMAC2 \n"); + hmac_key = &ctl_desc->cipherHashInfo.infoHMAC2.hmacKey0; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(HMAC2_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4HMAC2.hmacKey0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(ARC4HMAC2_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2__STATE: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2__STATE\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4StateHMAC2.cipherKey0; + cipher_state = + 
&ctl_desc->cipherHashInfo.infoARC4StateHMAC2.Arc4SboxData0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4StateHMAC2.hmacKey0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(ARC4StateHMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_DES__HMAC2__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_DES__HMAC2__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoDESHMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoDESHMAC2.hmacKey0; + hmac_keylen = sizeof(HMAC2_t); + cipher_keylen = XLR_SEC_DES_KEY_LENGTH; + ctl_size = sizeof(DESHMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__HMAC2__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_3DES__HMAC2__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.info3DESHMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.info3DESHMAC2.hmacKey0; + cipher_keylen = XLR_SEC_3DES_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(DES3HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES128HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES128HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F8HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128F8HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES128F8_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES128F8HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES192HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES192HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F8HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192F8HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES192F8_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES192F8HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_CTR_CFB \n"); + cipher_key = 
&ctl_desc->cipherHashInfo.infoAES256HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES256HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES256HMAC2_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256F8HMAC2.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256F8HMAC2.hmacKey0; + cipher_keylen = XLR_SEC_AES256F8_KEY_LENGTH; + hmac_keylen = sizeof(HMAC2_t); + ctl_size = sizeof(AES256F8HMAC2_t); + break; + case XLR_SEC_VECTOR_GCM: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_GCM \n"); + hmac_key = &ctl_desc->cipherHashInfo.infoGCM.GCMH0; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(GCM_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__GCM: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__GCM\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4GCM.GCMH0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(ARC4GCM_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__GCM__STATE: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__GCM__STATE\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4StateGCM.cipherKey0; + cipher_state = + &ctl_desc->cipherHashInfo.infoARC4StateGCM.Arc4SboxData0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4StateGCM.GCMH0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(ARC4StateGCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_DES__GCM__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_DES__GCM__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoDESGCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoDESGCM.GCMH0; + hmac_keylen = sizeof(GCM_t); + cipher_keylen = XLR_SEC_DES_KEY_LENGTH; + ctl_size = sizeof(DESGCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__GCM__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_3DES__GCM__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.info3DESGCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.info3DESGCM.GCMH0; + cipher_keylen = XLR_SEC_3DES_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(DES3GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128GCM.GCMH0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES128GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128GCM.GCMH0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES128GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_F8: + 
XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F8GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128F8GCM.GCMH0; + cipher_keylen = XLR_SEC_AES128F8_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES128F8GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192GCM.GCMH0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES192GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192GCM.GCMH0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES192GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F8GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192F8GCM.GCMH0; + cipher_keylen = XLR_SEC_AES192F8_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES192F8GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256GCM.GCMH0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES256GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256GCM.GCMH0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES256GCM_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256F8GCM.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256F8GCM.GCMH0; + cipher_keylen = XLR_SEC_AES256F8_KEY_LENGTH; + hmac_keylen = sizeof(GCM_t); + ctl_size = sizeof(AES256F8GCM_t); + break; + case XLR_SEC_VECTOR_F9: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_F9 \n"); + hmac_key = &ctl_desc->cipherHashInfo.infoF9.authKey0; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(F9_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__F9: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__F9\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4F9.authKey0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(ARC4F9_t); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__F9__STATE: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR_CIPHER_ARC4__F9__STATE\n"); + cipher_key = &ctl_desc->cipherHashInfo.infoARC4StateF9.cipherKey0; + cipher_state = + &ctl_desc->cipherHashInfo.infoARC4StateF9.Arc4SboxData0; + hmac_key = &ctl_desc->cipherHashInfo.infoARC4StateF9.authKey0; + cipher_keylen = op->rc4_key_len; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(ARC4StateF9_t); + break; + case 
XLR_SEC_VECTOR__CIPHER_DES__F9__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_DES__F9__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoDESF9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoDESF9.authKey0; + hmac_keylen = sizeof(F9_t); + cipher_keylen = XLR_SEC_DES_KEY_LENGTH; + ctl_size = sizeof(DESF9_t); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__F9__MODE_ECB_CBC: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_3DES__F9__MODE_ECB_CBC \n"); + cipher_key = &ctl_desc->cipherHashInfo.info3DESF9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.info3DESF9.authKey0; + cipher_keylen = XLR_SEC_3DES_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(DES3F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128F9.authKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES128F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG(" XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128F9.authKey0; + cipher_keylen = XLR_SEC_AES128_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES128F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES128F8F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES128F8F9.authKey0; + cipher_keylen = XLR_SEC_AES128F8_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES128F8F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192F9.authKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES192F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_ECB_CBC_OFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192F9.authKey0; + cipher_keylen = XLR_SEC_AES192_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES192F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES192F8F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES192F8F9.authKey0; + cipher_keylen = XLR_SEC_AES192F8_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES192F8F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_CTR_CFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_CTR_CFB \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256F9.authKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES256F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_ECB_CBC_OFB: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_ECB_CBC_OFB \n"); + cipher_key = 
&ctl_desc->cipherHashInfo.infoAES256F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256F9.authKey0; + cipher_keylen = XLR_SEC_AES256_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES256F9_t); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_F8: + XLR_SEC_CMD_DIAG("XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_F8 \n"); + cipher_key = &ctl_desc->cipherHashInfo.infoAES256F8F9.cipherKey0; + hmac_key = &ctl_desc->cipherHashInfo.infoAES256F8F9.authKey0; + cipher_keylen = XLR_SEC_AES256F8_KEY_LENGTH; + hmac_keylen = sizeof(F9_t); + ctl_size = sizeof(AES256F8F9_t); + break; + + default: + XLR_SEC_CMD_DIAG("default \n"); + return XLR_SEC_ERR_CONTROL_VECTOR; + } + + if ((cipher_key != NULL) && !(flags & XLR_SEC_SETUP_OP_PRESERVE_CIPHER_KEY)) + memcpy(cipher_key, &op->crypt_key[0], cipher_keylen); + + if ((hmac_key != NULL) && !(flags & XLR_SEC_SETUP_OP_PRESERVE_HMAC_KEY)) + memcpy(hmac_key, &op->mac_key[0], hmac_keylen); + if (cipher_state) { + if (op->rc4_loadstate) + memcpy(cipher_state, (void *)(unsigned long)op->rc4_state, + XLR_SEC_MAX_RC4_STATE_SIZE); + if (op->rc4_savestate) + user->aligned_state = (char *)cipher_state; + } + if (flags & XLR_SEC_SETUP_OP_FLIP_3DES_KEY) { + uint64_t temp; + + temp = ctl_desc->cipherHashInfo.info3DES.cipherKey0; + ctl_desc->cipherHashInfo.info3DES.cipherKey0 = + ctl_desc->cipherHashInfo.info3DES.cipherKey2; + ctl_desc->cipherHashInfo.info3DES.cipherKey2 = temp; + } + /* + * Control length is the number of control cachelines to be read so + * user needs to round up the control length to closest integer + * multiple of 32 bytes. + */ + ctl_size += sizeof(ctl_desc->instruction); + ctl_len = NUM_CHUNKS(ctl_size, 5); + XLR_SEC_CMD_DIAG("ctl_size in bytes: %u, in cachelines: %u\n", ctl_size, ctl_len); + CLEAR_SET_FIELD(*control, MSG_CMD_CTL_LEN, ctl_len); + + ctl_addr = (uint64_t) vtophys(ctl_desc); + CLEAR_SET_FIELD(*control, MSG_CMD_CTL_ADDR, ctl_addr); + + XLR_SEC_CMD_DIAG(" xlr_sec_control_setup(): ctl_desc=%p ctl_addr=%llx \n", + ctl_desc, (unsigned long long)ctl_addr); + + CLEAR_SET_FIELD(*control, MSG_CMD_CTL_CTL, SEC_SOP); + + return XLR_SEC_ERR_NONE; +} + + +xlr_sec_error_t +xlr_sec_submit_op(symkey_desc_pt desc) +{ + struct msgrng_msg send_msg; + + int rsp_dest_id, cpu, hard_cpu, hard_thread; + int code, retries; + unsigned long msgrng_flags = 0; + + /* threads (0-3) are orthogonal to buckets 0-3 */ + cpu = xlr_cpu_id(); + + hard_cpu = cpu >> 2; + hard_thread = cpu & 0x3;/* thread id */ + rsp_dest_id = (hard_cpu << 3) + hard_thread; + + desc->op_ctl.cpu = hard_cpu; + desc->op_ctl.flags = 0; /* called from kernel thread */ + + XLR_SEC_CMD_DIAG("[%s]:%d: cpu=0x%x hard_cpu=0x%x hard_thrd=0x%x id=0x%x \n", + __FUNCTION__, __LINE__, cpu, hard_cpu, hard_thread, rsp_dest_id); + + /* + * Set DestId in Message Control Word. This tells the Security + * Engine which bucket to send the reply to for this CPU + */ + CLEAR_SET_FIELD(desc->control, MSG_CMD_CTL_ID, rsp_dest_id); + CLEAR_SET_FIELD(desc->data, MSG_CMD_CTL_ID, rsp_dest_id); + + CLEAR_SET_FIELD(desc->control, MSG_CTL_OP_TYPE, MSG0_CTL_OP_ENGINE_SYMKEY); + CLEAR_SET_FIELD(desc->data, MSG_CTL_OP_TYPE, MSG1_CTL_OP_SYMKEY_PIPE0); + + send_msg.msg0 = desc->control | (1ULL << 53); + send_msg.msg1 = desc->data | (1ULL << 53) | (1ULL << 52); + send_msg.msg2 = send_msg.msg3 = 0; + + desc->op_ctl.flags = 1; + //in_interrupt(); /* ipsec softirq ? 
*/ + + XLR_SEC_CMD_DIAG("[%s]: IN_IRQ=%d msg0=0x%llx msg1=0x%llx \n", + __FUNCTION__, desc->op_ctl.flags, send_msg.msg0, send_msg.msg1); + + + + retries = 100; + + while (retries--) { + msgrng_flags_save(msgrng_flags); + + code = message_send_retry(SEC_MSGRING_WORDSIZE, + MSGRNG_CODE_SEC, + desc->op_ctl.stn_id, + &send_msg); + + + msgrng_flags_restore(msgrng_flags); + + if (code == 0) + break; + } + + + return (XLR_SEC_ERR_NONE); +} + + + +symkey_desc_pt +xlr_sec_allocate_desc(void *session_ptr) +{ + uint64_t addr; + symkey_desc_pt aligned, new; + + new = (symkey_desc_pt) malloc(sizeof(symkey_desc_t), + M_DEVBUF, M_NOWAIT | M_ZERO); + + if (new == NULL) + return (NULL); + + new->ses = session_ptr; + + new->user.kern_src = new->user.aligned_src = + (uint8_t *) contigmalloc(256 * 1024 + 1024, + M_DEVBUF, M_NOWAIT | M_ZERO, + 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); + + if (new->user.kern_src == NULL) { + printf("ERROR - malloc failed for user.kern_src\n"); + return NULL; + } + new->user.aligned_dest = new->user.kern_dest = + (uint8_t *) contigmalloc(257 * 1024, + M_DEVBUF, M_NOWAIT | M_ZERO, + 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); + + if (new->user.aligned_dest == NULL) { + printf("ERROR - malloc failed for user.aligned_dest\n"); + return NULL; + } + new->next_src_buf = (uint8_t *) contigmalloc(256 * 1024 + 1024, + M_DEVBUF, M_NOWAIT | M_ZERO, + 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); + + if (new->next_src_buf == NULL) { + printf("ERROR - malloc failed for next_src_buf\n"); + return NULL; + } + new->next_dest_buf = + (uint8_t *) contigmalloc(257 * 1024, + M_DEVBUF, M_NOWAIT | M_ZERO, + 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); + + if (new->next_dest_buf == NULL) { + printf("ERROR - malloc failed for next_dest_buf\n"); + return NULL; + } + new->user.kern_auth = new->user.user_auth = NULL; + new->user.aligned_auth = new->user.user_auth = NULL; + + + /* find cacheline alignment */ + aligned = new; + addr = (uint64_t) vtophys(new); + + /* save for free */ + aligned->alloc = new; + + /* setup common control info */ + aligned->op_ctl.phys_self = addr; + aligned->op_ctl.stn_id = MSGRNG_STNID_SEC0; + + return (aligned); +} + + +static void +xlr_sec_free_desc(symkey_desc_pt desc) +{ + if ((desc == NULL) || (desc->alloc == NULL)) { + printf("%s: NULL descriptor \n", __FUNCTION__); + return; + } + contigfree(desc, sizeof(symkey_desc_t), M_DEVBUF); + + + return; +} + +void +print_buf(char *desc, void *data, int len) +{ + uint8_t *dp; + int i; + + DPRINT("%s: ", desc); /* newline done in for-loop */ + dp = data; + for (i = 0; i < len; i++, dp++) { + if ((i % 16) == 0) + DPRINT("\n"); + DPRINT(" %c%c", + nib2hex[(((*dp) & 0xf0) >> 4)], + nib2hex[((*dp) & 0x0f)]); + } + DPRINT("\n"); +} + + +#ifdef XLR_SEC_CMD_DEBUG +static void +decode_symkey_desc(symkey_desc_pt desc, uint32_t cfg_vector) +{ + + unsigned long long word; + + /* uint8_t *info; */ + /* int i; */ + + DPRINT("MSG - CTL: \n"); + DPRINT("\t CTRL = %lld \n", + GET_FIELD(desc->control, MSG_CMD_CTL_CTL)); + DPRINT("\t CTRL LEN = %lld \n", + GET_FIELD(desc->control, MSG_CMD_CTL_LEN)); + DPRINT("\t CTRL ADDR = %llx \n\n", + GET_FIELD(desc->control, MSG_CMD_CTL_ADDR)); + + DPRINT("MSG - DATA: \n"); + DPRINT("\t CTRL = %lld \n", + GET_FIELD(desc->data, MSG_CMD_DATA_CTL)); + DPRINT("\t DATA LEN = %lld \n", + GET_FIELD(desc->data, MSG_CMD_DATA_LEN)); + DPRINT("\t DATA ADDR = %llx \n\n", + GET_FIELD(desc->data, MSG_CMD_DATA_ADDR)); + + DPRINT("CONTROL DESCRIPTOR: \n"); + word = desc->ctl_desc.instruction; + DPRINT("\tINSTRUCTION: %llx\n", word); + 
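+	/*
+	 * Decode the control-descriptor instruction word field by field
+	 * (cf. the SET_FIELD calls in the xlr_sec_setup_* routines above).
+	 */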
DPRINT("\t\tOVERRIDE CIPH = %lld \n", GET_FIELD(word, CTL_DSC_OVERRIDECIPHER)); + DPRINT("\t\tARC4 WAIT = %lld \n", GET_FIELD(word, CTL_DSC_ARC4_WAIT4SAVE)); + DPRINT("\t\tARC4 SAVE = %lld \n", GET_FIELD(word, CTL_DSC_ARC4_SAVESTATE)); + DPRINT("\t\tARC4 LOAD = %lld \n", GET_FIELD(word, CTL_DSC_ARC4_LOADSTATE)); + DPRINT("\t\tARC4 KEYLEN = %lld \n", GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + DPRINT("\t\tCIPHER = %lld \n", GET_FIELD(word, CTL_DSC_CPHR)); + DPRINT("\t\tCIPHER MODE = %lld \n", GET_FIELD(word, CTL_DSC_MODE)); + DPRINT("\t\tINIT CIPHER = %lld \n", GET_FIELD(word, CTL_DSC_ICPHR)); + DPRINT("\t\tHMAC = %lld \n", GET_FIELD(word, CTL_DSC_HMAC)); + DPRINT("\t\tHASH ALG = %lld \n", GET_FIELD(word, CTL_DSC_HASH) | (GET_FIELD(word, CTL_DSC_HASHHI) << 2)); + DPRINT("\t\tINIT HASH = %lld \n", GET_FIELD(word, CTL_DSC_IHASH)); + DPRINT("\t\tCHKSUM = %lld \n", GET_FIELD(word, CTL_DSC_CKSUM)); + DPRINT("\tCIPHER HASH INFO: \n"); +#if 0 + info = (uint8_t *) & desc->ctl_desc->cipherHashInfo; + for (i = 0; i < sizeof(CipherHashInfo_t); i++, info++) { + DPRINT(" %02x", *info); + if (i && (i % 16) == 0) + DPRINT("\n"); + } + DPRINT("\n\n"); +#endif + + switch (cfg_vector) { + case XLR_SEC_VECTOR_CIPHER_ARC4: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4 \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__HMAC \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4HMAC.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoARC4HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__STATE: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__STATE \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4State.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC__STATE: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__HMAC__STATE \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateHMAC.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateHMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_KASUMI_F8 \n"); + print_buf("KASUMI_F8 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8.cipherKey0, + XLR_SEC_KASUMI_F8_KEY_LENGTH); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC: + DPRINT("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC\n"); + print_buf("KASUMI_F8 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8HMAC.cipherKey0, + XLR_SEC_KASUMI_F8_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC2: + DPRINT("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC2\n"); + print_buf("KASUMI_F8 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8HMAC2.cipherKey0, + XLR_SEC_KASUMI_F8_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR_CIPHER_KASUMI_F8__GCM: + DPRINT("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__GCM\n"); + print_buf("KASUMI_F8 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8GCM.cipherKey0, + XLR_SEC_KASUMI_F8_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8GCM.GCMH0, + sizeof(GCM_t)); + break; + case 
XLR_SEC_VECTOR_CIPHER_KASUMI_F8__F9: + DPRINT("XLR_SEC_VECTOR_CIPHER_KASUMI_F8__F9\n"); + print_buf("KASUMI_F8 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8F9.cipherKey0, + XLR_SEC_KASUMI_F8_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoKASUMIF8F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR_MAC: + DPRINT("VECTOR: XLR_SEC_VECTOR_MAC \n"); + DPRINT("MAC-ONLY - No Info\n"); + break; + case XLR_SEC_VECTOR_HMAC: + DPRINT("VECTOR: XLR_SEC_VECTOR_HMAC \n"); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoHMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_DES__HMAC__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_DES__HMAC__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoDESHMAC.cipherKey0, + XLR_SEC_DES_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoDESHMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_DES__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_DES__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoDES.cipherKey0, + XLR_SEC_DES_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__HMAC__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_3DES__HMAC__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.info3DESHMAC.cipherKey0, + XLR_SEC_3DES_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.info3DESHMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_3DES__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.info3DES.cipherKey0, + XLR_SEC_3DES_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192.cipherKey0, + 
XLR_SEC_AES192_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + break; + + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2 \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4HMAC2.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2__STATE: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2__STATE \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateHMAC2.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateHMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR_HMAC2: + DPRINT("VECTOR: XLR_SEC_VECTOR_HMAC2 \n"); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoHMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_DES__HMAC2__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_DES__HMAC2__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoDESHMAC2.cipherKey0, + XLR_SEC_DES_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoDESHMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__HMAC2__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_3DES__HMAC2__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.info3DESHMAC2.cipherKey0, + XLR_SEC_3DES_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.info3DESHMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_CTR_CFB: + DPRINT("VECTOR: 
XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC2.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC2.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES128HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC2.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC2.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES192HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC2.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC2.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__GCM: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__GCM \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4GCM.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoARC4GCM.GCMH0, + sizeof(GCM_t)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__GCM__STATE: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__GCM__STATE \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateGCM.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateGCM.GCMH0, + sizeof(GCM_t)); + break; + case XLR_SEC_VECTOR_GCM: + DPRINT("VECTOR: XLR_SEC_VECTOR_GCM \n"); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoGCM.GCMH0, + sizeof(GCM_t)); + break; + case XLR_SEC_VECTOR__CIPHER_DES__GCM__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_DES__GCM__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoDESGCM.cipherKey0, + XLR_SEC_DES_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoDESGCM.GCMH0, + sizeof(GCM_t)); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__GCM__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_3DES__GCM__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.info3DESGCM.cipherKey0, + XLR_SEC_3DES_KEY_LENGTH); + print_buf("GCM 
Key", + &desc->ctl_desc.cipherHashInfo.info3DESGCM.GCMH0, + sizeof(GCM_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128GCM.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES128GCM.GCMH0, + XLR_SEC_AES128_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128GCM.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES128GCM.GCMH0, + XLR_SEC_AES128_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192GCM.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES192GCM.GCMH0, + XLR_SEC_AES192_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192GCM.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES192GCM.GCMH0, + XLR_SEC_AES192_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256GCM.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES256GCM.GCMH0, + XLR_SEC_AES256_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256GCM.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES256GCM.GCMH0, + XLR_SEC_AES256_KEY_LENGTH); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__F9: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__F9 \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4F9.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR_CIPHER_ARC4__F9__STATE: + DPRINT("VECTOR: XLR_SEC_VECTOR_CIPHER_ARC4__F9__STATE \n"); + print_buf("ARC4 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateF9.cipherKey0, + GET_FIELD(word, CTL_DSC_ARC4_KEYLEN)); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoARC4StateF9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR_F9: + DPRINT("VECTOR: XLR_SEC_VECTOR_F9 \n"); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoF9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_DES__F9__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_DES__F9__MODE_ECB_CBC \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoDESF9.cipherKey0, + XLR_SEC_DES_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoDESF9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_3DES__F9__MODE_ECB_CBC: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_3DES__F9__MODE_ECB_CBC \n"); + 
print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.info3DESF9.cipherKey0, + XLR_SEC_3DES_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.info3DESF9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F9.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F9.cipherKey0, + XLR_SEC_AES128_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F9.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_ECB_CBC_OFB\n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F9.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_CTR_CFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_CTR_CFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F9.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_ECB_CBC_OFB: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_ECB_CBC_OFB \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F9.cipherKey0, + XLR_SEC_AES256_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8HMAC.cipherKey0, + XLR_SEC_AES128F8_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8.cipherKey0, + XLR_SEC_AES128F8_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8HMAC.cipherKey0, + XLR_SEC_AES192F8_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8.cipherKey0, + XLR_SEC_AES192F8_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_F8: + 
DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8HMAC.cipherKey0, + XLR_SEC_AES256F8_KEY_LENGTH); + print_buf("HMAC Key", + &desc->ctl_desc.cipherHashInfo.infoAES256HMAC.hmacKey0, + sizeof(HMAC_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8.cipherKey0, + XLR_SEC_AES256F8_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8HMAC2.cipherKey0, + XLR_SEC_AES128F8_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8HMAC2.cipherKey0, + XLR_SEC_AES192F8_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8HMAC2.cipherKey0, + XLR_SEC_AES256F8_KEY_LENGTH); + print_buf("HMAC2 Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8HMAC2.hmacKey0, + sizeof(HMAC2_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8GCM.cipherKey0, + XLR_SEC_AES128F8_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES128GCM.GCMH0, + XLR_SEC_AES128_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8GCM.cipherKey0, + XLR_SEC_AES192_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8GCM.GCMH0, + XLR_SEC_AES192_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8GCM.cipherKey0, + XLR_SEC_AES256F8_KEY_LENGTH); + print_buf("GCM Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8GCM.GCMH0, + XLR_SEC_AES256_KEY_LENGTH); + break; + case XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8F9.cipherKey0, + XLR_SEC_AES128F8_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES128F8F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_F8 \n"); + print_buf("CIPHER Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8F9.cipherKey0, + XLR_SEC_AES192F8_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES192F8F9.authKey0, + sizeof(F9_t)); + break; + case XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_F8: + DPRINT("VECTOR: XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_F8 \n"); + print_buf("CIPHER Key", + 
&desc->ctl_desc.cipherHashInfo.infoAES256F8F9.cipherKey0, + XLR_SEC_AES256F8_KEY_LENGTH); + print_buf("F9 Key", + &desc->ctl_desc.cipherHashInfo.infoAES256F8F9.authKey0, + sizeof(F9_t)); + break; + + default: + DPRINT("VECTOR: ???? \n"); + DPRINT(">>> WHAT THE HECK !!! <<< \n"); + break; + } + DPRINT("PACKET DESCRIPTOR: \n"); + word = desc->pkt_desc.srcLengthIVOffUseIVNext; + DPRINT("\tSrcLengthIVOffsetIVNext: %llx\n", word); + DPRINT("\t\tLoad HMAC = %lld \n", + GET_FIELD(word, PKT_DSC_LOADHMACKEY)); + DPRINT("\t\tPad Hash = %lld \n", + GET_FIELD(word, PKT_DSC_PADHASH)); + DPRINT("\t\tHash Byte Count = %lld \n", + GET_FIELD(word, PKT_DSC_HASHBYTES)); + DPRINT("\t\tNext = %lld \n", + GET_FIELD(word, PKT_DSC_NEXT)); + DPRINT("\t\tUse IV = %lld \n", + GET_FIELD(word, PKT_DSC_IV)); + DPRINT("\t\tIV Offset = %lld \n", + GET_FIELD(word, PKT_DSC_IVOFF)); + DPRINT("\t\tPacket Length = %lld \n", + GET_FIELD(word, PKT_DSC_PKTLEN)); + DPRINT("\t\tNLHMAC = %lld \n", GET_FIELD(word, PKT_DSC_NLHMAC)); + DPRINT("\t\tBreak = %lld \n", GET_FIELD(word, PKT_DSC_BREAK)); + DPRINT("\t\tWait = %lld \n", GET_FIELD(word, PKT_DSC_WAIT)); + DPRINT("\t\tSegment Src Addr = %llx \n", + (GET_FIELD(word, PKT_DSC_SEGADDR) << 5) & 0xffffffffffULL); + DPRINT("\t\tSRTCP = %lld \n", GET_FIELD(word, PKT_DSC_SRTCP)); + DPRINT("\t\tGlobal Src Offset = %lld \n", + GET_FIELD(word, PKT_DSC_SEGOFFSET)); + + word = desc->pkt_desc.dstDataSettings; + DPRINT("\tdstDataSettings: %llx \n", word); + DPRINT("\t\tArc4 Byte Count = %lld \n", GET_FIELD(word, + PKT_DSC_ARC4BYTECOUNT)); + DPRINT("\t\tSym Operation = %lld \n", GET_FIELD(word, PKT_DSC_SYM_OP)); + DPRINT("\t\tCipher Offset = %lld \n", GET_FIELD(word, PKT_DSC_CPHROFF)); + DPRINT("\t\tHash Offset = %lld \n", GET_FIELD(word, PKT_DSC_HASHOFF)); + DPRINT("\t\tHash Source = %lld \n", GET_FIELD(word, PKT_DSC_HASHSRC)); + DPRINT("\t\tChecksum Offset = %lld \n", GET_FIELD(word, + PKT_DSC_CKSUMOFF)); + DPRINT("\t\tChecksum Source = %lld \n", GET_FIELD(word, + PKT_DSC_CKSUMSRC)); + DPRINT("\t\tCipher Dest Addr = %llx \n", GET_FIELD(word, + PKT_DSC_CPHR_DST_ADDR)); + DPRINT("\t\tCipher Dest Dword = %lld \n", GET_FIELD(word, + PKT_DSC_CPHR_DST_DWOFFSET)); + DPRINT("\t\tCipher Dest Offset= %lld \n", GET_FIELD(word, + PKT_DSC_CPHR_DST_OFFSET)); + word = desc->pkt_desc.authDstNonceLow; + DPRINT("\tauthDstNonceLow: %llx \n", word); + DPRINT("\t\tNonce Low 24 = %lld \n", GET_FIELD(word, + PKT_DSC_NONCE_LOW)); + DPRINT("\t\tauthDst = %llx \n", GET_FIELD(word, + PKT_DSC_AUTH_DST_ADDR)); + DPRINT("\t\tCipher Offset High= %lld \n", GET_FIELD(word, + PKT_DSC_CIPH_OFF_HI)); + word = desc->pkt_desc.ckSumDstNonceHiCFBMaskLLWMask; + DPRINT("\tckSumDstNonceHiCFBMaskLLWMask: %llx \n", word); + DPRINT("\t\tHash Byte off = %lld \n", GET_FIELD(word, PKT_DSC_HASH_BYTE_OFF)); + DPRINT("\t\tPacket Len bytes = %lld \n", GET_FIELD(word, PKT_DSC_PKTLEN_BYTES)); + DPRINT("\t\tLast Long Word Mask = %lld \n", GET_FIELD(word, + PKT_DSC_LASTWORD)); + DPRINT("\t\tCipher Dst Address = %llx \n", GET_FIELD(word, + PKT_DSC_CPHR_DST_ADDR)); + DPRINT("\t\tGlobal Dst Offset = %lld \n", GET_FIELD(word, + PKT_DSC_CPHR_DST_OFFSET)); + + DPRINT("CFG_VECTOR = %04x\n", cfg_vector); + DPRINT("\n\n"); +} + +#endif + + + +/* This function is called from an interrupt handler */ +void +xlr_sec_msgring_handler(int bucket, int size, int code, int stid, + struct msgrng_msg *msg, void *data) +{ + uint64_t error; + uint64_t addr, sec_eng, sec_pipe; + xlr_sec_io_pt op = NULL; + symkey_desc_pt desc = NULL; + struct xlr_sec_session *ses = NULL; + 
struct xlr_sec_command *cmd = NULL;
+
+	if (code != MSGRNG_CODE_SEC) {
+		panic("xlr_sec_msgring_handler: bad code = %d,"
+		    " expected code = %d\n",
+		    code, MSGRNG_CODE_SEC);
+	}
+	if ((stid < MSGRNG_STNID_SEC0) || (stid > MSGRNG_STNID_PK0)) {
+		panic("xlr_sec_msgring_handler: bad stn id = %d, expect %d - %d\n",
+		    stid, MSGRNG_STNID_SEC0, MSGRNG_STNID_PK0);
+	}
+	/*
+	 * The Submit() operation encodes the engine and pipe in these two
+	 * separate fields.  This allows us to verify the result type against
+	 * the submitted operation type.
+	 */
+	sec_eng = GET_FIELD(msg->msg0, MSG_CTL_OP_TYPE);
+	sec_pipe = GET_FIELD(msg->msg1, MSG_CTL_OP_TYPE);
+
+	error = msg->msg0 >> 40 & 0x1ff;
+	if (error)
+		printf("ctrl error = 0x%llx\n", error);
+	error = msg->msg1 >> 40 & 0x1ff;
+	if (error)
+		printf("data error = 0x%llx\n", error);
+
+	XLR_SEC_CMD_DIAG("[%s]: eng=%lld pipe=%lld\n",
+	    __FUNCTION__, sec_eng, sec_pipe);
+
+	/* Symmetric Key Operation ? */
+	if (sec_eng == MSG0_CTL_OP_ENGINE_SYMKEY) {
+
+		/*
+		 * The data descriptor address allows us to associate the
+		 * response with the submitted operation.  The address is a
+		 * 40-bit cacheline-aligned address; bits 0-4 must be zeroed
+		 * since they are used for the engine and pipe Id.
+		 */
+		addr = GET_FIELD(msg->msg1, MSG_RSLT_DATA_DSC_ADDR);
+
+		addr = addr & ~((1 << 5) - 1);
+		if (!addr) {
+			panic("[%s:STNID_SEC]: NULL symkey addr!\n", __FUNCTION__);
+
+		}
+		/*
+		 * The address points to the data descriptor.  The operation
+		 * descriptor is defined with the 32-byte cacheline size in
+		 * mind, which allows the code to use this address to
+		 * reference the symkey descriptor. (ref: xlr_sec_desc.h)
+		 */
+		addr = addr - sizeof(OperationDescriptor_t);
+		desc = (symkey_desc_pt) MIPS_PHYS_TO_KSEG0(addr);
+
+		if (!desc) {
+			printf("\nerror : not getting desc back correctly \n");
+			panic("[%s:STNID_SEC]: NULL symkey data descriptor!\n", __FUNCTION__);
+		}
+		ses = (struct xlr_sec_session *)desc->ses;
+		if (!ses) {
+			printf("\n error : not getting ses back correctly \n");
+			panic("[%s:STNID_SEC]: NULL symkey session!\n", __FUNCTION__);
+		}
+		cmd = &ses->cmd;
+		if (!cmd) {
+			printf("\n error : not getting cmd back correctly \n");
+			panic("[%s:STNID_SEC]: NULL symkey command!\n", __FUNCTION__);
+		}
+		op = &cmd->op;
+		if (!op) {
+			printf("\n error : not getting op back correctly \n");
+			panic("[%s:STNID_SEC]: NULL symkey operation!\n", __FUNCTION__);
+		}
+		XLR_SEC_CMD_DIAG("[%s:STNID_SEC]: addr=0x%llx desc=%p alloc=%p \n",
+		    __FUNCTION__, addr, desc, desc->alloc);
+
+		XLR_SEC_CMD_DIAG("[%s:STNID_SEC]: op_ctl=%p phys_self=%llx stn_id=%d \n",
+		    __FUNCTION__, &desc->op_ctl, desc->op_ctl.phys_self,
+		    desc->op_ctl.stn_id);
+
+		if (addr != desc->op_ctl.phys_self) {
+			XLR_SEC_CMD_DIAG("[%s:STNID_SEC]: Control Descriptor fails Self-Verify !\n",
+			    __FUNCTION__);
+			printf("[%s:STNID_SEC]: Control Descriptor fails Self-Verify !\n",
+			    __FUNCTION__);
+			printf("[%s:STNID_SEC]: addr=0x%llx desc=%p alloc=%p \n",
+			    __FUNCTION__, (unsigned long long)addr, desc, desc->alloc);
+			printf("[%s:STNID_SEC]: op_ctl=%p phys_self=%llx stn_id=%d \n",
+			    __FUNCTION__, &desc->op_ctl, (unsigned long long)desc->op_ctl.phys_self,
+			    desc->op_ctl.stn_id);
+
+		}
+		if (desc->op_ctl.stn_id != MSGRNG_STNID_SEC0 &&
+		    desc->op_ctl.stn_id != MSGRNG_STNID_SEC1) {
+			XLR_SEC_CMD_DIAG("[%s:STNID_SEC]: Operation Type Mismatch !\n",
+			    __FUNCTION__);
+			printf("[%s:STNID_SEC]: Operation Type Mismatch !\n",
+			    __FUNCTION__);
+			printf("[%s:STNID_SEC]: addr=0x%llx desc=%p alloc=%p \n",
+			    __FUNCTION__,
(unsigned long long)addr, desc, desc->alloc); + printf("[%s:STNID_SEC]: op_ctl=%p phys_self=%llx stn_id=%d \n", + __FUNCTION__, &desc->op_ctl, (unsigned long long)desc->op_ctl.phys_self, + desc->op_ctl.stn_id); + } + desc->ctl_result = GET_FIELD(msg->msg0, MSG_RSLT_CTL_INST_ERR); + desc->data_result = GET_FIELD(msg->msg1, MSG_RSLT_DATA_INST_ERR); + + XLR_SEC_CMD_DIAG("[%s:STNID_SEC]: cpu=%d ctl_result=0x%llx data_result=%llx\n", + __FUNCTION__, desc->op_ctl.cpu, + desc->ctl_result, desc->data_result); + + } +#if 0 + else if (sec_eng == MSG0_CTL_OP_ENGINE_PUBKEY) { + pubkey_desc_pt desc; + + if (sec_pipe != MSG1_CTL_OP_PUBKEY_PIPE0) { + /* response to uc load */ + /* + * XLR_SEC_CMD_DIAG("[%s:STNID_SEC]: ecc cpu=%d + * ctl_result=0x%llx data_result=%llx\n", + * __FUNCTION__, desc->op_ctl.cpu, desc->ctl_result, + * desc->data_result); + */ + return; + } + /* + * The data descriptor address allows us to associate the + * response with the submitted operation. Address is 40-bit + * cacheline aligned address. We need to zero bit 0-4 since + * they are used for the engine and pipe Id. + */ + addr = GET_FIELD(msg->msg0, PUBKEY_RSLT_CTL_SRCADDR); + addr = addr & ~((1 << 5) - 1); + if (!addr) { + panic("[%s:STNID_SEC]: NULL pubkey ctrl desc!\n", __FUNCTION__); + } + /* + * The adddress points to the data descriptor. The operation + * descriptor is defined with the 32-byte cacheline size in + * mind. It allows the code to use this address to + * reference the symkey descriptor. (ref: xlr_sec_desc.h) + */ + addr = addr - sizeof(OperationDescriptor_t); + + /* Get pointer to pubkey Descriptor */ + desc = (pubkey_desc_pt) (unsigned long)addr; + if (!desc) { + panic("[%s:STNID_SEC]: NULL pubkey data descriptor!\n", __FUNCTION__); + } + XLR_SEC_CMD_DIAG("[%s:STNID_PK0]: addr=0x%llx desc=%p alloc=%p \n", + __FUNCTION__, addr, desc, desc->alloc); + + XLR_SEC_CMD_DIAG("[%s:STNID_PK0]: op_ctl=%p phys_self=%llx stn_id=%d \n", + __FUNCTION__, &desc->op_ctl, desc->op_ctl.phys_self, + desc->op_ctl.stn_id); + + if (addr != desc->op_ctl.phys_self) { + XLR_SEC_CMD_DIAG("[%s:STNID_PK0]: Control Descriptor fails Self-Verify !\n", + __FUNCTION__); + } + if (desc->op_ctl.stn_id != msgrng_stnid_pk0) { + XLR_SEC_CMD_DIAG("[%s:STNID_PK0]: Operation Type Mismatch ! 
\n", + __FUNCTION__); + } + desc->ctl_result = GET_FIELD(msg->msg0, PUBKEY_RSLT_CTL_ERROR); + desc->data_result = GET_FIELD(msg->msg1, PUBKEY_RSLT_DATA_ERROR); + + XLR_SEC_CMD_DIAG("[%s:STNID_PK0]: ctl_result=0x%llx data_result=%llx\n", + __FUNCTION__, desc->ctl_result, desc->data_result); + + } +#endif + else { + printf("[%s]: HANDLER bad id = %d\n", __FUNCTION__, stid); + } +#ifdef RMI_SEC_DEBUG + if (ses->multi_frag_flag) { + int i; + char *ptr; + + printf("\n RETURNED DATA: \n"); + + ptr = (char *)(unsigned long)(desc->user.aligned_dest + cmd->op.cipher_offset); + for (i = 0; i < SEC_MAX_FRAG_LEN; i++) { + printf("%c ", (char)*ptr++); + if ((i % 10) == 0) + printf("\n"); + } + + printf("second desc\n"); + ptr = (char *)(unsigned long)(desc->next_dest_buf); + for (i = 0; i < desc->next_src_len; i++) { + printf("%c ", (char)*ptr++); + if ((i % 10) == 0) + printf("\n"); + } + } +#endif + + + /* Copy cipher-data to User-space */ + if (op->cipher_type != XLR_SEC_CIPHER_TYPE_NONE) { + + size = op->dest_buf_size; + + /* DEBUG -dpk */ + XLR_SEC_CMD_DIAG("cipher: to_addr=%p from_addr=%p size=%d \n", + desc->user.user_dest, desc->user.aligned_dest, size); + + if (ses->multi_frag_flag) { + crypto_copyback(cmd->crp->crp_flags, cmd->crp->crp_buf, 0, + SEC_MAX_FRAG_LEN, (caddr_t)(long)desc->user.aligned_dest + op->cipher_offset); + crypto_copyback(cmd->crp->crp_flags, cmd->crp->crp_buf + SEC_MAX_FRAG_LEN, 0, + desc->next_src_len, (caddr_t)(long)desc->next_dest_buf); + crypto_done(cmd->crp); + } else { + crypto_copyback(cmd->crp->crp_flags, cmd->crp->crp_buf, 0, + cmd->op.dest_buf_size, (caddr_t)(long)desc->user.aligned_dest + op->cipher_offset); + crypto_done(cmd->crp); + + } + + + } + /* Copy digest to User-space */ + if (op->digest_type != XLR_SEC_DIGEST_TYPE_NONE) { + + int offset = 0; + + switch (op->digest_type) { + case XLR_SEC_DIGEST_TYPE_MD5: + size = XLR_SEC_MD5_LENGTH; + break; + case XLR_SEC_DIGEST_TYPE_SHA1: + size = XLR_SEC_SHA1_LENGTH; + break; + case XLR_SEC_DIGEST_TYPE_SHA256: + size = XLR_SEC_SHA256_LENGTH; + break; + case XLR_SEC_DIGEST_TYPE_SHA384: + size = XLR_SEC_SHA384_LENGTH; + break; + case XLR_SEC_DIGEST_TYPE_SHA512: + size = XLR_SEC_SHA512_LENGTH; + break; + case XLR_SEC_DIGEST_TYPE_GCM: + size = XLR_SEC_GCM_LENGTH; + break; + case XLR_SEC_DIGEST_TYPE_KASUMI_F9: + offset = 4; + size = XLR_SEC_KASUMI_F9_RESULT_LENGTH; + break; + default: + size = 0; + } + + XLR_SEC_CMD_DIAG("digest: to_addr=%p from_addr=%p size=%d \n", + desc->user.user_auth, desc->user.aligned_auth, size); + memcpy(desc->user.user_auth, desc->user.aligned_auth + offset, size); + op->auth_dest = (uint64_t) (unsigned long)desc->user.user_auth; + } + if (op->cipher_type == XLR_SEC_CIPHER_TYPE_ARC4 && + op->rc4_savestate) { + + size = XLR_SEC_MAX_RC4_STATE_SIZE; + + XLR_SEC_CMD_DIAG("state: to_addr=%p from_addr=%p size=%d \n", + desc->user.user_state, desc->user.aligned_state, size); + op->rc4_state = (uint64_t) (unsigned long)desc->user.user_state; + } + return; +} diff --git a/sys/mips/rmi/dev/sec/rmilib.h b/sys/mips/rmi/dev/sec/rmilib.h new file mode 100644 index 000000000000..060b9db1cbd8 --- /dev/null +++ b/sys/mips/rmi/dev/sec/rmilib.h @@ -0,0 +1,997 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * RMI_BSD */ + +#ifndef _RMILIB_H_ +#define _RMILIB_H_ + +#include +#include + +/*#define XLR_SEC_CMD_DEBUG*/ + +#ifdef XLR_SEC_CMD_DEBUG +#define DPRINT printf +#define XLR_SEC_CMD_DIAG(fmt, args...) { \ + DPRINT(fmt, ##args); \ + } +#define XLR_SEC_CMD_DIAG_SYM_DESC(desc, vec) { \ + decode_symkey_desc ((desc), (vec)); \ + } +#else +#define DPRINT(fmt, args...) +#define XLR_SEC_CMD_DIAG(fmt, args...) +#define XLR_SEC_CMD_DIAG_SYM_DESC(desc, vec) +#endif + + + + + + +/* +#include + +#define OS_ALLOC_KERNEL(size) kmalloc((size), GFP_KERNEL) +#define virt_to_phys(x) vtophys((vm_offset_t)(x)) +*/ +/* + * Cryptographic parameter definitions + */ +#define XLR_SEC_DES_KEY_LENGTH 8 /* Bytes */ +#define XLR_SEC_3DES_KEY_LENGTH 24 /* Bytes */ +#define XLR_SEC_AES128_KEY_LENGTH 16 /* Bytes */ +#define XLR_SEC_AES192_KEY_LENGTH 24 /* Bytes */ +#define XLR_SEC_AES256_KEY_LENGTH 32 /* Bytes */ +#define XLR_SEC_AES128F8_KEY_LENGTH 32 /* Bytes */ +#define XLR_SEC_AES192F8_KEY_LENGTH 48 /* Bytes */ +#define XLR_SEC_AES256F8_KEY_LENGTH 64 /* Bytes */ +#define XLR_SEC_KASUMI_F8_KEY_LENGTH 16 /* Bytes */ +#define XLR_SEC_MAX_CRYPT_KEY_LENGTH XLR_SEC_AES256F8_KEY_LENGTH + + +#define XLR_SEC_DES_IV_LENGTH 8 /* Bytes */ +#define XLR_SEC_AES_IV_LENGTH 16 /* Bytes */ +#define XLR_SEC_ARC4_IV_LENGTH 0 /* Bytes */ +#define XLR_SEC_KASUMI_F8_IV_LENGTH 16 /* Bytes */ +#define XLR_SEC_MAX_IV_LENGTH 16 /* Bytes */ +#define XLR_SEC_IV_LENGTH_BYTES 8 /* Bytes */ + +#define XLR_SEC_AES_BLOCK_SIZE 16 /* Bytes */ +#define XLR_SEC_DES_BLOCK_SIZE 8 /* Bytes */ +#define XLR_SEC_3DES_BLOCK_SIZE 8 /* Bytes */ + +#define XLR_SEC_MD5_BLOCK_SIZE 64 /* Bytes */ +#define XLR_SEC_SHA1_BLOCK_SIZE 64 /* Bytes */ +#define XLR_SEC_SHA256_BLOCK_SIZE 64 /* Bytes */ +#define XLR_SEC_SHA384_BLOCK_SIZE 128 /* Bytes */ +#define XLR_SEC_SHA512_BLOCK_SIZE 128 /* Bytes */ +#define XLR_SEC_GCM_BLOCK_SIZE 16 /* XXX: Bytes */ +#define XLR_SEC_KASUMI_F9_BLOCK_SIZE 16 /* XXX: Bytes */ +#define XLR_SEC_MAX_BLOCK_SIZE 64 /* Max of MD5/SHA */ +#define XLR_SEC_MD5_LENGTH 16 /* Bytes */ +#define XLR_SEC_SHA1_LENGTH 20 /* Bytes */ +#define XLR_SEC_SHA256_LENGTH 32 /* Bytes */ +#define XLR_SEC_SHA384_LENGTH 64 /* Bytes */ +#define 
XLR_SEC_SHA512_LENGTH 64 /* Bytes */ +#define XLR_SEC_GCM_LENGTH 16 /* Bytes */ +#define XLR_SEC_KASUMI_F9_LENGTH 16 /* Bytes */ +#define XLR_SEC_KASUMI_F9_RESULT_LENGTH 4 /* Bytes */ +#define XLR_SEC_HMAC_LENGTH 64 /* Max of MD5/SHA/SHA256 */ +#define XLR_SEC_MAX_AUTH_KEY_LENGTH XLR_SEC_SHA512_BLOCK_SIZE +#define XLR_SEC_MAX_RC4_STATE_SIZE 264 /* char s[256], int i, int j */ + +/* Status code is used by the SRL to indicate status */ +typedef unsigned int xlr_sec_status_t; + +/* + * Status codes + */ +#define XLR_SEC_STATUS_SUCCESS 0 +#define XLR_SEC_STATUS_NO_DEVICE -1 +#define XLR_SEC_STATUS_TIMEOUT -2 +#define XLR_SEC_STATUS_INVALID_PARAMETER -3 +#define XLR_SEC_STATUS_DEVICE_FAILED -4 +#define XLR_SEC_STATUS_DEVICE_BUSY -5 +#define XLR_SEC_STATUS_NO_RESOURCE -6 +#define XLR_SEC_STATUS_CANCELLED -7 + +/* + * Flags + */ +#define XLR_SEC_FLAGS_HIGH_PRIORITY 1 + +/* Error code is used to indicate any errors */ +typedef int xlr_sec_error_t; + +/* + */ +#define XLR_SEC_ERR_NONE 0 +#define XLR_SEC_ERR_CIPHER_OP -1 +#define XLR_SEC_ERR_CIPHER_TYPE -2 +#define XLR_SEC_ERR_CIPHER_MODE -3 +#define XLR_SEC_ERR_CIPHER_INIT -4 +#define XLR_SEC_ERR_DIGEST_TYPE -5 +#define XLR_SEC_ERR_DIGEST_INIT -6 +#define XLR_SEC_ERR_DIGEST_SRC -7 +#define XLR_SEC_ERR_CKSUM_TYPE -8 +#define XLR_SEC_ERR_CKSUM_SRC -9 +#define XLR_SEC_ERR_ALLOC -10 +#define XLR_SEC_ERR_CONTROL_VECTOR -11 +#define XLR_SEC_ERR_LOADHMACKEY_MODE -12 +#define XLR_SEC_ERR_PADHASH_MODE -13 +#define XLR_SEC_ERR_HASHBYTES_MODE -14 +#define XLR_SEC_ERR_NEXT_MODE -15 +#define XLR_SEC_ERR_PKT_IV_MODE -16 +#define XLR_SEC_ERR_LASTWORD_MODE -17 +#define XLR_SEC_ERR_PUBKEY_OP -18 +#define XLR_SEC_ERR_SYMKEY_MSGSND -19 +#define XLR_SEC_ERR_PUBKEY_MSGSND -20 +#define XLR_SEC_ERR_SYMKEY_GETSEM -21 +#define XLR_SEC_ERR_PUBKEY_GETSEM -22 + +/* + * Descriptor Vector quantities + * (helps to identify descriptor type per operation) + */ +#define XLR_SEC_VECTOR_CIPHER_DES 0x0001 +#define XLR_SEC_VECTOR_CIPHER_3DES 0x0002 +#define XLR_SEC_VECTOR_CIPHER_AES128 0x0004 +#define XLR_SEC_VECTOR_CIPHER_AES192 0x0008 +#define XLR_SEC_VECTOR_CIPHER_AES256 0x0010 +#define XLR_SEC_VECTOR_CIPHER_ARC4 0x0020 +#define XLR_SEC_VECTOR_CIPHER_AES (XLR_SEC_VECTOR_CIPHER_AES128 | \ + XLR_SEC_VECTOR_CIPHER_AES192 | \ + XLR_SEC_VECTOR_CIPHER_AES256) +#define XLR_SEC_VECTOR_CIPHER (XLR_SEC_VECTOR_CIPHER_DES | \ + XLR_SEC_VECTOR_CIPHER_3DES | \ + XLR_SEC_VECTOR_CIPHER_AES128 | \ + XLR_SEC_VECTOR_CIPHER_AES192 | \ + XLR_SEC_VECTOR_CIPHER_AES256 | \ + XLR_SEC_VECTOR_CIPHER_ARC4) + +#define XLR_SEC_VECTOR_HMAC 0x0040 +#define XLR_SEC_VECTOR_MAC 0x0080 +#define XLR_SEC_VECTOR_MODE_CTR_CFB 0x0100 +#define XLR_SEC_VECTOR_MODE_ECB_CBC_OFB 0x0200 +#define XLR_SEC_VECTOR_MODE_ECB_CBC 0x0400 +#define XLR_SEC_VECTOR_STATE 0x0800 +#define XLR_SEC_VECTOR_CIPHER_KASUMI_F8 0x01000 +#define XLR_SEC_VECTOR_HMAC2 0x02000 +#define XLR_SEC_VECTOR_GCM 0x04000 +#define XLR_SEC_VECTOR_F9 0x08000 +#define XLR_SEC_VECTOR_MODE_F8 0x10000 + +#define XLR_SEC_VECTOR_CIPHER_ARC4__HMAC \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_HMAC) +#define XLR_SEC_VECTOR_CIPHER_ARC4__STATE \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_STATE) +#define XLR_SEC_VECTOR_CIPHER_ARC4__HMAC__STATE \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_STATE) + +#define XLR_SEC_VECTOR__CIPHER_DES__HMAC__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_DES | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_DES__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_DES | XLR_SEC_VECTOR_MODE_ECB_CBC) + 
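+/*
+ * These composite vectors are simply the bitwise OR of one cipher bit, an
+ * optional authenticator bit (HMAC/HMAC2/GCM/F9) and a mode bit, so an
+ * equivalent vector can also be built directly.  Illustrative example only:
+ *
+ *	uint32_t vec = XLR_SEC_VECTOR_CIPHER_DES | XLR_SEC_VECTOR_HMAC |
+ *	    XLR_SEC_VECTOR_MODE_ECB_CBC;
+ *	// vec == XLR_SEC_VECTOR__CIPHER_DES__HMAC__MODE_ECB_CBC
+ */
+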
+#define XLR_SEC_VECTOR__CIPHER_3DES__HMAC__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_3DES | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_3DES__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_3DES | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__HMAC__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES128__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES192__HMAC__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES192__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES256__HMAC__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_HMAC | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES256__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR_CIPHER_KASUMI_F8__F9 \ +(XLR_SEC_VECTOR_CIPHER_KASUMI_F8 | XLR_SEC_VECTOR_F9) + +#define XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC \ +(XLR_SEC_VECTOR_CIPHER_KASUMI_F8 | XLR_SEC_VECTOR_HMAC) + +#define XLR_SEC_VECTOR_CIPHER_KASUMI_F8__HMAC2 \ +(XLR_SEC_VECTOR_CIPHER_KASUMI_F8 | XLR_SEC_VECTOR_HMAC2) + +#define XLR_SEC_VECTOR_CIPHER_KASUMI_F8__GCM \ +(XLR_SEC_VECTOR_CIPHER_KASUMI_F8 | XLR_SEC_VECTOR_GCM) + +#define XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2 \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_HMAC2) + +#define XLR_SEC_VECTOR_CIPHER_ARC4__HMAC2__STATE \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_STATE) + +#define XLR_SEC_VECTOR__CIPHER_DES__HMAC2__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_DES | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_3DES__HMAC2__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_3DES | XLR_SEC_VECTOR_HMAC2 | 
XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__HMAC2__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES192__HMAC2__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES256__HMAC2__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_HMAC2 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR_CIPHER_ARC4__GCM \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_GCM) + +#define XLR_SEC_VECTOR_CIPHER_ARC4__GCM__STATE \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_STATE) + +#define XLR_SEC_VECTOR__CIPHER_DES__GCM__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_DES | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_3DES__GCM__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_3DES | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__GCM__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES192__GCM__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES256__GCM__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_GCM | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR_CIPHER_ARC4__F9 \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_F9) + +#define XLR_SEC_VECTOR_CIPHER_ARC4__F9__STATE \ +(XLR_SEC_VECTOR_CIPHER_ARC4 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_STATE) + +#define XLR_SEC_VECTOR__CIPHER_DES__F9__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_DES | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define 
XLR_SEC_VECTOR__CIPHER_3DES__F9__MODE_ECB_CBC \ +(XLR_SEC_VECTOR_CIPHER_3DES | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_ECB_CBC) + +#define XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_CTR_CFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_CTR_CFB) + +#define XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_ECB_CBC_OFB \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_ECB_CBC_OFB) + +#define XLR_SEC_VECTOR__CIPHER_AES128__F9__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES128 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES192__F9__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES192 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_F8) + +#define XLR_SEC_VECTOR__CIPHER_AES256__F9__MODE_F8 \ +(XLR_SEC_VECTOR_CIPHER_AES256 | XLR_SEC_VECTOR_F9 | XLR_SEC_VECTOR_MODE_F8) + +/* + * Cipher Modes + */ +typedef enum { + XLR_SEC_CIPHER_MODE_NONE = 0, + XLR_SEC_CIPHER_MODE_PASS = 1, + XLR_SEC_CIPHER_MODE_ECB, + XLR_SEC_CIPHER_MODE_CBC, + XLR_SEC_CIPHER_MODE_OFB, + XLR_SEC_CIPHER_MODE_CTR, + XLR_SEC_CIPHER_MODE_CFB, + XLR_SEC_CIPHER_MODE_F8 +} XLR_SEC_CIPHER_MODE; + +typedef enum { + XLR_SEC_CIPHER_OP_NONE = 0, + XLR_SEC_CIPHER_OP_ENCRYPT = 1, + XLR_SEC_CIPHER_OP_DECRYPT +} XLR_SEC_CIPHER_OP; + +typedef enum { + XLR_SEC_CIPHER_TYPE_UNSUPPORTED = -1, + XLR_SEC_CIPHER_TYPE_NONE = 0, + XLR_SEC_CIPHER_TYPE_DES, + XLR_SEC_CIPHER_TYPE_3DES, + XLR_SEC_CIPHER_TYPE_AES128, + XLR_SEC_CIPHER_TYPE_AES192, + XLR_SEC_CIPHER_TYPE_AES256, + XLR_SEC_CIPHER_TYPE_ARC4, + XLR_SEC_CIPHER_TYPE_KASUMI_F8 +} XLR_SEC_CIPHER_TYPE; + +typedef enum { + XLR_SEC_CIPHER_INIT_OK = 1, /* Preserve old Keys */ + XLR_SEC_CIPHER_INIT_NK /* Load new Keys */ +} XLR_SEC_CIPHER_INIT; + + +/* + * Hash Modes + */ +typedef enum { + XLR_SEC_DIGEST_TYPE_UNSUPPORTED = -1, + XLR_SEC_DIGEST_TYPE_NONE = 0, + XLR_SEC_DIGEST_TYPE_MD5, + XLR_SEC_DIGEST_TYPE_SHA1, + XLR_SEC_DIGEST_TYPE_SHA256, + XLR_SEC_DIGEST_TYPE_SHA384, + XLR_SEC_DIGEST_TYPE_SHA512, + XLR_SEC_DIGEST_TYPE_GCM, + XLR_SEC_DIGEST_TYPE_KASUMI_F9, + XLR_SEC_DIGEST_TYPE_HMAC_MD5, + XLR_SEC_DIGEST_TYPE_HMAC_SHA1, + XLR_SEC_DIGEST_TYPE_HMAC_SHA256, + XLR_SEC_DIGEST_TYPE_HMAC_SHA384, + XLR_SEC_DIGEST_TYPE_HMAC_SHA512, + XLR_SEC_DIGEST_TYPE_HMAC_AES_CBC, + XLR_SEC_DIGEST_TYPE_HMAC_AES_XCBC +} XLR_SEC_DIGEST_TYPE; + +typedef enum { + XLR_SEC_DIGEST_INIT_OLDKEY = 1, /* Preserve old key HMAC key stored in + * ID registers (moot if HASH.HMAC == + * 0) */ + XLR_SEC_DIGEST_INIT_NEWKEY /* Load new HMAC key from memory ctrl + * section to ID registers */ +} XLR_SEC_DIGEST_INIT; + +typedef enum { + XLR_SEC_DIGEST_SRC_DMA = 1, /* DMA channel */ + XLR_SEC_DIGEST_SRC_CPHR /* Cipher if word count exceeded + * Cipher_Offset; else DMA */ +} XLR_SEC_DIGEST_SRC; + +/* + * Checksum Modes + */ +typedef enum { + XLR_SEC_CKSUM_TYPE_NOP = 1, + XLR_SEC_CKSUM_TYPE_IP +} XLR_SEC_CKSUM_TYPE; + +typedef enum { + XLR_SEC_CKSUM_SRC_DMA = 1, + XLR_SEC_CKSUM_SRC_CIPHER +} XLR_SEC_CKSUM_SRC; + +/* + * 
Packet Modes + */ +typedef enum { + XLR_SEC_LOADHMACKEY_MODE_OLD = 1, + XLR_SEC_LOADHMACKEY_MODE_LOAD +} XLR_SEC_LOADHMACKEY_MODE; + +typedef enum { + XLR_SEC_PADHASH_PADDED = 1, + XLR_SEC_PADHASH_PAD +} XLR_SEC_PADHASH_MODE; + +typedef enum { + XLR_SEC_HASHBYTES_ALL8 = 1, + XLR_SEC_HASHBYTES_MSB, + XLR_SEC_HASHBYTES_MSW +} XLR_SEC_HASHBYTES_MODE; + +typedef enum { + XLR_SEC_NEXT_FINISH = 1, + XLR_SEC_NEXT_DO +} XLR_SEC_NEXT_MODE; + +typedef enum { + XLR_SEC_PKT_IV_OLD = 1, + XLR_SEC_PKT_IV_NEW +} XLR_SEC_PKT_IV_MODE; + +typedef enum { + XLR_SEC_LASTWORD_128 = 1, + XLR_SEC_LASTWORD_96MASK, + XLR_SEC_LASTWORD_64MASK, + XLR_SEC_LASTWORD_32MASK +} XLR_SEC_LASTWORD_MODE; + +typedef enum { + XLR_SEC_CFB_MASK_REGULAR_CTR = 0, + XLR_SEC_CFB_MASK_CCMP, + XLR_SEC_CFB_MASK_GCM_WITH_SCI, + XLR_SEC_CFB_MASK_GCM_WITHOUT_SCI +} XLR_SEC_CFB_MASK_MODE; + +/* + * Public Key + */ +typedef enum { + RMIPK_BLKWIDTH_512 = 1, + RMIPK_BLKWIDTH_1024 +} RMIPK_BLKWIDTH_MODE; + +typedef enum { + RMIPK_LDCONST_OLD = 1, + RMIPK_LDCONST_NEW +} RMIPK_LDCONST_MODE; + + +typedef struct xlr_sec_io_s { + unsigned int command; + unsigned int result_status; + unsigned int flags; + unsigned int session_num; + unsigned int use_callback; + unsigned int time_us; + unsigned int user_context[2]; /* usable for anything by caller */ + unsigned int command_context; /* Context (ID) of this command). */ + unsigned char initial_vector[XLR_SEC_MAX_IV_LENGTH]; + unsigned char crypt_key[XLR_SEC_MAX_CRYPT_KEY_LENGTH]; + unsigned char mac_key[XLR_SEC_MAX_AUTH_KEY_LENGTH]; + + XLR_SEC_CIPHER_OP cipher_op; + XLR_SEC_CIPHER_MODE cipher_mode; + XLR_SEC_CIPHER_TYPE cipher_type; + XLR_SEC_CIPHER_INIT cipher_init; + unsigned int cipher_offset; + + XLR_SEC_DIGEST_TYPE digest_type; + XLR_SEC_DIGEST_INIT digest_init; + XLR_SEC_DIGEST_SRC digest_src; + unsigned int digest_offset; + + XLR_SEC_CKSUM_TYPE cksum_type; + XLR_SEC_CKSUM_SRC cksum_src; + unsigned int cksum_offset; + + XLR_SEC_LOADHMACKEY_MODE pkt_hmac; + XLR_SEC_PADHASH_MODE pkt_hash; + XLR_SEC_HASHBYTES_MODE pkt_hashbytes; + XLR_SEC_NEXT_MODE pkt_next; + XLR_SEC_PKT_IV_MODE pkt_iv; + XLR_SEC_LASTWORD_MODE pkt_lastword; + + unsigned int nonce; + unsigned int cfb_mask; + + unsigned int iv_offset; + unsigned short pad_type; + unsigned short rc4_key_len; + + unsigned int num_packets; + unsigned int num_fragments; + + uint64_t source_buf; + unsigned int source_buf_size; + uint64_t dest_buf; + unsigned int dest_buf_size; + + uint64_t auth_dest; + uint64_t cksum_dest; + + unsigned short rc4_loadstate; + unsigned short rc4_savestate; + uint64_t rc4_state; + +} xlr_sec_io_t, *xlr_sec_io_pt; + + +#define XLR_SEC_SESSION(sid) ((sid) & 0x000007ff) +#define XLR_SEC_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff)) + +/* + * Length values for cryptography + */ +/* +#define XLR_SEC_DES_KEY_LENGTH 8 +#define XLR_SEC_3DES_KEY_LENGTH 24 +#define XLR_SEC_MAX_CRYPT_KEY_LENGTH XLR_SEC_3DES_KEY_LENGTH +#define XLR_SEC_IV_LENGTH 8 +#define XLR_SEC_AES_IV_LENGTH 16 +#define XLR_SEC_MAX_IV_LENGTH XLR_SEC_AES_IV_LENGTH +*/ + +#define SEC_MAX_FRAG_LEN 16000 + +struct xlr_sec_command { + uint16_t session_num; + struct cryptop *crp; + struct cryptodesc *enccrd, *maccrd; + + xlr_sec_io_t op; +}; +struct xlr_sec_session { + uint32_t sessionid; + int hs_used; + int hs_mlen; + struct xlr_sec_command cmd; + void *desc_ptr; + uint8_t multi_frag_flag; +}; + +/* + * Holds data specific to rmi security accelerators + */ +struct xlr_sec_softc { + device_t sc_dev; /* device backpointer */ + struct mtx sc_mtx; /* per-instance lock */ + + 
int32_t sc_cid;
+	struct xlr_sec_session *sc_sessions;
+	int sc_nsessions;
+	xlr_reg_t *mmio;
+};
+
+
+/*
+
+union xlr_sec_operand_t {
+	struct mbuf *m;
+	struct uio *io;
+	void *buf;
+}xlr_sec_operand;
+*/
+
+
+/* this is passed to packet setup to optimize */
+#define XLR_SEC_SETUP_OP_CIPHER			0x00000001
+#define XLR_SEC_SETUP_OP_HMAC			0x00000002
+#define XLR_SEC_SETUP_OP_CIPHER_HMAC	(XLR_SEC_SETUP_OP_CIPHER | XLR_SEC_SETUP_OP_HMAC)
+/* this is passed to control_setup to update while preserving existing keys */
+#define XLR_SEC_SETUP_OP_PRESERVE_HMAC_KEY	0x80000000
+#define XLR_SEC_SETUP_OP_PRESERVE_CIPHER_KEY	0x40000000
+#define XLR_SEC_SETUP_OP_UPDATE_KEYS		0x00000010
+#define XLR_SEC_SETUP_OP_FLIP_3DES_KEY		0x00000020
+
+
+/*
+ * Message Ring Specifics
+ */
+
+#define SEC_MSGRING_WORDSIZE	2
+
+
+/*
+ * DMA CREDIT REG - NUMBER OF CREDITS PER PIPE
+ *
+ * rwR 31 30 29 27 26 24 23 21 20 18
+ *     | NA | RSA0Out | Rsa0In | Pipe3Out | Pipe3In | ...
+ *
+ *     17 15 14 12 11 9 8 6 5 3 2 0
+ *     | Pipe2Out | Pipe2In | Pipe1Out | Pipe1In | Pipe0Out | Pipe0In |
+ */
+
+#define SEC_DMA_CREDIT_RSA0_OUT_FOUR	0x20000000
+#define SEC_DMA_CREDIT_RSA0_OUT_TWO	0x10000000
+#define SEC_DMA_CREDIT_RSA0_OUT_ONE	0x08000000
+
+#define SEC_DMA_CREDIT_RSA0_IN_FOUR	0x04000000
+#define SEC_DMA_CREDIT_RSA0_IN_TWO	0x02000000
+#define SEC_DMA_CREDIT_RSA0_IN_ONE	0x01000000
+
+#define SEC_DMA_CREDIT_PIPE3_OUT_FOUR	0x00800000
+#define SEC_DMA_CREDIT_PIPE3_OUT_TWO	0x00400000
+#define SEC_DMA_CREDIT_PIPE3_OUT_ONE	0x00200000
+
+#define SEC_DMA_CREDIT_PIPE3_IN_FOUR	0x00100000
+#define SEC_DMA_CREDIT_PIPE3_IN_TWO	0x00080000
+#define SEC_DMA_CREDIT_PIPE3_IN_ONE	0x00040000
+
+#define SEC_DMA_CREDIT_PIPE2_OUT_FOUR	0x00020000
+#define SEC_DMA_CREDIT_PIPE2_OUT_TWO	0x00010000
+#define SEC_DMA_CREDIT_PIPE2_OUT_ONE	0x00008000
+
+#define SEC_DMA_CREDIT_PIPE2_IN_FOUR	0x00004000
+#define SEC_DMA_CREDIT_PIPE2_IN_TWO	0x00002000
+#define SEC_DMA_CREDIT_PIPE2_IN_ONE	0x00001000
+
+#define SEC_DMA_CREDIT_PIPE1_OUT_FOUR	0x00000800
+#define SEC_DMA_CREDIT_PIPE1_OUT_TWO	0x00000400
+#define SEC_DMA_CREDIT_PIPE1_OUT_ONE	0x00000200
+
+#define SEC_DMA_CREDIT_PIPE1_IN_FOUR	0x00000100
+#define SEC_DMA_CREDIT_PIPE1_IN_TWO	0x00000080
+#define SEC_DMA_CREDIT_PIPE1_IN_ONE	0x00000040
+
+#define SEC_DMA_CREDIT_PIPE0_OUT_FOUR	0x00000020
+#define SEC_DMA_CREDIT_PIPE0_OUT_TWO	0x00000010
+#define SEC_DMA_CREDIT_PIPE0_OUT_ONE	0x00000008
+
+#define SEC_DMA_CREDIT_PIPE0_IN_FOUR	0x00000004
+#define SEC_DMA_CREDIT_PIPE0_IN_TWO	0x00000002
+#define SEC_DMA_CREDIT_PIPE0_IN_ONE	0x00000001
+
+
+/*
+ * Currently, FOUR credits per PIPE
+ * 0x24924924
+ */
+#define SEC_DMA_CREDIT_CONFIG	(SEC_DMA_CREDIT_RSA0_OUT_FOUR | \
+				SEC_DMA_CREDIT_RSA0_IN_FOUR | \
+				SEC_DMA_CREDIT_PIPE3_OUT_FOUR | \
+				SEC_DMA_CREDIT_PIPE3_IN_FOUR | \
+				SEC_DMA_CREDIT_PIPE2_OUT_FOUR | \
+				SEC_DMA_CREDIT_PIPE2_IN_FOUR | \
+				SEC_DMA_CREDIT_PIPE1_OUT_FOUR | \
+				SEC_DMA_CREDIT_PIPE1_IN_FOUR | \
+				SEC_DMA_CREDIT_PIPE0_OUT_FOUR | \
+				SEC_DMA_CREDIT_PIPE0_IN_FOUR)
+
+
+/*
+ * CONFIG2
+ * 31 5 4 3
+ * | NA | PIPE3_DEF_DBL_ISS | PIPE2_DEF_DBL_ISS | ...
+ *
+ * 2 1 0
+ * ... | PIPE1_DEF_DBL_ISS | PIPE0_DEF_DBL_ISS | ROUND_ROBIN_MODE |
+ *
+ * DBL_ISS - mode for SECENG and DMA controller which slows down transfers
+ * (to be conservative; 0=Disable, 1=Enable).
+ * ROUND_ROBIN - mode where all messages are sent to PIPE0 and SECENG
+ * dispatches the operations across PIPE0-PIPE3.
+ * + */ + +#define SEC_CFG2_PIPE3_DBL_ISS_ON 0x00000010 +#define SEC_CFG2_PIPE3_DBL_ISS_OFF 0x00000000 +#define SEC_CFG2_PIPE2_DBL_ISS_ON 0x00000008 +#define SEC_CFG2_PIPE2_DBL_ISS_OFF 0x00000000 +#define SEC_CFG2_PIPE1_DBL_ISS_ON 0x00000004 +#define SEC_CFG2_PIPE1_DBL_ISS_OFF 0x00000000 +#define SEC_CFG2_PIPE0_DBL_ISS_ON 0x00000002 +#define SEC_CFG2_PIPE0_DBL_ISS_OFF 0x00000000 +#define SEC_CFG2_ROUND_ROBIN_ON 0x00000001 +#define SEC_CFG2_ROUND_ROBIN_OFF 0x00000000 + + +enum sec_pipe_config { + + SEC_PIPE_CIPHER_KEY0_L0 = 0x00, + SEC_PIPE_CIPHER_KEY0_HI, + SEC_PIPE_CIPHER_KEY1_LO, + SEC_PIPE_CIPHER_KEY1_HI, + SEC_PIPE_CIPHER_KEY2_LO, + SEC_PIPE_CIPHER_KEY2_HI, + SEC_PIPE_CIPHER_KEY3_LO, + SEC_PIPE_CIPHER_KEY3_HI, + SEC_PIPE_HMAC_KEY0_LO, + SEC_PIPE_HMAC_KEY0_HI, + SEC_PIPE_HMAC_KEY1_LO, + SEC_PIPE_HMAC_KEY1_HI, + SEC_PIPE_HMAC_KEY2_LO, + SEC_PIPE_HMAC_KEY2_HI, + SEC_PIPE_HMAC_KEY3_LO, + SEC_PIPE_HMAC_KEY3_HI, + SEC_PIPE_HMAC_KEY4_LO, + SEC_PIPE_HMAC_KEY4_HI, + SEC_PIPE_HMAC_KEY5_LO, + SEC_PIPE_HMAC_KEY5_HI, + SEC_PIPE_HMAC_KEY6_LO, + SEC_PIPE_HMAC_KEY6_HI, + SEC_PIPE_HMAC_KEY7_LO, + SEC_PIPE_HMAC_KEY7_HI, + SEC_PIPE_NCFBM_LO, + SEC_PIPE_NCFBM_HI, + SEC_PIPE_INSTR_LO, + SEC_PIPE_INSTR_HI, + SEC_PIPE_RSVD0, + SEC_PIPE_RSVD1, + SEC_PIPE_RSVD2, + SEC_PIPE_RSVD3, + + SEC_PIPE_DF_PTRS0, + SEC_PIPE_DF_PTRS1, + SEC_PIPE_DF_PTRS2, + SEC_PIPE_DF_PTRS3, + SEC_PIPE_DF_PTRS4, + SEC_PIPE_DF_PTRS5, + SEC_PIPE_DF_PTRS6, + SEC_PIPE_DF_PTRS7, + + SEC_PIPE_DU_DATA_IN_LO, + SEC_PIPE_DU_DATA_IN_HI, + SEC_PIPE_DU_DATA_IN_CTRL, + SEC_PIPE_DU_DATA_OUT_LO, + SEC_PIPE_DU_DATA_OUT_HI, + SEC_PIPE_DU_DATA_OUT_CTRL, + + SEC_PIPE_STATE0, + SEC_PIPE_STATE1, + SEC_PIPE_STATE2, + SEC_PIPE_STATE3, + SEC_PIPE_STATE4, + SEC_PIPE_INCLUDE_MASK0, + SEC_PIPE_INCLUDE_MASK1, + SEC_PIPE_INCLUDE_MASK2, + SEC_PIPE_INCLUDE_MASK3, + SEC_PIPE_INCLUDE_MASK4, + SEC_PIPE_EXCLUDE_MASK0, + SEC_PIPE_EXCLUDE_MASK1, + SEC_PIPE_EXCLUDE_MASK2, + SEC_PIPE_EXCLUDE_MASK3, + SEC_PIPE_EXCLUDE_MASK4, +}; + + +enum sec_pipe_base_config { + + SEC_PIPE0_BASE = 0x00, + SEC_PIPE1_BASE = 0x40, + SEC_PIPE2_BASE = 0x80, + SEC_PIPE3_BASE = 0xc0 + +}; + +enum sec_rsa_config { + + SEC_RSA_PIPE0_DU_DATA_IN_LO = 0x100, + SEC_RSA_PIPE0_DU_DATA_IN_HI, + SEC_RSA_PIPE0_DU_DATA_IN_CTRL, + SEC_RSA_PIPE0_DU_DATA_OUT_LO, + SEC_RSA_PIPE0_DU_DATA_OUT_HI, + SEC_RSA_PIPE0_DU_DATA_OUT_CTRL, + SEC_RSA_RSVD0, + SEC_RSA_RSVD1, + + SEC_RSA_PIPE0_STATE0, + SEC_RSA_PIPE0_STATE1, + SEC_RSA_PIPE0_STATE2, + SEC_RSA_PIPE0_INCLUDE_MASK0, + SEC_RSA_PIPE0_INCLUDE_MASK1, + SEC_RSA_PIPE0_INCLUDE_MASK2, + SEC_RSA_PIPE0_EXCLUDE_MASK0, + SEC_RSA_PIPE0_EXCLUDE_MASK1, + SEC_RSA_PIPE0_EXCLUDE_MASK2, + SEC_RSA_PIPE0_EVENT_CTR + +}; + + + + +enum sec_config { + + SEC_DMA_CREDIT = 0x140, + SEC_CONFIG1, + SEC_CONFIG2, + SEC_CONFIG3, + +}; + + + +enum sec_debug_config { + + SEC_DW0_DESCRIPTOR0_LO = 0x180, + SEC_DW0_DESCRIPTOR0_HI, + SEC_DW0_DESCRIPTOR1_LO, + SEC_DW0_DESCRIPTOR1_HI, + SEC_DW1_DESCRIPTOR0_LO, + SEC_DW1_DESCRIPTOR0_HI, + SEC_DW1_DESCRIPTOR1_LO, + SEC_DW1_DESCRIPTOR1_HI, + SEC_DW2_DESCRIPTOR0_LO, + SEC_DW2_DESCRIPTOR0_HI, + SEC_DW2_DESCRIPTOR1_LO, + SEC_DW2_DESCRIPTOR1_HI, + SEC_DW3_DESCRIPTOR0_LO, + SEC_DW3_DESCRIPTOR0_HI, + SEC_DW3_DESCRIPTOR1_LO, + SEC_DW3_DESCRIPTOR1_HI, + + SEC_STATE0, + SEC_STATE1, + SEC_STATE2, + SEC_INCLUDE_MASK0, + SEC_INCLUDE_MASK1, + SEC_INCLUDE_MASK2, + SEC_EXCLUDE_MASK0, + SEC_EXCLUDE_MASK1, + SEC_EXCLUDE_MASK2, + SEC_EVENT_CTR + +}; + + +enum sec_msgring_bucket_config { + + SEC_BIU_CREDITS = 0x308, + + SEC_MSG_BUCKET0_SIZE = 0x320, + SEC_MSG_BUCKET1_SIZE, + 
SEC_MSG_BUCKET2_SIZE, + SEC_MSG_BUCKET3_SIZE, + SEC_MSG_BUCKET4_SIZE, + SEC_MSG_BUCKET5_SIZE, + SEC_MSG_BUCKET6_SIZE, + SEC_MSG_BUCKET7_SIZE, +}; + +enum sec_msgring_credit_config { + + SEC_CC_CPU0_0 = 0x380, + SEC_CC_CPU1_0 = 0x388, + SEC_CC_CPU2_0 = 0x390, + SEC_CC_CPU3_0 = 0x398, + SEC_CC_CPU4_0 = 0x3a0, + SEC_CC_CPU5_0 = 0x3a8, + SEC_CC_CPU6_0 = 0x3b0, + SEC_CC_CPU7_0 = 0x3b8 + +}; + +enum sec_engine_id { + SEC_PIPE0, + SEC_PIPE1, + SEC_PIPE2, + SEC_PIPE3, + SEC_RSA +}; + +enum sec_cipher { + SEC_AES256_MODE_HMAC, + SEC_AES256_MODE, + SEC_AES256_HMAC, + SEC_AES256, + SEC_AES192_MODE_HMAC, + SEC_AES192_MODE, + SEC_AES192_HMAC, + SEC_AES192, + SEC_AES128_MODE_HMAC, + SEC_AES128_MODE, + SEC_AES128_HMAC, + SEC_AES128, + SEC_DES_HMAC, + SEC_DES, + SEC_3DES, + SEC_3DES_HMAC, + SEC_HMAC +}; + +enum sec_msgrng_msg_ctrl_config { + SEC_EOP = 5, + SEC_SOP = 6, +}; + + + +void +xlr_sec_init(struct xlr_sec_softc *sc); + +int +xlr_sec_setup(struct xlr_sec_session *ses, + struct xlr_sec_command *cmd, symkey_desc_pt desc); + +symkey_desc_pt xlr_sec_allocate_desc(void *); + +#endif diff --git a/sys/mips/rmi/dev/sec/rmisec.c b/sys/mips/rmi/dev/sec/rmisec.c new file mode 100644 index 000000000000..ad20e0955efd --- /dev/null +++ b/sys/mips/rmi/dev/sec/rmisec.c @@ -0,0 +1,603 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * RMI_BSD */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include + +/*#define RMI_SEC_DEBUG */ + + +void xlr_sec_print_data(struct cryptop *crp); + +static int xlr_sec_newsession(void *arg, uint32_t * sidp, struct cryptoini *cri); +static int xlr_sec_freesession(void *arg, uint64_t tid); +static int xlr_sec_process(void *arg, struct cryptop *crp, int hint); + + +static int xlr_sec_probe(device_t); +static int xlr_sec_attach(device_t); +static int xlr_sec_detach(device_t); + + +static device_method_t xlr_sec_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, xlr_sec_probe), + DEVMETHOD(device_attach, xlr_sec_attach), + DEVMETHOD(device_detach, xlr_sec_detach), + + /* bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_driver_added, bus_generic_driver_added), + + {0, 0} +}; + +static driver_t xlr_sec_driver = { + "rmisec", + xlr_sec_methods, + sizeof(struct xlr_sec_softc) +}; +static devclass_t xlr_sec_devclass; + +DRIVER_MODULE(rmisec, iodi, xlr_sec_driver, xlr_sec_devclass, 0, 0); +MODULE_DEPEND(rmisec, crypto, 1, 1, 1); + + + +static int +xlr_sec_probe(device_t dev) +{ + return (BUS_PROBE_DEFAULT); + +} + + +/* + * Attach an interface that successfully probed. + */ +static int +xlr_sec_attach(device_t dev) +{ + + struct xlr_sec_softc *sc = device_get_softc(dev); + + bzero(sc, sizeof(*sc)); + sc->sc_dev = dev; + + + mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "rmi crypto driver", MTX_DEF); + + sc->sc_cid = crypto_get_driverid(0); + if (sc->sc_cid < 0) { + printf("xlr_sec - error : could not get the driver id\n"); + goto error_exit; + } + if (crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_DES_CBC\n"); + + if (crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_3DES_CBC\n"); + + if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, + xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_AES_CBC\n"); + + if (crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_ARC4\n"); + + + if (crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_MD5\n"); + + if (crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_SHA1\n"); + + if (crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_MD5_HMAC\n"); + + if (crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, + xlr_sec_newsession, xlr_sec_freesession, xlr_sec_process, sc) != 0) + printf("register failed for CRYPTO_SHA1_HMAC\n"); + + + xlr_sec_init(sc); + return (0); + + +error_exit: + return (ENXIO); + +} + + +/* + * Detach an interface that successfully probed. 
+ */ +static int +xlr_sec_detach(device_t dev) +{ + int sesn; + struct xlr_sec_softc *sc = device_get_softc(dev); + struct xlr_sec_session *ses = NULL; + symkey_desc_pt desc; + + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { + ses = &sc->sc_sessions[sesn]; + desc = (symkey_desc_pt) ses->desc_ptr; + free(desc->user.kern_src, M_DEVBUF); + free(desc->user.kern_dest, M_DEVBUF); + free(desc->next_src_buf, M_DEVBUF); + free(desc->next_dest_buf, M_DEVBUF); + free(ses->desc_ptr, M_DEVBUF); + } + + return (0); +} + + + + +/* + * Allocate a new 'session' and return an encoded session id. 'sidp' + * contains our registration id, and should contain an encoded session + * id on successful allocation. + */ +static int +xlr_sec_newsession(void *arg, u_int32_t * sidp, struct cryptoini *cri) +{ + struct cryptoini *c; + struct xlr_sec_softc *sc = arg; + int mac = 0, cry = 0, sesn; + struct xlr_sec_session *ses = NULL; + + + if (sidp == NULL || cri == NULL || sc == NULL) + return (EINVAL); + + + if (sc->sc_sessions == NULL) { + ses = sc->sc_sessions = (struct xlr_sec_session *)malloc( + sizeof(struct xlr_sec_session), M_DEVBUF, M_NOWAIT); + if (ses == NULL) + return (ENOMEM); + + ses->desc_ptr = (void *)xlr_sec_allocate_desc((void *)ses); + if (ses->desc_ptr == NULL) + return (ENOMEM); + + sesn = 0; + ses->sessionid = sesn; + sc->sc_nsessions = 1; + } else { + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { + if (!sc->sc_sessions[sesn].hs_used) { + ses = &sc->sc_sessions[sesn]; + break; + } + } + + if (ses == NULL) { + sesn = sc->sc_nsessions; + ses = (struct xlr_sec_session *)malloc((sesn + 1) * + sizeof(struct xlr_sec_session), M_DEVBUF, M_NOWAIT); + if (ses == NULL) + return (ENOMEM); + bcopy(sc->sc_sessions, ses, sesn * sizeof(struct xlr_sec_session)); + bzero(sc->sc_sessions, sesn * sizeof(struct xlr_sec_session)); + free(sc->sc_sessions, M_DEVBUF); + sc->sc_sessions = ses; + ses = &sc->sc_sessions[sesn]; + ses->sessionid = sesn; + ses->desc_ptr = (void *)xlr_sec_allocate_desc((void *)ses); + if (ses->desc_ptr == NULL) + return (ENOMEM); + sc->sc_nsessions++; + } + } + ses->hs_used = 1; + + + for (c = cri; c != NULL; c = c->cri_next) { + + switch (c->cri_alg) { + case CRYPTO_MD5: + case CRYPTO_SHA1: + case CRYPTO_MD5_HMAC: + case CRYPTO_SHA1_HMAC: + if (mac) + return (EINVAL); + mac = 1; + ses->hs_mlen = c->cri_mlen; + if (ses->hs_mlen == 0) { + switch (c->cri_alg) { + case CRYPTO_MD5: + case CRYPTO_MD5_HMAC: + ses->hs_mlen = 16; + break; + case CRYPTO_SHA1: + case CRYPTO_SHA1_HMAC: + ses->hs_mlen = 20; + break; + } + } + break; + case CRYPTO_DES_CBC: + case CRYPTO_3DES_CBC: + case CRYPTO_AES_CBC: + /* XXX this may read fewer, does it matter? */ + /* + * read_random(ses->hs_iv, c->cri_alg == + * CRYPTO_AES_CBC ? XLR_SEC_AES_IV_LENGTH : + * XLR_SEC_IV_LENGTH); + */ + /* FALLTHROUGH */ + case CRYPTO_ARC4: + if (cry) + return (EINVAL); + cry = 1; + break; + default: + return (EINVAL); + } + } + if (mac == 0 && cry == 0) + return (EINVAL); + + *sidp = XLR_SEC_SID(device_get_unit(sc->sc_dev), sesn); + return (0); +} + +/* + * Deallocate a session. + * XXX this routine should run a zero'd mac/encrypt key into context ram. + * XXX to blow away any keys already stored there. 
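+ * XXX One way to do that, presumably, would be to push one final control
+ * XXX descriptor for the session with all-zero cipher and HMAC keys before
+ * XXX clearing hs_used; that is not implemented here.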
+ */ +static int +xlr_sec_freesession(void *arg, u_int64_t tid) +{ + struct xlr_sec_softc *sc = arg; + int session; + u_int32_t sid = CRYPTO_SESID2LID(tid); + + if (sc == NULL) + return (EINVAL); + + session = XLR_SEC_SESSION(sid); + if (session >= sc->sc_nsessions) + return (EINVAL); + + sc->sc_sessions[session].hs_used = 0; + + return (0); +} + +#ifdef RMI_SEC_DEBUG + +void +xlr_sec_print_data(struct cryptop *crp) +{ + int i, key_len; + struct cryptodesc *crp_desc; + + printf("session id = 0x%llx, crp_ilen = %d, crp_olen=%d \n", + crp->crp_sid, crp->crp_ilen, crp->crp_olen); + + printf("crp_flags = 0x%x\n", crp->crp_flags); + + + printf("crp buf:\n"); + for (i = 0; i < crp->crp_ilen; i++) { + printf("%c ", crp->crp_buf[i]); + if (i % 10 == 0) + printf("\n"); + } + + printf("\n"); + printf("****************** desc ****************\n"); + crp_desc = crp->crp_desc; + printf("crd_skip=%d, crd_len=%d, crd_flags=0x%x, crd_alg=%d\n", + crp_desc->crd_skip, crp_desc->crd_len, crp_desc->crd_flags, crp_desc->crd_alg); + + key_len = crp_desc->crd_klen / 8; + printf("key(%d) :\n", key_len); + for (i = 0; i < key_len; i++) + printf("%d", crp_desc->crd_key[i]); + printf("\n"); + + printf(" IV : \n"); + for (i = 0; i < EALG_MAX_BLOCK_LEN; i++) + printf("%d", crp_desc->crd_iv[i]); + printf("\n"); + + printf("crd_next=%p\n", crp_desc->crd_next); + return; +} + +#endif + + +static int +xlr_sec_process(void *arg, struct cryptop *crp, int hint) +{ + struct xlr_sec_softc *sc = arg; + struct xlr_sec_command *cmd = NULL; + int session, err; + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; + struct xlr_sec_session *ses; + + if (crp == NULL || crp->crp_callback == NULL) { + return (EINVAL); + } + session = XLR_SEC_SESSION(crp->crp_sid); + if (sc == NULL || session >= sc->sc_nsessions) { + err = EINVAL; + goto errout; + } + ses = &sc->sc_sessions[session]; + + cmd = &ses->cmd; + if (cmd == NULL) { + err = ENOMEM; + goto errout; + } + crd1 = crp->crp_desc; + if (crd1 == NULL) { + err = EINVAL; + goto errout; + } + crd2 = crd1->crd_next; + + if (crd2 == NULL) { + if (crd1->crd_alg == CRYPTO_MD5_HMAC || + crd1->crd_alg == CRYPTO_SHA1_HMAC || + crd1->crd_alg == CRYPTO_SHA1 || + crd1->crd_alg == CRYPTO_MD5) { + maccrd = crd1; + enccrd = NULL; + } else if (crd1->crd_alg == CRYPTO_DES_CBC || + crd1->crd_alg == CRYPTO_3DES_CBC || + crd1->crd_alg == CRYPTO_AES_CBC || + crd1->crd_alg == CRYPTO_ARC4) { + maccrd = NULL; + enccrd = crd1; + } else { + err = EINVAL; + goto errout; + } + } else { + if ((crd1->crd_alg == CRYPTO_MD5_HMAC || + crd1->crd_alg == CRYPTO_SHA1_HMAC || + crd1->crd_alg == CRYPTO_MD5 || + crd1->crd_alg == CRYPTO_SHA1) && + (crd2->crd_alg == CRYPTO_DES_CBC || + crd2->crd_alg == CRYPTO_3DES_CBC || + crd2->crd_alg == CRYPTO_AES_CBC || + crd2->crd_alg == CRYPTO_ARC4)) { + maccrd = crd1; + enccrd = crd2; + } else if ((crd1->crd_alg == CRYPTO_DES_CBC || + crd1->crd_alg == CRYPTO_ARC4 || + crd1->crd_alg == CRYPTO_3DES_CBC || + crd1->crd_alg == CRYPTO_AES_CBC) && + (crd2->crd_alg == CRYPTO_MD5_HMAC || + crd2->crd_alg == CRYPTO_SHA1_HMAC || + crd2->crd_alg == CRYPTO_MD5 || + crd2->crd_alg == CRYPTO_SHA1) && + (crd1->crd_flags & CRD_F_ENCRYPT)) { + enccrd = crd1; + maccrd = crd2; + } else { + err = EINVAL; + goto errout; + } + } + + bzero(&cmd->op, sizeof(xlr_sec_io_t)); + + cmd->op.source_buf = (uint64_t) (unsigned long)crp->crp_buf; + cmd->op.source_buf_size = crp->crp_ilen; + if (crp->crp_flags & CRYPTO_F_REL) { + cmd->op.dest_buf = (uint64_t) (unsigned long)crp->crp_buf; + cmd->op.dest_buf_size = 
crp->crp_ilen; + } else { + cmd->op.dest_buf = (uint64_t) (unsigned long)crp->crp_buf; + cmd->op.dest_buf_size = crp->crp_ilen; + } + cmd->op.num_packets = 1; + cmd->op.num_fragments = 1; + + + if (cmd->op.source_buf_size > SEC_MAX_FRAG_LEN) { + ses->multi_frag_flag = 1; + } else { + ses->multi_frag_flag = 0; + } + + if (maccrd) { + cmd->maccrd = maccrd; + cmd->op.cipher_op = XLR_SEC_CIPHER_MODE_PASS; + cmd->op.cipher_mode = XLR_SEC_CIPHER_MODE_NONE; + cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_NONE; + cmd->op.cipher_init = 0; + cmd->op.cipher_offset = 0; + + switch (maccrd->crd_alg) { + case CRYPTO_MD5: + cmd->op.digest_type = XLR_SEC_DIGEST_TYPE_MD5; + cmd->op.digest_init = XLR_SEC_DIGEST_INIT_NEWKEY; + cmd->op.digest_src = XLR_SEC_DIGEST_SRC_DMA; + cmd->op.digest_offset = 0; + + cmd->op.cksum_type = XLR_SEC_CKSUM_TYPE_NOP; + cmd->op.cksum_src = XLR_SEC_CKSUM_SRC_CIPHER; + cmd->op.cksum_offset = 0; + + cmd->op.pkt_hmac = XLR_SEC_LOADHMACKEY_MODE_OLD; + cmd->op.pkt_hash = XLR_SEC_PADHASH_PAD; + cmd->op.pkt_hashbytes = XLR_SEC_HASHBYTES_ALL8; + cmd->op.pkt_next = XLR_SEC_NEXT_FINISH; + cmd->op.pkt_iv = XLR_SEC_PKT_IV_OLD; + cmd->op.pkt_lastword = XLR_SEC_LASTWORD_128; + + + default: + printf("currently not handled\n"); + } + } + if (enccrd) { + cmd->enccrd = enccrd; + +#ifdef RMI_SEC_DEBUG + xlr_sec_print_data(crp); +#endif + + if (enccrd->crd_flags & CRD_F_ENCRYPT) { + cmd->op.cipher_op = XLR_SEC_CIPHER_OP_ENCRYPT; + } else + cmd->op.cipher_op = XLR_SEC_CIPHER_OP_DECRYPT; + + switch (enccrd->crd_alg) { + case CRYPTO_DES_CBC: + case CRYPTO_3DES_CBC: + if (enccrd->crd_alg == CRYPTO_DES_CBC) { + cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_DES; + memcpy(&cmd->op.crypt_key[0], enccrd->crd_key, XLR_SEC_DES_KEY_LENGTH); + } else { + cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_3DES; + //if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) { + memcpy(&cmd->op.crypt_key[0], enccrd->crd_key, XLR_SEC_3DES_KEY_LENGTH); + } + } + + cmd->op.cipher_mode = XLR_SEC_CIPHER_MODE_CBC; + cmd->op.cipher_init = XLR_SEC_CIPHER_INIT_NK; + cmd->op.cipher_offset = XLR_SEC_DES_IV_LENGTH; + + cmd->op.digest_type = XLR_SEC_DIGEST_TYPE_NONE; + cmd->op.digest_init = XLR_SEC_DIGEST_INIT_OLDKEY; + cmd->op.digest_src = XLR_SEC_DIGEST_SRC_DMA; + cmd->op.digest_offset = 0; + + cmd->op.cksum_type = XLR_SEC_CKSUM_TYPE_NOP; + cmd->op.cksum_src = XLR_SEC_CKSUM_SRC_CIPHER; + cmd->op.cksum_offset = 0; + + cmd->op.pkt_hmac = XLR_SEC_LOADHMACKEY_MODE_OLD; + cmd->op.pkt_hash = XLR_SEC_PADHASH_PAD; + cmd->op.pkt_hashbytes = XLR_SEC_HASHBYTES_ALL8; + cmd->op.pkt_next = XLR_SEC_NEXT_FINISH; + cmd->op.pkt_iv = XLR_SEC_PKT_IV_NEW; + cmd->op.pkt_lastword = XLR_SEC_LASTWORD_128; + + //if ((!(enccrd->crd_flags & CRD_F_IV_PRESENT)) && + if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT)) { + memcpy(&cmd->op.initial_vector[0], enccrd->crd_iv, XLR_SEC_DES_IV_LENGTH); + } + break; + + case CRYPTO_AES_CBC: + if (enccrd->crd_alg == CRYPTO_AES_CBC) { + cmd->op.cipher_type = XLR_SEC_CIPHER_TYPE_AES128; + //if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) { + memcpy(&cmd->op.crypt_key[0], enccrd->crd_key, XLR_SEC_AES128_KEY_LENGTH); + } + } + cmd->op.cipher_mode = XLR_SEC_CIPHER_MODE_CBC; + cmd->op.cipher_init = XLR_SEC_CIPHER_INIT_NK; + cmd->op.cipher_offset = XLR_SEC_AES_BLOCK_SIZE; + + cmd->op.digest_type = XLR_SEC_DIGEST_TYPE_NONE; + cmd->op.digest_init = XLR_SEC_DIGEST_INIT_OLDKEY; + cmd->op.digest_src = XLR_SEC_DIGEST_SRC_DMA; + cmd->op.digest_offset = 0; + + cmd->op.cksum_type = XLR_SEC_CKSUM_TYPE_NOP; + cmd->op.cksum_src = XLR_SEC_CKSUM_SRC_CIPHER; + 
cmd->op.cksum_offset = 0; + + cmd->op.pkt_hmac = XLR_SEC_LOADHMACKEY_MODE_OLD; + cmd->op.pkt_hash = XLR_SEC_PADHASH_PAD; + cmd->op.pkt_hashbytes = XLR_SEC_HASHBYTES_ALL8; + cmd->op.pkt_next = XLR_SEC_NEXT_FINISH; + cmd->op.pkt_iv = XLR_SEC_PKT_IV_NEW; + cmd->op.pkt_lastword = XLR_SEC_LASTWORD_128; + + //if (!(enccrd->crd_flags & CRD_F_IV_PRESENT)) { + if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT)) { + memcpy(&cmd->op.initial_vector[0], enccrd->crd_iv, XLR_SEC_AES_BLOCK_SIZE); + } + // + } + break; + } + } + cmd->crp = crp; + cmd->session_num = session; + xlr_sec_setup(ses, cmd, (symkey_desc_pt) ses->desc_ptr); + + return (0); + +errout: + if (cmd != NULL) + free(cmd, M_DEVBUF); + crp->crp_etype = err; + crypto_done(crp); + return (err); +} diff --git a/sys/mips/rmi/dev/sec/stats.h b/sys/mips/rmi/dev/sec/stats.h new file mode 100644 index 000000000000..276f7e9b9991 --- /dev/null +++ b/sys/mips/rmi/dev/sec/stats.h @@ -0,0 +1,469 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * RMI_BSD */ + +#ifndef _STATS_H_ +#define _STATS_H_ + +typedef struct hmac_stats +{ + unsigned long md5_count; + unsigned long long md5_bytes; + unsigned long sha1_count; + unsigned long long sha1_bytes; + unsigned long sha256_count; + unsigned long long sha256_bytes; + unsigned long sha384_count; + unsigned long long sha384_bytes; + unsigned long sha512_count; + unsigned long long sha512_bytes; + unsigned long gcm_count; + unsigned long long gcm_bytes; + unsigned long kasumi_f9_count; + unsigned long long kasumi_f9_bytes; + unsigned long reverts; + unsigned long long reverts_bytes; +} hmac_stats_t, *hmac_stats_pt; + +typedef struct cipher_stats +{ + unsigned long des_encrypts; + unsigned long long des_encrypt_bytes; + unsigned long des_decrypts; + unsigned long long des_decrypt_bytes; + unsigned long des3_encrypts; + unsigned long long des3_encrypt_bytes; + unsigned long des3_decrypts; + unsigned long long des3_decrypt_bytes; + unsigned long aes_encrypts; + unsigned long long aes_encrypt_bytes; + unsigned long aes_decrypts; + unsigned long long aes_decrypt_bytes; + unsigned long arc4_encrypts; + unsigned long long arc4_encrypt_bytes; + unsigned long arc4_decrypts; + unsigned long long arc4_decrypt_bytes; + unsigned long kasumi_f8_encrypts; + unsigned long long kasumi_f8_encrypt_bytes; + unsigned long kasumi_f8_decrypts; + unsigned long long kasumi_f8_decrypt_bytes; + unsigned long reverts; + unsigned long long reverts_bytes; +} cipher_stats_t, *cipher_stats_pt; + + +typedef struct modexp_stats +{ + unsigned long modexp_512s; + unsigned long modexp_1024s; +} modexp_stats_t, *modexp_stats_pt; + +typedef struct ecc_stats +{ + unsigned long ecc_mul; + unsigned long ecc_add; + unsigned long ecc_dbl; + unsigned long ecc_vfy; + unsigned long ecc_bin_mul; + unsigned long ecc_field_bin_inv; + unsigned long ecc_field_bin_mul; + unsigned long ecc_field_bin_add; + unsigned long ecc_field_add; + unsigned long ecc_field_sub; + unsigned long ecc_field_mul; + unsigned long ecc_field_inv; + unsigned long ecc_field_div; + unsigned long ecc_field_red; +} ecc_stats_t, *ecc_stats_pt; + + +typedef struct opt_stats +{ + unsigned long combined; + unsigned long unaligned_auth_dest; + unsigned long sym_failed; + unsigned long modexp_failed; + unsigned long ecc_failed; +} opt_stats_t, *opt_stats_pt; + +typedef struct rmisec_stats +{ + uint32_t sent; + uint32_t received; + uint32_t stats_mask; + uint32_t control_mask; + rwlock_t rmisec_control_lock; + rwlock_t rmisec_stats_lock; + char clear_start[0]; + uint64_t wait_time; + uint32_t max_wait_time; + uint32_t maxsnd_wait_time; + uint32_t wait_count; + hmac_stats_t hmac; + cipher_stats_t cipher; + modexp_stats_t modexp; + ecc_stats_t ecc; + opt_stats_t opt; +} rmisec_stats_t, *rmisec_stats_pt; + + +/* stats routines */ + +static void inline phxdrv_record_sent(rmisec_stats_pt stats) +{ + write_lock(&stats->rmisec_stats_lock); + stats->sent++; + write_unlock(&stats->rmisec_stats_lock); +} + +static void inline phxdrv_record_received(rmisec_stats_pt stats) +{ + write_lock(&stats->rmisec_stats_lock); + stats->received++; + write_unlock(&stats->rmisec_stats_lock); +} + + +static void inline phxdrv_record_des(rmisec_stats_pt stats, int enc, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_DES) { + write_lock(&stats->rmisec_stats_lock); + if (enc) { + stats->cipher.des_encrypts++; + stats->cipher.des_encrypt_bytes += nbytes; + } + else { + stats->cipher.des_decrypts++; + stats->cipher.des_decrypt_bytes += nbytes; + } + 
write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_3des(rmisec_stats_pt stats, int enc, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_3DES) { + write_lock(&stats->rmisec_stats_lock); + if (enc) { + stats->cipher.des3_encrypts++; + stats->cipher.des3_encrypt_bytes += nbytes; + } + else { + stats->cipher.des3_decrypts++; + stats->cipher.des3_decrypt_bytes += nbytes; + } + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_aes(rmisec_stats_pt stats, int enc, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_AES) { + write_lock(&stats->rmisec_stats_lock); + if (enc) { + stats->cipher.aes_encrypts++; + stats->cipher.aes_encrypt_bytes += nbytes; + } + else { + stats->cipher.aes_decrypts++; + stats->cipher.aes_decrypt_bytes += nbytes; + } + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_arc4(rmisec_stats_pt stats, int enc, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_ARC4) { + write_lock(&stats->rmisec_stats_lock); + if (enc) { + stats->cipher.arc4_encrypts++; + stats->cipher.arc4_encrypt_bytes += nbytes; + } + else { + stats->cipher.arc4_decrypts++; + stats->cipher.arc4_decrypt_bytes += nbytes; + } + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_kasumi_f8(rmisec_stats_pt stats, int enc, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_KASUMI_F8) { + write_lock(&stats->rmisec_stats_lock); + if (enc) { + stats->cipher.kasumi_f8_encrypts++; + stats->cipher.kasumi_f8_encrypt_bytes += nbytes; + } + else { + stats->cipher.kasumi_f8_decrypts++; + stats->cipher.kasumi_f8_decrypt_bytes += nbytes; + } + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_modexp(rmisec_stats_pt stats, + int blksize) +{ + if (stats->stats_mask & PHXDRV_PROFILE_MODEXP) { + write_lock(&stats->rmisec_stats_lock); + if (blksize == 512) { + stats->modexp.modexp_512s++; + } + if (blksize == 1024) { + stats->modexp.modexp_1024s++; + } + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_ecc(rmisec_stats_pt stats, PHX_ECC_OP op) +{ + if (stats->stats_mask & PHXDRV_PROFILE_ECC) { + write_lock(&stats->rmisec_stats_lock); + switch (op) { + case PHX_ECC_NOP: + break; + case PHX_ECC_MUL: + stats->ecc.ecc_mul++; + break; + case PHX_ECC_BIN_MUL: + stats->ecc.ecc_bin_mul++; + break; + case PHX_ECC_ADD: + stats->ecc.ecc_add++; + break; + case PHX_ECC_DBL: + stats->ecc.ecc_dbl++; + break; + case PHX_ECC_VFY: + stats->ecc.ecc_vfy++; + break; + case PHX_ECC_FIELD_BIN_INV: + stats->ecc.ecc_field_bin_inv++; + break; + case PHX_ECC_FIELD_BIN_MUL: + stats->ecc.ecc_field_bin_mul++; + break; + case PHX_ECC_FIELD_BIN_ADD: + stats->ecc.ecc_field_bin_add++; + break; + case PHX_ECC_FIELD_ADD: + stats->ecc.ecc_field_add++; + break; + case PHX_ECC_FIELD_SUB: + stats->ecc.ecc_field_sub++; + break; + case PHX_ECC_FIELD_MUL: + stats->ecc.ecc_field_mul++; + break; + case PHX_ECC_FIELD_INV: + stats->ecc.ecc_field_inv++; + break; + case PHX_ECC_FIELD_DIV: + stats->ecc.ecc_field_div++; + break; + case PHX_ECC_FIELD_RED: + stats->ecc.ecc_field_red++; + break; + case PHX_ECC_FIELD: + case PHX_ECC_BIN: + break; + } + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_cipher_revert(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_CPHR_REVERTS) { + write_lock(&stats->rmisec_stats_lock); + stats->cipher.reverts++; + stats->cipher.reverts_bytes += 
nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_hmac_revert(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_HMAC_REVERTS) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.reverts++; + stats->hmac.reverts_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_md5(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_MD5) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.md5_count++; + stats->hmac.md5_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_sha1(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_SHA1) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.sha1_count++; + stats->hmac.sha1_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_sha256(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_SHA256) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.sha256_count++; + stats->hmac.sha256_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_sha384(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_SHA384) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.sha384_count++; + stats->hmac.sha384_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_sha512(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_SHA512) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.sha512_count++; + stats->hmac.sha512_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_gcm(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_GCM) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.gcm_count++; + stats->hmac.gcm_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_kasumi_f9(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_KASUMI_F9) { + write_lock(&stats->rmisec_stats_lock); + stats->hmac.kasumi_f9_count++; + stats->hmac.kasumi_f9_bytes += nbytes; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_unaligned_auth_dest(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_UNALIGNED_AUTH_DEST) { + write_lock(&stats->rmisec_stats_lock); + stats->opt.unaligned_auth_dest++; + write_unlock(&stats->rmisec_stats_lock); + } +} + + +static void inline phxdrv_record_combined(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_COMBINED) { + write_lock(&stats->rmisec_stats_lock); + stats->opt.combined++; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_sym_failed(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_COMBINED) { + write_lock(&stats->rmisec_stats_lock); + stats->opt.sym_failed++; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_modexp_failed(rmisec_stats_pt stats, + int nbytes) +{ + if (stats->stats_mask & PHXDRV_PROFILE_COMBINED) { + write_lock(&stats->rmisec_stats_lock); + stats->opt.modexp_failed++; + write_unlock(&stats->rmisec_stats_lock); + } +} + +static void inline phxdrv_record_ecc_failed(rmisec_stats_pt stats, + int nbytes) +{ + if 
(stats->stats_mask & PHXDRV_PROFILE_COMBINED) { + write_lock(&stats->rmisec_stats_lock); + stats->opt.ecc_failed++; + write_unlock(&stats->rmisec_stats_lock); + } +} + +#endif diff --git a/sys/mips/rmi/dev/xlr/atx_cpld.h b/sys/mips/rmi/dev/xlr/atx_cpld.h new file mode 100644 index 000000000000..157e43ed1d7b --- /dev/null +++ b/sys/mips/rmi/dev/xlr/atx_cpld.h @@ -0,0 +1,53 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * RMI_BSD */ +#ifndef _RMI_ATX_CPLD_H_ +#define _RMI_ATX_CPLD_H_ + +/* + * bit_0 : xgs0 phy reset, bit_1 : xgs1 phy reset, bit_2 : HT reset, bit_3 : + * RTC reset, bit_4 : gmac phy soft reset, bit_5 : gmac phy hard reset, bit_6 + * : board reset, bit_7 : reserved + */ +#define ATX_CPLD_RESET_1 2 + +/* + * bit_0_2 : reserved, bit_3 : turn off xpak_0 tx, bit_4 : turn off xpak_1 + * tx, bit_5 : HT stop (active low), bit_6 : flash program enable, bit_7 : + * compact flash io mode + */ +#define ATX_CPLD_MISC_CTRL 8 + +/* + * bit_0 : reset tcam, bit_1 : reset xpak_0 module, bit_2 : reset xpak_1 + * module, bit_3_7 : reserved + */ +#define ATX_CPLD_RESET_2 9 + +#endif /* _RMI_ATX_CPLD_H_ */ diff --git a/sys/mips/rmi/dev/xlr/rge.c b/sys/mips/rmi/dev/xlr/rge.c new file mode 100644 index 000000000000..e0fa8c15851a --- /dev/null +++ b/sys/mips/rmi/dev/xlr/rge.c @@ -0,0 +1,2748 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * RMI_BSD */ + +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include "opt_device_polling.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define __RMAN_RESOURCE_VISIBLE +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include /* for DELAY */ +#include /* */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + + +#include +#include +#include + +#include +#include + +/* #include "opt_rge.h" */ + +#include "miibus_if.h" + +MODULE_DEPEND(rge, ether, 1, 1, 1); +MODULE_DEPEND(rge, miibus, 1, 1, 1); + +/* #define DEBUG */ +/*#define RX_COPY */ + +#define RGE_TX_THRESHOLD 1024 +#define RGE_TX_Q_SIZE 1024 + +#ifdef DEBUG +#undef dbg_msg +int mac_debug = 1; + +#define dbg_msg(fmt, args...) \ + do {\ + if (mac_debug) {\ + printf("[%s@%d|%s]: cpu_%d: " fmt, \ + __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\ + }\ + } while(0); + +#define DUMP_PACKETS +#else +#undef dbg_msg +#define dbg_msg(fmt, args...) 
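+
+/*
+ * With DEBUG unset, dbg_msg() expands to nothing, so its format string and
+ * arguments are not evaluated at all in non-debug builds.
+ */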
+int mac_debug = 0; + +#endif + +#define MAC_B2B_IPG 88 + +/* frame sizes need to be cacheline aligned */ +#define MAX_FRAME_SIZE 1536 +#define MAX_FRAME_SIZE_JUMBO 9216 + +#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES +#define MAC_PREPAD 0 +#define BYTE_OFFSET 2 +#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES) +#define MAC_CRC_LEN 4 +#define MAX_NUM_MSGRNG_STN_CC 128 + +#define MAX_NUM_DESC 1024 +#define MAX_SPILL_SIZE (MAX_NUM_DESC + 128) + +#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16 + +#define MAX_FRIN_SPILL (MAX_SPILL_SIZE << 2) +#define MAX_FROUT_SPILL (MAX_SPILL_SIZE << 2) +#define MAX_CLASS_0_SPILL (MAX_SPILL_SIZE << 2) +#define MAX_CLASS_1_SPILL (MAX_SPILL_SIZE << 2) +#define MAX_CLASS_2_SPILL (MAX_SPILL_SIZE << 2) +#define MAX_CLASS_3_SPILL (MAX_SPILL_SIZE << 2) + +/***************************************************************** + * Phoenix Generic Mac driver + *****************************************************************/ + +extern uint32_t cpu_ltop_map[32]; + +#ifdef ENABLED_DEBUG +static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE); + +#define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1) +#define port_set_counter(port, counter, value) atomic_set_int(&port_counters[port][(counter)], (value)) +#else +#define port_inc_counter(port, counter) /* Nothing */ +#define port_set_counter(port, counter, value) /* Nothing */ +#endif + +int xlr_rge_tx_prepend[MAXCPU]; +int xlr_rge_tx_done[MAXCPU]; +int xlr_rge_get_p2d_failed[MAXCPU]; +int xlr_rge_msg_snd_failed[MAXCPU]; +int xlr_rge_tx_ok_done[MAXCPU]; +int xlr_rge_rx_done[MAXCPU]; +int xlr_rge_repl_done[MAXCPU]; + +static __inline__ unsigned int +ldadd_wu(unsigned int value, unsigned long *addr) +{ + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "move $8, %2\n" + "move $9, %3\n" + /* "ldaddwu $8, $9\n" */ + ".word 0x71280011\n" + "move %0, $8\n" + ".set pop\n" + : "=&r"(value), "+m"(*addr) + : "0"(value), "r"((unsigned long)addr) + : "$8", "$9"); + + return value; +} + +/* #define mac_stats_add(x, val) ({(x) += (val);}) */ +#define mac_stats_add(x, val) ldadd_wu(val, &x) + + +#define XLR_MAX_CORE 8 +#define RGE_LOCK_INIT(_sc, _name) \ + mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF) +#define RGE_LOCK(_sc) mtx_lock(&(_sc)->rge_mtx) +#define RGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rge_mtx, MA_OWNED) +#define RGE_UNLOCK(_sc) mtx_unlock(&(_sc)->rge_mtx) +#define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx) + +#define XLR_MAX_MACS 8 +#define XLR_MAX_TX_FRAGS 14 +#define MAX_P2D_DESC_PER_PORT 512 +struct p2d_tx_desc { + uint64_t frag[XLR_MAX_TX_FRAGS + 2]; +}; + +#define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc)) + +struct rge_softc *dev_mac[XLR_MAX_MACS]; +static int dev_mac_xgs0; +static int dev_mac_gmac0; + +static int gmac_common_init_done; + + +static int rge_probe(device_t); +static int rge_attach(device_t); +static int rge_detach(device_t); +static int rge_suspend(device_t); +static int rge_resume(device_t); +static void rge_release_resources(struct rge_softc *); +static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int); +static void rge_intr(void *); +static void rge_start_locked(struct ifnet *, int); +static void rge_start(struct ifnet *); +static int rge_ioctl(struct ifnet *, u_long, caddr_t); +static void rge_init(void *); +static void rge_stop(struct rge_softc *); +static void rge_watchdog(struct ifnet *); +static int rge_shutdown(device_t); +static void 
rge_reset(struct rge_softc *); + +static struct mbuf *get_mbuf(void); +static void free_buf(vm_paddr_t paddr); +static void *get_buf(void); + +static void xlr_mac_get_hwaddr(struct rge_softc *); +static void xlr_mac_setup_hwaddr(struct driver_data *); +static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag); +static void rmi_xlr_xgmac_init(struct driver_data *priv); +static void rmi_xlr_gmac_init(struct driver_data *priv); +static void mac_common_init(void); +static int rge_mii_write(device_t, int, int, int); +static int rge_mii_read(device_t, int, int); +static void rmi_xlr_mac_mii_statchg(device_t); +static int rmi_xlr_mac_mediachange(struct ifnet *); +static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *); +static void xlr_mac_set_rx_mode(struct rge_softc *sc); +void +rmi_xlr_mac_msgring_handler(int bucket, int size, int code, + int stid, struct msgrng_msg *msg, + void *data); +static void mac_frin_replenish(void *); +static int rmi_xlr_mac_open(struct rge_softc *); +static int rmi_xlr_mac_close(struct rge_softc *); +static int +mac_xmit(struct mbuf *, struct rge_softc *, + struct driver_data *, int, struct p2d_tx_desc *); +static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *); +static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc); +static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc); +static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu); +static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc); +static void rmi_xlr_config_spill_area(struct driver_data *priv); +static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed); +static int +rmi_xlr_mac_set_duplex(struct driver_data *s, + xlr_mac_duplex_t duplex, xlr_mac_fc_t fc); +static void serdes_regs_init(struct driver_data *priv); +static int rmi_xlr_gmac_reset(struct driver_data *priv); + +/*Statistics...*/ +static int get_p2d_desc_failed = 0; +static int msg_snd_failed = 0; + +SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW, + &get_p2d_desc_failed, 0, "p2d desc failed"); +SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW, + &msg_snd_failed, 0, "msg snd failed"); + +struct callout xlr_tx_stop_bkp; + +static device_method_t rge_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, rge_probe), + DEVMETHOD(device_attach, rge_attach), + DEVMETHOD(device_detach, rge_detach), + DEVMETHOD(device_shutdown, rge_shutdown), + DEVMETHOD(device_suspend, rge_suspend), + DEVMETHOD(device_resume, rge_resume), + + /* MII interface */ + DEVMETHOD(miibus_readreg, rge_mii_read), + DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg), + DEVMETHOD(miibus_writereg, rge_mii_write), + {0, 0} +}; + +static driver_t rge_driver = { + "rge", + rge_methods, + sizeof(struct rge_softc) +}; + +static devclass_t rge_devclass; + +DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0); +DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0); + +#ifndef __STR +#define __STR(x) #x +#endif +#ifndef STR +#define STR(x) __STR(x) +#endif + +#define XKPHYS 0x8000000000000000 +/* -- No longer needed RRS +static __inline__ uint32_t +lw_40bit_phys(uint64_t phys, int cca) +{ + uint64_t addr; + uint32_t value = 0; + unsigned long flags; + + addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL); + + enable_KX(flags); + __asm__ __volatile__( + ".set push\n" + ".set noreorder\n" + ".set mips64\n" + "lw %0, 0(%1) \n" + ".set pop\n" + : "=r"(value) + : "r"(addr)); + + disable_KX(flags); + return value; +} +*/ +/* 
-- No longer used RRS +static __inline__ uint64_t +ld_40bit_phys(uint64_t phys, int cca) +{ + uint64_t addr; + uint64_t value = 0; + unsigned long flags; + + + addr = XKPHYS | ((uint64_t) cca << 59) | (phys & 0xfffffffffcULL); + enable_KX(flags); + __asm__ __volatile__( + ".set push\n" + ".set noreorder\n" + ".set mips64\n" + "ld %0, 0(%1) \n" + ".set pop\n" + : "=r"(value) + : "r"(addr)); + + disable_KX(flags); + return value; +} +*/ + +void *xlr_tx_ring_mem; + +struct tx_desc_node { + struct p2d_tx_desc *ptr; + TAILQ_ENTRY(tx_desc_node) list; +}; + +#define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT) +struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES]; +static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE]; +static int xlr_total_active_core = 0; + +/* + * This should contain the list of all free tx frag desc nodes pointing to tx + * p2d arrays + */ +static +TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] = +{ + TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]), + TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]), +}; + +/* This contains a list of free tx frag node descriptors */ +static +TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] = +{ + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]), + TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]), +}; + +static struct mtx tx_desc_lock[XLR_MAX_CORE]; + +static inline void +mac_make_desc_rfr(struct msgrng_msg *msg, + vm_paddr_t addr) +{ + msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL; + msg->msg1 = msg->msg2 = msg->msg3 = 0; +} + +#define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1) + +static void +init_p2d_allocation(void) +{ + int active_core[8] = {0}; + int i = 0; + uint32_t cpumask; + int cpu; + + cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus); + + for (i = 0; i < 32; i++) { + if (cpumask & (1 << i)) { + cpu = cpu_ltop_map[i]; + if (!active_core[cpu / 4]) { + active_core[cpu / 4] = 1; + xlr_total_active_core++; + } + } + } + for (i = 0; i < XLR_MAX_CORE; i++) { + if (active_core[i]) + xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core; + } + printf("Total Active Core %d\n", xlr_total_active_core); +} + + +static void +init_tx_ring(void) +{ + int i; + int j = 0; + struct tx_desc_node *start, *node; + struct p2d_tx_desc *tx_desc; + vm_paddr_t paddr; + vm_offset_t unmapped_addr; + + for (i = 0; i < XLR_MAX_CORE; i++) + mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN); + + start = &tx_desc_nodes[0]; + /* TODO: try to get this from KSEG0 */ + xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE), + M_DEVBUF, M_NOWAIT | M_ZERO, 0, + 0x10000000, XLR_CACHELINE_SIZE, 0); + + if (xlr_tx_ring_mem == NULL) { + panic("TX ring memory allocation failed"); + } + paddr = vtophys((vm_offset_t)xlr_tx_ring_mem); + + unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr); + + + tx_desc = (struct p2d_tx_desc *)unmapped_addr; + + for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) { + node = start + i; + node->ptr = tx_desc; + tx_desc++; + TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list); + 
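+		/*
+		 * 'j' picks the per-core free list: the descriptor pool is
+		 * split evenly, XLR_MAX_TX_DESC_NODES / xlr_total_active_core
+		 * entries per active core (see init_p2d_allocation()).
+		 */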
j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core)); + } +} + +static inline struct p2d_tx_desc * +get_p2d_desc(void) +{ + struct tx_desc_node *node; + struct p2d_tx_desc *tx_desc = NULL; + int cpu = xlr_cpu_id(); + + mtx_lock_spin(&tx_desc_lock[cpu]); + node = TAILQ_FIRST(&tx_frag_desc[cpu]); + if (node) { + xlr_tot_avail_p2d[cpu]--; + TAILQ_REMOVE(&tx_frag_desc[cpu], node, list); + tx_desc = node->ptr; + TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list); + } else { + /* Increment p2d desc fail count */ + get_p2d_desc_failed++; + } + mtx_unlock_spin(&tx_desc_lock[cpu]); + return tx_desc; +} +static void +free_p2d_desc(struct p2d_tx_desc *tx_desc) +{ + struct tx_desc_node *node; + int cpu = xlr_cpu_id(); + + mtx_lock_spin(&tx_desc_lock[cpu]); + node = TAILQ_FIRST(&free_tx_frag_desc[cpu]); + KASSERT((node != NULL), ("Free TX frag node list is empty\n")); + + TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list); + node->ptr = tx_desc; + TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list); + xlr_tot_avail_p2d[cpu]++; + mtx_unlock_spin(&tx_desc_lock[cpu]); + +} + +static int +build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc) +{ + struct mbuf *m; + vm_paddr_t paddr; + uint64_t p2d_len; + int nfrag; + vm_paddr_t p1, p2; + uint32_t len1, len2; + vm_offset_t taddr; + uint64_t fr_stid; + + fr_stid = (xlr_cpu_id() << 3) + xlr_thr_id() + 4; + + if (tx_desc == NULL) + return 1; + + nfrag = 0; + for (m = m_head; m != NULL; m = m->m_next) { + if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) { + free_p2d_desc(tx_desc); + return 1; + } + if (m->m_len != 0) { + paddr = vtophys(mtod(m, vm_offset_t)); + p1 = paddr + m->m_len; + p2 = vtophys(((vm_offset_t)m->m_data + m->m_len)); + if (p1 != p2) { + len1 = (uint32_t) + (PAGE_SIZE - (paddr & PAGE_MASK)); + tx_desc->frag[nfrag] = (127ULL << 54) | + ((uint64_t) len1 << 40) | paddr; + nfrag++; + taddr = (vm_offset_t)m->m_data + len1; + p2 = vtophys(taddr); + len2 = m->m_len - len1; + if (nfrag >= XLR_MAX_TX_FRAGS) + panic("TX frags exceeded"); + + tx_desc->frag[nfrag] = (127ULL << 54) | + ((uint64_t) len2 << 40) | p2; + + taddr += len2; + p1 = vtophys(taddr); + + if ((p2 + len2) != p1) { + printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2); + printf("len1 = %x len2 = %x\n", len1, + len2); + printf("m_data %p\n", m->m_data); + DELAY(1000000); + panic("Multiple Mbuf segment discontiguous\n"); + } + } else { + tx_desc->frag[nfrag] = (127ULL << 54) | + ((uint64_t) m->m_len << 40) | paddr; + } + nfrag++; + } + } + /* set eop in the last tx p2d desc */ + tx_desc->frag[nfrag - 1] |= (1ULL << 63); + paddr = vtophys((vm_offset_t)tx_desc); + tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr; + nfrag++; + tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t) (vm_offset_t)tx_desc; + tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t) (vm_offset_t)m_head; + + p2d_len = (nfrag * 8); + p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) | + (p2d_len << 40) | paddr; + + return 0; +} +static void +release_tx_desc(struct msgrng_msg *msg, int rel_buf) +{ + /* + * OLD code: vm_paddr_t paddr = msg->msg0 & 0xffffffffffULL; + * uint64_t temp; struct p2d_tx_desc *tx_desc; struct mbuf *m; + * + * paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t)); *** In o32 we will + * crash here ****** temp = ld_40bit_phys(paddr, 3); tx_desc = + * (struct p2d_tx_desc *)((vm_offset_t)temp); + * + * if (rel_buf) { paddr += sizeof(uint64_t); + * + * temp = ld_40bit_phys(paddr, 3); + * + * m = (struct mbuf *)((vm_offset_t)temp); m_freem(m); } printf("Call + * 
fre_p2d_desc\n"); free_p2d_desc(tx_desc); + */ + struct p2d_tx_desc *tx_desc, *chk_addr; + struct mbuf *m; + + tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0); + chk_addr = (struct p2d_tx_desc *)(uint32_t) (tx_desc->frag[XLR_MAX_TX_FRAGS] & 0x00000000ffffffff); + if (tx_desc != chk_addr) { + printf("Address %p does not match with stored addr %p - we leaked a descriptor\n", + tx_desc, chk_addr); + return; + } + if (rel_buf) { + m = (struct mbuf *)(uint32_t) (tx_desc->frag[XLR_MAX_TX_FRAGS + 1] & 0x00000000ffffffff); + m_freem(m); + } + free_p2d_desc(tx_desc); +} + +#ifdef RX_COPY +#define RGE_MAX_NUM_DESC (6 * MAX_NUM_DESC) +uint8_t *rge_rx_buffers[RGE_MAX_NUM_DESC]; +static struct mtx rge_rx_mtx; +int g_rx_buf_head; + +static void +init_rx_buf(void) +{ + int i; + uint8_t *buf, *start; + uint32_t size, *ptr; + + mtx_init(&rge_rx_mtx, "xlr rx_desc", NULL, MTX_SPIN); + + size = (RGE_MAX_NUM_DESC * (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE)); + + start = (uint8_t *) contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO, + 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); + if (start == NULL) + panic("NO RX BUFFERS"); + buf = start; + size = (MAX_FRAME_SIZE + XLR_CACHELINE_SIZE); + for (i = 0; i < RGE_MAX_NUM_DESC; i++) { + buf = start + (i * size); + ptr = (uint32_t *) buf; + *ptr = (uint32_t) buf; + rge_rx_buffers[i] = buf + XLR_CACHELINE_SIZE; + } +} + +static void * +get_rx_buf(void) +{ + void *ptr = NULL; + + mtx_lock_spin(&rge_rx_mtx); + if (g_rx_buf_head < RGE_MAX_NUM_DESC) { + ptr = (void *)rge_rx_buffers[g_rx_buf_head]; + g_rx_buf_head++; + } + mtx_unlock_spin(&rge_rx_mtx); + return ptr; +} + +#endif + +static struct mbuf * +get_mbuf(void) +{ + struct mbuf *m_new = NULL; + + if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL) + return NULL; + + m_new->m_len = MCLBYTES; + m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; + return m_new; +} + +static void +free_buf(vm_paddr_t paddr) +{ + struct mbuf *m; + uint32_t *temp; + uint32_t mag, um; + + /* + * This will crash I think. RRS temp = lw_40bit_phys((paddr - + * XLR_CACHELINE_SIZE), 3); m = (struct mbuf *)temp; + */ + /* + * This gets us a kseg0 address for the mbuf/magic on the ring but + * we need to get the va to free the mbuf. 
This is stored at *temp; + */ + temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE); + um = temp[0]; + mag = temp[1]; + if (mag != 0xf00bad) { + printf("Something is wrong kseg:%p found mag:%x not 0xf00bad\n", + temp, mag); + return; + } + m = (struct mbuf *)um; + if (m != NULL) + m_freem(m); +} + +static void * +get_buf(void) +{ +#ifdef RX_COPY + return get_rx_buf(); +#else + struct mbuf *m_new = NULL; + +#ifdef INVARIANTS + vm_paddr_t temp1, temp2; + +#endif + unsigned int *md; + + m_new = get_mbuf(); + + if (m_new == NULL) + return NULL; + + m_adj(m_new, XLR_CACHELINE_SIZE - ((unsigned int)m_new->m_data & 0x1f)); + md = (unsigned int *)m_new->m_data; + md[0] = (unsigned int)m_new; /* Back Ptr */ + md[1] = 0xf00bad; + m_adj(m_new, XLR_CACHELINE_SIZE); + + + /* return (void *)m_new; */ +#ifdef INVARIANTS + temp1 = vtophys((vm_offset_t)m_new->m_data); + temp2 = vtophys((vm_offset_t)m_new->m_data + 1536); + if ((temp1 + 1536) != temp2) + panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n"); +#endif + return (void *)m_new->m_data; +#endif +} + +/********************************************************************** + **********************************************************************/ +static void +rmi_xlr_mac_set_enable(struct driver_data *priv, int flag) +{ + uint32_t regval; + int tx_threshold = 1518; + + if (flag) { + regval = xlr_read_reg(priv->mmio, R_TX_CONTROL); + regval |= (1 << O_TX_CONTROL__TxEnable) | + (tx_threshold << O_TX_CONTROL__TxThreshold); + + xlr_write_reg(priv->mmio, R_TX_CONTROL, regval); + + regval = xlr_read_reg(priv->mmio, R_RX_CONTROL); + regval |= 1 << O_RX_CONTROL__RxEnable; + if (priv->mode == XLR_PORT0_RGMII) + regval |= 1 << O_RX_CONTROL__RGMII; + xlr_write_reg(priv->mmio, R_RX_CONTROL, regval); + + regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1); + regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen); + xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval); + } else { + regval = xlr_read_reg(priv->mmio, R_TX_CONTROL); + regval &= ~((1 << O_TX_CONTROL__TxEnable) | + (tx_threshold << O_TX_CONTROL__TxThreshold)); + + xlr_write_reg(priv->mmio, R_TX_CONTROL, regval); + + regval = xlr_read_reg(priv->mmio, R_RX_CONTROL); + regval &= ~(1 << O_RX_CONTROL__RxEnable); + xlr_write_reg(priv->mmio, R_RX_CONTROL, regval); + + regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1); + regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen); + xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval); + } +} + +/********************************************************************** + **********************************************************************/ +static __inline__ int +xlr_mac_send_fr(struct driver_data *priv, + vm_paddr_t addr, int len) +{ + int stid = priv->rfrbucket; + struct msgrng_msg msg; + int vcpu = (xlr_cpu_id() << 2) + xlr_thr_id(); + + mac_make_desc_rfr(&msg, addr); + + /* Send the packet to MAC */ + dbg_msg("mac_%d: Sending free packet %llx to stid %d\n", + priv->instance, addr, stid); + if (priv->type == XLR_XGMAC) { + while (message_send(1, MSGRNG_CODE_XGMAC, stid, &msg)); + } else { + while (message_send(1, MSGRNG_CODE_MAC, stid, &msg)); + xlr_rge_repl_done[vcpu]++; + } + + return 0; +} + +/**************************************************************/ + +static void +xgmac_mdio_setup(volatile unsigned int *_mmio) +{ + int i; + uint32_t rd_data; + + for (i = 0; i < 4; i++) { + rd_data = xmdio_read(_mmio, 1, 0x8000 + i); + rd_data = rd_data & 0xffffdfff; /* clear isolate bit */ + xmdio_write(_mmio, 1, 0x8000 + i, rd_data); + } +} + 
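+/*
+ * Illustrative sketch only (not used by the driver): the receive buffer
+ * layout that get_buf()/free_buf() above rely on.  get_buf() hands the MAC
+ * a pointer one cacheline past the start of the mbuf data and hides the
+ * mbuf back pointer plus a 0xf00bad magic word in the cacheline in front of
+ * it; free_buf() later recovers and validates the mbuf from the physical
+ * address returned by the MAC.  The helper below merely restates that
+ * lookup; its name and the #ifdef notdef wrapper are assumptions made for
+ * illustration.
+ */
+#ifdef notdef
+static struct mbuf *
+rge_paddr_to_mbuf(vm_paddr_t paddr)
+{
+	uint32_t *md;
+
+	/* step back over the hidden cacheline reserved by get_buf() */
+	md = (uint32_t *)MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
+	if (md[1] != 0xf00bad)		/* magic written by get_buf() */
+		return (NULL);
+	return ((struct mbuf *)md[0]);	/* back pointer written by get_buf() */
+}
+#endif /* notdef */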
+/********************************************************************** + * Init MII interface + * + * Input parameters: + * s - priv structure + ********************************************************************* */ +#define PHY_STATUS_RETRIES 25000 + +static void +rmi_xlr_mac_mii_init(struct driver_data *priv) +{ + xlr_reg_t *mii_mmio = priv->mii_mmio; + + /* use the lowest clock divisor - divisor 28 */ + xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07); +} + +/********************************************************************** + * Read a PHY register. + * + * Input parameters: + * s - priv structure + * phyaddr - PHY's address + * regidx = index of register to read + * + * Return value: + * value read, or 0 if an error occurred. + ********************************************************************* */ + +static int +rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx) +{ + int i = 0; + + /* setup the phy reg to be used */ + xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS, + (phyaddr << 8) | (regidx << 0)); + /* Issue the read command */ + xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, + (1 << O_MII_MGMT_COMMAND__rstat)); + + /* poll for the read cycle to complete */ + for (i = 0; i < PHY_STATUS_RETRIES; i++) { + if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0) + break; + } + + /* clear the read cycle */ + xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0); + + if (i == PHY_STATUS_RETRIES) { + return 0xffffffff; + } + /* Read the data back */ + return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS); +} + +static int +rge_mii_read(device_t dev, int phyaddr, int regidx) +{ + struct rge_softc *sc = device_get_softc(dev); + + return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx); +} + +/********************************************************************** + * Set MII hooks to newly selected media + * + * Input parameters: + * ifp - Interface Pointer + * + * Return value: + * nothing + ********************************************************************* */ +static int +rmi_xlr_mac_mediachange(struct ifnet *ifp) +{ + struct rge_softc *sc = ifp->if_softc; + + if (ifp->if_flags & IFF_UP) + mii_mediachg(&sc->rge_mii); + + return 0; +} + +/********************************************************************** + * Get the current interface media status + * + * Input parameters: + * ifp - Interface Pointer + * ifmr - Interface media request ptr + * + * Return value: + * nothing + ********************************************************************* */ +static void +rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct rge_softc *sc = ifp->if_softc; + + /* Check whether this is interface is active or not. */ + ifmr->ifm_status = IFM_AVALID; + if (sc->link_up) { + ifmr->ifm_status |= IFM_ACTIVE; + } else { + ifmr->ifm_active = IFM_ETHER; + } +} + +/********************************************************************** + * Write a value to a PHY register. 
+ * + * Input parameters: + * s - priv structure + * phyaddr - PHY to use + * regidx - register within the PHY + * regval - data to write to register + * + * Return value: + * nothing + ********************************************************************* */ +static void +rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval) +{ + int i = 0; + + xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS, + (phyaddr << 8) | (regidx << 0)); + + /* Write the data which starts the write cycle */ + xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval); + + /* poll for the write cycle to complete */ + for (i = 0; i < PHY_STATUS_RETRIES; i++) { + if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0) + break; + } + + return; +} + +static int +rge_mii_write(device_t dev, int phyaddr, int regidx, int regval) +{ + struct rge_softc *sc = device_get_softc(dev); + + rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval); + return (0); +} + +static void +rmi_xlr_mac_mii_statchg(struct device *dev) +{ +} + +static void +serdes_regs_init(struct driver_data *priv) +{ + xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET); + int i; + + /* Initialize SERDES CONTROL Registers */ + rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0); + rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF); + rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0); + rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF); + rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000); + rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000); + rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005); + rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001); + rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000); + rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000); + rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000); + + /* + * For loop delay and GPIO programming crud from Linux driver, + */ + for (i = 0; i < 10000000; i++) { + } + mmio_gpio[0x20] = 0x7e6802; + mmio_gpio[0x10] = 0x7104; + for (i = 0; i < 100000000; i++) { + } + return; +} + +static void +serdes_autoconfig(struct driver_data *priv) +{ + int delay = 100000; + + /* Enable Auto negotiation in the PCS Layer */ + rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000); + DELAY(delay); + rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200); + DELAY(delay); + + rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000); + DELAY(delay); + rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200); + DELAY(delay); + + rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000); + DELAY(delay); + rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200); + DELAY(delay); + + rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000); + DELAY(delay); + rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200); + DELAY(delay); + +} + +/***************************************************************** + * Initialize GMAC + *****************************************************************/ +static void +rmi_xlr_config_pde(struct driver_data *priv) +{ + int i = 0, cpu = 0, bucket = 0; + uint64_t bucket_map = 0; + + /* uint32_t desc_pack_ctrl = 0; */ + uint32_t cpumask; + + cpumask = PCPU_GET(cpumask) | PCPU_GET(other_cpus); + + for (i = 0; i < 32; i++) { + if (cpumask & (1 << i)) { + cpu = cpu_ltop_map[i]; + bucket = ((cpu >> 2) << 3); + //|(cpu & 0x03); + bucket_map |= (1ULL << bucket); + dbg_msg("i=%d, cpu=%d, bucket = %d, bucket_map=%llx\n", + i, cpu, bucket, bucket_map); + } + } + + /* bucket_map = 0x1; */ + 
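+	/*
+	 * bucket_map is a 64-bit mask with one receive-bucket bit set per
+	 * online core (the per-thread bits are commented out above).  The
+	 * same map is programmed into all four packet distribution engine
+	 * classes below, split into a low/high 32-bit register pair
+	 * (R_PDE_CLASS_x and R_PDE_CLASS_x + 1).
+	 */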
xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
+	    ((bucket_map >> 32) & 0xffffffff));
+
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
+	    ((bucket_map >> 32) & 0xffffffff));
+
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
+	    ((bucket_map >> 32) & 0xffffffff));
+
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
+	xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
+	    ((bucket_map >> 32) & 0xffffffff));
+}
+
+static void
+rmi_xlr_config_parser(struct driver_data *priv)
+{
+	/*
+	 * Mark it as no classification.  The parser extract is guaranteed
+	 * to be zero with no classification.
+	 */
+	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);
+
+	xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);
+
+	/* configure the parser : L2 Type is configured in the bootloader */
+	/* extract IP: src, dest, protocol */
+	xlr_write_reg(priv->mmio, R_L3CTABLE,
+	    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
+	    (0x0800 << 0));
+	xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
+	    (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
+
+}
+
+static void
+rmi_xlr_config_classifier(struct driver_data *priv)
+{
+	int i = 0;
+
+	if (priv->type == XLR_XGMAC) {
+		/* xgmac translation table doesn't have sane values on reset */
+		for (i = 0; i < 64; i++)
+			xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
+
+		/*
+		 * use upper 7 bits of the parser extract to index the
+		 * translate table
+		 */
+		xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
+	}
+}
+
+enum {
+	SGMII_SPEED_10 = 0x00000000,
+	SGMII_SPEED_100 = 0x02000000,
+	SGMII_SPEED_1000 = 0x04000000,
+};
+
+static void
+rmi_xlr_gmac_config_speed(struct driver_data *priv)
+{
+	int phy_addr = priv->phy_addr;
+	xlr_reg_t *mmio = priv->mmio;
+	struct rge_softc *sc = priv->sc;
+
+	priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
+	priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
+	priv->speed = (priv->speed >> 3) & 0x03;
+
+	if (priv->speed == xlr_mac_speed_10) {
+		if (priv->mode != XLR_RGMII)
+			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
+		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
+		xlr_write_reg(mmio, R_CORECONTROL, 0x02);
+		printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
+		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
+		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
+		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
+	} else if (priv->speed == xlr_mac_speed_100) {
+		if (priv->mode != XLR_RGMII)
+			xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
+		xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
+		xlr_write_reg(mmio, R_CORECONTROL, 0x01);
+		printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
+		sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
+		sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
+		sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
+	} else {
+		if (priv->speed != xlr_mac_speed_1000) {
+			if (priv->mode != XLR_RGMII)
+				xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
+			printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
+			xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7137);
+			xlr_write_reg(mmio, R_CORECONTROL, 0x01);
+			sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO |
IFM_100_TX | IFM_FDX; + sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX; + sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX; + } else { + if (priv->mode != XLR_RGMII) + xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000); + xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7237); + xlr_write_reg(mmio, R_CORECONTROL, 0x00); + printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev)); + sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX; + sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX; + sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX; + } + } + + if (!priv->link) { + sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER; + sc->link_up = 0; + } else { + sc->link_up = 1; + } +} + +/***************************************************************** + * Initialize XGMAC + *****************************************************************/ +static void +rmi_xlr_xgmac_init(struct driver_data *priv) +{ + int i = 0; + xlr_reg_t *mmio = priv->mmio; + int id = priv->instance; + struct rge_softc *sc = priv->sc; + volatile unsigned short *cpld; + + cpld = (volatile unsigned short *)0xBD840000; + + xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL, + (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20)); + xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET); + rmi_xlr_config_pde(priv); + rmi_xlr_config_parser(priv); + rmi_xlr_config_classifier(priv); + + xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1); + + /* configure the XGMAC Registers */ + xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026); + + /* configure the XGMAC_GLUE Registers */ + xlr_write_reg(mmio, R_DMACR0, 0xffffffff); + xlr_write_reg(mmio, R_DMACR1, 0xffffffff); + xlr_write_reg(mmio, R_DMACR2, 0xffffffff); + xlr_write_reg(mmio, R_DMACR3, 0xffffffff); + xlr_write_reg(mmio, R_STATCTRL, 0x04); + xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff); + + xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030); + xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f); + xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff); + xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e); + + /* + * take XGMII phy out of reset + */ + /* + * we are pulling everything out of reset because writing a 0 would + * reset other devices on the chip + */ + cpld[ATX_CPLD_RESET_1] = 0xffff; + cpld[ATX_CPLD_MISC_CTRL] = 0xffff; + cpld[ATX_CPLD_RESET_2] = 0xffff; + + xgmac_mdio_setup(mmio); + + rmi_xlr_config_spill_area(priv); + + if (id == 0) { + for (i = 0; i < 16; i++) { + xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i, + bucket_sizes. + bucket[MSGRNG_STNID_XGS0_TX + i]); + } + + xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE, + bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]); + xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE, + bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]); + + for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) { + xlr_write_reg(mmio, R_CC_CPU0_0 + i, + cc_table_xgs_0. + counters[i >> 3][i & 0x07]); + } + } else if (id == 1) { + for (i = 0; i < 16; i++) { + xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i, + bucket_sizes. + bucket[MSGRNG_STNID_XGS1_TX + i]); + } + + xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE, + bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]); + xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE, + bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]); + + for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) { + xlr_write_reg(mmio, R_CC_CPU0_0 + i, + cc_table_xgs_1. 
+			    counters[i >> 3][i & 0x07]);
+		}
+	}
+	sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
+	sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
+	sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
+	sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
+	sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);
+
+	priv->init_frin_desc = 1;
+}
+
+/*******************************************************
+ * GMAC initialization
+ *******************************************************/
+static int
+rmi_xlr_gmac_reset(struct driver_data *priv)
+{
+	volatile uint32_t val;
+	xlr_reg_t *mmio = priv->mmio;
+	int i, maxloops = 100;
+
+	/* Disable MAC RX */
+	val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
+	val &= ~0x4;
+	xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
+
+	/* Disable Core RX */
+	val = xlr_read_reg(mmio, R_RX_CONTROL);
+	val &= ~0x1;
+	xlr_write_reg(mmio, R_RX_CONTROL, val);
+
+	/* wait for rx to halt */
+	for (i = 0; i < maxloops; i++) {
+		val = xlr_read_reg(mmio, R_RX_CONTROL);
+		if (val & 0x2)
+			break;
+		DELAY(1000);
+	}
+	if (i == maxloops)
+		return -1;
+
+	/* Issue a soft reset */
+	val = xlr_read_reg(mmio, R_RX_CONTROL);
+	val |= 0x4;
+	xlr_write_reg(mmio, R_RX_CONTROL, val);
+
+	/* wait for reset to complete */
+	for (i = 0; i < maxloops; i++) {
+		val = xlr_read_reg(mmio, R_RX_CONTROL);
+		if (val & 0x8)
+			break;
+		DELAY(1000);
+	}
+	if (i == maxloops)
+		return -1;
+
+	/* Clear the soft reset bit */
+	val = xlr_read_reg(mmio, R_RX_CONTROL);
+	val &= ~0x4;
+	xlr_write_reg(mmio, R_RX_CONTROL, val);
+	return 0;
+}
+
+static void
+rmi_xlr_gmac_init(struct driver_data *priv)
+{
+	int i = 0;
+	xlr_reg_t *mmio = priv->mmio;
+	int id = priv->instance;
+	struct stn_cc *gmac_cc_config;
+	uint32_t value = 0;
+	int blk = id / 4, port = id % 4;
+
+	rmi_xlr_mac_set_enable(priv, 0);
+
+	rmi_xlr_config_spill_area(priv);
+
+	xlr_write_reg(mmio, R_DESC_PACK_CTRL,
+	    (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
+	    (1 << O_DESC_PACK_CTRL__MaxEntry) |
+	    (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
+
+	rmi_xlr_config_pde(priv);
+	rmi_xlr_config_parser(priv);
+	rmi_xlr_config_classifier(priv);
+
+	xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
+	xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
+	xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));
+
+	if (priv->mode == XLR_PORT0_RGMII) {
+		printf("Port 0 set in RGMII mode\n");
+		value = xlr_read_reg(mmio, R_RX_CONTROL);
+		value |= 1 << O_RX_CONTROL__RGMII;
+		xlr_write_reg(mmio, R_RX_CONTROL, value);
+	}
+	rmi_xlr_mac_mii_init(priv);
+
+
+#if 0
+	priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
+	    ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
+	    ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+	    ADVERTISED_MII;
+#endif
+
+	/*
+	 * Enable all MDIO interrupts in the PHY.  The RX_ER bit seems to
+	 * get set about every 1 sec in GigE mode; ignore it for now...
+ */ + rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe); + + if (priv->mode != XLR_RGMII) { + serdes_regs_init(priv); + serdes_autoconfig(priv); + } + rmi_xlr_gmac_config_speed(priv); + + value = xlr_read_reg(mmio, R_IPG_IFG); + xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG)); + xlr_write_reg(mmio, R_DMACR0, 0xffffffff); + xlr_write_reg(mmio, R_DMACR1, 0xffffffff); + xlr_write_reg(mmio, R_DMACR2, 0xffffffff); + xlr_write_reg(mmio, R_DMACR3, 0xffffffff); + xlr_write_reg(mmio, R_STATCTRL, 0x04); + xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff); + xlr_write_reg(mmio, R_INTMASK, 0); + xlr_write_reg(mmio, R_FREEQCARVE, 0); + + xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port, + xlr_board_info.bucket_sizes->bucket[priv->txbucket]); + xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE, + xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]); + xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE, + xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]); + xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE, + xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]); + xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE, + xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]); + + dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port, + xlr_board_info.bucket_sizes->bucket[priv->txbucket]); + + gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config; + for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) { + xlr_write_reg(mmio, R_CC_CPU0_0 + i, + gmac_cc_config->counters[i >> 3][i & 0x07]); + dbg_msg("%d: %d -> %d\n", priv->instance, + R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]); + } + priv->init_frin_desc = 1; +} + +/********************************************************************** + * Set promiscuous mode + **********************************************************************/ +static void +xlr_mac_set_rx_mode(struct rge_softc *sc) +{ + struct driver_data *priv = &(sc->priv); + uint32_t regval; + + regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG); + + if (sc->flags & IFF_PROMISC) { + regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | + (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | + (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | + (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN); + } else { + regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | + (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN)); + } + + xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval); +} + +/********************************************************************** + * Configure LAN speed for the specified MAC. 
+ ********************************************************************* */ +static int +rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed) +{ + return 0; +} + +/********************************************************************** + * Set Ethernet duplex and flow control options for this MAC + ********************************************************************* */ +static int +rmi_xlr_mac_set_duplex(struct driver_data *s, + xlr_mac_duplex_t duplex, xlr_mac_fc_t fc) +{ + return 0; +} + +/***************************************************************** + * Kernel Net Stack <-> MAC Driver Interface + *****************************************************************/ +/********************************************************************** + **********************************************************************/ +#define MAC_TX_FAIL 2 +#define MAC_TX_PASS 0 +#define MAC_TX_RETRY 1 + +static __inline__ void +message_send_block(unsigned int size, unsigned int code, + unsigned int stid, struct msgrng_msg *msg) +{ + unsigned int dest = 0; + unsigned long long status = 0; + + msgrng_load_tx_msg0(msg->msg0); + msgrng_load_tx_msg1(msg->msg1); + msgrng_load_tx_msg2(msg->msg2); + msgrng_load_tx_msg3(msg->msg3); + + dest = ((size - 1) << 16) | (code << 8) | (stid); + + do { + msgrng_send(dest); + status = msgrng_read_status(); + } while (status & 0x6); + +} + +int xlr_dev_queue_xmit_hack = 0; + +static int +mac_xmit(struct mbuf *m, struct rge_softc *sc, + struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc) +{ + struct msgrng_msg msg; + int stid = priv->txbucket; + uint32_t tx_cycles = 0; + unsigned long mflags = 0; + int vcpu = PCPU_GET(cpuid); + int rv; + + tx_cycles = mips_rd_count(); + + if (build_frag_list(m, &msg, tx_desc) != 0) + return MAC_TX_FAIL; + + else { + msgrng_access_enable(mflags); + if ((rv = message_send_retry(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) { + msg_snd_failed++; + msgrng_access_disable(mflags); + release_tx_desc(&msg, 0); + xlr_rge_msg_snd_failed[vcpu]++; + dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%llx\n", + vcpu, rv, stid, msg.msg0); + return MAC_TX_FAIL; + } + msgrng_access_disable(mflags); + port_inc_counter(priv->instance, PORT_TX); + } + + /* Send the packet to MAC */ + dbg_msg("Sent tx packet to stid %d, msg0=%llx, msg1=%llx \n", stid, msg.msg0, msg.msg1); +#ifdef DUMP_PACKETS + { + int i = 0; + unsigned char *buf = (char *)m->m_data; + + printf("Tx Packet: length=%d\n", len); + for (i = 0; i < 64; i++) { + if (i && (i % 16) == 0) + printf("\n"); + printf("%02x ", buf[i]); + } + printf("\n"); + } +#endif + xlr_inc_counter(NETIF_TX); + return MAC_TX_PASS; +} + +static int +rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc) +{ + struct driver_data *priv = &(sc->priv); + int ret = -ENOSPC; + + dbg_msg("IN\n"); + + xlr_inc_counter(NETIF_STACK_TX); + +retry: + ret = mac_xmit(m, sc, priv, len, tx_desc); + + if (ret == MAC_TX_RETRY) + goto retry; + + dbg_msg("OUT, ret = %d\n", ret); + if (ret == MAC_TX_FAIL) { + /* FULL */ + dbg_msg("Msg Ring Full. 
Stopping upper layer Q\n"); + port_inc_counter(priv->instance, PORT_STOPQ); + } + return ret; +} + +static void +mac_frin_replenish(void *args /* ignored */ ) +{ +#ifdef RX_COPY + return; +#else + int cpu = xlr_cpu_id(); + int done = 0; + int i = 0; + + xlr_inc_counter(REPLENISH_ENTER); + /* + * xlr_set_counter(REPLENISH_ENTER_COUNT, + * atomic_read(frin_to_be_sent)); + */ + xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid)); + + for (;;) { + + done = 0; + + for (i = 0; i < XLR_MAX_MACS; i++) { + /* int offset = 0; */ + unsigned long msgrng_flags; + void *m; + uint32_t cycles; + struct rge_softc *sc; + struct driver_data *priv; + int frin_to_be_sent; + + sc = dev_mac[i]; + if (!sc) + goto skip; + + priv = &(sc->priv); + frin_to_be_sent = priv->frin_to_be_sent[cpu]; + + /* if (atomic_read(frin_to_be_sent) < 0) */ + if (frin_to_be_sent < 0) { + panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n", + __FUNCTION__, i, + frin_to_be_sent); + } + /* if (!atomic_read(frin_to_be_sent)) */ + if (!frin_to_be_sent) + goto skip; + + cycles = mips_rd_count(); + { + m = get_buf(); + if (!m) { + device_printf(sc->rge_dev, "No buffer\n"); + goto skip; + } + } + xlr_inc_counter(REPLENISH_FRIN); + msgrng_access_enable(msgrng_flags); + if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) { + free_buf(vtophys(m)); + printf("[%s]: rx free message_send failed!\n", __FUNCTION__); + msgrng_access_disable(msgrng_flags); + break; + } + msgrng_access_disable(msgrng_flags); + xlr_set_counter(REPLENISH_CYCLES, + (read_c0_count() - cycles)); + atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1); + + continue; + skip: + done++; + } + if (done == XLR_MAX_MACS) + break; + } +#endif +} + +static volatile uint32_t g_tx_frm_tx_ok; + +static void +rge_tx_bkp_func(void *arg, int npending) +{ + int i = 0; + + for (i = 0; i < xlr_board_info.gmacports; i++) { + if (!dev_mac[i] || !dev_mac[i]->active) + continue; + rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD); + } + atomic_subtract_int(&g_tx_frm_tx_ok, 1); +} + +/* This function is called from an interrupt handler */ +void +rmi_xlr_mac_msgring_handler(int bucket, int size, int code, + int stid, struct msgrng_msg *msg, + void *data /* ignored */ ) +{ + uint64_t phys_addr = 0; + unsigned long addr = 0; + uint32_t length = 0; + int ctrl = 0, port = 0; + struct rge_softc *sc = NULL; + struct driver_data *priv = 0; + struct ifnet *ifp; + int cpu = xlr_cpu_id(); + int vcpu = (cpu << 2) + xlr_thr_id(); + + dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%llx msg1=%llx\n", + bucket, size, code, stid, msg->msg0, msg->msg1); + + phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL); + length = (msg->msg0 >> 40) & 0x3fff; + if (length == 0) { + ctrl = CTRL_REG_FREE; + port = (msg->msg0 >> 54) & 0x0f; + addr = 0; + } else { + ctrl = CTRL_SNGL; + length = length - BYTE_OFFSET - MAC_CRC_LEN; + port = msg->msg0 & 0x0f; + addr = 0; + } + + if (xlr_board_info.is_xls) { + if (stid == MSGRNG_STNID_GMAC1) + port += 4; + sc = dev_mac[dev_mac_gmac0 + port]; + } else { + if (stid == MSGRNG_STNID_XGS0FR) + sc = dev_mac[dev_mac_xgs0]; + else if (stid == MSGRNG_STNID_XGS1FR) + sc = dev_mac[dev_mac_xgs0 + 1]; + else + sc = dev_mac[dev_mac_gmac0 + port]; + } + if (sc == NULL) + return; + priv = &(sc->priv); + + dbg_msg("msg0 = %llx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n", + msg->msg0, stid, port, addr, length, ctrl); + + if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) { + xlr_rge_tx_ok_done[vcpu]++; + release_tx_desc(msg, 1); + ifp = sc->rge_ifp; + if 
(ifp->if_drv_flags & IFF_DRV_OACTIVE) { + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + } + if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1)) + rge_tx_bkp_func(NULL, 0); + xlr_set_counter(NETIF_TX_COMPLETE_CYCLES, + (read_c0_count() - msgrng_msg_cycles)); + } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) { + /* Rx Packet */ + /* struct mbuf *m = 0; */ + /* int logical_cpu = 0; */ + + dbg_msg("Received packet, port = %d\n", port); + /* + * if num frins to be sent exceeds threshold, wake up the + * helper thread + */ + atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1); + if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) { + mac_frin_replenish(NULL); + } + dbg_msg("gmac_%d: rx packet: phys_addr = %llx, length = %x\n", + priv->instance, phys_addr, length); + mac_stats_add(priv->stats.rx_packets, 1); + mac_stats_add(priv->stats.rx_bytes, length); + xlr_inc_counter(NETIF_RX); + xlr_set_counter(NETIF_RX_CYCLES, + (read_c0_count() - msgrng_msg_cycles)); + rge_rx(sc, phys_addr, length); + xlr_rge_rx_done[vcpu]++; + } else { + printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl); + } + +} + +/********************************************************************** + **********************************************************************/ +static int +rge_probe(dev) + device_t dev; +{ + /* Always return 0 */ + return 0; +} + +volatile unsigned long xlr_debug_enabled; +struct callout rge_dbg_count; +static void +xlr_debug_count(void *addr) +{ + struct driver_data *priv = &dev_mac[0]->priv; + + /* uint32_t crdt; */ + if (xlr_debug_enabled) { + printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e)); + } + callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL); +} + + +static void +xlr_tx_q_wakeup(void *addr) +{ + int i = 0; + int j = 0; + + for (i = 0; i < xlr_board_info.gmacports; i++) { + if (!dev_mac[i] || !dev_mac[i]->active) + continue; + if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) { + for (j = 0; j < XLR_MAX_CORE; j++) { + if (xlr_tot_avail_p2d[j]) { + dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + break; + } + } + } + } + callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL); +} + +static int +rge_attach(device_t dev) +{ + struct ifnet *ifp; + struct rge_softc *sc; + struct driver_data *priv = 0; + int ret = 0; + struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev); + + sc = device_get_softc(dev); + sc->rge_dev = dev; + + /* Initialize mac's */ + sc->unit = device_get_unit(dev); + + if (sc->unit > XLR_MAX_MACS) { + ret = ENXIO; + goto out; + } + RGE_LOCK_INIT(sc, device_get_nameunit(dev)); + + priv = &(sc->priv); + priv->sc = sc; + + sc->flags = 0; /* TODO : fix me up later */ + + priv->id = sc->unit; + if (gmac_conf->type == XLR_GMAC) { + priv->instance = priv->id; + priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr + + 0x1000 * (sc->unit % 4)); + if ((ret = rmi_xlr_gmac_reset(priv)) == -1) + goto out; + } else if (gmac_conf->type == XLR_XGMAC) { + priv->instance = priv->id - xlr_board_info.gmacports; + priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr); + } + if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI) { + dbg_msg("Arizona board - offset 4 \n"); + priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET); + } else + priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET); + + priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr); + priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET); + + sc->base_addr = (unsigned long)priv->mmio; + 
sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1; + + sc->xmit = rge_start; + sc->stop = rge_stop; + sc->get_stats = rmi_xlr_mac_get_stats; + sc->ioctl = rge_ioctl; + + /* Initialize the device specific driver data */ + mtx_init(&priv->lock, "rge", NULL, MTX_SPIN); + + priv->type = gmac_conf->type; + + priv->mode = gmac_conf->mode; + if (xlr_board_info.is_xls == 0) { + if (xlr_board_atx_ii() && !xlr_board_atx_ii_b()) + priv->phy_addr = priv->instance - 2; + else + priv->phy_addr = priv->instance; + priv->mode = XLR_RGMII; + } else { + if (gmac_conf->mode == XLR_PORT0_RGMII && + priv->instance == 0) { + priv->mode = XLR_PORT0_RGMII; + priv->phy_addr = 0; + } else { + priv->mode = XLR_SGMII; + priv->phy_addr = priv->instance + 16; + } + } + + priv->txbucket = gmac_conf->station_txbase + priv->instance % 4; + priv->rfrbucket = gmac_conf->station_rfr; + priv->spill_configured = 0; + + dbg_msg("priv->mmio=%p\n", priv->mmio); + + /* Set up ifnet structure */ + ifp = sc->rge_ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(sc->rge_dev, "failed to if_alloc()\n"); + rge_release_resources(sc); + ret = ENXIO; + RGE_LOCK_DESTROY(sc); + goto out; + } + ifp->if_softc = sc; + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = rge_ioctl; + ifp->if_start = rge_start; + ifp->if_watchdog = rge_watchdog; + ifp->if_init = rge_init; + ifp->if_mtu = ETHERMTU; + ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE; + IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); + IFQ_SET_READY(&ifp->if_snd); + sc->active = 1; + ifp->if_hwassist = 0; + ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING; + ifp->if_capenable = ifp->if_capabilities; + + /* Initialize the rge_softc */ + sc->irq = gmac_conf->baseirq + priv->instance % 4; + + /* Set the IRQ into the rid field */ + /* + * note this is a hack to pass the irq to the iodi interrupt setup + * routines + */ + sc->rge_irq.__r_i = (struct resource_i *)sc->irq; + + ret = bus_setup_intr(dev, &sc->rge_irq, INTR_FAST | INTR_TYPE_NET | INTR_MPSAFE, + NULL, rge_intr, sc, &sc->rge_intrhand); + + if (ret) { + rge_detach(dev); + device_printf(sc->rge_dev, "couldn't set up irq\n"); + RGE_LOCK_DESTROY(sc); + goto out; + } + xlr_mac_get_hwaddr(sc); + xlr_mac_setup_hwaddr(priv); + + dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n", + (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio, + (u_long)sc->base_addr, priv->phy_addr, sc->irq); + dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4], + (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket); + + /* + * Set up ifmedia support. + */ + /* + * Initialize MII/media info. + */ + sc->rge_mii.mii_ifp = ifp; + sc->rge_mii.mii_readreg = rge_mii_read; + sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write; + sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg; + ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange, + rmi_xlr_mac_mediastatus); + ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO); + sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media; + + /* + * Call MI attach routine. 
+ */ + ether_ifattach(ifp, sc->dev_addr); + + if (priv->type == XLR_GMAC) { + rmi_xlr_gmac_init(priv); + } else if (priv->type == XLR_XGMAC) { + rmi_xlr_xgmac_init(priv); + } + dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n", + sc->unit, priv->mmio, sc->mtu); + dev_mac[sc->unit] = sc; + if (priv->type == XLR_XGMAC && priv->instance == 0) + dev_mac_xgs0 = sc->unit; + if (priv->type == XLR_GMAC && priv->instance == 0) + dev_mac_gmac0 = sc->unit; + + if (!gmac_common_init_done) { + mac_common_init(); + gmac_common_init_done = 1; + callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE); + callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL); + callout_init(&rge_dbg_count, CALLOUT_MPSAFE); + //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL); + } + if ((ret = rmi_xlr_mac_open(sc)) == -1) { + RGE_LOCK_DESTROY(sc); + goto out; + } +out: + if (ret < 0) { + device_printf(dev, "error - skipping\n"); + } + return ret; +} + +static void +rge_reset(struct rge_softc *sc) +{ +} + +static int +rge_detach(dev) + device_t dev; +{ +#ifdef FREEBSD_MAC_NOT_YET + struct rge_softc *sc; + struct ifnet *ifp; + + sc = device_get_softc(dev); + ifp = sc->rge_ifp; + + RGE_LOCK(sc); + rge_stop(sc); + rge_reset(sc); + RGE_UNLOCK(sc); + + ether_ifdetach(ifp); + + if (sc->rge_tbi) { + ifmedia_removeall(&sc->rge_ifmedia); + } else { + bus_generic_detach(dev); + device_delete_child(dev, sc->rge_miibus); + } + + rge_release_resources(sc); + +#endif /* FREEBSD_MAC_NOT_YET */ + return (0); +} +static int +rge_suspend(device_t dev) +{ + struct rge_softc *sc; + + sc = device_get_softc(dev); + RGE_LOCK(sc); + rge_stop(sc); + RGE_UNLOCK(sc); + + return 0; +} + +static int +rge_resume(device_t dev) +{ + panic("rge_resume(): unimplemented\n"); + return 0; +} + +static void +rge_release_resources(struct rge_softc *sc) +{ + + if (sc->rge_ifp != NULL) + if_free(sc->rge_ifp); + + if (mtx_initialized(&sc->rge_mtx)) /* XXX */ + RGE_LOCK_DESTROY(sc); +} +uint32_t gmac_rx_fail[32]; +uint32_t gmac_rx_pass[32]; + +#ifdef RX_COPY +static void +rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len) +{ + /* + * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr - + * XLR_CACHELINE_SIZE); + */ + struct mbuf *m; + void *ptr; + uint32_t *temp; + struct ifnet *ifp = sc->rge_ifp; + unsigned long msgrng_flags; + int cpu = PCPU_GET(cpuid); + + + temp = (uint32_t *) MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE); + + ptr = (void *)(temp + XLR_CACHELINE_SIZE); + m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m != NULL) { + m->m_len = m->m_pkthdr.len = MCLBYTES; + m_copyback(m, 0, len + BYTE_OFFSET, ptr); + /* align the data */ + m->m_data += BYTE_OFFSET; + m->m_pkthdr.len = m->m_len = len; + m->m_pkthdr.rcvif = ifp; + gmac_rx_pass[cpu]++; + } else { + gmac_rx_fail[cpu]++; + } + msgrng_access_enable(msgrng_flags); + xlr_mac_send_fr(&sc->priv, paddr, MAX_FRAME_SIZE); + msgrng_access_disable(msgrng_flags); + +#ifdef DUMP_PACKETS + { + int i = 0; + unsigned char *buf = (char *)m->m_data; + + printf("Rx Packet: length=%d\n", len); + for (i = 0; i < 64; i++) { + if (i && (i % 16) == 0) + printf("\n"); + printf("%02x ", buf[i]); + } + printf("\n"); + } +#endif + + + if (m) { + ifp->if_ipackets++; + (*ifp->if_input) (ifp, m); + } +} + +#else +static void +rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len) +{ + /* + * struct mbuf *m = (struct mbuf *)*(unsigned int *)((char *)addr - + * XLR_CACHELINE_SIZE); + */ + struct mbuf *m; + uint32_t *temp, tm, mag; + + struct ifnet *ifp = sc->rge_ifp; + + + temp = (uint32_t *) 
MIPS_PHYS_TO_KSEG0(paddr - XLR_CACHELINE_SIZE);
+	tm = temp[0];
+	mag = temp[1];
+	m = (struct mbuf *)tm;
+	if (mag != 0xf00bad) {
+		/* not our packet; error - FIXME in initialization */
+		printf("cpu %d: *ERROR* Not my packet paddr %p\n", xlr_cpu_id(), (void *)paddr);
+		return;
+	}
+	/* align the data */
+	m->m_data += BYTE_OFFSET;
+	m->m_pkthdr.len = m->m_len = len;
+	m->m_pkthdr.rcvif = ifp;
+
+#ifdef DUMP_PACKETS
+	{
+		int i = 0;
+		unsigned char *buf = (char *)m->m_data;
+
+		printf("Rx Packet: length=%d\n", len);
+		for (i = 0; i < 64; i++) {
+			if (i && (i % 16) == 0)
+				printf("\n");
+			printf("%02x ", buf[i]);
+		}
+		printf("\n");
+	}
+#endif
+	ifp->if_ipackets++;
+	(*ifp->if_input) (ifp, m);
+}
+
+#endif
+
+static void
+rge_intr(void *arg)
+{
+	struct rge_softc *sc = (struct rge_softc *)arg;
+	struct driver_data *priv = &(sc->priv);
+	xlr_reg_t *mmio = priv->mmio;
+	uint32_t intreg = xlr_read_reg(mmio, R_INTREG);
+
+	if (intreg & (1 << O_INTREG__MDInt)) {
+		uint32_t phy_int_status = 0;
+		int i = 0;
+
+		for (i = 0; i < XLR_MAX_MACS; i++) {
+			struct rge_softc *phy_dev = NULL;
+			struct driver_data *phy_priv = NULL;
+
+			phy_dev = dev_mac[i];
+			if (phy_dev == NULL)
+				continue;
+
+			phy_priv = &phy_dev->priv;
+
+			if (phy_priv->type == XLR_XGMAC)
+				continue;
+
+			phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
+			    phy_priv->phy_addr, 26);
+			printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
+			    (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
+			rmi_xlr_gmac_config_speed(phy_priv);
+		}
+	} else {
+		printf("[%s]: mac type = %d, instance %d error "
+		    "interrupt: INTREG = 0x%08x\n",
+		    __FUNCTION__, priv->type, priv->instance, intreg);
+	}
+
+	/* clear all interrupts and hope to make progress */
+	xlr_write_reg(mmio, R_INTREG, 0xffffffff);
+
+	/* on A0 and B0, xgmac interrupts are routed only to the xgs_1 irq */
+	if ((xlr_revision_b0()) && (priv->type == XLR_XGMAC)) {
+		struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
+		struct driver_data *xgs0_priv = &xgs0_dev->priv;
+		xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
+		uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);
+
+		if (xgs0_intreg) {
+			printf("[%s]: mac type = %d, instance %d error "
+			    "interrupt: INTREG = 0x%08x\n",
+			    __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);
+
+			xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
+		}
+	}
+}
+
+static void
+rge_start_locked(struct ifnet *ifp, int threshold)
+{
+	struct rge_softc *sc = ifp->if_softc;
+	struct mbuf *m = NULL;
+	int prepend_pkt = 0;
+	int i = 0;
+	struct p2d_tx_desc *tx_desc = NULL;
+	int cpu = xlr_cpu_id();
+	uint32_t vcpu = (cpu << 2) + xlr_thr_id();
+
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
+		if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+			return;
+		tx_desc = get_p2d_desc();
+		if (!tx_desc) {
+			xlr_rge_get_p2d_failed[vcpu]++;
+			return;
+		}
+		/* Grab a packet off the queue.
*/ + IFQ_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) { + free_p2d_desc(tx_desc); + return; + } + prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc); + + if (prepend_pkt) { + xlr_rge_tx_prepend[vcpu]++; + IF_PREPEND(&ifp->if_snd, m); + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + return; + } else { + ifp->if_opackets++; + xlr_rge_tx_done[vcpu]++; + } + } +} + +static void +rge_start(struct ifnet *ifp) +{ + rge_start_locked(ifp, RGE_TX_Q_SIZE); +} + +static int +rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) +{ + struct rge_softc *sc = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *)data; + int mask, error = 0; + + /* struct mii_data *mii; */ + switch (command) { + case SIOCSIFMTU: + ifp->if_mtu = ifr->ifr_mtu; + error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu); + break; + case SIOCSIFFLAGS: + + RGE_LOCK(sc); + if (ifp->if_flags & IFF_UP) { + /* + * If only the state of the PROMISC flag changed, + * then just use the 'set promisc mode' command + * instead of reinitializing the entire NIC. Doing a + * full re-init means reloading the firmware and + * waiting for it to start up, which may take a + * second or two. Similarly for ALLMULTI. + */ + if (ifp->if_drv_flags & IFF_DRV_RUNNING && + ifp->if_flags & IFF_PROMISC && + !(sc->flags & IFF_PROMISC)) { + sc->flags |= IFF_PROMISC; + xlr_mac_set_rx_mode(sc); + } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && + !(ifp->if_flags & IFF_PROMISC) && + sc->flags & IFF_PROMISC) { + sc->flags &= IFF_PROMISC; + xlr_mac_set_rx_mode(sc); + } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && + (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) { + rmi_xlr_mac_set_multicast_list(sc); + } else + xlr_mac_set_rx_mode(sc); + } else { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + xlr_mac_set_rx_mode(sc); + } + } + sc->flags = ifp->if_flags; + RGE_UNLOCK(sc); + error = 0; + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + RGE_LOCK(sc); + rmi_xlr_mac_set_multicast_list(sc); + RGE_UNLOCK(sc); + error = 0; + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, + &sc->rge_mii.mii_media, command); + break; + case SIOCSIFCAP: + mask = ifr->ifr_reqcap ^ ifp->if_capenable; + ifp->if_hwassist = 0; + break; + default: + error = ether_ioctl(ifp, command, data); + break; + } + + return (error); +} + +static void +rge_init(void *addr) +{ + struct rge_softc *sc = (struct rge_softc *)addr; + struct ifnet *ifp; + struct driver_data *priv = &(sc->priv); + + ifp = sc->rge_ifp; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + return; + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + rmi_xlr_mac_set_enable(priv, 1); +} + +static void +rge_stop(struct rge_softc *sc) +{ + rmi_xlr_mac_close(sc); +} + +static void +rge_watchdog(struct ifnet *sc) +{ +} + +static int +rge_shutdown(device_t dev) +{ + struct rge_softc *sc; + + sc = device_get_softc(dev); + + RGE_LOCK(sc); + rge_stop(sc); + rge_reset(sc); + RGE_UNLOCK(sc); + + return (0); +} + +static int +rmi_xlr_mac_open(struct rge_softc *sc) +{ + struct driver_data *priv = &(sc->priv); + int i; + + dbg_msg("IN\n"); + + if (rmi_xlr_mac_fill_rxfr(sc)) { + return -1; + } + mtx_lock_spin(&priv->lock); + + xlr_mac_set_rx_mode(sc); + + if (sc->unit == xlr_board_info.gmacports - 1) { + printf("Enabling MDIO interrupts\n"); + struct rge_softc *tmp = NULL; + + for (i = 0; i < xlr_board_info.gmacports; i++) { + tmp = dev_mac[i]; + if (tmp) + xlr_write_reg(tmp->priv.mmio, R_INTMASK, + ((tmp->priv.instance == 0) << O_INTMASK__MDInt)); + } 
+ } + /* + * Configure the speed, duplex, and flow control + */ + rmi_xlr_mac_set_speed(priv, priv->speed); + rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl); + rmi_xlr_mac_set_enable(priv, 0); + + mtx_unlock_spin(&priv->lock); + + for (i = 0; i < 8; i++) { + atomic_set_int(&(priv->frin_to_be_sent[i]), 0); + } + + return 0; +} + +/********************************************************************** + **********************************************************************/ +static int +rmi_xlr_mac_close(struct rge_softc *sc) +{ + struct driver_data *priv = &(sc->priv); + + mtx_lock_spin(&priv->lock); + + /* + * There may have left over mbufs in the ring as well as in free in + * they will be reused next time open is called + */ + + rmi_xlr_mac_set_enable(priv, 0); + + xlr_inc_counter(NETIF_STOP_Q); + port_inc_counter(priv->instance, PORT_STOPQ); + + mtx_unlock_spin(&priv->lock); + + return 0; +} + +/********************************************************************** + **********************************************************************/ +static struct rge_softc_stats * +rmi_xlr_mac_get_stats(struct rge_softc *sc) +{ + struct driver_data *priv = &(sc->priv); + + /* unsigned long flags; */ + + mtx_lock_spin(&priv->lock); + + /* XXX update other stats here */ + + mtx_unlock_spin(&priv->lock); + + return &priv->stats; +} + +/********************************************************************** + **********************************************************************/ +static void +rmi_xlr_mac_set_multicast_list(struct rge_softc *sc) +{ +} + +/********************************************************************** + **********************************************************************/ +static int +rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu) +{ + struct driver_data *priv = &(sc->priv); + + if ((new_mtu > 9500) || (new_mtu < 64)) { + return -EINVAL; + } + mtx_lock_spin(&priv->lock); + + sc->mtu = new_mtu; + + /* Disable MAC TX/RX */ + rmi_xlr_mac_set_enable(priv, 0); + + /* Flush RX FR IN */ + /* Flush TX IN */ + rmi_xlr_mac_set_enable(priv, 1); + + mtx_unlock_spin(&priv->lock); + return 0; +} + +/********************************************************************** + **********************************************************************/ +static int +rmi_xlr_mac_fill_rxfr(struct rge_softc *sc) +{ + struct driver_data *priv = &(sc->priv); + unsigned long msgrng_flags; + int i; + int ret = 0; + void *ptr; + + dbg_msg("\n"); + if (!priv->init_frin_desc) + return ret; + priv->init_frin_desc = 0; + + dbg_msg("\n"); + for (i = 0; i < MAX_NUM_DESC; i++) { + ptr = get_buf(); + if (!ptr) { + ret = -ENOMEM; + break; + } + /* Send the free Rx desc to the MAC */ + msgrng_access_enable(msgrng_flags); + xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE); + msgrng_access_disable(msgrng_flags); + } + + return ret; +} + +/********************************************************************** + **********************************************************************/ +static __inline__ void * +rmi_xlr_config_spill(xlr_reg_t * mmio, + int reg_start_0, int reg_start_1, + int reg_size, int size) +{ + uint32_t spill_size = size; + void *spill = NULL; + uint64_t phys_addr = 0; + + + spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF, + M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); + if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) { + panic("Unable to allocate memory for spill area!\n"); + } + phys_addr = vtophys(spill); + dbg_msg("Allocate spill 
%d bytes at %llx\n", size, phys_addr); + xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff); + xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07); + xlr_write_reg(mmio, reg_size, spill_size); + + return spill; +} + +static void +rmi_xlr_config_spill_area(struct driver_data *priv) +{ + /* + * if driver initialization is done parallely on multiple cpus + * spill_configured needs synchronization + */ + if (priv->spill_configured) + return; + + if (priv->type == XLR_GMAC && priv->instance % 4 != 0) { + priv->spill_configured = 1; + return; + } + priv->spill_configured = 1; + + priv->frin_spill = + rmi_xlr_config_spill(priv->mmio, + R_REG_FRIN_SPILL_MEM_START_0, + R_REG_FRIN_SPILL_MEM_START_1, + R_REG_FRIN_SPILL_MEM_SIZE, + MAX_FRIN_SPILL * + sizeof(struct fr_desc)); + + priv->class_0_spill = + rmi_xlr_config_spill(priv->mmio, + R_CLASS0_SPILL_MEM_START_0, + R_CLASS0_SPILL_MEM_START_1, + R_CLASS0_SPILL_MEM_SIZE, + MAX_CLASS_0_SPILL * + sizeof(union rx_tx_desc)); + priv->class_1_spill = + rmi_xlr_config_spill(priv->mmio, + R_CLASS1_SPILL_MEM_START_0, + R_CLASS1_SPILL_MEM_START_1, + R_CLASS1_SPILL_MEM_SIZE, + MAX_CLASS_1_SPILL * + sizeof(union rx_tx_desc)); + + priv->frout_spill = + rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0, + R_FROUT_SPILL_MEM_START_1, + R_FROUT_SPILL_MEM_SIZE, + MAX_FROUT_SPILL * + sizeof(struct fr_desc)); + + priv->class_2_spill = + rmi_xlr_config_spill(priv->mmio, + R_CLASS2_SPILL_MEM_START_0, + R_CLASS2_SPILL_MEM_START_1, + R_CLASS2_SPILL_MEM_SIZE, + MAX_CLASS_2_SPILL * + sizeof(union rx_tx_desc)); + priv->class_3_spill = + rmi_xlr_config_spill(priv->mmio, + R_CLASS3_SPILL_MEM_START_0, + R_CLASS3_SPILL_MEM_START_1, + R_CLASS3_SPILL_MEM_SIZE, + MAX_CLASS_3_SPILL * + sizeof(union rx_tx_desc)); + priv->spill_configured = 1; +} + +/***************************************************************** + * Write the MAC address to the XLR registers + * All 4 addresses are the same for now + *****************************************************************/ +static void +xlr_mac_setup_hwaddr(struct driver_data *priv) +{ + struct rge_softc *sc = priv->sc; + + xlr_write_reg(priv->mmio, R_MAC_ADDR0, + ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) + | (sc->dev_addr[3] << 8) | (sc->dev_addr[2])) + ); + + xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1, + ((sc->dev_addr[1] << 24) | (sc-> + dev_addr[0] << 16))); + + xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff); + + xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff); + + xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff); + + xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff); + + xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, + (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | + (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | + (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID) + ); +} + +/***************************************************************** + * Read the MAC address from the XLR registers + * All 4 addresses are the same for now + *****************************************************************/ +static void +xlr_mac_get_hwaddr(struct rge_softc *sc) +{ + struct driver_data *priv = &(sc->priv); + + sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff; + sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff; + sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff; + sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff; + sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff; + sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance; +} + 
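+
+/*
+ * Note: xlr_mac_get_hwaddr() above derives each port's station address
+ * from the single board MAC address in xlr_boot1_info by adding the port
+ * instance to the last octet; xlr_mac_setup_hwaddr() then mirrors it into
+ * R_MAC_ADDR0 and enables broadcast and all-multicast receive plus the
+ * address-0 match in R_MAC_FILTER_CONFIG.  mac_common_init() below runs
+ * once (guarded by gmac_common_init_done in rge_attach()) and registers
+ * rmi_xlr_mac_msgring_handler for the GMAC message-ring stations.
+ */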
+/***************************************************************** + * Mac Module Initialization + *****************************************************************/ +static void +mac_common_init(void) +{ + init_p2d_allocation(); + init_tx_ring(); +#ifdef RX_COPY + init_rx_buf(); +#endif + + if (xlr_board_info.is_xls) { + if (register_msgring_handler(TX_STN_GMAC0, + rmi_xlr_mac_msgring_handler, NULL)) { + panic("Couldn't register msgring handler\n"); + } + if (register_msgring_handler(TX_STN_GMAC1, + rmi_xlr_mac_msgring_handler, NULL)) { + panic("Couldn't register msgring handler\n"); + } + } else { + if (register_msgring_handler(TX_STN_GMAC, + rmi_xlr_mac_msgring_handler, NULL)) { + panic("Couldn't register msgring handler\n"); + } + } + + /* + * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler + * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) { + * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); } + * if (register_msgring_handler (TX_STN_XGS_1, + * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register + * msgring handler for TX_STN_XGS_1\n"); } } + */ +} diff --git a/sys/mips/rmi/dev/xlr/rge.h b/sys/mips/rmi/dev/xlr/rge.h new file mode 100644 index 000000000000..98b5847c6d81 --- /dev/null +++ b/sys/mips/rmi/dev/xlr/rge.h @@ -0,0 +1,1097 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * RMI_BSD */ +#ifndef _RMI_RGE_H_ +#define _RMI_RGE_H_ + +/* #define MAC_SPLIT_MODE */ + +#define MAC_SPACING 0x400 +#define XGMAC_SPACING 0x400 + +/* PE-MCXMAC register and bit field definitions */ +#define R_MAC_CONFIG_1 0x00 +#define O_MAC_CONFIG_1__srst 31 +#define O_MAC_CONFIG_1__simr 30 +#define O_MAC_CONFIG_1__hrrmc 18 +#define W_MAC_CONFIG_1__hrtmc 2 +#define O_MAC_CONFIG_1__hrrfn 16 +#define W_MAC_CONFIG_1__hrtfn 2 +#define O_MAC_CONFIG_1__intlb 8 +#define O_MAC_CONFIG_1__rxfc 5 +#define O_MAC_CONFIG_1__txfc 4 +#define O_MAC_CONFIG_1__srxen 3 +#define O_MAC_CONFIG_1__rxen 2 +#define O_MAC_CONFIG_1__stxen 1 +#define O_MAC_CONFIG_1__txen 0 +#define R_MAC_CONFIG_2 0x01 +#define O_MAC_CONFIG_2__prlen 12 +#define W_MAC_CONFIG_2__prlen 4 +#define O_MAC_CONFIG_2__speed 8 +#define W_MAC_CONFIG_2__speed 2 +#define O_MAC_CONFIG_2__hugen 5 +#define O_MAC_CONFIG_2__flchk 4 +#define O_MAC_CONFIG_2__crce 1 +#define O_MAC_CONFIG_2__fulld 0 +#define R_IPG_IFG 0x02 +#define O_IPG_IFG__ipgr1 24 +#define W_IPG_IFG__ipgr1 7 +#define O_IPG_IFG__ipgr2 16 +#define W_IPG_IFG__ipgr2 7 +#define O_IPG_IFG__mifg 8 +#define W_IPG_IFG__mifg 8 +#define O_IPG_IFG__ipgt 0 +#define W_IPG_IFG__ipgt 7 +#define R_HALF_DUPLEX 0x03 +#define O_HALF_DUPLEX__abebt 24 +#define W_HALF_DUPLEX__abebt 4 +#define O_HALF_DUPLEX__abebe 19 +#define O_HALF_DUPLEX__bpnb 18 +#define O_HALF_DUPLEX__nobo 17 +#define O_HALF_DUPLEX__edxsdfr 16 +#define O_HALF_DUPLEX__retry 12 +#define W_HALF_DUPLEX__retry 4 +#define O_HALF_DUPLEX__lcol 0 +#define W_HALF_DUPLEX__lcol 10 +#define R_MAXIMUM_FRAME_LENGTH 0x04 +#define O_MAXIMUM_FRAME_LENGTH__maxf 0 +#define W_MAXIMUM_FRAME_LENGTH__maxf 16 +#define R_TEST 0x07 +#define O_TEST__mbof 3 +#define O_TEST__rthdf 2 +#define O_TEST__tpause 1 +#define O_TEST__sstct 0 +#define R_MII_MGMT_CONFIG 0x08 +#define O_MII_MGMT_CONFIG__scinc 5 +#define O_MII_MGMT_CONFIG__spre 4 +#define O_MII_MGMT_CONFIG__clks 3 +#define W_MII_MGMT_CONFIG__clks 3 +#define R_MII_MGMT_COMMAND 0x09 +#define O_MII_MGMT_COMMAND__scan 1 +#define O_MII_MGMT_COMMAND__rstat 0 +#define R_MII_MGMT_ADDRESS 0x0A +#define O_MII_MGMT_ADDRESS__fiad 8 +#define W_MII_MGMT_ADDRESS__fiad 5 +#define O_MII_MGMT_ADDRESS__fgad 5 +#define W_MII_MGMT_ADDRESS__fgad 0 +#define R_MII_MGMT_WRITE_DATA 0x0B +#define O_MII_MGMT_WRITE_DATA__ctld 0 +#define W_MII_MGMT_WRITE_DATA__ctld 16 +#define R_MII_MGMT_STATUS 0x0C +#define R_MII_MGMT_INDICATORS 0x0D +#define O_MII_MGMT_INDICATORS__nvalid 2 +#define O_MII_MGMT_INDICATORS__scan 1 +#define O_MII_MGMT_INDICATORS__busy 0 +#define R_INTERFACE_CONTROL 0x0E +#define O_INTERFACE_CONTROL__hrstint 31 +#define O_INTERFACE_CONTROL__tbimode 27 +#define O_INTERFACE_CONTROL__ghdmode 26 +#define O_INTERFACE_CONTROL__lhdmode 25 +#define O_INTERFACE_CONTROL__phymod 24 +#define O_INTERFACE_CONTROL__hrrmi 23 +#define O_INTERFACE_CONTROL__rspd 16 +#define O_INTERFACE_CONTROL__hr100 15 +#define O_INTERFACE_CONTROL__frcq 10 +#define O_INTERFACE_CONTROL__nocfr 9 +#define O_INTERFACE_CONTROL__dlfct 8 +#define O_INTERFACE_CONTROL__enjab 0 +#define R_INTERFACE_STATUS 0x0F +#define O_INTERFACE_STATUS__xsdfr 9 +#define O_INTERFACE_STATUS__ssrr 8 +#define W_INTERFACE_STATUS__ssrr 5 +#define O_INTERFACE_STATUS__miilf 3 +#define O_INTERFACE_STATUS__locar 2 +#define O_INTERFACE_STATUS__sqerr 1 +#define O_INTERFACE_STATUS__jabber 0 +#define R_STATION_ADDRESS_LS 0x10 +#define R_STATION_ADDRESS_MS 0x11 + +/* A-XGMAC register and bit field definitions */ +#define R_XGMAC_CONFIG_0 0x00 +#define O_XGMAC_CONFIG_0__hstmacrst 31 +#define 
O_XGMAC_CONFIG_0__hstrstrctl 23 +#define O_XGMAC_CONFIG_0__hstrstrfn 22 +#define O_XGMAC_CONFIG_0__hstrsttctl 18 +#define O_XGMAC_CONFIG_0__hstrsttfn 17 +#define O_XGMAC_CONFIG_0__hstrstmiim 16 +#define O_XGMAC_CONFIG_0__hstloopback 8 +#define R_XGMAC_CONFIG_1 0x01 +#define O_XGMAC_CONFIG_1__hsttctlen 31 +#define O_XGMAC_CONFIG_1__hsttfen 30 +#define O_XGMAC_CONFIG_1__hstrctlen 29 +#define O_XGMAC_CONFIG_1__hstrfen 28 +#define O_XGMAC_CONFIG_1__tfen 26 +#define O_XGMAC_CONFIG_1__rfen 24 +#define O_XGMAC_CONFIG_1__hstrctlshrtp 12 +#define O_XGMAC_CONFIG_1__hstdlyfcstx 10 +#define W_XGMAC_CONFIG_1__hstdlyfcstx 2 +#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8 +#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2 +#define O_XGMAC_CONFIG_1__hstppen 7 +#define O_XGMAC_CONFIG_1__hstbytswp 6 +#define O_XGMAC_CONFIG_1__hstdrplt64 5 +#define O_XGMAC_CONFIG_1__hstprmscrx 4 +#define O_XGMAC_CONFIG_1__hstlenchk 3 +#define O_XGMAC_CONFIG_1__hstgenfcs 2 +#define O_XGMAC_CONFIG_1__hstpadmode 0 +#define W_XGMAC_CONFIG_1__hstpadmode 2 +#define R_XGMAC_CONFIG_2 0x02 +#define O_XGMAC_CONFIG_2__hsttctlfrcp 31 +#define O_XGMAC_CONFIG_2__hstmlnkflth 27 +#define O_XGMAC_CONFIG_2__hstalnkflth 26 +#define O_XGMAC_CONFIG_2__rflnkflt 24 +#define W_XGMAC_CONFIG_2__rflnkflt 2 +#define O_XGMAC_CONFIG_2__hstipgextmod 16 +#define W_XGMAC_CONFIG_2__hstipgextmod 5 +#define O_XGMAC_CONFIG_2__hstrctlfrcp 15 +#define O_XGMAC_CONFIG_2__hstipgexten 5 +#define O_XGMAC_CONFIG_2__hstmipgext 0 +#define W_XGMAC_CONFIG_2__hstmipgext 5 +#define R_XGMAC_CONFIG_3 0x03 +#define O_XGMAC_CONFIG_3__hstfltrfrm 31 +#define W_XGMAC_CONFIG_3__hstfltrfrm 16 +#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15 +#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16 +#define R_XGMAC_STATION_ADDRESS_LS 0x04 +#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0 +#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32 +#define R_XGMAC_STATION_ADDRESS_MS 0x05 +#define R_XGMAC_MAX_FRAME_LEN 0x08 +#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16 +#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14 +#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0 +#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16 +#define R_XGMAC_REV_LEVEL 0x0B +#define O_XGMAC_REV_LEVEL__revlvl 0 +#define W_XGMAC_REV_LEVEL__revlvl 15 +#define R_XGMAC_MIIM_COMMAND 0x10 +#define O_XGMAC_MIIM_COMMAND__hstldcmd 3 +#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0 +#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3 +#define R_XGMAC_MIIM_FILED 0x11 +#define O_XGMAC_MIIM_FILED__hststfield 30 +#define W_XGMAC_MIIM_FILED__hststfield 2 +#define O_XGMAC_MIIM_FILED__hstopfield 28 +#define W_XGMAC_MIIM_FILED__hstopfield 2 +#define O_XGMAC_MIIM_FILED__hstphyadx 23 +#define W_XGMAC_MIIM_FILED__hstphyadx 5 +#define O_XGMAC_MIIM_FILED__hstregadx 18 +#define W_XGMAC_MIIM_FILED__hstregadx 5 +#define O_XGMAC_MIIM_FILED__hsttafield 16 +#define W_XGMAC_MIIM_FILED__hsttafield 2 +#define O_XGMAC_MIIM_FILED__miimrddat 0 +#define W_XGMAC_MIIM_FILED__miimrddat 16 +#define R_XGMAC_MIIM_CONFIG 0x12 +#define O_XGMAC_MIIM_CONFIG__hstnopram 7 +#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0 +#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7 +#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13 +#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0 +#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32 +#define R_XGMAC_MIIM_INDICATOR 0x14 +#define O_XGMAC_MIIM_INDICATOR__miimphylf 4 +#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3 +#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2 +#define O_XGMAC_MIIM_INDICATOR__miimmon 1 +#define O_XGMAC_MIIM_INDICATOR__miimbusy 0 + +/* Glue logic register and bit field definitions */ +#define 
R_MAC_ADDR0 0x50 +#define R_MAC_ADDR1 0x52 +#define R_MAC_ADDR2 0x54 +#define R_MAC_ADDR3 0x56 +#define R_MAC_ADDR_MASK2 0x58 +#define R_MAC_ADDR_MASK3 0x5A +#define R_MAC_FILTER_CONFIG 0x5C +#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10 +#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9 +#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8 +#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7 +#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6 +#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5 +#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4 +#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3 +#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2 +#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1 +#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0 +#define R_HASH_TABLE_VECTOR 0x30 +#define R_TX_CONTROL 0x0A0 +#define O_TX_CONTROL__Tx15Halt 31 +#define O_TX_CONTROL__Tx14Halt 30 +#define O_TX_CONTROL__Tx13Halt 29 +#define O_TX_CONTROL__Tx12Halt 28 +#define O_TX_CONTROL__Tx11Halt 27 +#define O_TX_CONTROL__Tx10Halt 26 +#define O_TX_CONTROL__Tx9Halt 25 +#define O_TX_CONTROL__Tx8Halt 24 +#define O_TX_CONTROL__Tx7Halt 23 +#define O_TX_CONTROL__Tx6Halt 22 +#define O_TX_CONTROL__Tx5Halt 21 +#define O_TX_CONTROL__Tx4Halt 20 +#define O_TX_CONTROL__Tx3Halt 19 +#define O_TX_CONTROL__Tx2Halt 18 +#define O_TX_CONTROL__Tx1Halt 17 +#define O_TX_CONTROL__Tx0Halt 16 +#define O_TX_CONTROL__TxIdle 15 +#define O_TX_CONTROL__TxEnable 14 +#define O_TX_CONTROL__TxThreshold 0 +#define W_TX_CONTROL__TxThreshold 14 +#define R_RX_CONTROL 0x0A1 +#define O_RX_CONTROL__RGMII 10 +#define O_RX_CONTROL__RxHalt 1 +#define O_RX_CONTROL__RxEnable 0 +#define R_DESC_PACK_CTRL 0x0A2 +#define O_DESC_PACK_CTRL__ByteOffset 17 +#define W_DESC_PACK_CTRL__ByteOffset 3 +#define O_DESC_PACK_CTRL__PrePadEnable 16 +#define O_DESC_PACK_CTRL__MaxEntry 14 +#define W_DESC_PACK_CTRL__MaxEntry 2 +#define O_DESC_PACK_CTRL__RegularSize 0 +#define W_DESC_PACK_CTRL__RegularSize 14 +#define R_STATCTRL 0x0A3 +#define O_STATCTRL__OverFlowEn 4 +#define O_STATCTRL__GIG 3 +#define O_STATCTRL__Sten 2 +#define O_STATCTRL__ClrCnt 1 +#define O_STATCTRL__AutoZ 0 +#define R_L2ALLOCCTRL 0x0A4 +#define O_L2ALLOCCTRL__TxL2Allocate 9 +#define W_L2ALLOCCTRL__TxL2Allocate 9 +#define O_L2ALLOCCTRL__RxL2Allocate 0 +#define W_L2ALLOCCTRL__RxL2Allocate 9 +#define R_INTMASK 0x0A5 +#define O_INTMASK__Spi4TxError 28 +#define O_INTMASK__Spi4RxError 27 +#define O_INTMASK__RGMIIHalfDupCollision 27 +#define O_INTMASK__Abort 26 +#define O_INTMASK__Underrun 25 +#define O_INTMASK__DiscardPacket 24 +#define O_INTMASK__AsyncFifoFull 23 +#define O_INTMASK__TagFull 22 +#define O_INTMASK__Class3Full 21 +#define O_INTMASK__C3EarlyFull 20 +#define O_INTMASK__Class2Full 19 +#define O_INTMASK__C2EarlyFull 18 +#define O_INTMASK__Class1Full 17 +#define O_INTMASK__C1EarlyFull 16 +#define O_INTMASK__Class0Full 15 +#define O_INTMASK__C0EarlyFull 14 +#define O_INTMASK__RxDataFull 13 +#define O_INTMASK__RxEarlyFull 12 +#define O_INTMASK__RFreeEmpty 9 +#define O_INTMASK__RFEarlyEmpty 8 +#define O_INTMASK__P2PSpillEcc 7 +#define O_INTMASK__FreeDescFull 5 +#define O_INTMASK__FreeEarlyFull 4 +#define O_INTMASK__TxFetchError 3 +#define O_INTMASK__StatCarry 2 +#define O_INTMASK__MDInt 1 +#define O_INTMASK__TxIllegal 0 +#define R_INTREG 0x0A6 +#define O_INTREG__Spi4TxError 28 +#define O_INTREG__Spi4RxError 27 +#define O_INTREG__RGMIIHalfDupCollision 27 +#define O_INTREG__Abort 26 +#define O_INTREG__Underrun 25 +#define O_INTREG__DiscardPacket 24 +#define O_INTREG__AsyncFifoFull 23 +#define O_INTREG__TagFull 22 +#define O_INTREG__Class3Full 21 +#define 
O_INTREG__C3EarlyFull 20 +#define O_INTREG__Class2Full 19 +#define O_INTREG__C2EarlyFull 18 +#define O_INTREG__Class1Full 17 +#define O_INTREG__C1EarlyFull 16 +#define O_INTREG__Class0Full 15 +#define O_INTREG__C0EarlyFull 14 +#define O_INTREG__RxDataFull 13 +#define O_INTREG__RxEarlyFull 12 +#define O_INTREG__RFreeEmpty 9 +#define O_INTREG__RFEarlyEmpty 8 +#define O_INTREG__P2PSpillEcc 7 +#define O_INTREG__FreeDescFull 5 +#define O_INTREG__FreeEarlyFull 4 +#define O_INTREG__TxFetchError 3 +#define O_INTREG__StatCarry 2 +#define O_INTREG__MDInt 1 +#define O_INTREG__TxIllegal 0 +#define R_TXRETRY 0x0A7 +#define O_TXRETRY__CollisionRetry 6 +#define O_TXRETRY__BusErrorRetry 5 +#define O_TXRETRY__UnderRunRetry 4 +#define O_TXRETRY__Retries 0 +#define W_TXRETRY__Retries 4 +#define R_CORECONTROL 0x0A8 +#define O_CORECONTROL__ErrorThread 4 +#define W_CORECONTROL__ErrorThread 7 +#define O_CORECONTROL__Shutdown 2 +#define O_CORECONTROL__Speed 0 +#define W_CORECONTROL__Speed 2 +#define R_BYTEOFFSET0 0x0A9 +#define R_BYTEOFFSET1 0x0AA +#define R_L2TYPE_0 0x0F0 +#define O_L2TYPE__ExtraHdrProtoSize 26 +#define W_L2TYPE__ExtraHdrProtoSize 5 +#define O_L2TYPE__ExtraHdrProtoOffset 20 +#define W_L2TYPE__ExtraHdrProtoOffset 6 +#define O_L2TYPE__ExtraHeaderSize 14 +#define W_L2TYPE__ExtraHeaderSize 6 +#define O_L2TYPE__ProtoOffset 8 +#define W_L2TYPE__ProtoOffset 6 +#define O_L2TYPE__L2HdrOffset 2 +#define W_L2TYPE__L2HdrOffset 6 +#define O_L2TYPE__L2Proto 0 +#define W_L2TYPE__L2Proto 2 +#define R_L2TYPE_1 0xF0 +#define R_L2TYPE_2 0xF0 +#define R_L2TYPE_3 0xF0 +#define R_PARSERCONFIGREG 0x100 +#define O_PARSERCONFIGREG__CRCHashPoly 8 +#define W_PARSERCONFIGREG__CRCHashPoly 7 +#define O_PARSERCONFIGREG__PrePadOffset 4 +#define W_PARSERCONFIGREG__PrePadOffset 4 +#define O_PARSERCONFIGREG__UseCAM 2 +#define O_PARSERCONFIGREG__UseHASH 1 +#define O_PARSERCONFIGREG__UseProto 0 +#define R_L3CTABLE 0x140 +#define O_L3CTABLE__Offset0 25 +#define W_L3CTABLE__Offset0 7 +#define O_L3CTABLE__Len0 21 +#define W_L3CTABLE__Len0 4 +#define O_L3CTABLE__Offset1 14 +#define W_L3CTABLE__Offset1 7 +#define O_L3CTABLE__Len1 10 +#define W_L3CTABLE__Len1 4 +#define O_L3CTABLE__Offset2 4 +#define W_L3CTABLE__Offset2 6 +#define O_L3CTABLE__Len2 0 +#define W_L3CTABLE__Len2 4 +#define O_L3CTABLE__L3HdrOffset 26 +#define W_L3CTABLE__L3HdrOffset 6 +#define O_L3CTABLE__L4ProtoOffset 20 +#define W_L3CTABLE__L4ProtoOffset 6 +#define O_L3CTABLE__IPChksumCompute 19 +#define O_L3CTABLE__L4Classify 18 +#define O_L3CTABLE__L2Proto 16 +#define W_L3CTABLE__L2Proto 2 +#define O_L3CTABLE__L3ProtoKey 0 +#define W_L3CTABLE__L3ProtoKey 16 +#define R_L4CTABLE 0x160 +#define O_L4CTABLE__Offset0 21 +#define W_L4CTABLE__Offset0 6 +#define O_L4CTABLE__Len0 17 +#define W_L4CTABLE__Len0 4 +#define O_L4CTABLE__Offset1 11 +#define W_L4CTABLE__Offset1 6 +#define O_L4CTABLE__Len1 7 +#define W_L4CTABLE__Len1 4 +#define O_L4CTABLE__TCPChksumEnable 0 +#define R_CAM4X128TABLE 0x172 +#define O_CAM4X128TABLE__ClassId 7 +#define W_CAM4X128TABLE__ClassId 2 +#define O_CAM4X128TABLE__BucketId 1 +#define W_CAM4X128TABLE__BucketId 6 +#define O_CAM4X128TABLE__UseBucket 0 +#define R_CAM4X128KEY 0x180 +#define R_TRANSLATETABLE 0x1A0 +#define R_DMACR0 0x200 +#define O_DMACR0__Data0WrMaxCr 27 +#define W_DMACR0__Data0WrMaxCr 3 +#define O_DMACR0__Data0RdMaxCr 24 +#define W_DMACR0__Data0RdMaxCr 3 +#define O_DMACR0__Data1WrMaxCr 21 +#define W_DMACR0__Data1WrMaxCr 3 +#define O_DMACR0__Data1RdMaxCr 18 +#define W_DMACR0__Data1RdMaxCr 3 +#define O_DMACR0__Data2WrMaxCr 15 +#define 
W_DMACR0__Data2WrMaxCr 3 +#define O_DMACR0__Data2RdMaxCr 12 +#define W_DMACR0__Data2RdMaxCr 3 +#define O_DMACR0__Data3WrMaxCr 9 +#define W_DMACR0__Data3WrMaxCr 3 +#define O_DMACR0__Data3RdMaxCr 6 +#define W_DMACR0__Data3RdMaxCr 3 +#define O_DMACR0__Data4WrMaxCr 3 +#define W_DMACR0__Data4WrMaxCr 3 +#define O_DMACR0__Data4RdMaxCr 0 +#define W_DMACR0__Data4RdMaxCr 3 +#define R_DMACR1 0x201 +#define O_DMACR1__Data5WrMaxCr 27 +#define W_DMACR1__Data5WrMaxCr 3 +#define O_DMACR1__Data5RdMaxCr 24 +#define W_DMACR1__Data5RdMaxCr 3 +#define O_DMACR1__Data6WrMaxCr 21 +#define W_DMACR1__Data6WrMaxCr 3 +#define O_DMACR1__Data6RdMaxCr 18 +#define W_DMACR1__Data6RdMaxCr 3 +#define O_DMACR1__Data7WrMaxCr 15 +#define W_DMACR1__Data7WrMaxCr 3 +#define O_DMACR1__Data7RdMaxCr 12 +#define W_DMACR1__Data7RdMaxCr 3 +#define O_DMACR1__Data8WrMaxCr 9 +#define W_DMACR1__Data8WrMaxCr 3 +#define O_DMACR1__Data8RdMaxCr 6 +#define W_DMACR1__Data8RdMaxCr 3 +#define O_DMACR1__Data9WrMaxCr 3 +#define W_DMACR1__Data9WrMaxCr 3 +#define O_DMACR1__Data9RdMaxCr 0 +#define W_DMACR1__Data9RdMaxCr 3 +#define R_DMACR2 0x202 +#define O_DMACR2__Data10WrMaxCr 27 +#define W_DMACR2__Data10WrMaxCr 3 +#define O_DMACR2__Data10RdMaxCr 24 +#define W_DMACR2__Data10RdMaxCr 3 +#define O_DMACR2__Data11WrMaxCr 21 +#define W_DMACR2__Data11WrMaxCr 3 +#define O_DMACR2__Data11RdMaxCr 18 +#define W_DMACR2__Data11RdMaxCr 3 +#define O_DMACR2__Data12WrMaxCr 15 +#define W_DMACR2__Data12WrMaxCr 3 +#define O_DMACR2__Data12RdMaxCr 12 +#define W_DMACR2__Data12RdMaxCr 3 +#define O_DMACR2__Data13WrMaxCr 9 +#define W_DMACR2__Data13WrMaxCr 3 +#define O_DMACR2__Data13RdMaxCr 6 +#define W_DMACR2__Data13RdMaxCr 3 +#define O_DMACR2__Data14WrMaxCr 3 +#define W_DMACR2__Data14WrMaxCr 3 +#define O_DMACR2__Data14RdMaxCr 0 +#define W_DMACR2__Data14RdMaxCr 3 +#define R_DMACR3 0x203 +#define O_DMACR3__Data15WrMaxCr 27 +#define W_DMACR3__Data15WrMaxCr 3 +#define O_DMACR3__Data15RdMaxCr 24 +#define W_DMACR3__Data15RdMaxCr 3 +#define O_DMACR3__SpClassWrMaxCr 21 +#define W_DMACR3__SpClassWrMaxCr 3 +#define O_DMACR3__SpClassRdMaxCr 18 +#define W_DMACR3__SpClassRdMaxCr 3 +#define O_DMACR3__JumFrInWrMaxCr 15 +#define W_DMACR3__JumFrInWrMaxCr 3 +#define O_DMACR3__JumFrInRdMaxCr 12 +#define W_DMACR3__JumFrInRdMaxCr 3 +#define O_DMACR3__RegFrInWrMaxCr 9 +#define W_DMACR3__RegFrInWrMaxCr 3 +#define O_DMACR3__RegFrInRdMaxCr 6 +#define W_DMACR3__RegFrInRdMaxCr 3 +#define O_DMACR3__FrOutWrMaxCr 3 +#define W_DMACR3__FrOutWrMaxCr 3 +#define O_DMACR3__FrOutRdMaxCr 0 +#define W_DMACR3__FrOutRdMaxCr 3 +#define R_REG_FRIN_SPILL_MEM_START_0 0x204 +#define O_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 0 +#define W_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 32 +#define R_REG_FRIN_SPILL_MEM_START_1 0x205 +#define O_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 0 +#define W_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 3 +#define R_REG_FRIN_SPILL_MEM_SIZE 0x206 +#define O_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 0 +#define W_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 32 +#define R_FROUT_SPILL_MEM_START_0 0x207 +#define O_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 0 +#define W_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 32 +#define R_FROUT_SPILL_MEM_START_1 0x208 +#define O_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 0 +#define W_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 3 +#define R_FROUT_SPILL_MEM_SIZE 0x209 +#define O_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 0 +#define W_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 32 +#define R_CLASS0_SPILL_MEM_START_0 0x20A +#define 
O_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 0 +#define W_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 32 +#define R_CLASS0_SPILL_MEM_START_1 0x20B +#define O_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 0 +#define W_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 3 +#define R_CLASS0_SPILL_MEM_SIZE 0x20C +#define O_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 0 +#define W_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 32 +#define R_JUMFRIN_SPILL_MEM_START_0 0x20D +#define O_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 0 +#define W_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 32 +#define R_JUMFRIN_SPILL_MEM_START_1 0x20E +#define O_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 0 +#define W_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 3 +#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F +#define O_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 0 +#define W_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 32 +#define R_CLASS1_SPILL_MEM_START_0 0x210 +#define O_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 0 +#define W_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 32 +#define R_CLASS1_SPILL_MEM_START_1 0x211 +#define O_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 0 +#define W_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 3 +#define R_CLASS1_SPILL_MEM_SIZE 0x212 +#define O_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 0 +#define W_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 32 +#define R_CLASS2_SPILL_MEM_START_0 0x213 +#define O_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 0 +#define W_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 32 +#define R_CLASS2_SPILL_MEM_START_1 0x214 +#define O_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 0 +#define W_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 3 +#define R_CLASS2_SPILL_MEM_SIZE 0x215 +#define O_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 0 +#define W_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 32 +#define R_CLASS3_SPILL_MEM_START_0 0x216 +#define O_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 0 +#define W_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 32 +#define R_CLASS3_SPILL_MEM_START_1 0x217 +#define O_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 0 +#define W_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 3 +#define R_CLASS3_SPILL_MEM_SIZE 0x218 +#define O_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 0 +#define W_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 32 +#define R_REG_FRIN1_SPILL_MEM_START_0 0x219 +#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a +#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b +#define R_SPIHNGY0 0x219 +#define O_SPIHNGY0__EG_HNGY_THRESH_0 24 +#define W_SPIHNGY0__EG_HNGY_THRESH_0 7 +#define O_SPIHNGY0__EG_HNGY_THRESH_1 16 +#define W_SPIHNGY0__EG_HNGY_THRESH_1 7 +#define O_SPIHNGY0__EG_HNGY_THRESH_2 8 +#define W_SPIHNGY0__EG_HNGY_THRESH_2 7 +#define O_SPIHNGY0__EG_HNGY_THRESH_3 0 +#define W_SPIHNGY0__EG_HNGY_THRESH_3 7 +#define R_SPIHNGY1 0x21A +#define O_SPIHNGY1__EG_HNGY_THRESH_4 24 +#define W_SPIHNGY1__EG_HNGY_THRESH_4 7 +#define O_SPIHNGY1__EG_HNGY_THRESH_5 16 +#define W_SPIHNGY1__EG_HNGY_THRESH_5 7 +#define O_SPIHNGY1__EG_HNGY_THRESH_6 8 +#define W_SPIHNGY1__EG_HNGY_THRESH_6 7 +#define O_SPIHNGY1__EG_HNGY_THRESH_7 0 +#define W_SPIHNGY1__EG_HNGY_THRESH_7 7 +#define R_SPIHNGY2 0x21B +#define O_SPIHNGY2__EG_HNGY_THRESH_8 24 +#define W_SPIHNGY2__EG_HNGY_THRESH_8 7 +#define O_SPIHNGY2__EG_HNGY_THRESH_9 16 +#define W_SPIHNGY2__EG_HNGY_THRESH_9 7 +#define O_SPIHNGY2__EG_HNGY_THRESH_10 8 +#define W_SPIHNGY2__EG_HNGY_THRESH_10 7 +#define O_SPIHNGY2__EG_HNGY_THRESH_11 0 +#define W_SPIHNGY2__EG_HNGY_THRESH_11 7 +#define R_SPIHNGY3 0x21C +#define 
O_SPIHNGY3__EG_HNGY_THRESH_12 24 +#define W_SPIHNGY3__EG_HNGY_THRESH_12 7 +#define O_SPIHNGY3__EG_HNGY_THRESH_13 16 +#define W_SPIHNGY3__EG_HNGY_THRESH_13 7 +#define O_SPIHNGY3__EG_HNGY_THRESH_14 8 +#define W_SPIHNGY3__EG_HNGY_THRESH_14 7 +#define O_SPIHNGY3__EG_HNGY_THRESH_15 0 +#define W_SPIHNGY3__EG_HNGY_THRESH_15 7 +#define R_SPISTRV0 0x21D +#define O_SPISTRV0__EG_STRV_THRESH_0 24 +#define W_SPISTRV0__EG_STRV_THRESH_0 7 +#define O_SPISTRV0__EG_STRV_THRESH_1 16 +#define W_SPISTRV0__EG_STRV_THRESH_1 7 +#define O_SPISTRV0__EG_STRV_THRESH_2 8 +#define W_SPISTRV0__EG_STRV_THRESH_2 7 +#define O_SPISTRV0__EG_STRV_THRESH_3 0 +#define W_SPISTRV0__EG_STRV_THRESH_3 7 +#define R_SPISTRV1 0x21E +#define O_SPISTRV1__EG_STRV_THRESH_4 24 +#define W_SPISTRV1__EG_STRV_THRESH_4 7 +#define O_SPISTRV1__EG_STRV_THRESH_5 16 +#define W_SPISTRV1__EG_STRV_THRESH_5 7 +#define O_SPISTRV1__EG_STRV_THRESH_6 8 +#define W_SPISTRV1__EG_STRV_THRESH_6 7 +#define O_SPISTRV1__EG_STRV_THRESH_7 0 +#define W_SPISTRV1__EG_STRV_THRESH_7 7 +#define R_SPISTRV2 0x21F +#define O_SPISTRV2__EG_STRV_THRESH_8 24 +#define W_SPISTRV2__EG_STRV_THRESH_8 7 +#define O_SPISTRV2__EG_STRV_THRESH_9 16 +#define W_SPISTRV2__EG_STRV_THRESH_9 7 +#define O_SPISTRV2__EG_STRV_THRESH_10 8 +#define W_SPISTRV2__EG_STRV_THRESH_10 7 +#define O_SPISTRV2__EG_STRV_THRESH_11 0 +#define W_SPISTRV2__EG_STRV_THRESH_11 7 +#define R_SPISTRV3 0x220 +#define O_SPISTRV3__EG_STRV_THRESH_12 24 +#define W_SPISTRV3__EG_STRV_THRESH_12 7 +#define O_SPISTRV3__EG_STRV_THRESH_13 16 +#define W_SPISTRV3__EG_STRV_THRESH_13 7 +#define O_SPISTRV3__EG_STRV_THRESH_14 8 +#define W_SPISTRV3__EG_STRV_THRESH_14 7 +#define O_SPISTRV3__EG_STRV_THRESH_15 0 +#define W_SPISTRV3__EG_STRV_THRESH_15 7 +#define R_TXDATAFIFO0 0x221 +#define O_TXDATAFIFO0__Tx0DataFifoStart 24 +#define W_TXDATAFIFO0__Tx0DataFifoStart 7 +#define O_TXDATAFIFO0__Tx0DataFifoSize 16 +#define W_TXDATAFIFO0__Tx0DataFifoSize 7 +#define O_TXDATAFIFO0__Tx1DataFifoStart 8 +#define W_TXDATAFIFO0__Tx1DataFifoStart 7 +#define O_TXDATAFIFO0__Tx1DataFifoSize 0 +#define W_TXDATAFIFO0__Tx1DataFifoSize 7 +#define R_TXDATAFIFO1 0x222 +#define O_TXDATAFIFO1__Tx2DataFifoStart 24 +#define W_TXDATAFIFO1__Tx2DataFifoStart 7 +#define O_TXDATAFIFO1__Tx2DataFifoSize 16 +#define W_TXDATAFIFO1__Tx2DataFifoSize 7 +#define O_TXDATAFIFO1__Tx3DataFifoStart 8 +#define W_TXDATAFIFO1__Tx3DataFifoStart 7 +#define O_TXDATAFIFO1__Tx3DataFifoSize 0 +#define W_TXDATAFIFO1__Tx3DataFifoSize 7 +#define R_TXDATAFIFO2 0x223 +#define O_TXDATAFIFO2__Tx4DataFifoStart 24 +#define W_TXDATAFIFO2__Tx4DataFifoStart 7 +#define O_TXDATAFIFO2__Tx4DataFifoSize 16 +#define W_TXDATAFIFO2__Tx4DataFifoSize 7 +#define O_TXDATAFIFO2__Tx5DataFifoStart 8 +#define W_TXDATAFIFO2__Tx5DataFifoStart 7 +#define O_TXDATAFIFO2__Tx5DataFifoSize 0 +#define W_TXDATAFIFO2__Tx5DataFifoSize 7 +#define R_TXDATAFIFO3 0x224 +#define O_TXDATAFIFO3__Tx6DataFifoStart 24 +#define W_TXDATAFIFO3__Tx6DataFifoStart 7 +#define O_TXDATAFIFO3__Tx6DataFifoSize 16 +#define W_TXDATAFIFO3__Tx6DataFifoSize 7 +#define O_TXDATAFIFO3__Tx7DataFifoStart 8 +#define W_TXDATAFIFO3__Tx7DataFifoStart 7 +#define O_TXDATAFIFO3__Tx7DataFifoSize 0 +#define W_TXDATAFIFO3__Tx7DataFifoSize 7 +#define R_TXDATAFIFO4 0x225 +#define O_TXDATAFIFO4__Tx8DataFifoStart 24 +#define W_TXDATAFIFO4__Tx8DataFifoStart 7 +#define O_TXDATAFIFO4__Tx8DataFifoSize 16 +#define W_TXDATAFIFO4__Tx8DataFifoSize 7 +#define O_TXDATAFIFO4__Tx9DataFifoStart 8 +#define W_TXDATAFIFO4__Tx9DataFifoStart 7 +#define O_TXDATAFIFO4__Tx9DataFifoSize 0 +#define 
W_TXDATAFIFO4__Tx9DataFifoSize 7 +#define R_TXDATAFIFO5 0x226 +#define O_TXDATAFIFO5__Tx10DataFifoStart 24 +#define W_TXDATAFIFO5__Tx10DataFifoStart 7 +#define O_TXDATAFIFO5__Tx10DataFifoSize 16 +#define W_TXDATAFIFO5__Tx10DataFifoSize 7 +#define O_TXDATAFIFO5__Tx11DataFifoStart 8 +#define W_TXDATAFIFO5__Tx11DataFifoStart 7 +#define O_TXDATAFIFO5__Tx11DataFifoSize 0 +#define W_TXDATAFIFO5__Tx11DataFifoSize 7 +#define R_TXDATAFIFO6 0x227 +#define O_TXDATAFIFO6__Tx12DataFifoStart 24 +#define W_TXDATAFIFO6__Tx12DataFifoStart 7 +#define O_TXDATAFIFO6__Tx12DataFifoSize 16 +#define W_TXDATAFIFO6__Tx12DataFifoSize 7 +#define O_TXDATAFIFO6__Tx13DataFifoStart 8 +#define W_TXDATAFIFO6__Tx13DataFifoStart 7 +#define O_TXDATAFIFO6__Tx13DataFifoSize 0 +#define W_TXDATAFIFO6__Tx13DataFifoSize 7 +#define R_TXDATAFIFO7 0x228 +#define O_TXDATAFIFO7__Tx14DataFifoStart 24 +#define W_TXDATAFIFO7__Tx14DataFifoStart 7 +#define O_TXDATAFIFO7__Tx14DataFifoSize 16 +#define W_TXDATAFIFO7__Tx14DataFifoSize 7 +#define O_TXDATAFIFO7__Tx15DataFifoStart 8 +#define W_TXDATAFIFO7__Tx15DataFifoStart 7 +#define O_TXDATAFIFO7__Tx15DataFifoSize 0 +#define W_TXDATAFIFO7__Tx15DataFifoSize 7 +#define R_RXDATAFIFO0 0x229 +#define O_RXDATAFIFO0__Rx0DataFifoStart 24 +#define W_RXDATAFIFO0__Rx0DataFifoStart 7 +#define O_RXDATAFIFO0__Rx0DataFifoSize 16 +#define W_RXDATAFIFO0__Rx0DataFifoSize 7 +#define O_RXDATAFIFO0__Rx1DataFifoStart 8 +#define W_RXDATAFIFO0__Rx1DataFifoStart 7 +#define O_RXDATAFIFO0__Rx1DataFifoSize 0 +#define W_RXDATAFIFO0__Rx1DataFifoSize 7 +#define R_RXDATAFIFO1 0x22A +#define O_RXDATAFIFO1__Rx2DataFifoStart 24 +#define W_RXDATAFIFO1__Rx2DataFifoStart 7 +#define O_RXDATAFIFO1__Rx2DataFifoSize 16 +#define W_RXDATAFIFO1__Rx2DataFifoSize 7 +#define O_RXDATAFIFO1__Rx3DataFifoStart 8 +#define W_RXDATAFIFO1__Rx3DataFifoStart 7 +#define O_RXDATAFIFO1__Rx3DataFifoSize 0 +#define W_RXDATAFIFO1__Rx3DataFifoSize 7 +#define R_RXDATAFIFO2 0x22B +#define O_RXDATAFIFO2__Rx4DataFifoStart 24 +#define W_RXDATAFIFO2__Rx4DataFifoStart 7 +#define O_RXDATAFIFO2__Rx4DataFifoSize 16 +#define W_RXDATAFIFO2__Rx4DataFifoSize 7 +#define O_RXDATAFIFO2__Rx5DataFifoStart 8 +#define W_RXDATAFIFO2__Rx5DataFifoStart 7 +#define O_RXDATAFIFO2__Rx5DataFifoSize 0 +#define W_RXDATAFIFO2__Rx5DataFifoSize 7 +#define R_RXDATAFIFO3 0x22C +#define O_RXDATAFIFO3__Rx6DataFifoStart 24 +#define W_RXDATAFIFO3__Rx6DataFifoStart 7 +#define O_RXDATAFIFO3__Rx6DataFifoSize 16 +#define W_RXDATAFIFO3__Rx6DataFifoSize 7 +#define O_RXDATAFIFO3__Rx7DataFifoStart 8 +#define W_RXDATAFIFO3__Rx7DataFifoStart 7 +#define O_RXDATAFIFO3__Rx7DataFifoSize 0 +#define W_RXDATAFIFO3__Rx7DataFifoSize 7 +#define R_RXDATAFIFO4 0x22D +#define O_RXDATAFIFO4__Rx8DataFifoStart 24 +#define W_RXDATAFIFO4__Rx8DataFifoStart 7 +#define O_RXDATAFIFO4__Rx8DataFifoSize 16 +#define W_RXDATAFIFO4__Rx8DataFifoSize 7 +#define O_RXDATAFIFO4__Rx9DataFifoStart 8 +#define W_RXDATAFIFO4__Rx9DataFifoStart 7 +#define O_RXDATAFIFO4__Rx9DataFifoSize 0 +#define W_RXDATAFIFO4__Rx9DataFifoSize 7 +#define R_RXDATAFIFO5 0x22E +#define O_RXDATAFIFO5__Rx10DataFifoStart 24 +#define W_RXDATAFIFO5__Rx10DataFifoStart 7 +#define O_RXDATAFIFO5__Rx10DataFifoSize 16 +#define W_RXDATAFIFO5__Rx10DataFifoSize 7 +#define O_RXDATAFIFO5__Rx11DataFifoStart 8 +#define W_RXDATAFIFO5__Rx11DataFifoStart 7 +#define O_RXDATAFIFO5__Rx11DataFifoSize 0 +#define W_RXDATAFIFO5__Rx11DataFifoSize 7 +#define R_RXDATAFIFO6 0x22F +#define O_RXDATAFIFO6__Rx12DataFifoStart 24 +#define W_RXDATAFIFO6__Rx12DataFifoStart 7 +#define 
O_RXDATAFIFO6__Rx12DataFifoSize 16 +#define W_RXDATAFIFO6__Rx12DataFifoSize 7 +#define O_RXDATAFIFO6__Rx13DataFifoStart 8 +#define W_RXDATAFIFO6__Rx13DataFifoStart 7 +#define O_RXDATAFIFO6__Rx13DataFifoSize 0 +#define W_RXDATAFIFO6__Rx13DataFifoSize 7 +#define R_RXDATAFIFO7 0x230 +#define O_RXDATAFIFO7__Rx14DataFifoStart 24 +#define W_RXDATAFIFO7__Rx14DataFifoStart 7 +#define O_RXDATAFIFO7__Rx14DataFifoSize 16 +#define W_RXDATAFIFO7__Rx14DataFifoSize 7 +#define O_RXDATAFIFO7__Rx15DataFifoStart 8 +#define W_RXDATAFIFO7__Rx15DataFifoStart 7 +#define O_RXDATAFIFO7__Rx15DataFifoSize 0 +#define W_RXDATAFIFO7__Rx15DataFifoSize 7 +#define R_XGMACPADCALIBRATION 0x231 +#define R_FREEQCARVE 0x233 +#define R_SPI4STATICDELAY0 0x240 +#define O_SPI4STATICDELAY0__DataLine7 28 +#define W_SPI4STATICDELAY0__DataLine7 4 +#define O_SPI4STATICDELAY0__DataLine6 24 +#define W_SPI4STATICDELAY0__DataLine6 4 +#define O_SPI4STATICDELAY0__DataLine5 20 +#define W_SPI4STATICDELAY0__DataLine5 4 +#define O_SPI4STATICDELAY0__DataLine4 16 +#define W_SPI4STATICDELAY0__DataLine4 4 +#define O_SPI4STATICDELAY0__DataLine3 12 +#define W_SPI4STATICDELAY0__DataLine3 4 +#define O_SPI4STATICDELAY0__DataLine2 8 +#define W_SPI4STATICDELAY0__DataLine2 4 +#define O_SPI4STATICDELAY0__DataLine1 4 +#define W_SPI4STATICDELAY0__DataLine1 4 +#define O_SPI4STATICDELAY0__DataLine0 0 +#define W_SPI4STATICDELAY0__DataLine0 4 +#define R_SPI4STATICDELAY1 0x241 +#define O_SPI4STATICDELAY1__DataLine15 28 +#define W_SPI4STATICDELAY1__DataLine15 4 +#define O_SPI4STATICDELAY1__DataLine14 24 +#define W_SPI4STATICDELAY1__DataLine14 4 +#define O_SPI4STATICDELAY1__DataLine13 20 +#define W_SPI4STATICDELAY1__DataLine13 4 +#define O_SPI4STATICDELAY1__DataLine12 16 +#define W_SPI4STATICDELAY1__DataLine12 4 +#define O_SPI4STATICDELAY1__DataLine11 12 +#define W_SPI4STATICDELAY1__DataLine11 4 +#define O_SPI4STATICDELAY1__DataLine10 8 +#define W_SPI4STATICDELAY1__DataLine10 4 +#define O_SPI4STATICDELAY1__DataLine9 4 +#define W_SPI4STATICDELAY1__DataLine9 4 +#define O_SPI4STATICDELAY1__DataLine8 0 +#define W_SPI4STATICDELAY1__DataLine8 4 +#define R_SPI4STATICDELAY2 0x242 +#define O_SPI4STATICDELAY0__TxStat1 8 +#define W_SPI4STATICDELAY0__TxStat1 4 +#define O_SPI4STATICDELAY0__TxStat0 4 +#define W_SPI4STATICDELAY0__TxStat0 4 +#define O_SPI4STATICDELAY0__RxControl 0 +#define W_SPI4STATICDELAY0__RxControl 4 +#define R_SPI4CONTROL 0x243 +#define O_SPI4CONTROL__StaticDelay 2 +#define O_SPI4CONTROL__LVDS_LVTTL 1 +#define O_SPI4CONTROL__SPI4Enable 0 +#define R_CLASSWATERMARKS 0x244 +#define O_CLASSWATERMARKS__Class0Watermark 24 +#define W_CLASSWATERMARKS__Class0Watermark 5 +#define O_CLASSWATERMARKS__Class1Watermark 16 +#define W_CLASSWATERMARKS__Class1Watermark 5 +#define O_CLASSWATERMARKS__Class3Watermark 0 +#define W_CLASSWATERMARKS__Class3Watermark 5 +#define R_RXWATERMARKS1 0x245 +#define O_RXWATERMARKS__Rx0DataWatermark 24 +#define W_RXWATERMARKS__Rx0DataWatermark 7 +#define O_RXWATERMARKS__Rx1DataWatermark 16 +#define W_RXWATERMARKS__Rx1DataWatermark 7 +#define O_RXWATERMARKS__Rx3DataWatermark 0 +#define W_RXWATERMARKS__Rx3DataWatermark 7 +#define R_RXWATERMARKS2 0x246 +#define O_RXWATERMARKS__Rx4DataWatermark 24 +#define W_RXWATERMARKS__Rx4DataWatermark 7 +#define O_RXWATERMARKS__Rx5DataWatermark 16 +#define W_RXWATERMARKS__Rx5DataWatermark 7 +#define O_RXWATERMARKS__Rx6DataWatermark 8 +#define W_RXWATERMARKS__Rx6DataWatermark 7 +#define O_RXWATERMARKS__Rx7DataWatermark 0 +#define W_RXWATERMARKS__Rx7DataWatermark 7 +#define R_RXWATERMARKS3 0x247 +#define 
O_RXWATERMARKS__Rx8DataWatermark 24 +#define W_RXWATERMARKS__Rx8DataWatermark 7 +#define O_RXWATERMARKS__Rx9DataWatermark 16 +#define W_RXWATERMARKS__Rx9DataWatermark 7 +#define O_RXWATERMARKS__Rx10DataWatermark 8 +#define W_RXWATERMARKS__Rx10DataWatermark 7 +#define O_RXWATERMARKS__Rx11DataWatermark 0 +#define W_RXWATERMARKS__Rx11DataWatermark 7 +#define R_RXWATERMARKS4 0x248 +#define O_RXWATERMARKS__Rx12DataWatermark 24 +#define W_RXWATERMARKS__Rx12DataWatermark 7 +#define O_RXWATERMARKS__Rx13DataWatermark 16 +#define W_RXWATERMARKS__Rx13DataWatermark 7 +#define O_RXWATERMARKS__Rx14DataWatermark 8 +#define W_RXWATERMARKS__Rx14DataWatermark 7 +#define O_RXWATERMARKS__Rx15DataWatermark 0 +#define W_RXWATERMARKS__Rx15DataWatermark 7 +#define R_FREEWATERMARKS 0x249 +#define O_FREEWATERMARKS__FreeOutWatermark 16 +#define W_FREEWATERMARKS__FreeOutWatermark 16 +#define O_FREEWATERMARKS__JumFrWatermark 8 +#define W_FREEWATERMARKS__JumFrWatermark 7 +#define O_FREEWATERMARKS__RegFrWatermark 0 +#define W_FREEWATERMARKS__RegFrWatermark 7 +#define R_EGRESSFIFOCARVINGSLOTS 0x24a + +#define CTRL_RES0 0 +#define CTRL_RES1 1 +#define CTRL_REG_FREE 2 +#define CTRL_JUMBO_FREE 3 +#define CTRL_CONT 4 +#define CTRL_EOP 5 +#define CTRL_START 6 +#define CTRL_SNGL 7 + +#define CTRL_B0_NOT_EOP 0 +#define CTRL_B0_EOP 1 + +#define R_ROUND_ROBIN_TABLE 0 +#define R_PDE_CLASS_0 0x300 +#define R_PDE_CLASS_1 0x302 +#define R_PDE_CLASS_2 0x304 +#define R_PDE_CLASS_3 0x306 + +#define R_MSG_TX_THRESHOLD 0x308 + +#define R_GMAC_JFR0_BUCKET_SIZE 0x320 +#define R_GMAC_RFR0_BUCKET_SIZE 0x321 +#define R_GMAC_TX0_BUCKET_SIZE 0x322 +#define R_GMAC_TX1_BUCKET_SIZE 0x323 +#define R_GMAC_TX2_BUCKET_SIZE 0x324 +#define R_GMAC_TX3_BUCKET_SIZE 0x325 +#define R_GMAC_JFR1_BUCKET_SIZE 0x326 +#define R_GMAC_RFR1_BUCKET_SIZE 0x327 + +#define R_XGS_TX0_BUCKET_SIZE 0x320 +#define R_XGS_TX1_BUCKET_SIZE 0x321 +#define R_XGS_TX2_BUCKET_SIZE 0x322 +#define R_XGS_TX3_BUCKET_SIZE 0x323 +#define R_XGS_TX4_BUCKET_SIZE 0x324 +#define R_XGS_TX5_BUCKET_SIZE 0x325 +#define R_XGS_TX6_BUCKET_SIZE 0x326 +#define R_XGS_TX7_BUCKET_SIZE 0x327 +#define R_XGS_TX8_BUCKET_SIZE 0x328 +#define R_XGS_TX9_BUCKET_SIZE 0x329 +#define R_XGS_TX10_BUCKET_SIZE 0x32A +#define R_XGS_TX11_BUCKET_SIZE 0x32B +#define R_XGS_TX12_BUCKET_SIZE 0x32C +#define R_XGS_TX13_BUCKET_SIZE 0x32D +#define R_XGS_TX14_BUCKET_SIZE 0x32E +#define R_XGS_TX15_BUCKET_SIZE 0x32F +#define R_XGS_JFR_BUCKET_SIZE 0x330 +#define R_XGS_RFR_BUCKET_SIZE 0x331 + +#define R_CC_CPU0_0 0x380 +#define R_CC_CPU1_0 0x388 +#define R_CC_CPU2_0 0x390 +#define R_CC_CPU3_0 0x398 +#define R_CC_CPU4_0 0x3a0 +#define R_CC_CPU5_0 0x3a8 +#define R_CC_CPU6_0 0x3b0 +#define R_CC_CPU7_0 0x3b8 + +typedef enum { + xlr_mac_speed_10, xlr_mac_speed_100, + xlr_mac_speed_1000, xlr_mac_speed_rsvd +} xlr_mac_speed_t; + +typedef enum { + xlr_mac_duplex_auto, xlr_mac_duplex_half, + xlr_mac_duplex_full +} xlr_mac_duplex_t; + +typedef enum { + xlr_mac_link_down, + xlr_mac_link_up, +} xlr_mac_link_t; + +typedef enum { + xlr_mac_fc_auto, xlr_mac_fc_disabled, xlr_mac_fc_frame, + xlr_mac_fc_collision, xlr_mac_fc_carrier +} xlr_mac_fc_t; + +/* static int mac_frin_to_be_sent_thr[8]; */ + +enum { + PORT_TX, + PORT_TX_COMPLETE, + PORT_STARTQ, + PORT_STOPQ, + PORT_START_DEV_STATE, + PORT_STOP_DEV_STATE, +}; + +struct rge_softc_stats { + unsigned long rx_frames; + unsigned long tx_frames; + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long tx_packets; + unsigned long tx_bytes; +}; + +struct driver_data { + + /* + * Let these be the 
first fields in this structure the structure is + * cacheline aligned when allocated in init_etherdev + */ + struct fr_desc *frin_spill; + struct fr_desc *frout_spill; + union rx_tx_desc *class_0_spill; + union rx_tx_desc *class_1_spill; + union rx_tx_desc *class_2_spill; + union rx_tx_desc *class_3_spill; + int spill_configured; + + struct rge_softc *sc; /* pointer to freebsd device soft-pointer */ + struct rge_softc_stats stats; + struct mtx lock; + + xlr_reg_t *mmio; + xlr_reg_t *mii_mmio; + xlr_reg_t *pcs_mmio; + xlr_reg_t *serdes_mmio; + + int txbucket; + int rfrbucket; + + int phy_oldbmsr; + int phy_oldanlpar; + int phy_oldk1stsr; + int phy_oldlinkstat; + unsigned char phys_addr[2]; + + xlr_mac_speed_t speed; /* current speed */ + xlr_mac_duplex_t duplex;/* current duplex */ + xlr_mac_link_t link; /* current link */ + xlr_mac_fc_t flow_ctrl; /* current flow control setting */ + int advertising; + + int id; + int type; + int mode; + int instance; + int phy_addr; + int frin_to_be_sent[8]; + int init_frin_desc; +}; + +struct rge_softc { + int unit; + int irq; + unsigned char dev_addr[6]; + unsigned long base_addr; + unsigned long mem_end; + struct ifnet *rge_ifp; /* interface info */ + device_t rge_dev; + int mtu; + int flags; + struct driver_data priv; + struct mtx rge_mtx; + device_t rge_miibus; + struct mii_data rge_mii;/* MII/media information */ + bus_space_handle_t rge_bhandle; + bus_space_tag_t rge_btag; + void *rge_intrhand; + struct resource rge_irq; + struct resource *rge_res; + struct ifmedia rge_ifmedia; /* TBI media info */ + int rge_if_flags; + int rge_link; /* link state */ + int rge_link_evt; /* pending link event */ + struct callout rge_stat_ch; + void (*xmit) (struct ifnet *); + void (*stop) (struct rge_softc *); + int (*ioctl) (struct ifnet *, u_long, caddr_t); + struct rge_softc_stats *(*get_stats) (struct rge_softc *); + int active; + int link_up; +}; + +struct size_1_desc { + uint64_t entry0; +}; + +struct size_2_desc { + uint64_t entry0; + uint64_t entry1; +}; + +struct size_3_desc { + uint64_t entry0; + uint64_t entry1; + uint64_t entry2; +}; + +struct size_4_desc { + uint64_t entry0; + uint64_t entry1; + uint64_t entry2; + uint64_t entry3; +}; + +struct fr_desc { + struct size_1_desc d1; +}; + +union rx_tx_desc { + struct size_2_desc d2; + /* struct size_3_desc d3; */ + /* struct size_4_desc d4; */ +}; + + +extern unsigned char xlr_base_mac_addr[]; + +#endif diff --git a/sys/mips/rmi/dev/xlr/xgmac_mdio.h b/sys/mips/rmi/dev/xlr/xgmac_mdio.h new file mode 100644 index 000000000000..5d0a3d0cd536 --- /dev/null +++ b/sys/mips/rmi/dev/xlr/xgmac_mdio.h @@ -0,0 +1,127 @@ +/*- + * Copyright (c) 2003-2009 RMI Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of RMI Corporation, nor the names of its contributors, + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RMI_BSD */
+/* MDIO low-level access routines */
+/* All PHYs are accessed through the GMAC0 register base. */
+
+#ifndef _XGMAC_MDIO_H_
+#define _XGMAC_MDIO_H_
+
+/* function prototypes */
+static inline int
+xmdio_read(volatile unsigned int *_mmio,
+    uint32_t phy_addr, uint32_t address);
+static inline void
+xmdio_write(volatile unsigned int *_mmio,
+    uint32_t phy_addr, uint32_t address, uint32_t data);
+static inline void
+xmdio_address(volatile unsigned int *_mmio,
+    uint32_t phy_addr, uint32_t dev_ad, uint32_t address);
+
+static inline void
+xmdio_address(volatile unsigned int *_mmio,
+    uint32_t phy_addr, uint32_t dev_ad, uint32_t address)
+{
+	uint32_t st_field = 0x0;
+	uint32_t op_type = 0x0;	/* address operation */
+	uint32_t ta_field = 0x2;	/* turnaround field */
+
+	/* MIIM field register (word offset 0x11) */
+	_mmio[0x11] = ((st_field & 0x3) << 30) |
+	    ((op_type & 0x3) << 28) |
+	    ((phy_addr & 0x1F) << 23) |
+	    ((dev_ad & 0x1F) << 18) |
+	    ((ta_field & 0x3) << 16) |
+	    ((address & 0xffff) << 0);
+
+	/* latch MIIM command 0x5 by pulsing the load-command bit (bit 3) */
+	_mmio[0x10] = (0x0 << 3) | 0x5;
+	_mmio[0x10] = (0x1 << 3) | 0x5;
+	_mmio[0x10] = (0x0 << 3) | 0x5;
+
+	/* wait for the address cycle to complete */
+	while (_mmio[0x14] & 0x1) {
+	};
+
+}
+
+static inline int
+xmdio_read(volatile unsigned int *_mmio,
+    uint32_t phy_addr, uint32_t address)
+{
+	uint32_t st_field = 0x0;
+	uint32_t op_type = 0x3;	/* read operation */
+	uint32_t ta_field = 0x2;	/* turnaround field */
+	uint32_t data = 0;
+
+	xmdio_address(_mmio, phy_addr, 5, address);	/* device address (MMD) 5 */
+	_mmio[0x11] = ((st_field & 0x3) << 30) |
+	    ((op_type & 0x3) << 28) |
+	    ((phy_addr & 0x1F) << 23) |
+	    ((5 & 0x1F) << 18) |
+	    ((ta_field & 0x3) << 16) |
+	    ((data & 0xffff) << 0);
+
+	_mmio[0x10] = (0x0 << 3) | 0x5;
+	_mmio[0x10] = (0x1 << 3) | 0x5;
+	_mmio[0x10] = (0x0 << 3) | 0x5;
+
+	/* wait for the read cycle to complete */
+	while (_mmio[0x14] & 0x1) {
+	};
+
+	data = _mmio[0x11] & 0xffff;
+	return (data);
+}
+
+static inline void
+xmdio_write(volatile unsigned int *_mmio,
+    uint32_t phy_addr, uint32_t address, uint32_t data)
+{
+	uint32_t st_field = 0x0;
+	uint32_t op_type = 0x1;	/* write operation */
+	uint32_t ta_field = 0x2;	/* turnaround field */
+
+	xmdio_address(_mmio, phy_addr, 5, address);	/* device address (MMD) 5 */
+	_mmio[0x11] = ((st_field & 0x3) << 30) |
+	    ((op_type & 0x3) << 28) |
+	    ((phy_addr & 0x1F) << 23) |
+	    ((5 & 0x1F) << 18) |
+	    ((ta_field & 0x3) << 16) |
+	    ((data & 0xffff) << 0);
+
+	_mmio[0x10] = (0x0 << 3) | 0x5;
+	_mmio[0x10] = (0x1 << 3) | 0x5;
+	_mmio[0x10] = (0x0 << 3) | 0x5;
+
+	/* wait for the write cycle to complete */
+	while (_mmio[0x14] & 0x1) {
+	};
+
+}
+
+#endif
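+
+/*
+ * Usage sketch: the helpers above drive a Clause 45 style MDIO transaction
+ * through the XGMAC MIIM registers, an address cycle (xmdio_address())
+ * followed by a read or write cycle, using the field register at word
+ * offset 0x11 (R_XGMAC_MIIM_FILED), the command register at 0x10
+ * (R_XGMAC_MIIM_COMMAND) and the busy flag in the indicator register at
+ * 0x14 (R_XGMAC_MIIM_INDICATOR).  The base pointer, PHY address and
+ * register numbers below are placeholders for illustration only:
+ *
+ *	volatile unsigned int *mmio = xgmac0_mmio_base;   (mapped XGMAC block)
+ *	uint32_t lo, hi;
+ *
+ *	lo = xmdio_read(mmio, 0, 0x0002);      (e.g. device identifier, low word)
+ *	hi = xmdio_read(mmio, 0, 0x0003);      (e.g. device identifier, high word)
+ *	xmdio_write(mmio, 0, 0x0000, 0x0000);  (write a 16-bit PHY register)
+ */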