Merge ^/head r275715 through r275748.

Dimitry Andric 2014-12-13 19:45:18 +00:00
commit be281d7007
94 changed files with 6686 additions and 1691 deletions


@ -29,7 +29,7 @@
.\" @(#)ps.1 8.3 (Berkeley) 4/18/94
.\" $FreeBSD$
.\"
.Dd August 27, 2014
.Dd December 9, 2014
.Dt PS 1
.Os
.Sh NAME
@ -332,6 +332,7 @@ the include file
.It Dv "P_SINGLE_BOUNDARY" Ta No "0x400000" Ta "Threads should suspend at user boundary"
.It Dv "P_HWPMC" Ta No "0x800000" Ta "Process is using HWPMCs"
.It Dv "P_JAILED" Ta No "0x1000000" Ta "Process is in jail"
.It Dv "P_TOTAL_STOP" Ta No "0x2000000" Ta "Stopped for system suspend"
.It Dv "P_INEXEC" Ta No "0x4000000" Ta "Process is in execve()"
.It Dv "P_STATCHILD" Ta No "0x8000000" Ta "Child process stopped or exited"
.It Dv "P_INMEM" Ta No "0x10000000" Ta "Loaded into memory"


@ -23,15 +23,15 @@
.\" Copyright (c) 2012, Glen Barber <gjb@FreeBSD.org>
.\" Copyright (c) 2012, Bryan Drewery <bdrewery@FreeBSD.org>
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2013 Nexenta Systems, Inc. All Rights Reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2013, Steven Hartland <smh@FreeBSD.org>
.\" Copyright (c) 2014 Nexenta Systems, Inc. All Rights Reserved.
.\" Copyright (c) 2014, Xin LI <delphij@FreeBSD.org>
.\" Copyright (c) 2014, The FreeBSD Foundation, All Rights Reserved.
.\"
.\" $FreeBSD$
.\"
.Dd November 12, 2014
.Dd December 12, 2014
.Dt ZFS 8
.Os
.Sh NAME
@ -3505,10 +3505,9 @@ are also displayed.
.Bd -literal -offset 2n
.Li # Ic zfs allow cindys create,destroy,mount,snapshot tank/cindys
.Li # Ic zfs allow tank/cindys
-------------------------------------------------------------
Local+Descendent permissions on (tank/cindys)
user cindys create,destroy,mount,snapshot
-------------------------------------------------------------
---- Permissions on tank/cindys --------------------------------------
Local+Descendent permissions:
user cindys create,destroy,mount,snapshot
.Ed
.It Sy Example 18 No Delegating Create Time Permissions on a Tn ZFS No Dataset
.Pp
@ -3524,12 +3523,11 @@ are also displayed.
.Li # Ic zfs allow staff create,mount tank/users
.Li # Ic zfs allow -c destroy tank/users
.Li # Ic zfs allow tank/users
-------------------------------------------------------------
Create time permissions on (tank/users)
create,destroy
Local+Descendent permissions on (tank/users)
group staff create,mount
-------------------------------------------------------------
---- Permissions on tank/users ---------------------------------------
Permission sets:
destroy
Local+Descendent permissions:
group staff create,mount
.Ed
.It Xo
.Sy Example 19
@ -3547,14 +3545,11 @@ are also displayed.
.Li # Ic zfs allow -s @pset create,destroy,snapshot,mount tank/users
.Li # Ic zfs allow staff @pset tank/users
.Li # Ic zfs allow tank/users
-------------------------------------------------------------
Permission sets on (tank/users)
---- Permissions on tank/users ---------------------------------------
Permission sets:
@pset create,destroy,mount,snapshot
Create time permissions on (tank/users)
create,destroy
Local+Descendent permissions on (tank/users)
group staff @pset,create,mount
-------------------------------------------------------------
Local+Descendent permissions:
group staff @pset
.Ed
.It Sy Example 20 No Delegating Property Permissions on a Tn ZFS No Dataset
.Pp
@ -3566,16 +3561,15 @@ file system. The permissions on
are also displayed.
.Bd -literal -offset 2n
.Li # Ic zfs allow cindys quota,reservation users/home
.Li # Ic zfs allow cindys
-------------------------------------------------------------
Local+Descendent permissions on (users/home)
.Li # Ic zfs allow users/home
---- Permissions on users/home ---------------------------------------
Local+Descendent permissions:
user cindys quota,reservation
-------------------------------------------------------------
.Li # Ic su - cindys
.Li cindys% Ic zfs set quota=10G users/home/marks
.Li cindys% Ic zfs get quota users/home/marks
NAME PROPERTY VALUE SOURCE
users/home/marks quota 10G local
NAME PROPERTY VALUE SOURCE
users/home/marks quota 10G local
.Ed
.It Sy Example 21 No Removing ZFS Delegated Permissions on a Tn ZFS No Dataset
.Pp
@ -3589,14 +3583,11 @@ are also displayed.
.Bd -literal -offset 2n
.Li # Ic zfs unallow staff snapshot tank/users
.Li # Ic zfs allow tank/users
-------------------------------------------------------------
Permission sets on (tank/users)
---- Permissions on tank/users ---------------------------------------
Permission sets:
@pset create,destroy,mount,snapshot
Create time permissions on (tank/users)
create,destroy
Local+Descendent permissions on (tank/users)
group staff @pset,create,mount
-------------------------------------------------------------
Local+Descendent permissions:
group staff @pset
.Ed
.It Sy Example 22 No Showing the differences between a snapshot and a ZFS Dataset
.Pp


@ -1,3 +1,49 @@
2009-02-15 Alan Modra <amodra@bigpond.net.au>
* elf64-ppc.c (struct _ppc64_elf_section_data): Delete t_symndx,
add toc.symndx and toc.add.
(ppc64_elf_check_relocs): Don't set htab->tls_get_addr here.
Set up toc.add.
(get_tls_mask): Add toc_addend param, set from toc.add. Adjust all
callers.
(ppc64_elf_tls_setup): Set htab->tls_get_addr and tls_get_addr_fd.
(branch_reloc_hash_match): New function, extracted from..
(ppc64_elf_tls_optimize): ..here.
(ppc64_elf_relocate_section): Properly set addends when optimizing
tls sequences. Avoid unnecessary reading and writing of insns.
Only redo reloc when symbol changed. Bypass symbol checks when
using tlsld_got.
* elf32-ppc.c (ppc_elf_tls_setup): Correct comment.
(branch_reloc_hash_match): New function, extracted from..
(ppc_elf_tls_optimize): ..here.
(ppc_elf_relocate_section): Avoid unnecessary reading of insns.
Don't clear addend on zapped __tls_get_addr reloc.
2008-08-11 Alan Modra <amodra@bigpond.net.au>
* elf64-ppc.c (toc_adjusting_stub_needed): Any call via the plt
needs r2 valid, not just those to external syms.
2007-11-06 Alan Modra <amodra@bigpond.net.au>
* elf32-ppc.c (ppc_elf_check_relocs): Don't refcount tlsld_got here..
(ppc_elf_gc_sweep_hook): ..or here..
(ppc_elf_tls_optimize): ..or here. Make two passes through the
relocs, ensuring that tls_get_addr calls follow gd and ld relocs.
(allocate_dynrelocs): Refcount tlsld_got here.
(ppc_elf_size_dynamic_sections): Call allocate_dynrelocs before
allocating tlsld_got.
(ppc_elf_relocate_section): Remove check that a tls_get_addr
call follows gd and ld relocs.
2007-08-13 Alan Modra <amodra@bigpond.net.au>
* elf64-ppc.c (ADDI_R12_R12, LD_R11_0R2, LD_R2_0R2): Define.
Update stub comments.
(build_plt_stub): Build two variants, one without "addis".
(ppc_build_one_stub): Build stubs without "addis" if possible.
(ppc_size_one_stub): Size new stubs.
2007-07-02 Joseph Myers <joseph@codesourcery.com>
* elfxx-mips.c (mips_elf_calculate_relocation): Handle


@ -1380,6 +1380,9 @@ typedef struct bfd_section
/* Nonzero if this section has TLS related relocations. */
unsigned int has_tls_reloc:1;
/* Nonzero if this section has a call to __tls_get_addr. */
unsigned int has_tls_get_addr_call:1;
/* Nonzero if this section has a gp reloc. */
unsigned int has_gp_reloc:1;
@ -1640,11 +1643,11 @@ extern asection bfd_ind_section;
/* segment_mark, sec_info_type, use_rela_p, has_tls_reloc, */ \
0, 0, 0, 0, \
\
/* has_gp_reloc, need_finalize_relax, reloc_done, */ \
0, 0, 0, \
/* has_tls_get_addr_call, has_gp_reloc, need_finalize_relax, */ \
0, 0, 0, \
\
/* vma, lma, size, rawsize */ \
0, 0, 0, 0, \
/* reloc_done, vma, lma, size, rawsize */ \
0, 0, 0, 0, 0, \
\
/* output_offset, output_section, alignment_power, */ \
0, (struct bfd_section *) &SEC, 0, \
@ -2896,6 +2899,8 @@ in the instruction. */
/* PowerPC and PowerPC64 thread-local storage relocations. */
BFD_RELOC_PPC_TLS,
BFD_RELOC_PPC_TLSGD,
BFD_RELOC_PPC_TLSLD,
BFD_RELOC_PPC_DTPMOD,
BFD_RELOC_PPC_TPREL16,
BFD_RELOC_PPC_TPREL16_LO,


@ -58,10 +58,10 @@ static asection bfd_debug_section =
0, 0, 1, 0,
/* segment_mark, sec_info_type, use_rela_p, has_tls_reloc, */
0, 0, 0, 0,
/* has_gp_reloc, need_finalize_relax, reloc_done, */
0, 0, 0,
/* vma, lma, size, rawsize, */
0, 0, 0, 0,
/* has_tls_get_addr_call, has_gp_reloc, need_finalize_relax, */
0, 0, 0,
/* reloc_done, vma, lma, size, rawsize, */
0, 0, 0, 0, 0,
/* output_offset, output_section, alignment_power, */
0, NULL, 0,
/* relocation, orelocation, reloc_count, filepos, rel_filepos, */


@ -746,7 +746,7 @@ static reloc_howto_type ppc_elf_howto_raw[] = {
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* Marker reloc for TLS. */
/* Marker relocs for TLS. */
HOWTO (R_PPC_TLS,
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
@ -761,6 +761,34 @@ static reloc_howto_type ppc_elf_howto_raw[] = {
0, /* dst_mask */
FALSE), /* pcrel_offset */
HOWTO (R_PPC_TLSGD,
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
32, /* bitsize */
FALSE, /* pc_relative */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
"R_PPC_TLSGD", /* name */
FALSE, /* partial_inplace */
0, /* src_mask */
0, /* dst_mask */
FALSE), /* pcrel_offset */
HOWTO (R_PPC_TLSLD,
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
32, /* bitsize */
FALSE, /* pc_relative */
0, /* bitpos */
complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
"R_PPC_TLSLD", /* name */
FALSE, /* partial_inplace */
0, /* src_mask */
0, /* dst_mask */
FALSE), /* pcrel_offset */
/* Computes the load module index of the load module that contains the
definition of its TLS sym. */
HOWTO (R_PPC_DTPMOD32,
@ -1524,6 +1552,8 @@ ppc_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
case BFD_RELOC_CTOR: r = R_PPC_ADDR32; break;
case BFD_RELOC_PPC_TOC16: r = R_PPC_TOC16; break;
case BFD_RELOC_PPC_TLS: r = R_PPC_TLS; break;
case BFD_RELOC_PPC_TLSGD: r = R_PPC_TLSGD; break;
case BFD_RELOC_PPC_TLSLD: r = R_PPC_TLSLD; break;
case BFD_RELOC_PPC_DTPMOD: r = R_PPC_DTPMOD32; break;
case BFD_RELOC_PPC_TPREL16: r = R_PPC_TPREL16; break;
case BFD_RELOC_PPC_TPREL16_LO: r = R_PPC_TPREL16_LO; break;
@ -2345,16 +2375,34 @@ struct plt_entry
bfd_vma glink_offset;
};
/* Of those relocs that might be copied as dynamic relocs, this macro
/* Of those relocs that might be copied as dynamic relocs, this function
selects those that must be copied when linking a shared library,
even when the symbol is local. */
#define MUST_BE_DYN_RELOC(RTYPE) \
((RTYPE) != R_PPC_REL24 \
&& (RTYPE) != R_PPC_REL14 \
&& (RTYPE) != R_PPC_REL14_BRTAKEN \
&& (RTYPE) != R_PPC_REL14_BRNTAKEN \
&& (RTYPE) != R_PPC_REL32)
static int
must_be_dyn_reloc (struct bfd_link_info *info,
enum elf_ppc_reloc_type r_type)
{
switch (r_type)
{
default:
return 1;
case R_PPC_REL24:
case R_PPC_REL14:
case R_PPC_REL14_BRTAKEN:
case R_PPC_REL14_BRNTAKEN:
case R_PPC_REL32:
return 0;
case R_PPC_TPREL32:
case R_PPC_TPREL16:
case R_PPC_TPREL16_LO:
case R_PPC_TPREL16_HI:
case R_PPC_TPREL16_HA:
return !info->executable;
}
}
/* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
copying dynamic variables from a shared lib into an app's dynbss
@ -2421,7 +2469,7 @@ struct ppc_elf_link_hash_table
/* The .got.plt section (VxWorks only)*/
asection *sgotplt;
/* Shortcut to .__tls_get_addr. */
/* Shortcut to __tls_get_addr. */
struct elf_link_hash_entry *tls_get_addr;
/* The bfd that forced an old-style PLT. */
@ -3040,6 +3088,7 @@ ppc_elf_check_relocs (bfd *abfd,
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
asection *got2, *sreloc;
struct elf_link_hash_entry *tga;
if (info->relocatable)
return TRUE;
@ -3063,6 +3112,8 @@ ppc_elf_check_relocs (bfd *abfd,
ppc_elf_howto_init ();
htab = ppc_elf_hash_table (info);
tga = elf_link_hash_lookup (&htab->elf, "__tls_get_addr",
FALSE, FALSE, TRUE);
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
got2 = bfd_get_section_by_name (abfd, ".got2");
@ -3074,7 +3125,7 @@ ppc_elf_check_relocs (bfd *abfd,
unsigned long r_symndx;
enum elf_ppc_reloc_type r_type;
struct elf_link_hash_entry *h;
int tls_type = 0;
int tls_type;
r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx < symtab_hdr->sh_info)
@ -3101,14 +3152,48 @@ ppc_elf_check_relocs (bfd *abfd,
BFD_ASSERT (h == htab->elf.hgot);
}
tls_type = 0;
r_type = ELF32_R_TYPE (rel->r_info);
if (h != NULL && h == tga)
switch (r_type)
{
default:
break;
case R_PPC_PLTREL24:
case R_PPC_LOCAL24PC:
case R_PPC_REL24:
case R_PPC_REL14:
case R_PPC_REL14_BRTAKEN:
case R_PPC_REL14_BRNTAKEN:
case R_PPC_ADDR24:
case R_PPC_ADDR14:
case R_PPC_ADDR14_BRTAKEN:
case R_PPC_ADDR14_BRNTAKEN:
if (rel != relocs
&& (ELF32_R_TYPE (rel[-1].r_info) == R_PPC_TLSGD
|| ELF32_R_TYPE (rel[-1].r_info) == R_PPC_TLSLD))
/* We have a new-style __tls_get_addr call with a marker
reloc. */
;
else
/* Mark this section as having an old-style call. */
sec->has_tls_get_addr_call = 1;
break;
}
switch (r_type)
{
case R_PPC_TLSGD:
case R_PPC_TLSLD:
/* These special tls relocs tie a call to __tls_get_addr with
its parameter symbol. */
break;
case R_PPC_GOT_TLSLD16:
case R_PPC_GOT_TLSLD16_LO:
case R_PPC_GOT_TLSLD16_HI:
case R_PPC_GOT_TLSLD16_HA:
htab->tlsld_got.refcount += 1;
tls_type = TLS_TLS | TLS_LD;
goto dogottls;
@ -3123,7 +3208,7 @@ ppc_elf_check_relocs (bfd *abfd,
case R_PPC_GOT_TPREL16_LO:
case R_PPC_GOT_TPREL16_HI:
case R_PPC_GOT_TPREL16_HA:
if (info->shared)
if (!info->executable)
info->flags |= DF_STATIC_TLS;
tls_type = TLS_TLS | TLS_TPREL;
goto dogottls;
@ -3358,7 +3443,7 @@ ppc_elf_check_relocs (bfd *abfd,
/* This refers only to functions defined in the shared library. */
case R_PPC_LOCAL24PC:
if (h && h == htab->elf.hgot && htab->plt_type == PLT_UNSET)
if (h != NULL && h == htab->elf.hgot && htab->plt_type == PLT_UNSET)
{
htab->plt_type = PLT_OLD;
htab->old_bfd = abfd;
@ -3381,7 +3466,11 @@ ppc_elf_check_relocs (bfd *abfd,
/* We shouldn't really be seeing these. */
case R_PPC_TPREL32:
if (info->shared)
case R_PPC_TPREL16:
case R_PPC_TPREL16_LO:
case R_PPC_TPREL16_HI:
case R_PPC_TPREL16_HA:
if (!info->executable)
info->flags |= DF_STATIC_TLS;
goto dodyn;
@ -3390,14 +3479,6 @@ ppc_elf_check_relocs (bfd *abfd,
case R_PPC_DTPREL32:
goto dodyn;
case R_PPC_TPREL16:
case R_PPC_TPREL16_LO:
case R_PPC_TPREL16_HI:
case R_PPC_TPREL16_HA:
if (info->shared)
info->flags |= DF_STATIC_TLS;
goto dodyn;
case R_PPC_REL32:
if (h == NULL
&& got2 != NULL
@ -3488,7 +3569,7 @@ ppc_elf_check_relocs (bfd *abfd,
dynamic library if we manage to avoid copy relocs for the
symbol. */
if ((info->shared
&& (MUST_BE_DYN_RELOC (r_type)
&& (must_be_dyn_reloc (info, r_type)
|| (h != NULL
&& (! info->symbolic
|| h->root.type == bfd_link_hash_defweak
@ -3583,7 +3664,7 @@ ppc_elf_check_relocs (bfd *abfd,
}
p->count += 1;
if (!MUST_BE_DYN_RELOC (r_type))
if (!must_be_dyn_reloc (info, r_type))
p->pc_count += 1;
}
@ -3903,9 +3984,6 @@ ppc_elf_gc_sweep_hook (bfd *abfd,
case R_PPC_GOT_TLSLD16_LO:
case R_PPC_GOT_TLSLD16_HI:
case R_PPC_GOT_TLSLD16_HA:
htab->tlsld_got.refcount -= 1;
/* Fall thru */
case R_PPC_GOT_TLSGD16:
case R_PPC_GOT_TLSGD16_LO:
case R_PPC_GOT_TLSGD16_HI:
@ -3979,7 +4057,8 @@ ppc_elf_gc_sweep_hook (bfd *abfd,
return TRUE;
}
/* Set htab->tls_get_addr and call the generic ELF tls_setup function. */
/* Set plt output section type, htab->tls_get_addr, and call the
generic ELF tls_setup function. */
asection *
ppc_elf_tls_setup (bfd *obfd, struct bfd_link_info *info)
@ -4000,6 +4079,43 @@ ppc_elf_tls_setup (bfd *obfd, struct bfd_link_info *info)
return _bfd_elf_tls_setup (obfd, info);
}
/* Return TRUE iff REL is a branch reloc with a global symbol matching
HASH. */
static bfd_boolean
branch_reloc_hash_match (const bfd *ibfd,
const Elf_Internal_Rela *rel,
const struct elf_link_hash_entry *hash)
{
Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
enum elf_ppc_reloc_type r_type = ELF32_R_TYPE (rel->r_info);
unsigned int r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx >= symtab_hdr->sh_info
&& (r_type == R_PPC_PLTREL24
|| r_type == R_PPC_LOCAL24PC
|| r_type == R_PPC_REL14
|| r_type == R_PPC_REL14_BRTAKEN
|| r_type == R_PPC_REL14_BRNTAKEN
|| r_type == R_PPC_REL24
|| r_type == R_PPC_ADDR24
|| r_type == R_PPC_ADDR14
|| r_type == R_PPC_ADDR14_BRTAKEN
|| r_type == R_PPC_ADDR14_BRNTAKEN))
{
struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
struct elf_link_hash_entry *h;
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
while (h->root.type == bfd_link_hash_indirect
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
if (h == hash)
return TRUE;
}
return FALSE;
}
/* Run through all the TLS relocs looking for optimization
opportunities. */
@ -4010,187 +4126,204 @@ ppc_elf_tls_optimize (bfd *obfd ATTRIBUTE_UNUSED,
bfd *ibfd;
asection *sec;
struct ppc_elf_link_hash_table *htab;
int pass;
if (info->relocatable || info->shared)
if (info->relocatable || !info->executable)
return TRUE;
htab = ppc_elf_hash_table (info);
for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
{
Elf_Internal_Sym *locsyms = NULL;
Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
/* Make two passes through the relocs. First time check that tls
relocs involved in setting up a tls_get_addr call are indeed
followed by such a call. If they are not, exclude them from
the optimizations done on the second pass. */
for (pass = 0; pass < 2; ++pass)
for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
{
Elf_Internal_Sym *locsyms = NULL;
Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
for (sec = ibfd->sections; sec != NULL; sec = sec->next)
if (sec->has_tls_reloc && !bfd_is_abs_section (sec->output_section))
{
Elf_Internal_Rela *relstart, *rel, *relend;
int expecting_tls_get_addr;
for (sec = ibfd->sections; sec != NULL; sec = sec->next)
if (sec->has_tls_reloc && !bfd_is_abs_section (sec->output_section))
{
Elf_Internal_Rela *relstart, *rel, *relend;
/* Read the relocations. */
relstart = _bfd_elf_link_read_relocs (ibfd, sec, NULL, NULL,
info->keep_memory);
if (relstart == NULL)
return FALSE;
/* Read the relocations. */
relstart = _bfd_elf_link_read_relocs (ibfd, sec, NULL, NULL,
info->keep_memory);
if (relstart == NULL)
return FALSE;
expecting_tls_get_addr = 0;
relend = relstart + sec->reloc_count;
for (rel = relstart; rel < relend; rel++)
{
enum elf_ppc_reloc_type r_type;
unsigned long r_symndx;
struct elf_link_hash_entry *h = NULL;
char *tls_mask;
char tls_set, tls_clear;
bfd_boolean is_local;
relend = relstart + sec->reloc_count;
for (rel = relstart; rel < relend; rel++)
{
enum elf_ppc_reloc_type r_type;
unsigned long r_symndx;
struct elf_link_hash_entry *h = NULL;
char *tls_mask;
char tls_set, tls_clear;
bfd_boolean is_local;
int expecting_tls_get_addr;
bfd_signed_vma *got_count;
r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx >= symtab_hdr->sh_info)
{
struct elf_link_hash_entry **sym_hashes;
r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx >= symtab_hdr->sh_info)
{
struct elf_link_hash_entry **sym_hashes;
sym_hashes = elf_sym_hashes (ibfd);
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
while (h->root.type == bfd_link_hash_indirect
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
}
sym_hashes = elf_sym_hashes (ibfd);
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
while (h->root.type == bfd_link_hash_indirect
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
}
is_local = FALSE;
if (h == NULL
|| !h->def_dynamic)
is_local = TRUE;
expecting_tls_get_addr = 0;
is_local = FALSE;
if (h == NULL
|| !h->def_dynamic)
is_local = TRUE;
r_type = ELF32_R_TYPE (rel->r_info);
switch (r_type)
{
case R_PPC_GOT_TLSLD16:
case R_PPC_GOT_TLSLD16_LO:
case R_PPC_GOT_TLSLD16_HI:
case R_PPC_GOT_TLSLD16_HA:
/* These relocs should never be against a symbol
defined in a shared lib. Leave them alone if
that turns out to be the case. */
expecting_tls_get_addr = 0;
htab->tlsld_got.refcount -= 1;
if (!is_local)
continue;
r_type = ELF32_R_TYPE (rel->r_info);
switch (r_type)
{
case R_PPC_GOT_TLSLD16:
case R_PPC_GOT_TLSLD16_LO:
expecting_tls_get_addr = 1;
/* Fall thru */
/* LD -> LE */
tls_set = 0;
tls_clear = TLS_LD;
expecting_tls_get_addr = 1;
break;
case R_PPC_GOT_TLSLD16_HI:
case R_PPC_GOT_TLSLD16_HA:
/* These relocs should never be against a symbol
defined in a shared lib. Leave them alone if
that turns out to be the case. */
if (!is_local)
continue;
case R_PPC_GOT_TLSGD16:
case R_PPC_GOT_TLSGD16_LO:
case R_PPC_GOT_TLSGD16_HI:
case R_PPC_GOT_TLSGD16_HA:
if (is_local)
/* GD -> LE */
/* LD -> LE */
tls_set = 0;
else
/* GD -> IE */
tls_set = TLS_TLS | TLS_TPRELGD;
tls_clear = TLS_GD;
expecting_tls_get_addr = 1;
break;
tls_clear = TLS_LD;
break;
case R_PPC_GOT_TPREL16:
case R_PPC_GOT_TPREL16_LO:
case R_PPC_GOT_TPREL16_HI:
case R_PPC_GOT_TPREL16_HA:
expecting_tls_get_addr = 0;
if (is_local)
{
/* IE -> LE */
case R_PPC_GOT_TLSGD16:
case R_PPC_GOT_TLSGD16_LO:
expecting_tls_get_addr = 1;
/* Fall thru */
case R_PPC_GOT_TLSGD16_HI:
case R_PPC_GOT_TLSGD16_HA:
if (is_local)
/* GD -> LE */
tls_set = 0;
tls_clear = TLS_TPREL;
break;
}
else
else
/* GD -> IE */
tls_set = TLS_TLS | TLS_TPRELGD;
tls_clear = TLS_GD;
break;
case R_PPC_GOT_TPREL16:
case R_PPC_GOT_TPREL16_LO:
case R_PPC_GOT_TPREL16_HI:
case R_PPC_GOT_TPREL16_HA:
if (is_local)
{
/* IE -> LE */
tls_set = 0;
tls_clear = TLS_TPREL;
break;
}
else
continue;
default:
continue;
}
case R_PPC_REL14:
case R_PPC_REL14_BRTAKEN:
case R_PPC_REL14_BRNTAKEN:
case R_PPC_REL24:
if (expecting_tls_get_addr
&& h != NULL
&& h == htab->tls_get_addr)
{
struct plt_entry *ent = find_plt_ent (h, NULL, 0);
if (ent != NULL && ent->plt.refcount > 0)
ent->plt.refcount -= 1;
}
expecting_tls_get_addr = 0;
continue;
if (pass == 0)
{
if (!expecting_tls_get_addr
|| !sec->has_tls_get_addr_call)
continue;
default:
expecting_tls_get_addr = 0;
continue;
}
if (rel + 1 < relend
&& branch_reloc_hash_match (ibfd, rel + 1,
htab->tls_get_addr))
continue;
if (h != NULL)
{
if (tls_set == 0)
{
/* We managed to get rid of a got entry. */
if (h->got.refcount > 0)
h->got.refcount -= 1;
}
tls_mask = &ppc_elf_hash_entry (h)->tls_mask;
}
else
{
Elf_Internal_Sym *sym;
bfd_signed_vma *lgot_refs;
char *lgot_masks;
/* Uh oh, we didn't find the expected call. We
could just mark this symbol to exclude it
from tls optimization but it's safer to skip
the entire section. */
sec->has_tls_reloc = 0;
break;
}
if (locsyms == NULL)
{
locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
if (locsyms == NULL)
locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
symtab_hdr->sh_info,
0, NULL, NULL, NULL);
if (locsyms == NULL)
{
if (elf_section_data (sec)->relocs != relstart)
free (relstart);
return FALSE;
}
}
sym = locsyms + r_symndx;
lgot_refs = elf_local_got_refcounts (ibfd);
if (lgot_refs == NULL)
abort ();
if (tls_set == 0)
{
/* We managed to get rid of a got entry. */
if (lgot_refs[r_symndx] > 0)
lgot_refs[r_symndx] -= 1;
}
lgot_masks = (char *) (lgot_refs + symtab_hdr->sh_info);
tls_mask = &lgot_masks[r_symndx];
}
if (h != NULL)
{
tls_mask = &ppc_elf_hash_entry (h)->tls_mask;
got_count = &h->got.refcount;
}
else
{
Elf_Internal_Sym *sym;
bfd_signed_vma *lgot_refs;
char *lgot_masks;
*tls_mask |= tls_set;
*tls_mask &= ~tls_clear;
}
if (locsyms == NULL)
{
locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
if (locsyms == NULL)
locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
symtab_hdr->sh_info,
0, NULL, NULL, NULL);
if (locsyms == NULL)
{
if (elf_section_data (sec)->relocs != relstart)
free (relstart);
return FALSE;
}
}
sym = locsyms + r_symndx;
lgot_refs = elf_local_got_refcounts (ibfd);
if (lgot_refs == NULL)
abort ();
lgot_masks = (char *) (lgot_refs + symtab_hdr->sh_info);
tls_mask = &lgot_masks[r_symndx];
got_count = &lgot_refs[r_symndx];
}
if (elf_section_data (sec)->relocs != relstart)
free (relstart);
if (tls_set == 0)
{
/* We managed to get rid of a got entry. */
if (*got_count > 0)
*got_count -= 1;
}
if (expecting_tls_get_addr)
{
struct plt_entry *ent;
ent = find_plt_ent (htab->tls_get_addr, NULL, 0);
if (ent != NULL && ent->plt.refcount > 0)
ent->plt.refcount -= 1;
}
*tls_mask |= tls_set;
*tls_mask &= ~tls_clear;
}
if (elf_section_data (sec)->relocs != relstart)
free (relstart);
}
if (locsyms != NULL
&& (symtab_hdr->contents != (unsigned char *) locsyms))
{
if (!info->keep_memory)
free (locsyms);
else
symtab_hdr->contents = (unsigned char *) locsyms;
}
if (locsyms != NULL
&& (symtab_hdr->contents != (unsigned char *) locsyms))
{
if (!info->keep_memory)
free (locsyms);
else
symtab_hdr->contents = (unsigned char *) locsyms;
}
}
}
return TRUE;
}
@ -4615,8 +4748,11 @@ allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
if (eh->tls_mask == (TLS_TLS | TLS_LD)
&& !eh->elf.def_dynamic)
/* If just an LD reloc, we'll just use htab->tlsld_got.offset. */
eh->elf.got.offset = (bfd_vma) -1;
{
/* If just an LD reloc, we'll just use htab->tlsld_got.offset. */
htab->tlsld_got.refcount += 1;
eh->elf.got.offset = (bfd_vma) -1;
}
else
{
bfd_boolean dyn;
@ -4664,7 +4800,7 @@ allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
if (info->shared)
{
/* Relocs that use pc_count are those that appear on a call insn,
or certain REL relocs (see MUST_BE_DYN_RELOC) that can be
or certain REL relocs (see must_be_dyn_reloc) that can be
generated via assembly. We want calls to protected symbols to
resolve directly to the function rather than going via the plt.
If people want function pointer comparisons to work as expected
@ -4891,6 +5027,9 @@ ppc_elf_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
*local_got = (bfd_vma) -1;
}
/* Allocate space for global sym dynamic relocs. */
elf_link_hash_traverse (elf_hash_table (info), allocate_dynrelocs, info);
if (htab->tlsld_got.refcount > 0)
{
htab->tlsld_got.offset = allocate_got (htab, 8);
@ -4900,9 +5039,6 @@ ppc_elf_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
else
htab->tlsld_got.offset = (bfd_vma) -1;
/* Allocate space for global sym dynamic relocs. */
elf_link_hash_traverse (elf_hash_table (info), allocate_dynrelocs, info);
if (htab->got != NULL && htab->plt_type != PLT_VXWORKS)
{
unsigned int g_o_t = 32768;
@ -5754,16 +5890,13 @@ ppc_elf_relocate_section (bfd *output_bfd,
for the final instruction stream. */
tls_mask = 0;
tls_gd = 0;
if (IS_PPC_TLS_RELOC (r_type))
if (h != NULL)
tls_mask = ((struct ppc_elf_link_hash_entry *) h)->tls_mask;
else if (local_got_offsets != NULL)
{
if (h != NULL)
tls_mask = ((struct ppc_elf_link_hash_entry *) h)->tls_mask;
else if (local_got_offsets != NULL)
{
char *lgot_masks;
lgot_masks = (char *) (local_got_offsets + symtab_hdr->sh_info);
tls_mask = lgot_masks[r_symndx];
}
char *lgot_masks;
lgot_masks = (char *) (local_got_offsets + symtab_hdr->sh_info);
tls_mask = lgot_masks[r_symndx];
}
/* Ensure reloc mapping code below stays sane. */
@ -5870,85 +6003,147 @@ ppc_elf_relocate_section (bfd *output_bfd,
case R_PPC_GOT_TLSGD16_LO:
tls_gd = TLS_TPRELGD;
if (tls_mask != 0 && (tls_mask & TLS_GD) == 0)
goto tls_get_addr_check;
goto tls_ldgd_opt;
break;
case R_PPC_GOT_TLSLD16:
case R_PPC_GOT_TLSLD16_LO:
if (tls_mask != 0 && (tls_mask & TLS_LD) == 0)
{
tls_get_addr_check:
if (rel + 1 < relend)
unsigned int insn1, insn2;
bfd_vma offset;
tls_ldgd_opt:
offset = (bfd_vma) -1;
/* If not using the newer R_PPC_TLSGD/LD to mark
__tls_get_addr calls, we must trust that the call
stays with its arg setup insns, ie. that the next
reloc is the __tls_get_addr call associated with
the current reloc. Edit both insns. */
if (input_section->has_tls_get_addr_call
&& rel + 1 < relend
&& branch_reloc_hash_match (input_bfd, rel + 1,
htab->tls_get_addr))
offset = rel[1].r_offset;
if ((tls_mask & tls_gd) != 0)
{
enum elf_ppc_reloc_type r_type2;
unsigned long r_symndx2;
struct elf_link_hash_entry *h2;
bfd_vma insn1, insn2;
bfd_vma offset;
/* The next instruction should be a call to
__tls_get_addr. Peek at the reloc to be sure. */
r_type2 = ELF32_R_TYPE (rel[1].r_info);
r_symndx2 = ELF32_R_SYM (rel[1].r_info);
if (r_symndx2 < symtab_hdr->sh_info
|| (r_type2 != R_PPC_REL14
&& r_type2 != R_PPC_REL14_BRTAKEN
&& r_type2 != R_PPC_REL14_BRNTAKEN
&& r_type2 != R_PPC_REL24
&& r_type2 != R_PPC_PLTREL24))
break;
h2 = sym_hashes[r_symndx2 - symtab_hdr->sh_info];
while (h2->root.type == bfd_link_hash_indirect
|| h2->root.type == bfd_link_hash_warning)
h2 = (struct elf_link_hash_entry *) h2->root.u.i.link;
if (h2 == NULL || h2 != htab->tls_get_addr)
break;
/* OK, it checks out. Replace the call. */
offset = rel[1].r_offset;
/* IE */
insn1 = bfd_get_32 (output_bfd,
contents + rel->r_offset - d_offset);
if ((tls_mask & tls_gd) != 0)
insn1 &= (1 << 26) - 1;
insn1 |= 32 << 26; /* lwz */
if (offset != (bfd_vma) -1)
{
/* IE */
insn1 &= (1 << 26) - 1;
insn1 |= 32 << 26; /* lwz */
rel[1].r_info
= ELF32_R_INFO (ELF32_R_SYM (rel[1].r_info),
R_PPC_NONE);
insn2 = 0x7c631214; /* add 3,3,2 */
rel[1].r_info = ELF32_R_INFO (r_symndx2, R_PPC_NONE);
rel[1].r_addend = 0;
r_type = (((r_type - (R_PPC_GOT_TLSGD16 & 3)) & 3)
+ R_PPC_GOT_TPREL16);
rel->r_info = ELF32_R_INFO (r_symndx, r_type);
bfd_put_32 (output_bfd, insn2, contents + offset);
}
else
{
/* LE */
insn1 = 0x3c620000; /* addis 3,2,0 */
insn2 = 0x38630000; /* addi 3,3,0 */
if (tls_gd == 0)
{
/* Was an LD reloc. */
r_symndx = 0;
rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
}
r_type = R_PPC_TPREL16_HA;
rel->r_info = ELF32_R_INFO (r_symndx, r_type);
rel[1].r_info = ELF32_R_INFO (r_symndx,
R_PPC_TPREL16_LO);
rel[1].r_offset += d_offset;
rel[1].r_addend = rel->r_addend;
}
bfd_put_32 (output_bfd, insn1, contents + rel->r_offset - d_offset);
bfd_put_32 (output_bfd, insn2, contents + offset);
r_type = (((r_type - (R_PPC_GOT_TLSGD16 & 3)) & 3)
+ R_PPC_GOT_TPREL16);
rel->r_info = ELF32_R_INFO (r_symndx, r_type);
}
else
{
/* LE */
insn1 = 0x3c620000; /* addis 3,2,0 */
if (tls_gd == 0)
{
/* We changed the symbol on an LD reloc. Start over
in order to get h, sym, sec etc. right. */
rel--;
continue;
/* Was an LD reloc. */
for (r_symndx = 0;
r_symndx < symtab_hdr->sh_info;
r_symndx++)
if (local_sections[r_symndx] == sec)
break;
if (r_symndx >= symtab_hdr->sh_info)
r_symndx = 0;
rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
if (r_symndx != 0)
rel->r_addend -= (local_syms[r_symndx].st_value
+ sec->output_offset
+ sec->output_section->vma);
}
r_type = R_PPC_TPREL16_HA;
rel->r_info = ELF32_R_INFO (r_symndx, r_type);
if (offset != (bfd_vma) -1)
{
rel[1].r_info = ELF32_R_INFO (r_symndx, R_PPC_TPREL16_LO);
rel[1].r_offset = offset + d_offset;
rel[1].r_addend = rel->r_addend;
insn2 = 0x38630000; /* addi 3,3,0 */
bfd_put_32 (output_bfd, insn2, contents + offset);
}
}
bfd_put_32 (output_bfd, insn1,
contents + rel->r_offset - d_offset);
if (tls_gd == 0)
{
/* We changed the symbol on an LD reloc. Start over
in order to get h, sym, sec etc. right. */
rel--;
continue;
}
}
break;
case R_PPC_TLSGD:
if (tls_mask != 0 && (tls_mask & TLS_GD) == 0)
{
unsigned int insn2;
bfd_vma offset = rel->r_offset;
if ((tls_mask & TLS_TPRELGD) != 0)
{
/* IE */
r_type = R_PPC_NONE;
insn2 = 0x7c631214; /* add 3,3,2 */
}
else
{
/* LE */
r_type = R_PPC_TPREL16_LO;
rel->r_offset += d_offset;
insn2 = 0x38630000; /* addi 3,3,0 */
}
rel->r_info = ELF32_R_INFO (r_symndx, r_type);
bfd_put_32 (output_bfd, insn2, contents + offset);
/* Zap the reloc on the _tls_get_addr call too. */
BFD_ASSERT (offset == rel[1].r_offset);
rel[1].r_info = ELF32_R_INFO (ELF32_R_SYM (rel[1].r_info),
R_PPC_NONE);
}
break;
case R_PPC_TLSLD:
if (tls_mask != 0 && (tls_mask & TLS_LD) == 0)
{
unsigned int insn2;
for (r_symndx = 0;
r_symndx < symtab_hdr->sh_info;
r_symndx++)
if (local_sections[r_symndx] == sec)
break;
if (r_symndx >= symtab_hdr->sh_info)
r_symndx = 0;
rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
if (r_symndx != 0)
rel->r_addend -= (local_syms[r_symndx].st_value
+ sec->output_offset
+ sec->output_section->vma);
rel->r_info = ELF32_R_INFO (r_symndx, R_PPC_TPREL16_LO);
rel->r_offset += d_offset;
insn2 = 0x38630000; /* addi 3,3,0 */
bfd_put_32 (output_bfd, insn2,
contents + rel->r_offset - d_offset);
/* Zap the reloc on the _tls_get_addr call too. */
BFD_ASSERT (rel->r_offset - d_offset == rel[1].r_offset);
rel[1].r_info = ELF32_R_INFO (ELF32_R_SYM (rel[1].r_info),
R_PPC_NONE);
rel--;
continue;
}
break;
}
@ -6003,6 +6198,8 @@ ppc_elf_relocate_section (bfd *output_bfd,
case R_PPC_NONE:
case R_PPC_TLS:
case R_PPC_TLSGD:
case R_PPC_TLSLD:
case R_PPC_EMB_MRKREF:
case R_PPC_GNU_VTINHERIT:
case R_PPC_GNU_VTENTRY:
@ -6044,6 +6241,7 @@ ppc_elf_relocate_section (bfd *output_bfd,
case R_PPC_GOT16_LO:
case R_PPC_GOT16_HI:
case R_PPC_GOT16_HA:
tls_mask = 0;
dogot:
{
/* Relocation is to the entry for this symbol in the global
@ -6342,7 +6540,7 @@ ppc_elf_relocate_section (bfd *output_bfd,
&& (h == NULL
|| ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
|| h->root.type != bfd_link_hash_undefweak)
&& (MUST_BE_DYN_RELOC (r_type)
&& (must_be_dyn_reloc (info, r_type)
|| !SYMBOL_CALLS_LOCAL (info, h)))
|| (ELIMINATE_COPY_RELOCS
&& !info->shared
@ -6411,10 +6609,10 @@ ppc_elf_relocate_section (bfd *output_bfd,
outrel.r_info = ELF32_R_INFO (0, R_PPC_RELATIVE);
else
{
long indx;
long indx = 0;
if (bfd_is_abs_section (sec))
indx = 0;
if (r_symndx == 0 || bfd_is_abs_section (sec))
;
else if (sec == NULL || sec->owner == NULL)
{
bfd_set_error (bfd_error_bad_value);

File diff suppressed because it is too large.


@ -1169,6 +1169,8 @@ static const char *const bfd_reloc_code_real_names[] = { "@@uninitialized@@",
"BFD_RELOC_PPC64_PLTGOT16_DS",
"BFD_RELOC_PPC64_PLTGOT16_LO_DS",
"BFD_RELOC_PPC_TLS",
"BFD_RELOC_PPC_TLSGD",
"BFD_RELOC_PPC_TLSLD",
"BFD_RELOC_PPC_DTPMOD",
"BFD_RELOC_PPC_TPREL16",
"BFD_RELOC_PPC_TPREL16_LO",


@ -2626,6 +2626,10 @@ ENUMDOC
ENUM
BFD_RELOC_PPC_TLS
ENUMX
BFD_RELOC_PPC_TLSGD
ENUMX
BFD_RELOC_PPC_TLSLD
ENUMX
BFD_RELOC_PPC_DTPMOD
ENUMX


@ -1,6 +1,6 @@
/* Object file "section" support for the BFD library.
Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
Written by Cygnus Support.
@ -382,6 +382,9 @@ CODE_FRAGMENT
. {* Nonzero if this section has TLS related relocations. *}
. unsigned int has_tls_reloc:1;
.
. {* Nonzero if this section has a call to __tls_get_addr. *}
. unsigned int has_tls_get_addr_call:1;
.
. {* Nonzero if this section has a gp reloc. *}
. unsigned int has_gp_reloc:1;
.
@ -642,11 +645,11 @@ CODE_FRAGMENT
. {* segment_mark, sec_info_type, use_rela_p, has_tls_reloc, *} \
. 0, 0, 0, 0, \
. \
. {* has_gp_reloc, need_finalize_relax, reloc_done, *} \
. 0, 0, 0, \
. {* has_tls_get_addr_call, has_gp_reloc, need_finalize_relax, *} \
. 0, 0, 0, \
. \
. {* vma, lma, size, rawsize *} \
. 0, 0, 0, 0, \
. {* reloc_done, vma, lma, size, rawsize *} \
. 0, 0, 0, 0, 0, \
. \
. {* output_offset, output_section, alignment_power, *} \
. 0, (struct bfd_section *) &SEC, 0, \


@ -1,3 +1,8 @@
2009-03-04 Alan Modra <amodra@bigpond.net.au>
* ppc.h (R_PPC_TLSGD, R_PPC_TLSLD): Add new relocs.
* ppc64.h (R_PPC64_TLSGD, R_PPC64_TLSLD): Add new relocs.
2007-06-29 Joseph Myers <joseph@codesourcery.com>
* ppc.h (Tag_GNU_Power_ABI_FP): Define.


@ -100,6 +100,8 @@ START_RELOC_NUMBERS (elf_ppc_reloc_type)
RELOC_NUMBER (R_PPC_GOT_DTPREL16_LO, 92)
RELOC_NUMBER (R_PPC_GOT_DTPREL16_HI, 93)
RELOC_NUMBER (R_PPC_GOT_DTPREL16_HA, 94)
RELOC_NUMBER (R_PPC_TLSGD, 95)
RELOC_NUMBER (R_PPC_TLSLD, 96)
/* The remaining relocs are from the Embedded ELF ABI, and are not
in the SVR4 ELF ABI. */


@ -1,5 +1,5 @@
/* PPC64 ELF support for BFD.
Copyright 2003 Free Software Foundation, Inc.
Copyright 2003, 2005, 2009 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
@ -136,6 +136,8 @@ START_RELOC_NUMBERS (elf_ppc64_reloc_type)
RELOC_NUMBER (R_PPC64_DTPREL16_HIGHERA, 104)
RELOC_NUMBER (R_PPC64_DTPREL16_HIGHEST, 105)
RELOC_NUMBER (R_PPC64_DTPREL16_HIGHESTA, 106)
RELOC_NUMBER (R_PPC64_TLSGD, 107)
RELOC_NUMBER (R_PPC64_TLSLD, 108)
/* These are GNU extensions to enable C++ vtable garbage collection. */
RELOC_NUMBER (R_PPC64_GNU_VTINHERIT, 253)


@ -205,6 +205,8 @@
..
netinet
..
opencrypto
..
pjdfstest
chflags
..


@ -1386,6 +1386,9 @@ typedef struct bfd_section
/* Nonzero if this section has TLS related relocations. */
unsigned int has_tls_reloc:1;
/* Nonzero if this section has a call to __tls_get_addr. */
unsigned int has_tls_get_addr_call:1;
/* Nonzero if this section has a gp reloc. */
unsigned int has_gp_reloc:1;
@ -1646,11 +1649,11 @@ extern asection bfd_ind_section;
/* segment_mark, sec_info_type, use_rela_p, has_tls_reloc, */ \
0, 0, 0, 0, \
\
/* has_gp_reloc, need_finalize_relax, reloc_done, */ \
0, 0, 0, \
/* has_tls_get_addr_call, has_gp_reloc, need_finalize_relax, */ \
0, 0, 0, \
\
/* vma, lma, size, rawsize */ \
0, 0, 0, 0, \
/* reloc_done, vma, lma, size, rawsize */ \
0, 0, 0, 0, 0, \
\
/* output_offset, output_section, alignment_power, */ \
0, (struct bfd_section *) &SEC, 0, \
@ -2903,6 +2906,8 @@ in the instruction. */
/* PowerPC and PowerPC64 thread-local storage relocations. */
BFD_RELOC_PPC_TLS,
BFD_RELOC_PPC_TLSGD,
BFD_RELOC_PPC_TLSLD,
BFD_RELOC_PPC_DTPMOD,
BFD_RELOC_PPC_TPREL16,
BFD_RELOC_PPC_TPREL16_LO,


@ -1,8 +1,16 @@
.\" $OpenBSD: crypto.4,v 1.4 2002/09/12 07:15:03 deraadt Exp $
.\" $NetBSD: crypto.4,v 1.24 2014/01/27 21:23:59 pgoyette Exp $
.\"
.\" Copyright (c) 2001 Theo de Raadt
.\" Copyright (c) 2008 The NetBSD Foundation, Inc.
.\" Copyright (c) 2014 The FreeBSD Foundation
.\" All rights reserved.
.\"
.\" Portions of this documentation were written by John-Mark Gurney
.\" under sponsorship of the FreeBSD Foundation and
.\" Rubicon Communications, LLC (Netgate).
.\"
.\" This code is derived from software contributed to The NetBSD Foundation
.\" by Coyote Point Systems, Inc.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
@ -11,99 +19,378 @@
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 3. The name of the author may not be used to endorse or promote products
.\" derived from this software without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
.\" DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
.\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
.\" (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
.\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
.\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.\"
.\"
.\" Copyright (c) 2004
.\" Jonathan Stone <jonathan@dsg.stanford.edu>. All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY Jonathan Stone AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL Jonathan Stone OR THE VOICES IN HIS HEAD
.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
.\" THE POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd September 7, 2010
.Dd December 12, 2014
.Dt CRYPTO 4
.Os
.Sh NAME
.Nm crypto ,
.Nm cryptodev
.Nd hardware crypto access driver
.Nd user-mode access to hardware-accelerated cryptography
.Sh SYNOPSIS
.Cd device crypto
.Cd device cryptodev
.Pp
.In sys/ioctl.h
.In sys/time.h
.In crypto/cryptodev.h
.Sh DESCRIPTION
The
.Nm
driver provides a device-independent framework to support
cryptographic operations in the kernel.
driver gives user-mode applications access to hardware-accelerated
cryptographic transforms, as implemented by the
.Xr opencrypto 9
in-kernel interface.
.Pp
The
.Nm cryptodev
driver provides userland applications access to this support
through the
.Pa /dev/crypto
device.
This node primarily operates in an
special device provides an
.Xr ioctl 2
based model, permitting a variety of applications to query device capabilities,
submit transactions, and get results.
based interface.
User-mode applications should open the special device,
then issue
.Xr ioctl 2
calls on the descriptor.
User-mode access to
.Pa /dev/crypto
is controlled by two
.Xr sysctl 8
variables,
.Ic kern.userasymcrypto
and
.Ic kern.cryptodevallowsoft .
See
.Xr sysctl 8
for additional details.
.Pp
The
.Nm
device provides two distinct modes of operation: one mode for
symmetric-keyed cryptographic requests, and a second mode for
both asymmetric-key (public-key/private-key) requests, and for
modular arithmetic (for Diffie-Hellman key exchange and other
cryptographic protocols).
The two modes are described separately below.
.Sh THEORY OF OPERATION
Regardless of whether symmetric-key or asymmetric-key operations are
to be performed, use of the device requires a basic series of steps
(a sketch in C follows the list):
.Pp
.Bl -enum
.It
Open a file descriptor for the device.
See
.Xr open 2 .
.It
If any symmetric operation will be performed,
create one session, with
.Dv CIOCGSESSION .
Most applications will require at least one symmetric session.
Since cipher and MAC keys are tied to sessions, many
applications will require more.
Asymmetric operations do not use sessions.
.It
Submit requests, synchronously with
.Dv CIOCCRYPT
(symmetric)
or
.Dv CIOCKEY
(asymmetric).
.It
Destroy one session with
.Dv CIOCFSESSION .
.It
Close the device with
.Xr close 2 .
.El
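.Pp
As an illustration of these steps, a minimal user-mode sketch for a
single
.Dv CRYPTO_AES_CBC
session might read as follows; the key value is a placeholder, error
handling is abbreviated, and the
.Dv CIOCCRYPT
step (step 3) is shown separately in the next section.
.Bd -literal -offset 2n
#include <sys/ioctl.h>
#include <sys/time.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	/* Placeholder 128-bit AES key, for illustration only. */
	static unsigned char key[16] = "0123456789abcdef";
	struct session_op sess;
	int fd, cfd;

	if ((fd = open("/dev/crypto", O_RDWR)) == -1)	/* step 1 */
		err(1, "open");
	if (ioctl(fd, CRIOGET, &cfd) == -1)	/* clone the descriptor */
		err(1, "CRIOGET");
	memset(&sess, 0, sizeof(sess));
	sess.cipher = CRYPTO_AES_CBC;		/* step 2: new session */
	sess.keylen = sizeof(key);
	sess.key = key;
	if (ioctl(cfd, CIOCGSESSION, &sess) == -1)
		err(1, "CIOCGSESSION");
	/* step 3: submit CIOCCRYPT requests against sess.ses */
	if (ioctl(cfd, CIOCFSESSION, &sess.ses) == -1)	/* step 4 */
		err(1, "CIOCFSESSION");
	close(cfd);				/* step 5 */
	close(fd);
	return (0);
}
.Ed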
.Sh SYMMETRIC-KEY OPERATION
The symmetric-key operation mode provides a context-based API
to traditional symmetric-key encryption (or privacy) algorithms,
or to keyed and unkeyed one-way hash (HMAC and MAC) algorithms.
The symmetric-key mode also permits fused operation,
where the hardware performs both a privacy algorithm and an integrity-check
algorithm in a single pass over the data: either a fused
encrypt/HMAC-generate operation, or a fused HMAC-verify/decrypt operation.
.Pp
To use symmetric mode, you must first create a session specifying
the algorithm(s) and key(s) to use; then issue encrypt or decrypt
requests against the session.
.Ss Algorithms
For a list of supported algorithms, see
.Xr crypto 7
and
.Xr crypto 9 .
.Ss IOCTL Request Descriptions
.\"
.Bl -tag -width CIOCGSESSION
.\"
.It Dv CRIOGET Fa int *fd
Clone the fd argument to
.Xr ioctl 2 ,
yielding a new file descriptor for the creation of sessions.
.\"
.It Dv CIOCFINDDEV Fa struct crypt_find_op *fop
.Bd -literal
struct crypt_find_op {
int crid; /* driver id + flags */
char name[32]; /* device/driver name */
};
.Ed
If
.Ar count
given in the specification, and is greater than 0, a maximum of one
.Nm
device is created.
.Fa crid
is -1, then find the driver named
.Fa name
and return the id in
.Fa crid .
If
.Fa crid
is not -1, return the name of the driver with
.Fa crid
in
.Fa name .
In either case, if the driver is not found,
.Dv ENOENT
is returned.
.It Dv CIOCGSESSION Fa struct session_op *sessp
.Bd -literal
struct session_op {
u_int32_t cipher; /* e.g. CRYPTO_DES_CBC */
u_int32_t mac; /* e.g. CRYPTO_MD5_HMAC */
u_int32_t keylen; /* cipher key */
void * key;
int mackeylen; /* mac key */
void * mackey;
u_int32_t ses; /* returns: ses # */
};
.Ed
Create a new cryptographic session on a file descriptor for the device;
that is, a persistent object specific to the chosen
privacy algorithm, integrity algorithm, and keys specified in
.Fa sessp .
The special value 0 for either privacy or integrity
is reserved to indicate that the indicated operation (privacy or integrity)
is not desired for this session.
.Pp
The following
Multiple sessions may be bound to a single file descriptor.
The session ID returned in
.Fa sessp-\*[Gt]ses
is supplied as a required field in the symmetric-operation structure
.Fa crypt_op
for future encryption or hashing requests.
.\" .Pp
.\" This implementation will never return a session ID of 0 for a successful
.\" creation of a session, which is a
.\" .Nx
.\" extension.
.Pp
For non-zero symmetric-key privacy algorithms, the privacy algorithm
must be specified in
.Fa sessp-\*[Gt]cipher ,
the key length in
.Fa sessp-\*[Gt]keylen ,
and the key value in the octets addressed by
.Fa sessp-\*[Gt]key .
.Pp
For keyed one-way hash algorithms, the one-way hash must be specified
in
.Fa sessp-\*[Gt]mac ,
the key length in
.Fa sessp-\*[Gt]mackeylen ,
and the key value in the octets addressed by
.Fa sessp-\*[Gt]mackey .
.\"
.Pp
Support for a specific combination of fused privacy and
integrity-check algorithms depends on whether the underlying
hardware supports that combination.
Not all combinations are supported
by all hardware, even if the hardware supports each operation as a
stand-alone non-fused operation.
.It Dv CIOCCRYPT Fa struct crypt_op *cr_op
.Bd -literal
struct crypt_op {
u_int32_t ses;
u_int16_t op; /* e.g. COP_ENCRYPT */
u_int16_t flags;
u_int len;
caddr_t src, dst;
caddr_t mac; /* must be large enough for result */
caddr_t iv;
};
.Ed
Request a symmetric-key (or hash) operation.
The file descriptor argument to
.Xr ioctl 2
calls apply only to the
.Nm
devices:
.Bl -tag -width ".Dv CIOCGSESSION"
.It Dv CIOCGSESSION
Setup a new crypto session for a new type of operation.
.It Dv CIOCFSESSION
Free a previously established session.
.It Dv CIOCCRYPT
Perform a crypto operation against a previously setup session.
must have been bound to a valid session.
To encrypt, set
.Fa cr_op-\*[Gt]op
to
.Dv COP_ENCRYPT .
To decrypt, set
.Fa cr_op-\*[Gt]op
to
.Dv COP_DECRYPT .
The field
.Fa cr_op-\*[Gt]len
supplies the length of the input buffer; the fields
.Fa cr_op-\*[Gt]src ,
.Fa cr_op-\*[Gt]dst ,
.Fa cr_op-\*[Gt]mac ,
.Fa cr_op-\*[Gt]iv
supply the addresses of the input buffer, output buffer,
one-way hash, and initialization vector, respectively
(a sketch follows this list).
.It Dv CIOCCRYPTAEAD Fa struct crypt_aead *cr_aead
.Bd -literal
struct crypt_aead {
u_int32_t ses;
u_int16_t op; /* e.g. COP_ENCRYPT */
u_int16_t flags;
u_int len;
u_int aadlen;
u_int ivlen;
caddr_t src, dst;
caddr_t aad;
caddr_t tag; /* must be large enough for result */
caddr_t iv;
};
.Ed
The
.Dv CIOCCRYPTAEAD
request is similar to
.Dv CIOCCRYPT ,
but provides additional data in
.Fa cr_aead-\*[Gt]aad
to be included in the authentication computation.
.It Dv CIOCFSESSION Fa u_int32_t ses_id
Destroys the session identified by the
.Fa ses_id
argument.
.It Dv CIOCNFSESSION Fa struct crypt_sfop *sfop ;
.Bd -literal
struct crypt_sfop {
size_t count;
u_int32_t *sesid;
};
.Ed
Destroys the
.Fa sfop-\*[Gt]count
sessions specified by the
.Fa sfop
array of session identifiers.
.El
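.Pp
Putting
.Dv CIOCGSESSION
and
.Dv CIOCCRYPT
together, a minimal fragment for one in-place encryption might read as
follows, assuming the session and descriptor from the earlier sketch;
the buffer contents and IV are placeholders.
.Bd -literal -offset 2n
	struct crypt_op cop;
	unsigned char buf[16];		/* one AES block of data */
	unsigned char iv[16] = { 0 };	/* placeholder IV */

	memset(&cop, 0, sizeof(cop));
	cop.ses = sess.ses;		/* from CIOCGSESSION */
	cop.op = COP_ENCRYPT;
	cop.len = sizeof(buf);
	cop.src = (caddr_t)buf;		/* input buffer */
	cop.dst = (caddr_t)buf;		/* encrypt in place */
	cop.iv = (caddr_t)iv;		/* explicit IV */
	if (ioctl(cfd, CIOCCRYPT, &cop) == -1)
		err(1, "CIOCCRYPT");
.Ed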
.Sh FEATURES
Depending on hardware being present, the following symmetric and
asymmetric cryptographic features are potentially available from
.Pa /dev/crypto :
.\"
.Sh ASYMMETRIC-KEY OPERATION
.Ss Asymmetric-key algorithms
Contingent upon hardware support, the following asymmetric
(public-key/private-key; or key-exchange subroutine) operations may
also be available:
.Pp
.Bl -tag -width ".Dv CRYPTO_RIPEMD160_HMAC" -offset indent -compact
.It Dv CRYPTO_DES_CBC
.It Dv CRYPTO_3DES_CBC
.It Dv CRYPTO_BLF_CBC
.It Dv CRYPTO_CAMELLIA_CBC
.It Dv CRYPTO_CAST_CBC
.It Dv CRYPTO_SKIPJACK_CBC
.It Dv CRYPTO_MD5_HMAC
.It Dv CRYPTO_SHA1_HMAC
.It Dv CRYPTO_RIPEMD160_HMAC
.It Dv CRYPTO_MD5_KPDK
.It Dv CRYPTO_SHA1_KPDK
.It Dv CRYPTO_AES_CBC
.It Dv CRYPTO_ARC4
.It Dv CRYPTO_MD5
.It Dv CRYPTO_SHA1
.It Dv CRK_MOD_EXP
.It Dv CRK_MOD_EXP_CRT
.It Dv CRK_DSA_SIGN
.It Dv CRK_DSA_VERIFY
.It Dv CRK_DH_COMPUTE_KEY
.Bl -column "CRK_DH_COMPUTE_KEY" "Input parameter" "Output parameter" -offset indent -compact
.It Em "Algorithm" Ta "Input parameter" Ta "Output parameter"
.It Em " " Ta "Count" Ta "Count"
.It Dv CRK_MOD_EXP Ta 3 Ta 1
.It Dv CRK_MOD_EXP_CRT Ta 6 Ta 1
.It Dv CRK_DSA_SIGN Ta 5 Ta 2
.It Dv CRK_DSA_VERIFY Ta 7 Ta 0
.It Dv CRK_DH_COMPUTE_KEY Ta 3 Ta 1
.El
.Sh FILES
.Bl -tag -width ".Pa /dev/crypto" -compact
.It Pa /dev/crypto
crypto access device
.Pp
See below for discussion of the input and output parameter counts.
.Ss Asymmetric-key commands
.Bl -tag -width CIOCKEY
.It Dv CIOCASYMFEAT Fa int *feature_mask
Returns a bitmask of supported asymmetric-key operations.
Each of the above-listed asymmetric operations is present
if and only if the bit position numbered by the code for that operation
is set.
For example,
.Dv CRK_MOD_EXP
is available if and only if the bit
.Pq 1 \*[Lt]\*[Lt] Dv CRK_MOD_EXP
is set (a sketch follows this list).
.It Dv CIOCKEY Fa struct crypt_kop *kop
.Bd -literal
struct crypt_kop {
u_int crk_op; /* e.g. CRK_MOD_EXP */
u_int crk_status; /* return status */
u_short crk_iparams; /* # of input params */
u_short crk_oparams; /* # of output params */
u_int crk_pad1;
struct crparam crk_param[CRK_MAXPARAM];
};
/* Bignum parameter, in packed bytes. */
struct crparam {
void * crp_p;
u_int crp_nbits;
};
.Ed
Performs an asymmetric-key operation from the list above.
The specific operation is supplied in
.Fa kop-\*[Gt]crk_op ;
final status for the operation is returned in
.Fa kop-\*[Gt]crk_status .
The number of input arguments and the number of output arguments
are specified in
.Fa kop-\*[Gt]crk_iparams
and
.Fa kop-\*[Gt]crk_oparams ,
respectively.
The field
.Fa crk_param[]
must be filled in with exactly
.Fa kop-\*[Gt]crk_iparams + kop-\*[Gt]crk_oparams
arguments, each encoded as a
.Fa struct crparam
(address, bitlength) pair.
.Pp
The semantics of these arguments are currently undocumented.
.El
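.Pp
As an example of the bitmask convention, a short sketch testing for
.Dv CRK_MOD_EXP
support might read as follows, assuming
.Va cfd
is a descriptor for
.Pa /dev/crypto :
.Bd -literal -offset 2n
	int feat;

	if (ioctl(cfd, CIOCASYMFEAT, &feat) == -1)
		err(1, "CIOCASYMFEAT");
	if (feat & (1 << CRK_MOD_EXP))
		printf("modular exponentiation supported\en");
.Ed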
.Sh SEE ALSO
.Xr aesni 4 ,
@ -112,6 +399,7 @@ crypto access device
.Xr padlock 4 ,
.Xr safe 4 ,
.Xr ubsec 4 ,
.Xr crypto 7 ,
.Xr geli 8 ,
.Xr crypto 9
.Sh HISTORY
@ -123,3 +411,24 @@ The
.Nm
driver was imported to
.Fx 5.0 .
.Sh BUGS
Error checking and reporting is weak.
.Pp
The values specified for symmetric-key key sizes to
.Dv CIOCGSESSION
must exactly match the values expected by
.Xr opencrypto 9 .
The output buffer and MAC buffers supplied to
.Dv CIOCCRYPT
must correspond to whether privacy or integrity algorithms were specified for
the session: if you request a
.No non- Ns Dv NULL
algorithm, you must supply a suitably-sized buffer.
.Pp
The scheme for passing arguments for asymmetric requests is baroque.
.Pp
The naming inconsistency between
.Dv CRIOGET
and the various
.Dv CIOC Ns \&*
names is an unfortunate historical artifact.


@ -9,6 +9,7 @@ MAN= adding_user.7 \
bsd.snmpmod.mk.7 \
build.7 \
clocks.7 \
crypto.7 \
c99.7 \
development.7 \
environ.7 \

share/man/man7/crypto.7 (new file, 141 lines)

@ -0,0 +1,141 @@
.\" Copyright (c) 2014 The FreeBSD Foundation
.\" All rights reserved.
.\"
.\" This documentation was written by John-Mark Gurney under
.\" the sponsorship of the FreeBSD Foundation and
.\" Rubicon Communications, LLC (Netgate).
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd December 12, 2014
.Dt CRYPTO 7
.Os
.Sh NAME
.Nm crypto
.Nd OpenCrypto algorithms
.Sh SYNOPSIS
In the kernel configuration file:
.Cd "device crypto"
.Pp
Or load the crypto.ko module.
.Sh DESCRIPTION
The cryptographic algorithms that are part of the OpenCrypto
framework have the following requirements.
.Pp
Cipher algorithms:
.Bl -tag -width ".Dv CRYPTO_AES_CBC"
.It Dv CRYPTO_AES_CBC
.Bl -tag -width "Block size :" -compact -offset indent
.It IV size :
16
.It Block size :
16
.It Key size :
16, 24 or 32
.El
.Pp
This algorithm implements cipher-block chaining (CBC) mode.
.It Dv CRYPTO_AES_NIST_GCM_16
.Bl -tag -width "Block size :" -compact -offset indent
.It IV size :
12
.It Block size :
1
.It Key size :
16, 24 or 32
.It Digest size :
16
.El
.Pp
This algorithm implements Galois/Counter Mode.
This is the cipher part of an AEAD
.Pq Authenticated Encryption with Associated Data
mode.
This requires the use of a proper authentication mode, one of
.Dv CRYPTO_AES_128_NIST_GMAC ,
.Dv CRYPTO_AES_192_NIST_GMAC
or
.Dv CRYPTO_AES_256_NIST_GMAC ,
that corresponds to the number of bits in the key that you are using
(a session-setup sketch follows the authentication algorithm list below).
.Pp
The associated data (if any) must be provided by the authentication mode op.
The authentication tag will be read/written from/to the offset crd_inject
specified in the descriptor for the authentication mode.
.Pp
Note: You must provide an IV on every call.
.It Dv CRYPTO_AES_ICM
.Bl -tag -width "Block size :" -compact -offset indent
.It IV size :
16
.It Block size :
1 (aesni), 16 (software)
.It Key size :
16, 24 or 32
.El
.Pp
This algorithm implements Integer Counter Mode.
This is similar to what most people call counter mode, but instead of the
counter being split into a nonce and a counter part, the entire nonce is
used as the initial counter.
This does mean that if a counter is required that rolls over at 32 bits,
the transaction needs to be split into two parts where the counter rolls over.
The counter is incremented as a 128-bit big-endian number (see the sketch
after this list).
.Pp
Note: You must provide an IV on every call.
.It Dv CRYPTO_AES_XTS
.Bl -tag -width "Block size :" -compact -offset indent
.It IV size :
16
.It Block size :
16
.It Key size :
32 or 64
.El
.Pp
This algorithm implements XEX Tweakable Block Cipher with Ciphertext Stealing
as defined in NIST SP 800-38E.
.Pp
NOTE: The ciphertext-stealing part is not implemented, which is why this
cipher is listed as having a block size of 16 instead of 1.
.El
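.Pp
The 128-bit big-endian counter behavior described for
.Dv CRYPTO_AES_ICM
above can be illustrated with a short sketch; this helper is purely
illustrative and is not part of the OpenCrypto interface:
.Bd -literal -offset indent
/* Increment a 16-byte block as a 128-bit big-endian counter. */
static void
ctr128_inc(unsigned char ctr[16])
{
	int i;

	for (i = 15; i >= 0; i--)
		if (++ctr[i] != 0)	/* stop once a byte does not wrap */
			break;
}
.Ed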
.Pp
Authentication algorithms:
.Bl -tag -width ".Dv CRYPTO_AES_256_NIST_GMAC"
.It CRYPTO_AES_128_NIST_GMAC
See
.Dv CRYPTO_AES_NIST_GCM_16
in the cipher mode section.
.It CRYPTO_AES_192_NIST_GMAC
See
.Dv CRYPTO_AES_NIST_GCM_16
in the cipher mode section.
.It CRYPTO_AES_256_NIST_GMAC
See
.Dv CRYPTO_AES_NIST_GCM_16
in the cipher mode section.
.El
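.Pp
The following is a hedged sketch of creating an AES-128-GCM session
through the
.Xr crypto 9
interface by chaining the cipher with its matching authentication mode;
the
.Va key
variable is an assumption, and error handling is omitted.
.Bd -literal -offset indent
struct cryptoini cria, crie;
uint64_t sid;
int error;

bzero(&crie, sizeof(crie));
bzero(&cria, sizeof(cria));
crie.cri_alg = CRYPTO_AES_NIST_GCM_16;	/* cipher half of the AEAD */
crie.cri_klen = 128;			/* key length in bits */
crie.cri_key = (caddr_t)key;		/* 16-byte AES key (assumed) */
crie.cri_next = &cria;
cria.cri_alg = CRYPTO_AES_128_NIST_GMAC; /* matches the 128-bit key */
cria.cri_klen = 128;
cria.cri_key = (caddr_t)key;		/* same key as the cipher */
error = crypto_newsession(&sid, &crie,
    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
.Ed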
.Sh SEE ALSO
.Xr crypto 4 ,
.Xr crypto 9
.Sh BUGS
Not all the implemented algorithms are listed.

View File

@ -17,7 +17,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 18, 2014
.Dd December 12, 2014
.Dt CRYPTO 9
.Os
.Sh NAME
@ -175,17 +175,26 @@ Contains an algorithm identifier.
Currently supported algorithms are:
.Pp
.Bl -tag -width ".Dv CRYPTO_RIPEMD160_HMAC" -compact
.It Dv CRYPTO_AES_128_NIST_GMAC
.It Dv CRYPTO_AES_192_NIST_GMAC
.It Dv CRYPTO_AES_256_NIST_GMAC
.It Dv CRYPTO_AES_CBC
.It Dv CRYPTO_AES_ICM
.It Dv CRYPTO_AES_NIST_GCM_16
.It Dv CRYPTO_AES_NIST_GMAC
.It Dv CRYPTO_AES_XTS
.It Dv CRYPTO_ARC4
.It Dv CRYPTO_BLF_CBC
.It Dv CRYPTO_CAMELLIA_CBC
.It Dv CRYPTO_CAST_CBC
.It Dv CRYPTO_DEFLATE_COMP
.It Dv CRYPTO_DES_CBC
.It Dv CRYPTO_3DES_CBC
.It Dv CRYPTO_SKIPJACK_CBC
.It Dv CRYPTO_MD5
.It Dv CRYPTO_MD5_HMAC
.It Dv CRYPTO_MD5_KPDK
.It Dv CRYPTO_NULL_HMAC
.It Dv CRYPTO_NULL_CBC
.It Dv CRYPTO_RIPEMD160_HMAC
.It Dv CRYPTO_SHA1
.It Dv CRYPTO_SHA1_HMAC
@ -193,8 +202,7 @@ Currently supported algorithms are:
.It Dv CRYPTO_SHA2_256_HMAC
.It Dv CRYPTO_SHA2_384_HMAC
.It Dv CRYPTO_SHA2_512_HMAC
.It Dv CRYPTO_NULL_HMAC
.It Dv CRYPTO_NULL_CBC
.It Dv CRYPTO_SKIPJACK_CBC
.El
.It Va cri_klen
Specifies the length of the key in bits, for variable-size key
@ -207,7 +215,8 @@ Contains the key to be used with the algorithm.
.It Va cri_iv
Contains an explicit initialization vector (IV), if it does not prefix
the data.
This field is ignored during initialization.
This field is ignored during initialization
.Pq Nm crypto_newsession .
If no IV is explicitly passed (see below for details), a random IV is used
by the device driver processing the request.
.It Va cri_next
@ -296,8 +305,6 @@ The buffer pointed to by
is an
.Vt uio
structure.
.It Dv CRYPTO_F_REL
Must return data in the same place.
.It Dv CRYPTO_F_BATCH
Batch operation if possible.
.It Dv CRYPTO_F_CBIMM
@ -363,7 +370,7 @@ The following flags are defined:
For encryption algorithms, this bit is set when encryption is required
(when not set, decryption is performed).
.It Dv CRD_F_IV_PRESENT
For encryption algorithms, this bit is set when the IV already
For encryption, this bit is set when the IV already
precedes the data, so the
.Va crd_inject
value will be ignored and no IV will be written in the buffer.
@ -372,7 +379,7 @@ at the location pointed to by
.Va crd_inject .
The IV length is assumed to be equal to the blocksize of the
encryption algorithm.
Some applications that do special
Applications that do special
.Dq "IV cooking" ,
such as the half-IV mode in
.Xr ipsec 4 ,
@ -403,6 +410,8 @@ field for the given operation.
Otherwise, the key is taken at newsession time from the
.Va cri_key
field.
As calculating the key schedule may take a while, it is recommended that
frequently used keys be given their own session (see the request sketch below).
.It Dv CRD_F_COMP
For compression algorithms, this bit is set when compression is required (when
not set, decompression is performed).
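.Pp
As an illustration of the flags above, this hedged sketch submits a
single AES-CBC encryption request with an explicit IV over a contiguous
buffer; the variables
.Va sid , buf , len , iv
and the completion callback
.Fn my_done
are assumptions, and error handling is omitted.
.Bd -literal -offset indent
struct cryptop *crp;
int error;

crp = crypto_getreq(1);			/* one descriptor */
crp->crp_sid = sid;			/* from crypto_newsession() */
crp->crp_ilen = len;
crp->crp_buf = (caddr_t)buf;		/* contiguous kernel buffer */
crp->crp_flags = CRYPTO_F_CBIMM;
crp->crp_callback = my_done;		/* hypothetical callback */
crp->crp_desc->crd_alg = CRYPTO_AES_CBC;
crp->crp_desc->crd_skip = 0;
crp->crp_desc->crd_len = len;
crp->crp_desc->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT;
bcopy(iv, crp->crp_desc->crd_iv, 16);	/* AES block-sized IV */
error = crypto_dispatch(crp);
.Ed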
@ -642,6 +651,7 @@ most of the framework code
.Sh SEE ALSO
.Xr crypto 4 ,
.Xr ipsec 4 ,
.Xr crypto 7 ,
.Xr malloc 9 ,
.Xr sleep 9
.Sh HISTORY

View File

@ -11,7 +11,7 @@
# are exceptions). Recursive makes usually add MK_FOO=no for options that they wish
# to omit from that make.
#
# Makefiles must include bsd.srcpot.mk before they test the value of any MK_FOO
# Makefiles must include bsd.mkopt.mk before they test the value of any MK_FOO
# variable.
#
# Makefiles may also assume that this file is included by bsd.own.mk should it

View File

@ -11,7 +11,7 @@
# are exceptions). Recursive makes usually add MK_FOO=no for options that they wish
# to omit from that make.
#
# Makefiles must include bsd.srcpot.mk before they test the value of any MK_FOO
# Makefiles must include bsd.mkopt.mk before they test the value of any MK_FOO
# variable.
#
# Makefiles may also assume that this file is included by src.opts.mk should it

View File

@ -205,6 +205,7 @@ uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
u_int zfs_arc_free_target = 0;
static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
static int sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS);
#ifdef _KERNEL
static void
@ -259,6 +260,15 @@ sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
return (0);
}
/*
* Must be declared here, before the definition of the corresponding kstat
* macro, which uses the same names and would otherwise confuse the compiler.
*/
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_meta_limit,
CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
sysctl_vfs_zfs_arc_meta_limit, "QU",
"ARC metadata limit");
#endif
/*
@ -413,6 +423,9 @@ typedef struct arc_stats {
kstat_named_t arcstat_duplicate_buffers;
kstat_named_t arcstat_duplicate_buffers_size;
kstat_named_t arcstat_duplicate_reads;
kstat_named_t arcstat_meta_used;
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_meta_max;
} arc_stats_t;
static arc_stats_t arc_stats = {
@ -490,7 +503,10 @@ static arc_stats_t arc_stats = {
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "duplicate_buffers", KSTAT_DATA_UINT64 },
{ "duplicate_buffers_size", KSTAT_DATA_UINT64 },
{ "duplicate_reads", KSTAT_DATA_UINT64 }
{ "duplicate_reads", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 }
};
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
@ -552,6 +568,9 @@ static arc_state_t *arc_l2c_only;
#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
#define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
#define L2ARC_IS_VALID_COMPRESS(_c_) \
((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
@ -559,13 +578,6 @@ static arc_state_t *arc_l2c_only;
static int arc_no_grow; /* Don't try to grow cache size */
static uint64_t arc_tempreserve;
static uint64_t arc_loaned_bytes;
static uint64_t arc_meta_used;
static uint64_t arc_meta_limit;
static uint64_t arc_meta_max = 0;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
"ARC metadata used");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
"ARC metadata limit");
typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
@ -626,6 +638,26 @@ struct arc_buf_hdr {
list_node_t b_l2node;
};
#ifdef _KERNEL
static int
sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS)
{
uint64_t val;
int err;
val = arc_meta_limit;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val <= 0 || val > arc_c_max)
return (EINVAL);
arc_meta_limit = val;
return (0);
}
#endif
static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
@ -1479,7 +1511,7 @@ arc_space_consume(uint64_t space, arc_space_type_t type)
break;
}
atomic_add_64(&arc_meta_used, space);
ARCSTAT_INCR(arcstat_meta_used, space);
atomic_add_64(&arc_size, space);
}
@ -1506,7 +1538,7 @@ arc_space_return(uint64_t space, arc_space_type_t type)
ASSERT(arc_meta_used >= space);
if (arc_meta_max < arc_meta_used)
arc_meta_max = arc_meta_used;
atomic_add_64(&arc_meta_used, -space);
ARCSTAT_INCR(arcstat_meta_used, -space);
ASSERT(arc_size >= space);
atomic_add_64(&arc_size, -space);
}
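With the ARC metadata counters now kept as kstats, the metadata limit
remains settable through the vfs.zfs.arc_meta_limit sysctl, validated by
the SYSCTL_PROC handler above.  A hedged userland sketch, assuming only
the standard sysctlbyname(3) interface:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t limit;
	size_t len = sizeof(limit);

	/* Read the current ARC metadata limit in bytes. */
	if (sysctlbyname("vfs.zfs.arc_meta_limit", &limit, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("arc_meta_limit: %ju bytes\n", (uintmax_t)limit);
	return (0);
}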

View File

@ -429,7 +429,7 @@ traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
break;
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
if (err == 0 && dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
}

View File

@ -633,12 +633,11 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
dn->dn_free_txg <= tx->tx_txg;
/*
* We will either remove a spill block when a file is being removed
* or we have been asked to remove it.
* Remove the spill block if we have been explicitly asked to
* remove it, or if the object is being removed.
*/
if (dn->dn_rm_spillblk[txgoff] ||
((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) && freeing_dnode)) {
if ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
kill_spill = B_TRUE;
dn->dn_rm_spillblk[txgoff] = 0;
}

View File

@ -644,16 +644,14 @@ dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
ASSERT(ds->ds_owner == tag && ds->ds_dbuf != NULL);
ASSERT3P(ds->ds_owner, ==, tag);
ASSERT(ds->ds_dbuf != NULL);
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
dsl_dataset_long_rele(ds, tag);
if (ds->ds_dbuf != NULL)
dsl_dataset_rele(ds, tag);
else
dsl_dataset_evict(NULL, ds);
dsl_dataset_rele(ds, tag);
}
boolean_t

View File

@ -167,8 +167,8 @@ dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname,
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
snapshot = (ds->ds_phys != NULL && dsl_dataset_is_snapshot(ds));
zapobj = (ds->ds_phys == NULL ? 0 : ds->ds_phys->ds_props_obj);
snapshot = dsl_dataset_is_snapshot(ds);
zapobj = ds->ds_phys->ds_props_obj;
if (zapobj != 0) {
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
@ -543,7 +543,7 @@ dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
isint = (dodefault(propname, 8, 1, &intval) == 0);
if (ds->ds_phys != NULL && dsl_dataset_is_snapshot(ds)) {
if (dsl_dataset_is_snapshot(ds)) {
ASSERT(version >= SPA_VERSION_SNAP_PROPS);
if (ds->ds_phys->ds_props_obj == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
@ -640,7 +640,7 @@ dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
if (isint) {
VERIFY0(dsl_prop_get_int_ds(ds, propname, &intval));
if (ds->ds_phys != NULL && dsl_dataset_is_snapshot(ds)) {
if (dsl_dataset_is_snapshot(ds)) {
dsl_prop_cb_record_t *cbr;
/*
* It's a snapshot; nothing can inherit this

View File

@ -414,12 +414,11 @@ dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
&scn->scn_phys, tx));
}
extern int zfs_vdev_async_write_active_min_dirty_percent;
static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
uint64_t elapsed_nanosecs;
unsigned int mintime;
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
@ -434,12 +433,28 @@ dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
if (zb && zb->zb_level != 0)
return (B_FALSE);
mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
/*
* We pause if:
* - we have scanned for the maximum time: an entire txg
* timeout (default 5 sec)
* or
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), or someone is explicitly waiting for this txg
* to complete.
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
*/
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
if (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
uint64_t elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > mintime &&
txg_sync_waiting(scn->scn_dp)) ||
(txg_sync_waiting(scn->scn_dp) ||
dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent)) ||
spa_shutting_down(scn->scn_dp->dp_spa)) {
if (zb) {
dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",

View File

@ -811,6 +811,7 @@ extern boolean_t spa_is_root(spa_t *spa);
extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern void zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp);
extern int spa_mode(spa_t *spa);
extern uint64_t zfs_strtonum(const char *str, char **nptr);

View File

@ -265,7 +265,7 @@ zio_buf_alloc(size_t size)
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
int flags = zio_exclude_metadata ? KM_NODEBUG : 0;
ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
if (zio_use_uma)
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
@ -284,7 +284,7 @@ zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
if (zio_use_uma)
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
@ -297,7 +297,7 @@ zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
if (zio_use_uma)
kmem_cache_free(zio_buf_cache[c], buf);
@ -310,7 +310,7 @@ zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
if (zio_use_uma)
kmem_cache_free(zio_data_buf_cache[c], buf);
@ -657,6 +657,86 @@ zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
return (zio_null(NULL, spa, NULL, done, private, flags));
}
void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
zfs_panic_recover("blkptr at %p DVA %u has invalid "
"VDEV %llu",
bp, i, (longlong_t)vdevid);
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
zfs_panic_recover("blkptr at %p DVA %u has invalid "
"VDEV %llu",
bp, i, (longlong_t)vdevid);
}
if (vd->vdev_ops == &vdev_hole_ops) {
zfs_panic_recover("blkptr at %p DVA %u has hole "
"VDEV %llu",
bp, i, (longlong_t)vdevid);
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (BP_IS_GANG(bp))
asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
if (offset + asize > vd->vdev_asize) {
zfs_panic_recover("blkptr at %p DVA %u has invalid "
"OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
void *data, uint64_t size, zio_done_func_t *done, void *private,
@ -664,6 +744,8 @@ zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
{
zio_t *zio;
zfs_blkptr_verify(spa, bp);
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
@ -2958,7 +3040,8 @@ zio_checksum_verify(zio_t *zio)
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, zio, zio->io_offset,
zio->io_size, NULL, &info);

View File

@ -3223,6 +3223,7 @@ libkern/strtoq.c standard
libkern/strtoul.c standard
libkern/strtouq.c standard
libkern/strvalid.c standard
libkern/timingsafe_bcmp.c standard
net/bpf.c standard
net/bpf_buffer.c optional bpf
net/bpf_jitter.c optional bpf_jitter
@ -3880,6 +3881,8 @@ opencrypto/cryptodev.c optional cryptodev
opencrypto/cryptodev_if.m optional crypto
opencrypto/cryptosoft.c optional crypto
opencrypto/cryptodeflate.c optional crypto
opencrypto/gmac.c optional crypto
opencrypto/gfmult.c optional crypto
opencrypto/rmd160.c optional crypto | ipsec
opencrypto/skipjack.c optional crypto
opencrypto/xform.c optional crypto

View File

@ -129,9 +129,14 @@ amd64/pci/pci_cfgreg.c optional pci
cddl/contrib/opensolaris/common/atomic/amd64/opensolaris_atomic.S optional zfs compile-with "${ZFS_S}"
crypto/aesni/aeskeys_amd64.S optional aesni
crypto/aesni/aesni.c optional aesni
aesni_ghash.o optional aesni \
dependency "$S/crypto/aesni/aesni_ghash.c" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \
no-implicit-rule \
clean "aesni_ghash.o"
aesni_wrap.o optional aesni \
dependency "$S/crypto/aesni/aesni_wrap.c" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -maes ${.IMPSRC}" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \
no-implicit-rule \
clean "aesni_wrap.o"
crypto/blowfish/bf_enc.c optional crypto | ipsec

View File

@ -116,9 +116,14 @@ bf_enc.o optional crypto | ipsec \
no-implicit-rule
crypto/aesni/aeskeys_i386.S optional aesni
crypto/aesni/aesni.c optional aesni
aesni_ghash.o optional aesni \
dependency "$S/crypto/aesni/aesni_ghash.c" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \
no-implicit-rule \
clean "aesni_ghash.o"
aesni_wrap.o optional aesni \
dependency "$S/crypto/aesni/aesni_wrap.c" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -maes ${.IMPSRC}" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \
no-implicit-rule \
clean "aesni_wrap.o"
crypto/des/arch/i386/des_enc.S optional crypto | ipsec | netsmb

View File

@ -1,8 +1,13 @@
/*-
* Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -39,8 +44,10 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/bus.h>
#include <sys/uio.h>
#include <sys/mbuf.h>
#include <crypto/aesni/aesni.h>
#include <cryptodev_if.h>
#include <opencrypto/gmac.h>
struct aesni_softc {
int32_t cid;
@ -56,7 +63,7 @@ static void aesni_freesession_locked(struct aesni_softc *sc,
static int aesni_cipher_setup(struct aesni_session *ses,
struct cryptoini *encini);
static int aesni_cipher_process(struct aesni_session *ses,
struct cryptodesc *enccrd, struct cryptop *crp);
struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
@ -79,12 +86,12 @@ aesni_probe(device_t dev)
return (EINVAL);
}
if ((cpu_feature & CPUID_SSE2) == 0) {
device_printf(dev, "No SSE2 support but AESNI!?!\n");
if ((cpu_feature2 & CPUID2_SSE41) == 0) {
device_printf(dev, "No SSE4.1 support.\n");
return (EINVAL);
}
device_set_desc_copy(dev, "AES-CBC,AES-XTS");
device_set_desc_copy(dev, "AES-CBC,AES-XTS,AES-GCM,AES-ICM");
return (0);
}
@ -105,6 +112,11 @@ aesni_attach(device_t dev)
rw_init(&sc->lock, "aesni_lock");
crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
return (0);
}
@ -144,8 +156,10 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
struct cryptoini *encini;
int error;
if (sidp == NULL || cri == NULL)
if (sidp == NULL || cri == NULL) {
CRYPTDEB("no sidp or cri");
return (EINVAL);
}
sc = device_get_softc(dev);
ses = NULL;
@ -153,17 +167,32 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
for (; cri != NULL; cri = cri->cri_next) {
switch (cri->cri_alg) {
case CRYPTO_AES_CBC:
case CRYPTO_AES_ICM:
case CRYPTO_AES_XTS:
if (encini != NULL)
case CRYPTO_AES_NIST_GCM_16:
if (encini != NULL) {
CRYPTDEB("encini already set");
return (EINVAL);
}
encini = cri;
break;
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
/*
* Nothing to do here; in the future we may cache some
* values for GHASH.
*/
break;
default:
CRYPTDEB("unhandled algorithm");
return (EINVAL);
}
}
if (encini == NULL)
if (encini == NULL) {
CRYPTDEB("no cipher");
return (EINVAL);
}
rw_wlock(&sc->lock);
/*
@ -195,6 +224,7 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
error = aesni_cipher_setup(ses, encini);
if (error != 0) {
CRYPTDEB("setup failed");
rw_wlock(&sc->lock);
aesni_freesession_locked(sc, ses);
rw_wunlock(&sc->lock);
@ -214,7 +244,7 @@ aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
sid = ses->id;
TAILQ_REMOVE(&sc->sessions, ses, next);
ctx = ses->fpu_ctx;
bzero(ses, sizeof(*ses));
*ses = (struct aesni_session){};
ses->id = sid;
ses->fpu_ctx = ctx;
TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
@ -248,11 +278,13 @@ aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
struct aesni_softc *sc = device_get_softc(dev);
struct aesni_session *ses = NULL;
struct cryptodesc *crd, *enccrd;
int error;
struct cryptodesc *crd, *enccrd, *authcrd;
int error, needauth;
error = 0;
enccrd = NULL;
authcrd = NULL;
needauth = 0;
/* Sanity check. */
if (crp == NULL)
@ -266,6 +298,7 @@ aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
switch (crd->crd_alg) {
case CRYPTO_AES_CBC:
case CRYPTO_AES_ICM:
case CRYPTO_AES_XTS:
if (enccrd != NULL) {
error = EINVAL;
@ -273,11 +306,41 @@ aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
}
enccrd = crd;
break;
case CRYPTO_AES_NIST_GCM_16:
if (enccrd != NULL) {
error = EINVAL;
goto out;
}
enccrd = crd;
needauth = 1;
break;
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
if (authcrd != NULL) {
error = EINVAL;
goto out;
}
authcrd = crd;
needauth = 1;
break;
default:
return (EINVAL);
error = EINVAL;
goto out;
}
}
if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
if (enccrd == NULL || (needauth && authcrd == NULL)) {
error = EINVAL;
goto out;
}
/* CBC & XTS can only handle full blocks for now */
if ((enccrd->crd_alg == CRYPTO_AES_CBC || enccrd->crd_alg ==
CRYPTO_AES_XTS) && (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
error = EINVAL;
goto out;
}
@ -293,7 +356,7 @@ aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
goto out;
}
error = aesni_cipher_process(ses, enccrd, crp);
error = aesni_cipher_process(ses, enccrd, authcrd, crp);
if (error != 0)
goto out;
@ -307,21 +370,26 @@ uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
int *allocated)
{
struct mbuf *m;
struct uio *uio;
struct iovec *iov;
uint8_t *addr;
if (crp->crp_flags & CRYPTO_F_IMBUF)
goto alloc;
else if (crp->crp_flags & CRYPTO_F_IOV) {
if (crp->crp_flags & CRYPTO_F_IMBUF) {
m = (struct mbuf *)crp->crp_buf;
if (m->m_next != NULL)
goto alloc;
addr = mtod(m, uint8_t *);
} else if (crp->crp_flags & CRYPTO_F_IOV) {
uio = (struct uio *)crp->crp_buf;
if (uio->uio_iovcnt != 1)
goto alloc;
iov = uio->uio_iov;
addr = (u_char *)iov->iov_base + enccrd->crd_skip;
addr = (uint8_t *)iov->iov_base;
} else
addr = (u_char *)crp->crp_buf;
addr = (uint8_t *)crp->crp_buf;
*allocated = 0;
addr += enccrd->crd_skip;
return (addr);
alloc:
@ -376,18 +444,40 @@ aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
return (error);
}
/*
* authcrd contains the associated data.
*/
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
struct cryptop *crp)
struct cryptodesc *authcrd, struct cryptop *crp)
{
uint8_t tag[GMAC_DIGEST_LEN];
struct thread *td;
uint8_t *buf;
int error, allocated;
uint8_t *buf, *authbuf;
int error, allocated, authallocated;
int ivlen, encflag;
encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
if ((enccrd->crd_alg == CRYPTO_AES_ICM ||
enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) &&
(enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
return (EINVAL);
buf = aesni_cipher_alloc(enccrd, crp, &allocated);
if (buf == NULL)
return (ENOMEM);
authbuf = NULL;
authallocated = 0;
if (authcrd != NULL) {
authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated);
if (authbuf == NULL) {
error = ENOMEM;
goto out1;
}
}
td = curthread;
error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
FPU_KERN_KTHR);
@ -401,42 +491,91 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
goto out;
}
if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
crypto_copyback(crp->crp_flags, crp->crp_buf,
enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
if (ses->algo == CRYPTO_AES_CBC) {
/* XXX - validate that enccrd and authcrd have/use same key? */
switch (enccrd->crd_alg) {
case CRYPTO_AES_CBC:
case CRYPTO_AES_ICM:
ivlen = AES_BLOCK_LEN;
break;
case CRYPTO_AES_XTS:
ivlen = 8;
break;
case CRYPTO_AES_NIST_GCM_16:
ivlen = 12;	/* should support arbitrarily large IVs */
break;
}
/* Setup ses->iv */
bzero(ses->iv, sizeof ses->iv);
if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
bcopy(enccrd->crd_iv, ses->iv, ivlen);
else if (encflag && ((enccrd->crd_flags & CRD_F_IV_PRESENT) != 0))
arc4rand(ses->iv, ivlen, 0);
else
crypto_copydata(crp->crp_flags, crp->crp_buf,
enccrd->crd_inject, ivlen, ses->iv);
if (authcrd != NULL && !encflag)
crypto_copydata(crp->crp_flags, crp->crp_buf,
authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
else
bzero(tag, sizeof tag);
/* Do work */
switch (ses->algo) {
case CRYPTO_AES_CBC:
if (encflag)
aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
enccrd->crd_len, buf, buf, ses->iv);
} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
else
aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
enccrd->crd_len, buf, ses->iv);
break;
case CRYPTO_AES_ICM:
/* encryption & decryption are the same */
aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
enccrd->crd_len, buf, buf, ses->iv);
break;
case CRYPTO_AES_XTS:
if (encflag)
aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
ses->xts_schedule, enccrd->crd_len, buf, buf,
ses->iv);
}
} else {
if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
else
crypto_copydata(crp->crp_flags, crp->crp_buf,
enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
if (ses->algo == CRYPTO_AES_CBC) {
aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
enccrd->crd_len, buf, ses->iv);
} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
ses->xts_schedule, enccrd->crd_len, buf, buf,
ses->iv);
break;
case CRYPTO_AES_NIST_GCM_16:
if (encflag)
AES_GCM_encrypt(buf, buf, authbuf, ses->iv, tag,
enccrd->crd_len, authcrd->crd_len, ivlen,
ses->enc_schedule, ses->rounds);
else {
if (!AES_GCM_decrypt(buf, buf, authbuf, ses->iv, tag,
enccrd->crd_len, authcrd->crd_len, ivlen,
ses->enc_schedule, ses->rounds))
error = EBADMSG;
}
break;
}
if (allocated)
crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
enccrd->crd_len, buf);
if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
crypto_copydata(crp->crp_flags, crp->crp_buf,
enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
AES_BLOCK_LEN, ses->iv);
/*
* OpenBSD doesn't copy this back. This primes the IV for the next
* chain. Why do we not do it for decrypt?
*/
if (encflag && enccrd->crd_alg == CRYPTO_AES_CBC)
bcopy(buf + enccrd->crd_len - AES_BLOCK_LEN, ses->iv, AES_BLOCK_LEN);
if (!error && authcrd != NULL) {
crypto_copyback(crp->crp_flags, crp->crp_buf,
authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
}
out:
fpu_kern_leave(td, ses->fpu_ctx);
out1:
@ -444,5 +583,7 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
bzero(buf, enccrd->crd_len);
free(buf, M_AESNI);
}
if (authallocated)
free(authbuf, M_AESNI);
return (error);
}

View File

@ -88,6 +88,9 @@ void aesni_encrypt_ecb(int rounds, const void *key_schedule /*__aligned(16)*/,
size_t len, const uint8_t *from, uint8_t *to);
void aesni_decrypt_ecb(int rounds, const void *key_schedule /*__aligned(16)*/,
size_t len, const uint8_t *from, uint8_t *to);
void aesni_encrypt_icm(int rounds, const void *key_schedule /*__aligned(16)*/,
size_t len, const uint8_t *from, uint8_t *to,
const uint8_t iv[AES_BLOCK_LEN]);
void aesni_encrypt_xts(int rounds, const void *data_schedule /*__aligned(16)*/,
const void *tweak_schedule /*__aligned(16)*/, size_t len,
@ -96,6 +99,16 @@ void aesni_decrypt_xts(int rounds, const void *data_schedule /*__aligned(16)*/,
const void *tweak_schedule /*__aligned(16)*/, size_t len,
const uint8_t *from, uint8_t *to, const uint8_t iv[AES_BLOCK_LEN]);
/* GCM & GHASH functions */
void AES_GCM_encrypt(const unsigned char *in, unsigned char *out,
const unsigned char *addt, const unsigned char *ivec,
unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes,
const unsigned char *key, int nr);
int AES_GCM_decrypt(const unsigned char *in, unsigned char *out,
const unsigned char *addt, const unsigned char *ivec,
unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes,
const unsigned char *key, int nr);
int aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
int keylen);
uint8_t *aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,

View File

@ -0,0 +1,803 @@
/*-
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by John-Mark Gurney under
* the sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*
* $FreeBSD$
*
*/
/*
* Figures 5, 8 and 12 are copied from the Intel white paper:
* Intel® Carry-Less Multiplication Instruction and its Usage for
* Computing the GCM Mode
*
* and as such are:
* Copyright © 2010 Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef _KERNEL
#include <crypto/aesni/aesni.h>
#else
#include <stdint.h>
#endif
#include <wmmintrin.h>
#include <emmintrin.h>
#include <smmintrin.h>
static inline int
m128icmp(__m128i a, __m128i b)
{
__m128i cmp;
cmp = _mm_cmpeq_epi32(a, b);
return _mm_movemask_epi8(cmp) == 0xffff;
}
#ifdef __i386__
static inline __m128i
_mm_insert_epi64(__m128i a, int64_t b, const int ndx)
{
if (!ndx) {
a = _mm_insert_epi32(a, b, 0);
a = _mm_insert_epi32(a, b >> 32, 1);
} else {
a = _mm_insert_epi32(a, b, 2);
a = _mm_insert_epi32(a, b >> 32, 3);
}
return a;
}
#endif
/* some code from carry-less-multiplication-instruction-in-gcm-mode-paper.pdf */
/* Figure 5. Code Sample - Performing Ghash Using Algorithms 1 and 5 (C) */
static void
gfmul(__m128i a, __m128i b, __m128i *res)
{
__m128i tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;
tmp3 = _mm_clmulepi64_si128(a, b, 0x00);
tmp4 = _mm_clmulepi64_si128(a, b, 0x10);
tmp5 = _mm_clmulepi64_si128(a, b, 0x01);
tmp6 = _mm_clmulepi64_si128(a, b, 0x11);
tmp4 = _mm_xor_si128(tmp4, tmp5);
tmp5 = _mm_slli_si128(tmp4, 8);
tmp4 = _mm_srli_si128(tmp4, 8);
tmp3 = _mm_xor_si128(tmp3, tmp5);
tmp6 = _mm_xor_si128(tmp6, tmp4);
tmp7 = _mm_srli_epi32(tmp3, 31);
tmp8 = _mm_srli_epi32(tmp6, 31);
tmp3 = _mm_slli_epi32(tmp3, 1);
tmp6 = _mm_slli_epi32(tmp6, 1);
tmp9 = _mm_srli_si128(tmp7, 12);
tmp8 = _mm_slli_si128(tmp8, 4);
tmp7 = _mm_slli_si128(tmp7, 4);
tmp3 = _mm_or_si128(tmp3, tmp7);
tmp6 = _mm_or_si128(tmp6, tmp8);
tmp6 = _mm_or_si128(tmp6, tmp9);
tmp7 = _mm_slli_epi32(tmp3, 31);
tmp8 = _mm_slli_epi32(tmp3, 30);
tmp9 = _mm_slli_epi32(tmp3, 25);
tmp7 = _mm_xor_si128(tmp7, tmp8);
tmp7 = _mm_xor_si128(tmp7, tmp9);
tmp8 = _mm_srli_si128(tmp7, 4);
tmp7 = _mm_slli_si128(tmp7, 12);
tmp3 = _mm_xor_si128(tmp3, tmp7);
tmp2 = _mm_srli_epi32(tmp3, 1);
tmp4 = _mm_srli_epi32(tmp3, 2);
tmp5 = _mm_srli_epi32(tmp3, 7);
tmp2 = _mm_xor_si128(tmp2, tmp4);
tmp2 = _mm_xor_si128(tmp2, tmp5);
tmp2 = _mm_xor_si128(tmp2, tmp8);
tmp3 = _mm_xor_si128(tmp3, tmp2);
tmp6 = _mm_xor_si128(tmp6, tmp3);
*res = tmp6;
}
/*
* Figure 8. Code Sample - Performing Ghash Using an Aggregated Reduction
* Method */
static void
reduce4(__m128i H1, __m128i H2, __m128i H3, __m128i H4,
__m128i X1, __m128i X2, __m128i X3, __m128i X4, __m128i *res)
{
/*algorithm by Krzysztof Jankowski, Pierre Laurent - Intel*/
__m128i H1_X1_lo, H1_X1_hi, H2_X2_lo, H2_X2_hi, H3_X3_lo,
H3_X3_hi, H4_X4_lo, H4_X4_hi, lo, hi;
__m128i tmp0, tmp1, tmp2, tmp3;
__m128i tmp4, tmp5, tmp6, tmp7;
__m128i tmp8, tmp9;
H1_X1_lo = _mm_clmulepi64_si128(H1, X1, 0x00);
H2_X2_lo = _mm_clmulepi64_si128(H2, X2, 0x00);
H3_X3_lo = _mm_clmulepi64_si128(H3, X3, 0x00);
H4_X4_lo = _mm_clmulepi64_si128(H4, X4, 0x00);
lo = _mm_xor_si128(H1_X1_lo, H2_X2_lo);
lo = _mm_xor_si128(lo, H3_X3_lo);
lo = _mm_xor_si128(lo, H4_X4_lo);
H1_X1_hi = _mm_clmulepi64_si128(H1, X1, 0x11);
H2_X2_hi = _mm_clmulepi64_si128(H2, X2, 0x11);
H3_X3_hi = _mm_clmulepi64_si128(H3, X3, 0x11);
H4_X4_hi = _mm_clmulepi64_si128(H4, X4, 0x11);
hi = _mm_xor_si128(H1_X1_hi, H2_X2_hi);
hi = _mm_xor_si128(hi, H3_X3_hi);
hi = _mm_xor_si128(hi, H4_X4_hi);
tmp0 = _mm_shuffle_epi32(H1, 78);
tmp4 = _mm_shuffle_epi32(X1, 78);
tmp0 = _mm_xor_si128(tmp0, H1);
tmp4 = _mm_xor_si128(tmp4, X1);
tmp1 = _mm_shuffle_epi32(H2, 78);
tmp5 = _mm_shuffle_epi32(X2, 78);
tmp1 = _mm_xor_si128(tmp1, H2);
tmp5 = _mm_xor_si128(tmp5, X2);
tmp2 = _mm_shuffle_epi32(H3, 78);
tmp6 = _mm_shuffle_epi32(X3, 78);
tmp2 = _mm_xor_si128(tmp2, H3);
tmp6 = _mm_xor_si128(tmp6, X3);
tmp3 = _mm_shuffle_epi32(H4, 78);
tmp7 = _mm_shuffle_epi32(X4, 78);
tmp3 = _mm_xor_si128(tmp3, H4);
tmp7 = _mm_xor_si128(tmp7, X4);
tmp0 = _mm_clmulepi64_si128(tmp0, tmp4, 0x00);
tmp1 = _mm_clmulepi64_si128(tmp1, tmp5, 0x00);
tmp2 = _mm_clmulepi64_si128(tmp2, tmp6, 0x00);
tmp3 = _mm_clmulepi64_si128(tmp3, tmp7, 0x00);
tmp0 = _mm_xor_si128(tmp0, lo);
tmp0 = _mm_xor_si128(tmp0, hi);
tmp0 = _mm_xor_si128(tmp1, tmp0);
tmp0 = _mm_xor_si128(tmp2, tmp0);
tmp0 = _mm_xor_si128(tmp3, tmp0);
tmp4 = _mm_slli_si128(tmp0, 8);
tmp0 = _mm_srli_si128(tmp0, 8);
lo = _mm_xor_si128(tmp4, lo);
hi = _mm_xor_si128(tmp0, hi);
tmp3 = lo;
tmp6 = hi;
tmp7 = _mm_srli_epi32(tmp3, 31);
tmp8 = _mm_srli_epi32(tmp6, 31);
tmp3 = _mm_slli_epi32(tmp3, 1);
tmp6 = _mm_slli_epi32(tmp6, 1);
tmp9 = _mm_srli_si128(tmp7, 12);
tmp8 = _mm_slli_si128(tmp8, 4);
tmp7 = _mm_slli_si128(tmp7, 4);
tmp3 = _mm_or_si128(tmp3, tmp7);
tmp6 = _mm_or_si128(tmp6, tmp8);
tmp6 = _mm_or_si128(tmp6, tmp9);
tmp7 = _mm_slli_epi32(tmp3, 31);
tmp8 = _mm_slli_epi32(tmp3, 30);
tmp9 = _mm_slli_epi32(tmp3, 25);
tmp7 = _mm_xor_si128(tmp7, tmp8);
tmp7 = _mm_xor_si128(tmp7, tmp9);
tmp8 = _mm_srli_si128(tmp7, 4);
tmp7 = _mm_slli_si128(tmp7, 12);
tmp3 = _mm_xor_si128(tmp3, tmp7);
tmp2 = _mm_srli_epi32(tmp3, 1);
tmp4 = _mm_srli_epi32(tmp3, 2);
tmp5 = _mm_srli_epi32(tmp3, 7);
tmp2 = _mm_xor_si128(tmp2, tmp4);
tmp2 = _mm_xor_si128(tmp2, tmp5);
tmp2 = _mm_xor_si128(tmp2, tmp8);
tmp3 = _mm_xor_si128(tmp3, tmp2);
tmp6 = _mm_xor_si128(tmp6, tmp3);
*res = tmp6;
}
/*
* Figure 12. AES-GCM: Processing Four Blocks in Parallel with Aggregated
* Reduction Every Four Blocks
*/
/*
* per NIST SP-800-38D, 5.2.1.1, len(p) <= 2^39-256 bits, i.e.
* (2^32 - 2) 16-byte blocks.
*/
void
AES_GCM_encrypt(const unsigned char *in, unsigned char *out,
const unsigned char *addt, const unsigned char *ivec,
unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes,
const unsigned char *key, int nr)
{
int i, j ,k;
__m128i tmp1, tmp2, tmp3, tmp4;
__m128i tmp5, tmp6, tmp7, tmp8;
__m128i H, H2, H3, H4, Y, T;
__m128i *KEY = (__m128i*)key;
__m128i ctr1, ctr2, ctr3, ctr4;
__m128i ctr5, ctr6, ctr7, ctr8;
__m128i last_block = _mm_setzero_si128();
__m128i ONE = _mm_set_epi32(0, 1, 0, 0);
__m128i EIGHT = _mm_set_epi32(0, 8, 0, 0);
__m128i BSWAP_EPI64 = _mm_set_epi8(8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,
7);
__m128i BSWAP_MASK = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,
15);
__m128i X = _mm_setzero_si128();
if (ibytes == 96/8) {
Y = _mm_loadu_si128((__m128i*)ivec);
Y = _mm_insert_epi32(Y, 0x1000000, 3);
/* Compute E[ZERO, KS] and E[Y0, KS] together */
tmp1 = _mm_xor_si128(X, KEY[0]);
tmp2 = _mm_xor_si128(Y, KEY[0]);
for (j=1; j < nr-1; j+=2) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[j]);
tmp1 = _mm_aesenc_si128(tmp1, KEY[j+1]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[j+1]);
}
tmp1 = _mm_aesenc_si128(tmp1, KEY[nr-1]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[nr-1]);
H = _mm_aesenclast_si128(tmp1, KEY[nr]);
T = _mm_aesenclast_si128(tmp2, KEY[nr]);
H = _mm_shuffle_epi8(H, BSWAP_MASK);
} else {
tmp1 = _mm_xor_si128(X, KEY[0]);
for (j=1; j <nr; j++)
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
H = _mm_aesenclast_si128(tmp1, KEY[nr]);
H = _mm_shuffle_epi8(H, BSWAP_MASK);
Y = _mm_setzero_si128();
for (i=0; i < ibytes/16; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)ivec)[i]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
Y = _mm_xor_si128(Y, tmp1);
gfmul(Y, H, &Y);
}
if (ibytes%16) {
for (j=0; j < ibytes%16; j++)
((unsigned char*)&last_block)[j] = ivec[i*16+j];
tmp1 = last_block;
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
Y = _mm_xor_si128(Y, tmp1);
gfmul(Y, H, &Y);
}
tmp1 = _mm_insert_epi64(tmp1, (uint64_t)ibytes*8, 0);
tmp1 = _mm_insert_epi64(tmp1, 0, 1);
Y = _mm_xor_si128(Y, tmp1);
gfmul(Y, H, &Y);
Y = _mm_shuffle_epi8(Y, BSWAP_MASK); /*Compute E(K, Y0)*/
tmp1 = _mm_xor_si128(Y, KEY[0]);
for (j=1; j < nr; j++)
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
T = _mm_aesenclast_si128(tmp1, KEY[nr]);
}
gfmul(H,H,&H2);
gfmul(H,H2,&H3);
gfmul(H,H3,&H4);
for (i=0; i<abytes/16/4; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)addt)[i*4]);
tmp2 = _mm_loadu_si128(&((__m128i*)addt)[i*4+1]);
tmp3 = _mm_loadu_si128(&((__m128i*)addt)[i*4+2]);
tmp4 = _mm_loadu_si128(&((__m128i*)addt)[i*4+3]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
tmp2 = _mm_shuffle_epi8(tmp2, BSWAP_MASK);
tmp3 = _mm_shuffle_epi8(tmp3, BSWAP_MASK);
tmp4 = _mm_shuffle_epi8(tmp4, BSWAP_MASK);
tmp1 = _mm_xor_si128(X, tmp1);
reduce4(H, H2, H3, H4, tmp4, tmp3, tmp2, tmp1, &X);
}
for (i=i*4; i<abytes/16; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)addt)[i]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X = _mm_xor_si128(X,tmp1);
gfmul(X, H, &X);
}
if (abytes%16) {
last_block = _mm_setzero_si128();
for (j=0; j<abytes%16; j++)
((unsigned char*)&last_block)[j] = addt[i*16+j];
tmp1 = last_block;
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X =_mm_xor_si128(X,tmp1);
gfmul(X,H,&X);
}
ctr1 = _mm_shuffle_epi8(Y, BSWAP_EPI64);
ctr1 = _mm_add_epi64(ctr1, ONE);
ctr2 = _mm_add_epi64(ctr1, ONE);
ctr3 = _mm_add_epi64(ctr2, ONE);
ctr4 = _mm_add_epi64(ctr3, ONE);
ctr5 = _mm_add_epi64(ctr4, ONE);
ctr6 = _mm_add_epi64(ctr5, ONE);
ctr7 = _mm_add_epi64(ctr6, ONE);
ctr8 = _mm_add_epi64(ctr7, ONE);
for (i=0; i<nbytes/16/8; i++) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
tmp2 = _mm_shuffle_epi8(ctr2, BSWAP_EPI64);
tmp3 = _mm_shuffle_epi8(ctr3, BSWAP_EPI64);
tmp4 = _mm_shuffle_epi8(ctr4, BSWAP_EPI64);
tmp5 = _mm_shuffle_epi8(ctr5, BSWAP_EPI64);
tmp6 = _mm_shuffle_epi8(ctr6, BSWAP_EPI64);
tmp7 = _mm_shuffle_epi8(ctr7, BSWAP_EPI64);
tmp8 = _mm_shuffle_epi8(ctr8, BSWAP_EPI64);
ctr1 = _mm_add_epi64(ctr1, EIGHT);
ctr2 = _mm_add_epi64(ctr2, EIGHT);
ctr3 = _mm_add_epi64(ctr3, EIGHT);
ctr4 = _mm_add_epi64(ctr4, EIGHT);
ctr5 = _mm_add_epi64(ctr5, EIGHT);
ctr6 = _mm_add_epi64(ctr6, EIGHT);
ctr7 = _mm_add_epi64(ctr7, EIGHT);
ctr8 = _mm_add_epi64(ctr8, EIGHT);
tmp1 =_mm_xor_si128(tmp1, KEY[0]);
tmp2 =_mm_xor_si128(tmp2, KEY[0]);
tmp3 =_mm_xor_si128(tmp3, KEY[0]);
tmp4 =_mm_xor_si128(tmp4, KEY[0]);
tmp5 =_mm_xor_si128(tmp5, KEY[0]);
tmp6 =_mm_xor_si128(tmp6, KEY[0]);
tmp7 =_mm_xor_si128(tmp7, KEY[0]);
tmp8 =_mm_xor_si128(tmp8, KEY[0]);
for (j=1; j<nr; j++) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[j]);
tmp3 = _mm_aesenc_si128(tmp3, KEY[j]);
tmp4 = _mm_aesenc_si128(tmp4, KEY[j]);
tmp5 = _mm_aesenc_si128(tmp5, KEY[j]);
tmp6 = _mm_aesenc_si128(tmp6, KEY[j]);
tmp7 = _mm_aesenc_si128(tmp7, KEY[j]);
tmp8 = _mm_aesenc_si128(tmp8, KEY[j]);
}
tmp1 =_mm_aesenclast_si128(tmp1, KEY[nr]);
tmp2 =_mm_aesenclast_si128(tmp2, KEY[nr]);
tmp3 =_mm_aesenclast_si128(tmp3, KEY[nr]);
tmp4 =_mm_aesenclast_si128(tmp4, KEY[nr]);
tmp5 =_mm_aesenclast_si128(tmp5, KEY[nr]);
tmp6 =_mm_aesenclast_si128(tmp6, KEY[nr]);
tmp7 =_mm_aesenclast_si128(tmp7, KEY[nr]);
tmp8 =_mm_aesenclast_si128(tmp8, KEY[nr]);
tmp1 = _mm_xor_si128(tmp1,
_mm_loadu_si128(&((__m128i*)in)[i*8+0]));
tmp2 = _mm_xor_si128(tmp2,
_mm_loadu_si128(&((__m128i*)in)[i*8+1]));
tmp3 = _mm_xor_si128(tmp3,
_mm_loadu_si128(&((__m128i*)in)[i*8+2]));
tmp4 = _mm_xor_si128(tmp4,
_mm_loadu_si128(&((__m128i*)in)[i*8+3]));
tmp5 = _mm_xor_si128(tmp5,
_mm_loadu_si128(&((__m128i*)in)[i*8+4]));
tmp6 = _mm_xor_si128(tmp6,
_mm_loadu_si128(&((__m128i*)in)[i*8+5]));
tmp7 = _mm_xor_si128(tmp7,
_mm_loadu_si128(&((__m128i*)in)[i*8+6]));
tmp8 = _mm_xor_si128(tmp8,
_mm_loadu_si128(&((__m128i*)in)[i*8+7]));
_mm_storeu_si128(&((__m128i*)out)[i*8+0], tmp1);
_mm_storeu_si128(&((__m128i*)out)[i*8+1], tmp2);
_mm_storeu_si128(&((__m128i*)out)[i*8+2], tmp3);
_mm_storeu_si128(&((__m128i*)out)[i*8+3], tmp4);
_mm_storeu_si128(&((__m128i*)out)[i*8+4], tmp5);
_mm_storeu_si128(&((__m128i*)out)[i*8+5], tmp6);
_mm_storeu_si128(&((__m128i*)out)[i*8+6], tmp7);
_mm_storeu_si128(&((__m128i*)out)[i*8+7], tmp8);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
tmp2 = _mm_shuffle_epi8(tmp2, BSWAP_MASK);
tmp3 = _mm_shuffle_epi8(tmp3, BSWAP_MASK);
tmp4 = _mm_shuffle_epi8(tmp4, BSWAP_MASK);
tmp5 = _mm_shuffle_epi8(tmp5, BSWAP_MASK);
tmp6 = _mm_shuffle_epi8(tmp6, BSWAP_MASK);
tmp7 = _mm_shuffle_epi8(tmp7, BSWAP_MASK);
tmp8 = _mm_shuffle_epi8(tmp8, BSWAP_MASK);
tmp1 = _mm_xor_si128(X, tmp1);
reduce4(H, H2, H3, H4, tmp4, tmp3, tmp2, tmp1, &X);
tmp5 = _mm_xor_si128(X, tmp5);
reduce4(H, H2, H3, H4, tmp8, tmp7, tmp6, tmp5, &X);
}
for (k=i*8; k<nbytes/16; k++) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
ctr1 = _mm_add_epi64(ctr1, ONE);
tmp1 = _mm_xor_si128(tmp1, KEY[0]);
for (j=1; j<nr-1; j+=2) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp1 = _mm_aesenc_si128(tmp1, KEY[j+1]);
}
tmp1 = _mm_aesenc_si128(tmp1, KEY[nr-1]);
tmp1 = _mm_aesenclast_si128(tmp1, KEY[nr]);
tmp1 = _mm_xor_si128(tmp1, _mm_loadu_si128(&((__m128i*)in)[k]));
_mm_storeu_si128(&((__m128i*)out)[k], tmp1);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X = _mm_xor_si128(X, tmp1);
gfmul(X,H,&X);
}
// if an incomplete block remains
if (nbytes%16) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
tmp1 = _mm_xor_si128(tmp1, KEY[0]);
for (j=1; j<nr-1; j+=2) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp1 = _mm_aesenc_si128(tmp1, KEY[j+1]);
}
tmp1 = _mm_aesenc_si128(tmp1, KEY[nr-1]);
tmp1 = _mm_aesenclast_si128(tmp1, KEY[nr]);
tmp1 = _mm_xor_si128(tmp1, _mm_loadu_si128(&((__m128i*)in)[k]));
last_block = tmp1;
for (j=0; j<nbytes%16; j++)
out[k*16+j] = ((unsigned char*)&last_block)[j];
for ((void)j; j<16; j++)
((unsigned char*)&last_block)[j] = 0;
tmp1 = last_block;
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X = _mm_xor_si128(X, tmp1);
gfmul(X, H, &X);
}
tmp1 = _mm_insert_epi64(tmp1, (uint64_t)nbytes*8, 0);
tmp1 = _mm_insert_epi64(tmp1, (uint64_t)abytes*8, 1);
X = _mm_xor_si128(X, tmp1);
gfmul(X,H,&X);
X = _mm_shuffle_epi8(X, BSWAP_MASK);
T = _mm_xor_si128(X, T);
_mm_storeu_si128((__m128i*)tag, T);
}
/* My modification of _encrypt to be _decrypt */
int
AES_GCM_decrypt(const unsigned char *in, unsigned char *out,
const unsigned char *addt, const unsigned char *ivec,
unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes,
const unsigned char *key, int nr)
{
int i, j ,k;
__m128i tmp1, tmp2, tmp3, tmp4;
__m128i tmp5, tmp6, tmp7, tmp8;
__m128i H, H2, H3, H4, Y, T;
__m128i *KEY = (__m128i*)key;
__m128i ctr1, ctr2, ctr3, ctr4;
__m128i ctr5, ctr6, ctr7, ctr8;
__m128i last_block = _mm_setzero_si128();
__m128i ONE = _mm_set_epi32(0, 1, 0, 0);
__m128i EIGHT = _mm_set_epi32(0, 8, 0, 0);
__m128i BSWAP_EPI64 = _mm_set_epi8(8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,
7);
__m128i BSWAP_MASK = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,
15);
__m128i X = _mm_setzero_si128();
if (ibytes == 96/8) {
Y = _mm_loadu_si128((__m128i*)ivec);
Y = _mm_insert_epi32(Y, 0x1000000, 3);
/* Compute E[ZERO, KS] and E[Y0, KS] together */
tmp1 = _mm_xor_si128(X, KEY[0]);
tmp2 = _mm_xor_si128(Y, KEY[0]);
for (j=1; j < nr-1; j+=2) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[j]);
tmp1 = _mm_aesenc_si128(tmp1, KEY[j+1]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[j+1]);
}
tmp1 = _mm_aesenc_si128(tmp1, KEY[nr-1]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[nr-1]);
H = _mm_aesenclast_si128(tmp1, KEY[nr]);
T = _mm_aesenclast_si128(tmp2, KEY[nr]);
H = _mm_shuffle_epi8(H, BSWAP_MASK);
} else {
tmp1 = _mm_xor_si128(X, KEY[0]);
for (j=1; j <nr; j++)
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
H = _mm_aesenclast_si128(tmp1, KEY[nr]);
H = _mm_shuffle_epi8(H, BSWAP_MASK);
Y = _mm_setzero_si128();
for (i=0; i < ibytes/16; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)ivec)[i]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
Y = _mm_xor_si128(Y, tmp1);
gfmul(Y, H, &Y);
}
if (ibytes%16) {
for (j=0; j < ibytes%16; j++)
((unsigned char*)&last_block)[j] = ivec[i*16+j];
tmp1 = last_block;
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
Y = _mm_xor_si128(Y, tmp1);
gfmul(Y, H, &Y);
}
tmp1 = _mm_insert_epi64(tmp1, (uint64_t)ibytes*8, 0);
tmp1 = _mm_insert_epi64(tmp1, 0, 1);
Y = _mm_xor_si128(Y, tmp1);
gfmul(Y, H, &Y);
Y = _mm_shuffle_epi8(Y, BSWAP_MASK); /*Compute E(K, Y0)*/
tmp1 = _mm_xor_si128(Y, KEY[0]);
for (j=1; j < nr; j++)
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
T = _mm_aesenclast_si128(tmp1, KEY[nr]);
}
gfmul(H,H,&H2);
gfmul(H,H2,&H3);
gfmul(H,H3,&H4);
for (i=0; i<abytes/16/4; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)addt)[i*4]);
tmp2 = _mm_loadu_si128(&((__m128i*)addt)[i*4+1]);
tmp3 = _mm_loadu_si128(&((__m128i*)addt)[i*4+2]);
tmp4 = _mm_loadu_si128(&((__m128i*)addt)[i*4+3]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
tmp2 = _mm_shuffle_epi8(tmp2, BSWAP_MASK);
tmp3 = _mm_shuffle_epi8(tmp3, BSWAP_MASK);
tmp4 = _mm_shuffle_epi8(tmp4, BSWAP_MASK);
tmp1 = _mm_xor_si128(X, tmp1);
reduce4(H, H2, H3, H4, tmp4, tmp3, tmp2, tmp1, &X);
}
for (i=i*4; i<abytes/16; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)addt)[i]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X = _mm_xor_si128(X,tmp1);
gfmul(X, H, &X);
}
if (abytes%16) {
last_block = _mm_setzero_si128();
for (j=0; j<abytes%16; j++)
((unsigned char*)&last_block)[j] = addt[i*16+j];
tmp1 = last_block;
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X =_mm_xor_si128(X,tmp1);
gfmul(X,H,&X);
}
/* This is where we validate the cipher text before decrypt */
for (i = 0; i<nbytes/16/4; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)in)[i*4]);
tmp2 = _mm_loadu_si128(&((__m128i*)in)[i*4+1]);
tmp3 = _mm_loadu_si128(&((__m128i*)in)[i*4+2]);
tmp4 = _mm_loadu_si128(&((__m128i*)in)[i*4+3]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
tmp2 = _mm_shuffle_epi8(tmp2, BSWAP_MASK);
tmp3 = _mm_shuffle_epi8(tmp3, BSWAP_MASK);
tmp4 = _mm_shuffle_epi8(tmp4, BSWAP_MASK);
tmp1 = _mm_xor_si128(X, tmp1);
reduce4(H, H2, H3, H4, tmp4, tmp3, tmp2, tmp1, &X);
}
for (i = i*4; i<nbytes/16; i++) {
tmp1 = _mm_loadu_si128(&((__m128i*)in)[i]);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X = _mm_xor_si128(X, tmp1);
gfmul(X,H,&X);
}
if (nbytes%16) {
last_block = _mm_setzero_si128();
for (j=0; j<nbytes%16; j++)
((unsigned char*)&last_block)[j] = in[i*16+j];
tmp1 = last_block;
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
X = _mm_xor_si128(X, tmp1);
gfmul(X, H, &X);
}
tmp1 = _mm_insert_epi64(tmp1, (uint64_t)nbytes*8, 0);
tmp1 = _mm_insert_epi64(tmp1, (uint64_t)abytes*8, 1);
X = _mm_xor_si128(X, tmp1);
gfmul(X,H,&X);
X = _mm_shuffle_epi8(X, BSWAP_MASK);
T = _mm_xor_si128(X, T);
if (!m128icmp(T, _mm_loadu_si128((__m128i*)tag)))
return 0; // authentication failed
ctr1 = _mm_shuffle_epi8(Y, BSWAP_EPI64);
ctr1 = _mm_add_epi64(ctr1, ONE);
ctr2 = _mm_add_epi64(ctr1, ONE);
ctr3 = _mm_add_epi64(ctr2, ONE);
ctr4 = _mm_add_epi64(ctr3, ONE);
ctr5 = _mm_add_epi64(ctr4, ONE);
ctr6 = _mm_add_epi64(ctr5, ONE);
ctr7 = _mm_add_epi64(ctr6, ONE);
ctr8 = _mm_add_epi64(ctr7, ONE);
for (i=0; i<nbytes/16/8; i++) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
tmp2 = _mm_shuffle_epi8(ctr2, BSWAP_EPI64);
tmp3 = _mm_shuffle_epi8(ctr3, BSWAP_EPI64);
tmp4 = _mm_shuffle_epi8(ctr4, BSWAP_EPI64);
tmp5 = _mm_shuffle_epi8(ctr5, BSWAP_EPI64);
tmp6 = _mm_shuffle_epi8(ctr6, BSWAP_EPI64);
tmp7 = _mm_shuffle_epi8(ctr7, BSWAP_EPI64);
tmp8 = _mm_shuffle_epi8(ctr8, BSWAP_EPI64);
ctr1 = _mm_add_epi64(ctr1, EIGHT);
ctr2 = _mm_add_epi64(ctr2, EIGHT);
ctr3 = _mm_add_epi64(ctr3, EIGHT);
ctr4 = _mm_add_epi64(ctr4, EIGHT);
ctr5 = _mm_add_epi64(ctr5, EIGHT);
ctr6 = _mm_add_epi64(ctr6, EIGHT);
ctr7 = _mm_add_epi64(ctr7, EIGHT);
ctr8 = _mm_add_epi64(ctr8, EIGHT);
tmp1 =_mm_xor_si128(tmp1, KEY[0]);
tmp2 =_mm_xor_si128(tmp2, KEY[0]);
tmp3 =_mm_xor_si128(tmp3, KEY[0]);
tmp4 =_mm_xor_si128(tmp4, KEY[0]);
tmp5 =_mm_xor_si128(tmp5, KEY[0]);
tmp6 =_mm_xor_si128(tmp6, KEY[0]);
tmp7 =_mm_xor_si128(tmp7, KEY[0]);
tmp8 =_mm_xor_si128(tmp8, KEY[0]);
for (j=1; j<nr; j++) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp2 = _mm_aesenc_si128(tmp2, KEY[j]);
tmp3 = _mm_aesenc_si128(tmp3, KEY[j]);
tmp4 = _mm_aesenc_si128(tmp4, KEY[j]);
tmp5 = _mm_aesenc_si128(tmp5, KEY[j]);
tmp6 = _mm_aesenc_si128(tmp6, KEY[j]);
tmp7 = _mm_aesenc_si128(tmp7, KEY[j]);
tmp8 = _mm_aesenc_si128(tmp8, KEY[j]);
}
tmp1 =_mm_aesenclast_si128(tmp1, KEY[nr]);
tmp2 =_mm_aesenclast_si128(tmp2, KEY[nr]);
tmp3 =_mm_aesenclast_si128(tmp3, KEY[nr]);
tmp4 =_mm_aesenclast_si128(tmp4, KEY[nr]);
tmp5 =_mm_aesenclast_si128(tmp5, KEY[nr]);
tmp6 =_mm_aesenclast_si128(tmp6, KEY[nr]);
tmp7 =_mm_aesenclast_si128(tmp7, KEY[nr]);
tmp8 =_mm_aesenclast_si128(tmp8, KEY[nr]);
tmp1 = _mm_xor_si128(tmp1,
_mm_loadu_si128(&((__m128i*)in)[i*8+0]));
tmp2 = _mm_xor_si128(tmp2,
_mm_loadu_si128(&((__m128i*)in)[i*8+1]));
tmp3 = _mm_xor_si128(tmp3,
_mm_loadu_si128(&((__m128i*)in)[i*8+2]));
tmp4 = _mm_xor_si128(tmp4,
_mm_loadu_si128(&((__m128i*)in)[i*8+3]));
tmp5 = _mm_xor_si128(tmp5,
_mm_loadu_si128(&((__m128i*)in)[i*8+4]));
tmp6 = _mm_xor_si128(tmp6,
_mm_loadu_si128(&((__m128i*)in)[i*8+5]));
tmp7 = _mm_xor_si128(tmp7,
_mm_loadu_si128(&((__m128i*)in)[i*8+6]));
tmp8 = _mm_xor_si128(tmp8,
_mm_loadu_si128(&((__m128i*)in)[i*8+7]));
_mm_storeu_si128(&((__m128i*)out)[i*8+0], tmp1);
_mm_storeu_si128(&((__m128i*)out)[i*8+1], tmp2);
_mm_storeu_si128(&((__m128i*)out)[i*8+2], tmp3);
_mm_storeu_si128(&((__m128i*)out)[i*8+3], tmp4);
_mm_storeu_si128(&((__m128i*)out)[i*8+4], tmp5);
_mm_storeu_si128(&((__m128i*)out)[i*8+5], tmp6);
_mm_storeu_si128(&((__m128i*)out)[i*8+6], tmp7);
_mm_storeu_si128(&((__m128i*)out)[i*8+7], tmp8);
tmp1 = _mm_shuffle_epi8(tmp1, BSWAP_MASK);
tmp2 = _mm_shuffle_epi8(tmp2, BSWAP_MASK);
tmp3 = _mm_shuffle_epi8(tmp3, BSWAP_MASK);
tmp4 = _mm_shuffle_epi8(tmp4, BSWAP_MASK);
tmp5 = _mm_shuffle_epi8(tmp5, BSWAP_MASK);
tmp6 = _mm_shuffle_epi8(tmp6, BSWAP_MASK);
tmp7 = _mm_shuffle_epi8(tmp7, BSWAP_MASK);
tmp8 = _mm_shuffle_epi8(tmp8, BSWAP_MASK);
}
for (k=i*8; k<nbytes/16; k++) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
ctr1 = _mm_add_epi64(ctr1, ONE);
tmp1 = _mm_xor_si128(tmp1, KEY[0]);
for (j=1; j<nr-1; j+=2) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp1 = _mm_aesenc_si128(tmp1, KEY[j+1]);
}
tmp1 = _mm_aesenc_si128(tmp1, KEY[nr-1]);
tmp1 = _mm_aesenclast_si128(tmp1, KEY[nr]);
tmp1 = _mm_xor_si128(tmp1, _mm_loadu_si128(&((__m128i*)in)[k]));
_mm_storeu_si128(&((__m128i*)out)[k], tmp1);
}
// if an incomplete block remains
if (nbytes%16) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
tmp1 = _mm_xor_si128(tmp1, KEY[0]);
for (j=1; j<nr-1; j+=2) {
tmp1 = _mm_aesenc_si128(tmp1, KEY[j]);
tmp1 = _mm_aesenc_si128(tmp1, KEY[j+1]);
}
tmp1 = _mm_aesenc_si128(tmp1, KEY[nr-1]);
tmp1 = _mm_aesenclast_si128(tmp1, KEY[nr]);
tmp1 = _mm_xor_si128(tmp1, _mm_loadu_si128(&((__m128i*)in)[k]));
last_block = tmp1;
for (j=0; j<nbytes%16; j++)
out[k*16+j] = ((unsigned char*)&last_block)[j];
}
return 1; // success
}

View File

@ -3,8 +3,13 @@
* Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
* Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
* Copyright 2012-2013 John-Mark Gurney <jmg@FreeBSD.org>
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -29,15 +34,18 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <crypto/aesni/aesni.h>
#include <opencrypto/gmac.h>
#include "aesencdec.h"
#include <smmintrin.h>
MALLOC_DECLARE(M_AESNI);
@ -176,6 +184,104 @@ aesni_decrypt_ecb(int rounds, const void *key_schedule, size_t len,
}
}
/*
* Mixed-endian increment: the counter's low 64 bits are stored in the
* high word, to be compatible with _icm's BSWAP.
*/
static inline __m128i
nextc(__m128i x)
{
const __m128i ONE = _mm_setr_epi32(0, 0, 1, 0);
const __m128i ZERO = _mm_setzero_si128();
x = _mm_add_epi64(x, ONE);
__m128i t = _mm_cmpeq_epi64(x, ZERO);
t = _mm_unpackhi_epi64(t, ZERO);
x = _mm_sub_epi64(x, t);
return x;
}
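To make the carry concrete, here is a minimal standalone harness (hypothetical, not part of this commit; compile with -msse4.1) showing that when the counter's low 64 bits, kept in the high lane, wrap to zero, the increment propagates into the other lane:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <smmintrin.h>	/* SSE4.1 for _mm_cmpeq_epi64 */

static inline __m128i
nextc(__m128i x)
{
	const __m128i ONE = _mm_setr_epi32(0, 0, 1, 0);
	const __m128i ZERO = _mm_setzero_si128();
	__m128i t;

	x = _mm_add_epi64(x, ONE);		/* bump the high 64-bit lane */
	t = _mm_cmpeq_epi64(x, ZERO);		/* all-ones in any lane that wrapped */
	t = _mm_unpackhi_epi64(t, ZERO);	/* move the high-lane result down */
	x = _mm_sub_epi64(x, t);		/* subtracting -1 adds the carry */
	return (x);
}

int
main(void)
{
	/* high lane (the counter's low 64 bits) at all-ones forces a carry */
	__m128i x = _mm_set_epi64x(~0LL, 5);
	uint64_t w[2];

	x = nextc(x);
	memcpy(w, &x, sizeof(w));
	printf("%llx %llx\n", (unsigned long long)w[0],
	    (unsigned long long)w[1]);		/* prints "6 0" */
	return (0);
}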
void
aesni_encrypt_icm(int rounds, const void *key_schedule, size_t len,
const uint8_t *from, uint8_t *to, const uint8_t iv[AES_BLOCK_LEN])
{
__m128i tot;
__m128i tmp1, tmp2, tmp3, tmp4;
__m128i tmp5, tmp6, tmp7, tmp8;
__m128i ctr1, ctr2, ctr3, ctr4;
__m128i ctr5, ctr6, ctr7, ctr8;
__m128i BSWAP_EPI64;
__m128i tout[8];
struct blocks8 *top;
const struct blocks8 *blks;
size_t i, cnt;
BSWAP_EPI64 = _mm_set_epi8(8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7);
ctr1 = _mm_loadu_si128((__m128i*)iv);
ctr1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
cnt = len / AES_BLOCK_LEN / 8;
for (i = 0; i < cnt; i++) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
ctr2 = nextc(ctr1);
tmp2 = _mm_shuffle_epi8(ctr2, BSWAP_EPI64);
ctr3 = nextc(ctr2);
tmp3 = _mm_shuffle_epi8(ctr3, BSWAP_EPI64);
ctr4 = nextc(ctr3);
tmp4 = _mm_shuffle_epi8(ctr4, BSWAP_EPI64);
ctr5 = nextc(ctr4);
tmp5 = _mm_shuffle_epi8(ctr5, BSWAP_EPI64);
ctr6 = nextc(ctr5);
tmp6 = _mm_shuffle_epi8(ctr6, BSWAP_EPI64);
ctr7 = nextc(ctr6);
tmp7 = _mm_shuffle_epi8(ctr7, BSWAP_EPI64);
ctr8 = nextc(ctr7);
tmp8 = _mm_shuffle_epi8(ctr8, BSWAP_EPI64);
ctr1 = nextc(ctr8);
blks = (const struct blocks8 *)from;
top = (struct blocks8 *)to;
aesni_enc8(rounds - 1, key_schedule, tmp1, tmp2, tmp3, tmp4,
tmp5, tmp6, tmp7, tmp8, tout);
top->blk[0] = blks->blk[0] ^ tout[0];
top->blk[1] = blks->blk[1] ^ tout[1];
top->blk[2] = blks->blk[2] ^ tout[2];
top->blk[3] = blks->blk[3] ^ tout[3];
top->blk[4] = blks->blk[4] ^ tout[4];
top->blk[5] = blks->blk[5] ^ tout[5];
top->blk[6] = blks->blk[6] ^ tout[6];
top->blk[7] = blks->blk[7] ^ tout[7];
from += AES_BLOCK_LEN * 8;
to += AES_BLOCK_LEN * 8;
}
i *= 8;
cnt = len / AES_BLOCK_LEN;
for (; i < cnt; i++) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
ctr1 = nextc(ctr1);
tot = aesni_enc(rounds - 1, key_schedule, tmp1);
tot = tot ^ _mm_loadu_si128((const __m128i *)from);
_mm_storeu_si128((__m128i *)to, tot);
from += AES_BLOCK_LEN;
to += AES_BLOCK_LEN;
}
/* handle any remaining partial block */
if (len % AES_BLOCK_LEN != 0) {
tmp1 = _mm_shuffle_epi8(ctr1, BSWAP_EPI64);
tot = aesni_enc(rounds - 1, key_schedule, tmp1);
tot = tot ^ _mm_loadu_si128((const __m128i *)from);
memcpy(to, &tot, len % AES_BLOCK_LEN);
}
}
#define AES_XTS_BLOCKSIZE 16
#define AES_XTS_IVSIZE 8
#define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */
@ -333,8 +439,15 @@ int
aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
int keylen)
{
int decsched;
decsched = 1;
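/*
* ICM and GCM run the AES engine only in the forward direction for
* both encryption and decryption (CTR-style keystream), so those
* modes need no decryption key schedule.
*/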
switch (ses->algo) {
case CRYPTO_AES_ICM:
case CRYPTO_AES_NIST_GCM_16:
decsched = 0;
/* FALLTHROUGH */
case CRYPTO_AES_CBC:
switch (keylen) {
case 128:
@ -347,6 +460,7 @@ aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
ses->rounds = AES256_ROUNDS;
break;
default:
CRYPTDEB("invalid CBC/ICM/GCM key length");
return (EINVAL);
}
break;
@ -359,6 +473,7 @@ aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
ses->rounds = AES256_ROUNDS;
break;
default:
CRYPTDEB("invalid XTS key length");
return (EINVAL);
}
break;
@ -367,13 +482,13 @@ aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
}
aesni_set_enckey(key, ses->enc_schedule, ses->rounds);
aesni_set_deckey(ses->enc_schedule, ses->dec_schedule, ses->rounds);
if (ses->algo == CRYPTO_AES_CBC)
arc4rand(ses->iv, sizeof(ses->iv), 0);
else /* if (ses->algo == CRYPTO_AES_XTS) */ {
if (decsched)
aesni_set_deckey(ses->enc_schedule, ses->dec_schedule,
ses->rounds);
if (ses->algo == CRYPTO_AES_XTS)
aesni_set_enckey(key + keylen / 16, ses->xts_schedule,
ses->rounds);
}
return (0);
}

View File

@ -75,7 +75,7 @@ struct padlock_sha_ctx {
CTASSERT(sizeof(struct padlock_sha_ctx) <= sizeof(union authctx));
static void padlock_sha_init(struct padlock_sha_ctx *ctx);
static int padlock_sha_update(struct padlock_sha_ctx *ctx, uint8_t *buf,
static int padlock_sha_update(struct padlock_sha_ctx *ctx, const uint8_t *buf,
uint16_t bufsize);
static void padlock_sha1_final(uint8_t *hash, struct padlock_sha_ctx *ctx);
static void padlock_sha256_final(uint8_t *hash, struct padlock_sha_ctx *ctx);
@ -83,16 +83,16 @@ static void padlock_sha256_final(uint8_t *hash, struct padlock_sha_ctx *ctx);
static struct auth_hash padlock_hmac_sha1 = {
CRYPTO_SHA1_HMAC, "HMAC-SHA1",
20, SHA1_HASH_LEN, SHA1_HMAC_BLOCK_LEN, sizeof(struct padlock_sha_ctx),
(void (*)(void *))padlock_sha_init,
(int (*)(void *, uint8_t *, uint16_t))padlock_sha_update,
(void (*)(void *))padlock_sha_init, NULL, NULL,
(int (*)(void *, const uint8_t *, uint16_t))padlock_sha_update,
(void (*)(uint8_t *, void *))padlock_sha1_final
};
static struct auth_hash padlock_hmac_sha256 = {
CRYPTO_SHA2_256_HMAC, "HMAC-SHA2-256",
32, SHA2_256_HASH_LEN, SHA2_256_HMAC_BLOCK_LEN, sizeof(struct padlock_sha_ctx),
(void (*)(void *))padlock_sha_init,
(int (*)(void *, uint8_t *, uint16_t))padlock_sha_update,
(void (*)(void *))padlock_sha_init, NULL, NULL,
(int (*)(void *, const uint8_t *, uint16_t))padlock_sha_update,
(void (*)(uint8_t *, void *))padlock_sha256_final
};
@ -167,7 +167,7 @@ padlock_sha_init(struct padlock_sha_ctx *ctx)
}
static int
padlock_sha_update(struct padlock_sha_ctx *ctx, uint8_t *buf, uint16_t bufsize)
padlock_sha_update(struct padlock_sha_ctx *ctx, const uint8_t *buf, uint16_t bufsize)
{
if (ctx->psc_size - ctx->psc_offset < bufsize) {

View File

@ -234,12 +234,22 @@ platform_setup_intr(device_t dev, device_t mmio_dev,
return (0);
}
static int
platform_poll(device_t dev)
{
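/*
* Presumably needed because DMA here is not cache-coherent: write
* back and invalidate the data cache so the CPU observes ring
* updates made by the device.
*/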
mips_dcache_wbinv_all();
return (0);
}
static device_method_t virtio_mmio_platform_methods[] = {
DEVMETHOD(device_probe, virtio_mmio_platform_probe),
DEVMETHOD(device_attach, virtio_mmio_platform_attach),
/* virtio_mmio_if.h */
DEVMETHOD(virtio_mmio_note, platform_note),
DEVMETHOD(virtio_mmio_poll, platform_poll),
DEVMETHOD(virtio_mmio_setup_intr, platform_setup_intr),
DEVMETHOD_END
};

View File

@ -57,6 +57,7 @@
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"
#define KTR_CXGBE KTR_SPARE3
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

View File

@ -31,7 +31,6 @@
#ifndef __T4_TOM_H__
#define __T4_TOM_H__
#define KTR_CXGBE KTR_SPARE3
#define LISTEN_HASH_SIZE 32
/*

View File

@ -114,6 +114,7 @@ static int vtmmio_alloc_virtqueues(device_t, int, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t);
@ -182,6 +183,7 @@ static device_method_t vtmmio_methods[] = {
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
@ -550,6 +552,17 @@ vtmmio_stop(device_t dev)
vtmmio_reset(device_get_softc(dev));
}
static void
vtmmio_poll(device_t dev)
{
struct vtmmio_softc *sc;
sc = device_get_softc(dev);
if (sc->platform != NULL)
VIRTIO_MMIO_POLL(sc->platform);
}
static int
vtmmio_reinit(device_t dev, uint64_t features)
{

View File

@ -66,6 +66,13 @@ METHOD int note {
int val;
} DEFAULT virtio_mmio_note;
#
# Inform backend we are going to poll virtqueue.
#
METHOD int poll {
device_t dev;
};
#
# Setup backend-specific interrupts.
#

View File

@ -87,3 +87,8 @@ METHOD void write_device_config {
void *src;
int len;
};
METHOD void poll {
device_t dev;
};

View File

@ -567,8 +567,11 @@ virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
void *cookie;
while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
VIRTIO_BUS_POLL(vq->vq_dev);
while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
cpu_spinwait();
VIRTIO_BUS_POLL(vq->vq_dev);
}
return (cookie);
}

View File

@ -101,7 +101,7 @@ g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize,
crp->crp_opaque = NULL;
crp->crp_callback = g_eli_crypto_done;
crp->crp_buf = (void *)data;
crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
crp->crp_flags = CRYPTO_F_CBIFSYNC;
crp->crp_desc = crd;
error = crypto_dispatch(crp);

View File

@ -479,7 +479,7 @@ g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp)
crp->crp_opaque = (void *)bp;
crp->crp_buf = (void *)data;
data += encr_secsize;
crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
crp->crp_flags = CRYPTO_F_CBIFSYNC;
if (g_eli_batch)
crp->crp_flags |= CRYPTO_F_BATCH;
if (bp->bio_cmd == BIO_WRITE) {

View File

@ -286,7 +286,7 @@ g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp)
crp->crp_callback = g_eli_crypto_write_done;
else /* if (bp->bio_cmd == BIO_READ) */
crp->crp_callback = g_eli_crypto_read_done;
crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
crp->crp_flags = CRYPTO_F_CBIFSYNC;
if (g_eli_batch)
crp->crp_flags |= CRYPTO_F_BATCH;
crp->crp_desc = crd;

View File

@ -289,7 +289,7 @@ kern_execve(td, args, mac_p)
args->endp - args->begin_envv);
if (p->p_flag & P_HADTHREADS) {
PROC_LOCK(p);
if (thread_single(SINGLE_BOUNDARY)) {
if (thread_single(p, SINGLE_BOUNDARY)) {
PROC_UNLOCK(p);
exec_free_args(args);
return (ERESTART); /* Try again later. */
@ -308,9 +308,9 @@ kern_execve(td, args, mac_p)
* force other threads to suicide.
*/
if (error == 0)
thread_single(SINGLE_EXIT);
thread_single(p, SINGLE_EXIT);
else
thread_single_end();
thread_single_end(p, SINGLE_BOUNDARY);
PROC_UNLOCK(p);
}
if ((td->td_pflags & TDP_EXECVMSPC) != 0) {

View File

@ -207,7 +207,7 @@ exit1(struct thread *td, int rv)
* re-check all suspension request, the thread should
* either be suspended there or exit.
*/
if (!thread_single(SINGLE_EXIT))
if (!thread_single(p, SINGLE_EXIT))
/*
* All other activity in this process is now
* stopped. Threading support has been turned

View File

@ -317,7 +317,7 @@ fork_norfproc(struct thread *td, int flags)
if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
(flags & (RFCFDG | RFFDG))) {
PROC_LOCK(p1);
if (thread_single(SINGLE_BOUNDARY)) {
if (thread_single(p1, SINGLE_BOUNDARY)) {
PROC_UNLOCK(p1);
return (ERESTART);
}
@ -348,7 +348,7 @@ fork_norfproc(struct thread *td, int flags)
if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
(flags & (RFCFDG | RFFDG))) {
PROC_LOCK(p1);
thread_single_end();
thread_single_end(p1, SINGLE_BOUNDARY);
PROC_UNLOCK(p1);
}
return (error);
@ -384,6 +384,7 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
p2->p_pid = trypid;
AUDIT_ARG_PID(p2->p_pid);
LIST_INSERT_HEAD(&allproc, p2, p_list);
allproc_gen++;
LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
tidhash_add(td2);
PROC_LOCK(p2);

View File

@ -2896,3 +2896,141 @@ static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW |
static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD |
CTLFLAG_MPSAFE, sysctl_kern_proc_sigtramp,
"Process signal trampoline location");
int allproc_gen;
void
stop_all_proc(void)
{
struct proc *cp, *p;
int r, gen;
bool restart, seen_stopped, seen_exiting, stopped_some;
cp = curproc;
/*
* stop_all_proc() assumes that all processes which have
* usermode threads must be stopped, except the current
* process, for obvious reasons.  Since other threads in
* the process establishing the global stop could unstop
* something, disallow calls from multithreaded processes
* as a precaution.  The service must not be user-callable
* anyway.
*/
KASSERT((cp->p_flag & P_HADTHREADS) == 0 ||
(cp->p_flag & P_KTHREAD) != 0, ("mt stop_all_proc"));
allproc_loop:
sx_xlock(&allproc_lock);
gen = allproc_gen;
seen_exiting = seen_stopped = stopped_some = restart = false;
LIST_REMOVE(cp, p_list);
LIST_INSERT_HEAD(&allproc, cp, p_list);
for (;;) {
p = LIST_NEXT(cp, p_list);
if (p == NULL)
break;
LIST_REMOVE(cp, p_list);
LIST_INSERT_AFTER(p, cp, p_list);
PROC_LOCK(p);
if ((p->p_flag & (P_KTHREAD | P_SYSTEM |
P_TOTAL_STOP)) != 0) {
PROC_UNLOCK(p);
continue;
}
if ((p->p_flag & P_WEXIT) != 0) {
seen_exiting = true;
PROC_UNLOCK(p);
continue;
}
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
/*
* Stopped processes are tolerated when there
* are no other processes which might continue
* them.  A P_STOPPED_SINGLE but not
* P_TOTAL_STOP process still has at least one
* thread running.
*/
seen_stopped = true;
PROC_UNLOCK(p);
continue;
}
_PHOLD(p);
sx_xunlock(&allproc_lock);
r = thread_single(p, SINGLE_ALLPROC);
if (r != 0)
restart = true;
else
stopped_some = true;
_PRELE(p);
PROC_UNLOCK(p);
sx_xlock(&allproc_lock);
}
/* Catch forked children we did not see in iteration. */
if (gen != allproc_gen)
restart = true;
sx_xunlock(&allproc_lock);
if (restart || stopped_some || seen_exiting || seen_stopped) {
kern_yield(PRI_USER);
goto allproc_loop;
}
}
void
resume_all_proc(void)
{
struct proc *cp, *p;
cp = curproc;
sx_xlock(&allproc_lock);
LIST_REMOVE(cp, p_list);
LIST_INSERT_HEAD(&allproc, cp, p_list);
for (;;) {
p = LIST_NEXT(cp, p_list);
if (p == NULL)
break;
LIST_REMOVE(cp, p_list);
LIST_INSERT_AFTER(p, cp, p_list);
PROC_LOCK(p);
if ((p->p_flag & P_TOTAL_STOP) != 0) {
sx_xunlock(&allproc_lock);
_PHOLD(p);
thread_single_end(p, SINGLE_ALLPROC);
_PRELE(p);
PROC_UNLOCK(p);
sx_xlock(&allproc_lock);
} else {
PROC_UNLOCK(p);
}
}
sx_xunlock(&allproc_lock);
}
#define TOTAL_STOP_DEBUG 1
#ifdef TOTAL_STOP_DEBUG
volatile static int ap_resume;
#include <sys/mount.h>
static int
sysctl_debug_stop_all_proc(SYSCTL_HANDLER_ARGS)
{
int error, val;
val = 0;
ap_resume = 0;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
if (val != 0) {
stop_all_proc();
syncer_suspend();
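/* Busy-wait until ap_resume is set externally (e.g. from the kernel debugger). */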
while (ap_resume == 0)
;
syncer_resume();
resume_all_proc();
}
return (0);
}
SYSCTL_PROC(_debug, OID_AUTO, stop_all_proc, CTLTYPE_INT | CTLFLAG_RW |
CTLFLAG_MPSAFE, (void *)&ap_resume, 0, sysctl_debug_stop_all_proc, "I",
"");
#endif

View File

@ -2491,7 +2491,7 @@ ptracestop(struct thread *td, int sig)
cv_broadcast(&p->p_dbgwait);
}
stopme:
thread_suspend_switch(td);
thread_suspend_switch(td, p);
if (p->p_xthread == td)
p->p_xthread = NULL;
if (!(p->p_flag & P_TRACED))
@ -2752,7 +2752,7 @@ issignal(struct thread *td)
p->p_xstat = sig;
PROC_SLOCK(p);
sig_suspend_threads(td, p, 0);
thread_suspend_switch(td);
thread_suspend_switch(td, p);
PROC_SUNLOCK(p);
mtx_lock(&ps->ps_mtx);
break;
@ -2933,7 +2933,7 @@ sigexit(td, sig)
* XXX If another thread attempts to single-thread before us
* (e.g. via fork()), we won't get a dump at all.
*/
if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
if ((sigprop(sig) & SA_CORE) && thread_single(p, SINGLE_NO_EXIT) == 0) {
p->p_sig = sig;
/*
* Log signals which would cause core dumps

View File

@ -445,7 +445,7 @@ thread_exit(void)
if (p->p_numthreads == p->p_suspcount) {
thread_lock(p->p_singlethread);
wakeup_swapper = thread_unsuspend_one(
p->p_singlethread);
p->p_singlethread, p);
thread_unlock(p->p_singlethread);
if (wakeup_swapper)
kick_proc0();
@ -576,7 +576,7 @@ calc_remaining(struct proc *p, int mode)
remaining = p->p_numthreads;
else if (mode == SINGLE_BOUNDARY)
remaining = p->p_numthreads - p->p_boundary_count;
else if (mode == SINGLE_NO_EXIT)
else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
remaining = p->p_numthreads - p->p_suspcount;
else
panic("calc_remaining: wrong mode %d", mode);
@ -587,7 +587,7 @@ static int
remain_for_mode(int mode)
{
return (1);
return (mode == SINGLE_ALLPROC ? 0 : 1);
}
static int
@ -603,21 +603,41 @@ weed_inhib(int mode, struct thread *td2, struct proc *p)
switch (mode) {
case SINGLE_EXIT:
if (TD_IS_SUSPENDED(td2))
wakeup_swapper |= thread_unsuspend_one(td2);
wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
wakeup_swapper |= sleepq_abort(td2, EINTR);
break;
case SINGLE_BOUNDARY:
if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
wakeup_swapper |= thread_unsuspend_one(td2);
wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
wakeup_swapper |= sleepq_abort(td2, ERESTART);
break;
case SINGLE_NO_EXIT:
if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
wakeup_swapper |= thread_unsuspend_one(td2);
wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
wakeup_swapper |= sleepq_abort(td2, ERESTART);
break;
case SINGLE_ALLPROC:
/*
* ALLPROC suspend tries to avoid spurious EINTR for
* threads sleeping interruptibly, by suspending the
* thread directly, similarly to sig_suspend_threads().
* Since such a sleep is not performed at the user
* boundary, the TDF_BOUNDARY flag is not set, and
* TDF_ALLPROCSUSP is used to avoid immediate un-suspend.
*/
if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
TDF_ALLPROCSUSP)) == 0)
wakeup_swapper |= thread_unsuspend_one(td2, p);
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) {
if ((td2->td_flags & TDF_SBDRY) == 0) {
thread_suspend_one(td2);
td2->td_flags |= TDF_ALLPROCSUSP;
} else {
wakeup_swapper |= sleepq_abort(td2, ERESTART);
}
}
break;
}
return (wakeup_swapper);
@ -637,19 +657,29 @@ weed_inhib(int mode, struct thread *td2, struct proc *p)
* any sleeping threads that are interruptible (PCATCH).
*/
int
thread_single(int mode)
thread_single(struct proc *p, int mode)
{
struct thread *td;
struct thread *td2;
struct proc *p;
int remaining, wakeup_swapper;
td = curthread;
p = td->td_proc;
KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
("invalid mode %d", mode));
/*
* If allowing non-ALLPROC singlethreading for non-curproc
* callers, calc_remaining() and remain_for_mode() should be
* adjusted to also account for td->td_proc != p. For now
* this is not implemented because it is not used.
*/
KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
(mode != SINGLE_ALLPROC && td->td_proc == p),
("mode %d proc %p curproc %p", mode, p, td->td_proc));
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
if ((p->p_flag & P_HADTHREADS) == 0)
if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
return (0);
/* Is someone already single threading? */
@ -666,6 +696,8 @@ thread_single(int mode)
else
p->p_flag &= ~P_SINGLE_BOUNDARY;
}
if (mode == SINGLE_ALLPROC)
p->p_flag |= P_TOTAL_STOP;
p->p_flag |= P_STOPPED_SINGLE;
PROC_SLOCK(p);
p->p_singlethread = td;
@ -679,13 +711,13 @@ thread_single(int mode)
continue;
thread_lock(td2);
td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
if (TD_IS_INHIBITED(td2))
if (TD_IS_INHIBITED(td2)) {
wakeup_swapper |= weed_inhib(mode, td2, p);
#ifdef SMP
else if (TD_IS_RUNNING(td2) && td != td2) {
} else if (TD_IS_RUNNING(td2) && td != td2) {
forward_signal(td2);
}
#endif
}
thread_unlock(td2);
}
if (wakeup_swapper)
@ -703,7 +735,7 @@ thread_single(int mode)
* Wake us up when everyone else has suspended.
* In the mean time we suspend as well.
*/
thread_suspend_switch(td);
thread_suspend_switch(td, p);
remaining = calc_remaining(p, mode);
}
if (mode == SINGLE_EXIT) {
@ -813,8 +845,9 @@ thread_suspend_check(int return_instead)
* Ignore suspend requests for stop signals if they
* are deferred.
*/
if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
td->td_flags & TDF_SBDRY) {
if ((P_SHOULDSTOP(p) == P_STOPPED_SIG ||
(p->p_flag & P_TOTAL_STOP) != 0) &&
(td->td_flags & TDF_SBDRY) != 0) {
KASSERT(return_instead,
("TDF_SBDRY set for unsafe thread_suspend_check"));
return (0);
@ -841,7 +874,7 @@ thread_suspend_check(int return_instead)
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
wakeup_swapper =
thread_unsuspend_one(p->p_singlethread);
thread_unsuspend_one(p->p_singlethread, p);
thread_unlock(p->p_singlethread);
if (wakeup_swapper)
kick_proc0();
@ -874,11 +907,9 @@ thread_suspend_check(int return_instead)
}
void
thread_suspend_switch(struct thread *td)
thread_suspend_switch(struct thread *td, struct proc *p)
{
struct proc *p;
p = td->td_proc;
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
@ -886,8 +917,10 @@ thread_suspend_switch(struct thread *td)
* We implement thread_suspend_one in stages here to avoid
* dropping the proc lock while the thread lock is owned.
*/
thread_stopped(p);
p->p_suspcount++;
if (p == td->td_proc) {
thread_stopped(p);
p->p_suspcount++;
}
PROC_UNLOCK(p);
thread_lock(td);
td->td_flags &= ~TDF_NEEDSUSPCHK;
@ -905,8 +938,9 @@ thread_suspend_switch(struct thread *td)
void
thread_suspend_one(struct thread *td)
{
struct proc *p = td->td_proc;
struct proc *p;
p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
@ -917,15 +951,17 @@ thread_suspend_one(struct thread *td)
}
int
thread_unsuspend_one(struct thread *td)
thread_unsuspend_one(struct thread *td, struct proc *p)
{
struct proc *p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
p->p_suspcount--;
td->td_flags &= ~TDF_ALLPROCSUSP;
if (td->td_proc == p) {
PROC_SLOCK_ASSERT(p, MA_OWNED);
p->p_suspcount--;
}
return (setrunnable(td));
}
@ -945,7 +981,7 @@ thread_unsuspend(struct proc *p)
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
wakeup_swapper |= thread_unsuspend_one(td);
wakeup_swapper |= thread_unsuspend_one(td, p);
}
thread_unlock(td);
}
@ -956,9 +992,12 @@ thread_unsuspend(struct proc *p)
* threading request. Now we've downgraded to single-threaded,
* let it continue.
*/
thread_lock(p->p_singlethread);
wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
thread_unlock(p->p_singlethread);
if (p->p_singlethread->td_proc == p) {
thread_lock(p->p_singlethread);
wakeup_swapper = thread_unsuspend_one(
p->p_singlethread, p);
thread_unlock(p->p_singlethread);
}
}
if (wakeup_swapper)
kick_proc0();
@ -968,15 +1007,20 @@ thread_unsuspend(struct proc *p)
* End the single threading mode..
*/
void
thread_single_end(void)
thread_single_end(struct proc *p, int mode)
{
struct thread *td;
struct proc *p;
int wakeup_swapper;
p = curproc;
KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
("invalid mode %d", mode));
PROC_LOCK_ASSERT(p, MA_OWNED);
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
(mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
("mode %d does not match P_TOTAL_STOP", mode));
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
P_TOTAL_STOP);
PROC_SLOCK(p);
p->p_singlethread = NULL;
wakeup_swapper = 0;
@ -986,12 +1030,11 @@ thread_single_end(void)
* on the process. The single threader must be allowed
* to continue however as this is a bad place to stop.
*/
if (p->p_numthreads != remain_for_mode(SINGLE_EXIT) &&
!P_SHOULDSTOP(p)) {
if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (TD_IS_SUSPENDED(td)) {
wakeup_swapper |= thread_unsuspend_one(td);
wakeup_swapper |= thread_unsuspend_one(td, p);
}
thread_unlock(td);
}

View File

@ -133,7 +133,6 @@ print_ct(struct clocktime *ct)
int
clock_ct_to_ts(struct clocktime *ct, struct timespec *ts)
{
time_t secs;
int i, year, days;
year = ct->year;
@ -167,11 +166,10 @@ clock_ct_to_ts(struct clocktime *ct, struct timespec *ts)
days += days_in_month(year, i);
days += (ct->day - 1);
/* Add hours, minutes, seconds. */
secs = ((days * 24 + ct->hour) * 60 + ct->min) * 60 + ct->sec;
ts->tv_sec = secs;
ts->tv_sec = (((time_t)days * 24 + ct->hour) * 60 + ct->min) * 60 +
ct->sec;
ts->tv_nsec = ct->nsec;
if (ct_debug)
printf(" = %ld.%09ld\n", (long)ts->tv_sec, (long)ts->tv_nsec);
return (0);
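The (time_t) cast is the substance of this change: with 32-bit int arithmetic, days * 24 * 60 * 60 overflows INT_MAX once days exceeds 24855 (early 2038), even on platforms whose time_t is 64-bit. A hypothetical illustration:

int days = 25000;	/* a mid-2038 date */
/* int math: 25000 * 86400 = 2160000000 > INT_MAX, undefined behavior */
time_t bad = ((days * 24 + 0) * 60 + 0) * 60;
/* promoting to time_t first keeps the whole product in 64 bits */
time_t good = (((time_t)days * 24 + 0) * 60 + 0) * 60;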

View File

@ -1567,6 +1567,7 @@ buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
int error;
ASSERT_BO_WLOCKED(bo);
KASSERT((bo->bo_flag & BO_DEAD) == 0, ("dead bo %p", bo));
KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
bp->b_xflags |= xflags;
@ -2802,16 +2803,6 @@ vgonel(struct vnode *vp)
VI_UNLOCK(vp);
vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
/*
* Clean out any buffers associated with the vnode.
* If the flush fails, just toss the buffers.
*/
mp = NULL;
if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
(void) vn_start_secondary_write(vp, &mp, V_WAIT);
if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
vinvalbuf(vp, 0, 0, 0);
/*
* If purging an active vnode, it must be closed and
* deactivated before being reclaimed.
@ -2826,6 +2817,29 @@ vgonel(struct vnode *vp)
}
if (vp->v_type == VSOCK)
vfs_unp_reclaim(vp);
/*
* Clean out any buffers associated with the vnode.
* If the flush fails, just toss the buffers.
*/
mp = NULL;
if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
(void) vn_start_secondary_write(vp, &mp, V_WAIT);
if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
while (vinvalbuf(vp, 0, 0, 0) != 0)
;
}
#ifdef INVARIANTS
BO_LOCK(&vp->v_bufobj);
KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
vp->v_bufobj.bo_clean.bv_cnt == 0,
("vp %p bufobj not invalidated", vp));
vp->v_bufobj.bo_flag |= BO_DEAD;
BO_UNLOCK(&vp->v_bufobj);
#endif
/*
* Reclaim the vnode.
*/

View File

@ -1600,7 +1600,7 @@ vn_suspendable(struct vnode *vp, struct mount **mpp)
static int
vn_start_write_locked(struct mount *mp, int flags)
{
int error;
int error, mflags;
mtx_assert(MNT_MTX(mp), MA_OWNED);
error = 0;
@ -1610,13 +1610,15 @@ vn_start_write_locked(struct mount *mp, int flags)
*/
if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
mp->mnt_susp_owner != curthread) {
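/*
* Honor the caller's PCATCH only for filesystems that opt in
* via VFCF_SBDRY; otherwise the sleep is uninterruptible.
*/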
mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
(flags & PCATCH) : 0) | (PUSER - 1);
while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
if (flags & V_NOWAIT) {
error = EWOULDBLOCK;
goto unlock;
}
error = msleep(&mp->mnt_flag, MNT_MTX(mp),
(PUSER - 1) | (flags & PCATCH), "suspfs", 0);
error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
"suspfs", 0);
if (error)
goto unlock;
}
@ -1732,8 +1734,9 @@ vn_start_secondary_write(vp, mpp, flags)
/*
* Wait for the suspension to finish.
*/
error = msleep(&mp->mnt_flag, MNT_MTX(mp),
(PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
"suspfs", 0);
vfs_rel(mp);
if (error == 0)
goto retry;

View File

@ -0,0 +1,32 @@
/* $OpenBSD: timingsafe_bcmp.c,v 1.2 2014/06/10 04:16:57 deraadt Exp $ */
/*
* Copyright (c) 2010 Damien Miller. All rights reserved.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $FreeBSD$
*
*/
#include <sys/libkern.h>
int
timingsafe_bcmp(const void *b1, const void *b2, size_t n)
{
const unsigned char *p1 = b1, *p2 = b2;
int ret = 0;
for (; n > 0; n--)
ret |= *p1++ ^ *p2++;
return (ret != 0);
}
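The accumulate-then-test structure keeps the running time independent of where (or whether) the buffers differ. A typical use, with hypothetical variable names, is tag verification:

/* Hypothetical caller: constant-time MAC/tag comparison. */
if (timingsafe_bcmp(computed_tag, received_tag, taglen) != 0)
	return (EBADMSG);	/* mismatch, rejected without a timing leak */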

View File

@ -423,13 +423,8 @@ xlr_sec_process(device_t dev, struct cryptop *crp, int hint)
cmd->op.source_buf = (uint64_t) (unsigned long)crp->crp_buf;
cmd->op.source_buf_size = crp->crp_ilen;
if (crp->crp_flags & CRYPTO_F_REL) {
cmd->op.dest_buf = (uint64_t) (unsigned long)crp->crp_buf;
cmd->op.dest_buf_size = crp->crp_ilen;
} else {
cmd->op.dest_buf = (uint64_t) (unsigned long)crp->crp_buf;
cmd->op.dest_buf_size = crp->crp_ilen;
}
cmd->op.dest_buf = (uint64_t) (unsigned long)crp->crp_buf;
cmd->op.dest_buf_size = crp->crp_ilen;
cmd->op.num_packets = 1;
cmd->op.num_fragments = 1;

View File

@ -7,13 +7,18 @@ SRCS= aesni.c
SRCS+= aeskeys_${MACHINE_CPUARCH}.S
SRCS+= device_if.h bus_if.h opt_bus.h cryptodev_if.h
OBJS+= aesni_wrap.o
OBJS+= aesni_ghash.o aesni_wrap.o
# Remove -nostdinc so we can get the intrinsics.
aesni_ghash.o: aesni_ghash.c
# XXX - gcc won't understand -mpclmul
${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} \
-mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}
${CTFCONVERT_CMD}
aesni_wrap.o: aesni_wrap.c
${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} \
-mmmx -msse -maes ${.IMPSRC}
-mmmx -msse -msse4 -maes ${.IMPSRC}
${CTFCONVERT_CMD}
.include <bsd.kmod.mk>

View File

@ -18,6 +18,7 @@ SRCS += camellia.c camellia-api.c
SRCS += des_ecb.c des_enc.c des_setkey.c
SRCS += sha1.c sha2.c sha256c.c
SRCS += siphash.c
SRCS += gmac.c gfmult.c
SRCS += opt_param.h cryptodev_if.h bus_if.h device_if.h
SRCS += opt_ddb.h

View File

@ -93,6 +93,13 @@ ifdead_transmit(struct ifnet *ifp, struct mbuf *m)
return (ENXIO);
}
static uint64_t
ifdead_get_counter(struct ifnet *ifp, ift_counter cnt)
{
return (0);
}
void
if_dead(struct ifnet *ifp)
{
@ -104,4 +111,5 @@ if_dead(struct ifnet *ifp)
ifp->if_resolvemulti = ifdead_resolvemulti;
ifp->if_qflush = ifdead_qflush;
ifp->if_transmit = ifdead_transmit;
ifp->if_get_counter = ifdead_get_counter;
}

View File

@ -5698,7 +5698,6 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt
#ifdef INET
case AF_INET:
if (ipsec4_in_reject(m, &inp->ip_inp.inp)) {
IPSECSTAT_INC(ips_in_polvio);
SCTP_STAT_INCR(sctps_hdrops);
goto out;
}
@ -5707,7 +5706,6 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt
#ifdef INET6
case AF_INET6:
if (ipsec6_in_reject(m, &inp->ip_inp.inp)) {
IPSEC6STAT_INC(ips_in_polvio);
SCTP_STAT_INCR(sctps_hdrops);
goto out;
}

View File

@ -894,12 +894,10 @@ tcp_input(struct mbuf **mp, int *offp, int proto)
#ifdef IPSEC
#ifdef INET6
if (isipv6 && ipsec6_in_reject(m, inp)) {
IPSEC6STAT_INC(ips_in_polvio);
goto dropunlock;
} else
#endif /* INET6 */
if (ipsec4_in_reject(m, inp) != 0) {
IPSECSTAT_INC(ips_in_polvio);
goto dropunlock;
}
#endif /* IPSEC */

View File

@ -323,7 +323,6 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
/* Check AH/ESP integrity. */
if (ipsec4_in_reject(n, inp)) {
m_freem(n);
IPSECSTAT_INC(ips_in_polvio);
return;
}
#ifdef IPSEC_NAT_T

View File

@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <netinet/in_pcb.h>
#ifdef IPSEC
#include <netinet6/ip6_ipsec.h>
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#include <netipsec/key.h>
@ -109,21 +110,6 @@ ip6_forward(struct mbuf *m, int srcrt)
struct m_tag *fwd_tag;
char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
#ifdef IPSEC
/*
* Check AH/ESP integrity.
*/
/*
* Don't increment ip6s_cantforward because this is the check
* before forwarding packet actually.
*/
if (ipsec6_in_reject(m, NULL)) {
IPSEC6STAT_INC(ips_in_polvio);
m_freem(m);
return;
}
#endif /* IPSEC */
/*
* Do not forward packets to multicast destination (should be handled
* by ip6_mforward()).
@ -148,6 +134,17 @@ ip6_forward(struct mbuf *m, int srcrt)
m_freem(m);
return;
}
#ifdef IPSEC
/*
* Check if this packet has an active SA and needs to be dropped
* instead of forwarded.
*/
if (ip6_ipsec_fwd(m) != 0) {
IP6STAT_INC(ip6s_cantforward);
m_freem(m);
return;
}
#endif /* IPSEC */
#ifdef IPSTEALTH
if (!V_ip6stealth) {

View File

@ -118,28 +118,18 @@ ip6_ipsec_filtertunnel(struct mbuf *m)
/*
* Check if this packet has an active SA and needs to be dropped instead
* of forwarded.
* Called from ip6_input().
* Called from ip6_forward().
* 1 = drop packet, 0 = forward packet.
*/
int
ip6_ipsec_fwd(struct mbuf *m)
{
#ifdef IPSEC
struct secpolicy *sp;
int error;
sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, &error);
if (sp != NULL) {
/*
* Check security policy against packet attributes.
*/
error = ipsec_in_reject(sp, m);
KEY_FREESP(&sp);
}
if (error != 0)
return (1);
#endif /* IPSEC */
#ifdef IPSEC
return (ipsec6_in_reject(m, NULL));
#else
return (0);
#endif /* !IPSEC */
}
/*
@ -152,31 +142,15 @@ ip6_ipsec_fwd(struct mbuf *m)
int
ip6_ipsec_input(struct mbuf *m, int nxt)
{
#ifdef IPSEC
struct secpolicy *sp;
int error;
/*
* enforce IPsec policy checking if we are seeing last header.
* note that we do not visit this with protocols with pcb layer
* code - like udp/tcp/raw ip.
*/
if ((inet6sw[ip6_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
ipsec6_in_reject(m, NULL)) {
sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, &error);
if (sp != NULL) {
/*
* Check security policy against packet attributes.
*/
error = ipsec_in_reject(sp, m);
KEY_FREESP(&sp);
} else {
/* XXX error stat??? */
error = EINVAL;
DPRINTF(("%s: no SP, packet discarded\n", __func__));/*XXX*/
}
if (error != 0)
return (1);
}
if ((inet6sw[ip6_protox[nxt]].pr_flags & PR_LASTHDR) != 0)
return (ipsec6_in_reject(m, NULL));
#endif /* IPSEC */
return (0);
}

View File

@ -264,7 +264,6 @@ rip6_input(struct mbuf **mp, int *offp, int proto)
*/
if (n && ipsec6_in_reject(n, last)) {
m_freem(n);
IPSEC6STAT_INC(ips_in_polvio);
/* Do not inject data into pcb. */
} else
#endif /* IPSEC */
@ -296,7 +295,6 @@ rip6_input(struct mbuf **mp, int *offp, int proto)
*/
if ((last != NULL) && ipsec6_in_reject(m, last)) {
m_freem(m);
IPSEC6STAT_INC(ips_in_polvio);
IP6STAT_DEC(ip6s_delivered);
/* Do not inject data into pcb. */
INP_RUNLOCK(last);

View File

@ -158,7 +158,6 @@ udp6_append(struct inpcb *inp, struct mbuf *n, int off,
/* Check AH/ESP integrity. */
if (ipsec6_in_reject(n, inp)) {
m_freem(n);
IPSEC6STAT_INC(ips_in_polvio);
return;
}
#endif /* IPSEC */

View File

@ -99,35 +99,31 @@ cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
}
/*
* Return a pointer to iov/offset of location in iovec list.
* Return the index and offset of the location in the iovec list.
*/
struct iovec *
int
cuio_getptr(struct uio *uio, int loc, int *off)
{
struct iovec *iov = uio->uio_iov;
int iol = uio->uio_iovcnt;
int ind, len;
while (loc >= 0) {
/* Normal end of search */
if (loc < iov->iov_len) {
ind = 0;
while (loc >= 0 && ind < uio->uio_iovcnt) {
len = uio->uio_iov[ind].iov_len;
if (len > loc) {
*off = loc;
return (iov);
return (ind);
}
loc -= len;
ind++;
}
loc -= iov->iov_len;
if (iol == 0) {
if (loc == 0) {
/* Point at the end of valid data */
*off = iov->iov_len;
return (iov);
} else
return (NULL);
} else {
iov++, iol--;
}
}
if (ind > 0 && loc == 0) {
ind--;
*off = uio->uio_iov[ind].iov_len;
return (ind);
}
return (NULL);
return (-1);
}
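Under the new contract a caller gets -1 on failure and otherwise indexes uio_iov itself; a minimal sketch with hypothetical variables:

int ind, off;
char *idat;

ind = cuio_getptr(uio, loc, &off);
if (ind < 0)
	return (EINVAL);
idat = (char *)uio->uio_iov[ind].iov_base + off;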
/*
@ -196,3 +192,47 @@ crypto_apply(int flags, caddr_t buf, int off, int len,
error = (*f)(arg, buf + off, len);
return (error);
}
void
crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, int *cnt,
int *allocated)
{
struct iovec *iov;
struct mbuf *m, *mtmp;
int i, j;
*allocated = 0;
iov = *iovptr;
if (iov == NULL)
*cnt = 0;
m = mbuf;
i = 0;
while (m != NULL) {
if (i == *cnt) {
/* we need to allocate a larger array */
j = 1;
mtmp = m;
while ((mtmp = mtmp->m_next) != NULL)
j++;
iov = malloc(sizeof *iov * (i + j), M_CRYPTO_DATA,
M_WAITOK);
*allocated = 1;
*cnt = i + j;
memcpy(iov, *iovptr, sizeof *iov * i);
}
iov[i].iov_base = m->m_data;
iov[i].iov_len = m->m_len;
i++;
m = m->m_next;
}
if (*allocated)
KASSERT(*cnt == i, ("did not allocate correct amount: %d != %d",
*cnt, i));
*iovptr = iov;
*cnt = i;
}
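Callers can seed the conversion with a small on-stack array and fall back to the allocated copy only for long chains, as the software crypto code elsewhere in this commit does; a sketch under those assumptions (m is a hypothetical mbuf chain):

struct iovec iovlcl[4], *iov;
int iovcnt, iovalloc;

iov = iovlcl;
iovcnt = nitems(iovlcl);
crypto_mbuftoiov(m, &iov, &iovcnt, &iovalloc);
/* ... operate on iov[0 .. iovcnt-1] ... */
if (iovalloc)
	free(iov, M_CRYPTO_DATA);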

View File

@ -368,9 +368,8 @@ crypto_select_driver(const struct cryptoini *cri, int flags)
best = cap;
}
}
if (best != NULL)
return best;
if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
(flags & CRYPTOCAP_F_SOFTWARE)) {
/* sort of an Algol 68-style for loop */
match = CRYPTOCAP_F_SOFTWARE;
goto again;
@ -421,9 +420,12 @@ crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
(*sid) <<= 32;
(*sid) |= (lid & 0xffffffff);
cap->cc_sessions++;
}
} else
} else
CRYPTDEB("dev newsession failed");
} else {
CRYPTDEB("no driver");
err = EINVAL;
}
CRYPTO_DRIVER_UNLOCK();
return err;
}

View File

@ -3,6 +3,12 @@
/*-
* Copyright (c) 2001 Theo de Raadt
* Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -36,6 +42,7 @@
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -55,10 +62,15 @@ __FBSDID("$FreeBSD$");
#include <sys/fcntl.h>
#include <sys/bus.h>
#include <sys/user.h>
#include <sys/sdt.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
SDT_PROVIDER_DECLARE(opencrypto);
SDT_PROBE_DEFINE1(opencrypto, dev, ioctl, error, "int"/*line number*/);
#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
@ -315,6 +327,8 @@ static int csefree(struct csession *);
static int cryptodev_op(struct csession *, struct crypt_op *,
struct ucred *, struct thread *td);
static int cryptodev_aead(struct csession *, struct crypt_aead *,
struct ucred *, struct thread *);
static int cryptodev_key(struct crypt_kop *);
static int cryptodev_find(struct crypt_find_op *);
@ -324,15 +338,23 @@ static int cryptodev_find(struct crypt_find_op *);
* by device name/class or through search constraints.
*/
static int
checkforsoftware(int crid)
checkforsoftware(int *cridp)
{
int crid;
crid = *cridp;
if (!crypto_devallowsoft) {
if (crid & CRYPTOCAP_F_SOFTWARE)
return EINVAL; /* XXX */
if (crid & CRYPTOCAP_F_SOFTWARE) {
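/*
* A request for both classes degrades to hardware-only when
* software crypto is administratively disallowed.
*/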
if (crid & CRYPTOCAP_F_HARDWARE) {
*cridp = CRYPTOCAP_F_HARDWARE;
return 0;
}
return EINVAL;
}
if ((crid & CRYPTOCAP_F_HARDWARE) == 0 &&
(crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0)
return EINVAL; /* XXX */
return EINVAL;
}
return 0;
}
@ -352,6 +374,7 @@ cryptof_ioctl(
struct csession *cse;
struct session_op *sop;
struct crypt_op *cop;
struct crypt_aead *caead;
struct enc_xform *txform = NULL;
struct auth_hash *thash = NULL;
struct crypt_kop *kop;
@ -412,7 +435,15 @@ cryptof_ioctl(
case CRYPTO_CAMELLIA_CBC:
txform = &enc_xform_camellia;
break;
case CRYPTO_AES_ICM:
txform = &enc_xform_aes_icm;
break;
case CRYPTO_AES_NIST_GCM_16:
txform = &enc_xform_aes_nist_gcm;
break;
default:
CRYPTDEB("invalid cipher");
return (EINVAL);
}
@ -437,6 +468,16 @@ cryptof_ioctl(
case CRYPTO_RIPEMD160_HMAC:
thash = &auth_hash_hmac_ripemd_160;
break;
case CRYPTO_AES_128_NIST_GMAC:
thash = &auth_hash_nist_gmac_aes_128;
break;
case CRYPTO_AES_192_NIST_GMAC:
thash = &auth_hash_nist_gmac_aes_192;
break;
case CRYPTO_AES_256_NIST_GMAC:
thash = &auth_hash_nist_gmac_aes_256;
break;
#ifdef notdef
case CRYPTO_MD5:
thash = &auth_hash_md5;
@ -449,6 +490,7 @@ cryptof_ioctl(
thash = &auth_hash_null;
break;
default:
CRYPTDEB("invalid mac");
return (EINVAL);
}
@ -460,6 +502,7 @@ cryptof_ioctl(
crie.cri_klen = sop->keylen * 8;
if (sop->keylen > txform->maxkey ||
sop->keylen < txform->minkey) {
CRYPTDEB("invalid cipher parameters");
error = EINVAL;
goto bail;
}
@ -467,8 +510,10 @@ cryptof_ioctl(
crie.cri_key = malloc(crie.cri_klen / 8,
M_XDATA, M_WAITOK);
if ((error = copyin(sop->key, crie.cri_key,
crie.cri_klen / 8)))
crie.cri_klen / 8))) {
CRYPTDEB("invalid key");
goto bail;
}
if (thash)
crie.cri_next = &cria;
}
@ -477,6 +522,7 @@ cryptof_ioctl(
cria.cri_alg = thash->type;
cria.cri_klen = sop->mackeylen * 8;
if (sop->mackeylen != thash->keysize) {
CRYPTDEB("invalid mac key length");
error = EINVAL;
goto bail;
}
@ -485,8 +531,10 @@ cryptof_ioctl(
cria.cri_key = malloc(cria.cri_klen / 8,
M_XDATA, M_WAITOK);
if ((error = copyin(sop->mackey, cria.cri_key,
cria.cri_klen / 8)))
cria.cri_klen / 8))) {
CRYPTDEB("invalid mac key");
goto bail;
}
}
}
@ -497,14 +545,18 @@ cryptof_ioctl(
#endif
) {
crid = SES2(sop)->crid;
error = checkforsoftware(crid);
if (error)
error = checkforsoftware(&crid);
if (error) {
CRYPTDEB("checkforsoftware");
goto bail;
}
} else
crid = CRYPTOCAP_F_HARDWARE;
error = crypto_newsession(&sid, (txform ? &crie : &cria), crid);
if (error)
if (error) {
CRYPTDEB("crypto_newsession");
goto bail;
}
cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform,
@ -513,6 +565,7 @@ cryptof_ioctl(
if (cse == NULL) {
crypto_freesession(sid);
error = EINVAL;
CRYPTDEB("csecreate");
goto bail;
}
sop->ses = cse->ses;
@ -559,8 +612,10 @@ cryptof_ioctl(
#endif
cop = (struct crypt_op *)data;
cse = csefind(fcr, cop->ses);
if (cse == NULL)
if (cse == NULL) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
return (EINVAL);
}
error = cryptodev_op(cse, cop, active_cred, td);
#ifdef COMPAT_FREEBSD32
if (error == 0 && cmd == CIOCCRYPT32)
@ -614,6 +669,13 @@ cryptof_ioctl(
case CIOCFINDDEV:
error = cryptodev_find((struct crypt_find_op *)data);
break;
case CIOCCRYPTAEAD:
caead = (struct crypt_aead *)data;
cse = csefind(fcr, caead->ses);
if (cse == NULL)
return (EINVAL);
error = cryptodev_aead(cse, caead, active_cred, td);
break;
default:
error = EINVAL;
break;
@ -636,12 +698,16 @@ cryptodev_op(
struct cryptodesc *crde = NULL, *crda = NULL;
int error;
if (cop->len > 256*1024-4)
if (cop->len > 256*1024-4) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
return (E2BIG);
}
if (cse->txform) {
if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
return (EINVAL);
}
}
cse->uio.uio_iov = &cse->iovec;
@ -661,6 +727,7 @@ cryptodev_op(
crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL));
if (crp == NULL) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = ENOMEM;
goto bail;
}
@ -673,13 +740,17 @@ cryptodev_op(
if (cse->txform)
crde = crp->crp_desc;
else {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = EINVAL;
goto bail;
}
}
if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base,
cop->len))) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
if (crda) {
crda->crd_skip = 0;
@ -714,15 +785,20 @@ cryptodev_op(
if (cop->iv) {
if (crde == NULL) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = EINVAL;
goto bail;
}
if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = EINVAL;
goto bail;
}
if ((error = copyin(cop->iv, cse->tmp_iv, cse->txform->blocksize)))
if ((error = copyin(cop->iv, cse->tmp_iv,
cse->txform->blocksize))) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
bcopy(cse->tmp_iv, crde->crd_iv, cse->txform->blocksize);
crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
crde->crd_skip = 0;
@ -735,6 +811,7 @@ cryptodev_op(
}
if (cop->mac && crda == NULL) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = EINVAL;
goto bail;
}
@ -753,6 +830,163 @@ cryptodev_op(
error = msleep(crp, &cse->lock, PWAIT, "crydev", 0);
mtx_unlock(&cse->lock);
if (error != 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
if (crp->crp_etype == EAGAIN) {
crp->crp_etype = 0;
crp->crp_flags &= ~CRYPTO_F_DONE;
goto again;
}
if (crp->crp_etype != 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = crp->crp_etype;
goto bail;
}
if (cse->error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = cse->error;
goto bail;
}
if (cop->dst &&
(error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst,
cop->len))) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
if (cop->mac &&
(error = copyout((caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
cop->mac, cse->thash->hashsize))) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
bail:
if (crp)
crypto_freereq(crp);
if (cse->uio.uio_iov[0].iov_base)
free(cse->uio.uio_iov[0].iov_base, M_XDATA);
return (error);
}
static int
cryptodev_aead(
struct csession *cse,
struct crypt_aead *caead,
struct ucred *active_cred,
struct thread *td)
{
struct uio *uio;
struct cryptop *crp = NULL;
struct cryptodesc *crde = NULL, *crda = NULL;
int error;
if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4)
return (E2BIG);
if (cse->txform == NULL || cse->thash == NULL || caead->tag == NULL ||
(caead->len % cse->txform->blocksize) != 0)
return (EINVAL);
uio = &cse->uio;
uio->uio_iov = &cse->iovec;
uio->uio_iovcnt = 1;
uio->uio_offset = 0;
uio->uio_resid = caead->len + caead->aadlen + cse->thash->hashsize;
uio->uio_segflg = UIO_SYSSPACE;
uio->uio_rw = UIO_WRITE;
uio->uio_td = td;
uio->uio_iov[0].iov_len = uio->uio_resid;
uio->uio_iov[0].iov_base = malloc(uio->uio_iov[0].iov_len,
M_XDATA, M_WAITOK);
crp = crypto_getreq(2);
if (crp == NULL) {
error = ENOMEM;
goto bail;
}
crda = crp->crp_desc;
crde = crda->crd_next;
if ((error = copyin(caead->src, cse->uio.uio_iov[0].iov_base,
caead->len)))
goto bail;
if ((error = copyin(caead->aad, (char *)cse->uio.uio_iov[0].iov_base +
caead->len, caead->aadlen)))
goto bail;
crda->crd_skip = caead->len;
crda->crd_len = caead->aadlen;
crda->crd_inject = caead->len + caead->aadlen;
crda->crd_alg = cse->mac;
crda->crd_key = cse->mackey;
crda->crd_klen = cse->mackeylen * 8;
if (caead->op == COP_ENCRYPT)
crde->crd_flags |= CRD_F_ENCRYPT;
else
crde->crd_flags &= ~CRD_F_ENCRYPT;
/* crde->crd_skip set below */
crde->crd_len = caead->len;
crde->crd_inject = 0;
crde->crd_alg = cse->cipher;
crde->crd_key = cse->key;
crde->crd_klen = cse->keylen * 8;
crp->crp_ilen = caead->len + caead->aadlen;
crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
| (caead->flags & COP_F_BATCH);
crp->crp_buf = (caddr_t)&cse->uio.uio_iov;
crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
crp->crp_sid = cse->sid;
crp->crp_opaque = (void *)cse;
if (caead->iv) {
if (caead->ivlen > sizeof cse->tmp_iv) {
error = EINVAL;
goto bail;
}
if ((error = copyin(caead->iv, cse->tmp_iv, caead->ivlen)))
goto bail;
bcopy(cse->tmp_iv, crde->crd_iv, caead->ivlen);
crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
crde->crd_skip = 0;
} else {
crde->crd_flags |= CRD_F_IV_PRESENT;
crde->crd_skip = cse->txform->blocksize;
crde->crd_len -= cse->txform->blocksize;
}
if ((error = copyin(caead->tag, (caddr_t)cse->uio.uio_iov[0].iov_base +
caead->len + caead->aadlen, cse->thash->hashsize)))
goto bail;
again:
/*
* Let the dispatch run unlocked, then interlock against the
* callback before checking if the operation completed and going
* to sleep.  This ensures drivers don't inherit our lock, which
* would result in a lock order reversal between the crypto_dispatch
* forced entry and the crypto_done callback into us.
*/
error = crypto_dispatch(crp);
mtx_lock(&cse->lock);
if (error == 0 && (crp->crp_flags & CRYPTO_F_DONE) == 0)
error = msleep(crp, &cse->lock, PWAIT, "crydev", 0);
mtx_unlock(&cse->lock);
if (error != 0)
goto bail;
@ -772,20 +1006,17 @@ cryptodev_op(
goto bail;
}
if (cop->dst &&
(error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, cop->len)))
if (caead->dst && (error = copyout(cse->uio.uio_iov[0].iov_base,
caead->dst, caead->len)))
goto bail;
if (cop->mac &&
(error = copyout((caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
cop->mac, cse->thash->hashsize)))
if ((error = copyout((caddr_t)cse->uio.uio_iov[0].iov_base +
caead->len + caead->aadlen, caead->tag, cse->thash->hashsize)))
goto bail;
bail:
if (crp)
crypto_freereq(crp);
if (cse->uio.uio_iov[0].iov_base)
free(cse->uio.uio_iov[0].iov_base, M_XDATA);
crypto_freereq(crp);
free(cse->uio.uio_iov[0].iov_base, M_XDATA);
return (error);
}
@ -919,14 +1150,16 @@ static int
cryptodev_find(struct crypt_find_op *find)
{
device_t dev;
size_t fnlen = sizeof find->name;
if (find->crid != -1) {
dev = crypto_find_device_byhid(find->crid);
if (dev == NULL)
return (ENOENT);
strlcpy(find->name, device_get_nameunit(dev),
sizeof(find->name));
strncpy(find->name, device_get_nameunit(dev), fnlen);
find->name[fnlen - 1] = '\x0';
} else {
find->name[fnlen - 1] = '\x0';
find->crid = crypto_find_driver(find->name);
if (find->crid == -1)
return (ENOENT);

View File

@ -23,6 +23,12 @@
* PURPOSE.
*
* Copyright (c) 2001 Theo de Raadt
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -100,20 +106,23 @@
#define CAMELLIA_BLOCK_LEN 16
#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
/* Maximum hash algorithm result length */
#define AALG_MAX_RESULT_LEN 64 /* Keep this updated */
#define CRYPTO_ALGORITHM_MIN 1
#define CRYPTO_DES_CBC 1
#define CRYPTO_3DES_CBC 2
#define CRYPTO_BLF_CBC 3
#define CRYPTO_CAST_CBC 4
#define CRYPTO_SKIPJACK_CBC 5
#define CRYPTO_MD5_HMAC 6
#define CRYPTO_SHA1_HMAC 7
#define CRYPTO_RIPEMD160_HMAC 8
#define CRYPTO_MD5_KPDK 9
#define CRYPTO_SHA1_KPDK 10
#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
#define CRYPTO_ARC4 12
#define CRYPTO_DES_CBC 1
#define CRYPTO_3DES_CBC 2
#define CRYPTO_BLF_CBC 3
#define CRYPTO_CAST_CBC 4
#define CRYPTO_SKIPJACK_CBC 5
#define CRYPTO_MD5_HMAC 6
#define CRYPTO_SHA1_HMAC 7
#define CRYPTO_RIPEMD160_HMAC 8
#define CRYPTO_MD5_KPDK 9
#define CRYPTO_SHA1_KPDK 10
#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
#define CRYPTO_ARC4 12
#define CRYPTO_MD5 13
#define CRYPTO_SHA1 14
#define CRYPTO_NULL_HMAC 15
@ -122,9 +131,18 @@
#define CRYPTO_SHA2_256_HMAC 18
#define CRYPTO_SHA2_384_HMAC 19
#define CRYPTO_SHA2_512_HMAC 20
#define CRYPTO_CAMELLIA_CBC 21
#define CRYPTO_CAMELLIA_CBC 21
#define CRYPTO_AES_XTS 22
#define CRYPTO_ALGORITHM_MAX 22 /* Keep updated - see below */
#define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */
#define CRYPTO_AES_NIST_GMAC 24 /* cipher side */
#define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */
#define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */
#define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */
#define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */
#define CRYPTO_ALGORITHM_MAX 28 /* Keep updated - see below */
#define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \
(x) <= CRYPTO_ALGORITHM_MAX)
/* Algorithm flags */
#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
@ -182,6 +200,20 @@ struct crypt_op {
caddr_t iv;
};
/* op and flags the same as crypt_op */
struct crypt_aead {
u_int32_t ses;
u_int16_t op; /* e.g. COP_ENCRYPT */
u_int16_t flags;
u_int len;
u_int aadlen;
u_int ivlen;
caddr_t src, dst; /* become iov[] inside kernel */
caddr_t aad; /* additional authenticated data */
caddr_t tag; /* must fit for chosen TAG length */
caddr_t iv;
};
/*
* Parameters for looking up a crypto driver/device by
* device name or by id. The latter are returned for
@ -239,6 +271,7 @@ struct crypt_kop {
#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
#define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead)
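A minimal userland sketch of the new ioctl; everything here is illustrative (assumes fd is an open /dev/crypto handle, ses came from CIOCGSESSION with CRYPTO_AES_NIST_GCM_16 plus a matching GMAC, and the 12-byte nonce is a common but not mandated choice):

struct crypt_aead ca;

memset(&ca, 0, sizeof(ca));
ca.ses = ses;		/* from CIOCGSESSION */
ca.op = COP_ENCRYPT;
ca.len = ptlen;		/* payload length */
ca.aadlen = aadlen;	/* additional authenticated data length */
ca.ivlen = 12;		/* illustrative GCM nonce length */
ca.src = pt;
ca.dst = ct;
ca.aad = aad;
ca.tag = tag;		/* 16 bytes for CRYPTO_AES_NIST_GCM_16 */
ca.iv = iv;
if (ioctl(fd, CIOCCRYPTAEAD, &ca) == -1)
	err(1, "CIOCCRYPTAEAD");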
struct cryptotstat {
struct timespec acc; /* total accumulated time */
@ -269,6 +302,14 @@ struct cryptostats {
};
#ifdef _KERNEL
#if 0
#define CRYPTDEB(s) do { printf("%s:%d: %s\n", __FILE__, __LINE__, s); \
} while (0)
#else
#define CRYPTDEB(s) do { } while (0)
#endif
/* Standard initialization structure beginning */
struct cryptoini {
int cri_alg; /* Algorithm to use */
@ -292,14 +333,15 @@ struct cryptodesc {
place, so don't copy. */
#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
#define CRD_F_COMP 0x0f /* Set when doing compression */
#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
#define CRD_F_COMP 0x0f /* Set when doing compression */
struct cryptoini CRD_INI; /* Initialization/context data */
#define crd_iv CRD_INI.cri_iv
#define crd_key CRD_INI.cri_key
#define crd_alg CRD_INI.cri_alg
#define crd_klen CRD_INI.cri_klen
#define crd_esn CRD_INI.cri_esn
#define crd_iv CRD_INI.cri_iv
#define crd_key CRD_INI.cri_key
#define crd_alg CRD_INI.cri_alg
#define crd_klen CRD_INI.cri_klen
struct cryptodesc *crd_next;
};
@ -324,9 +366,8 @@ struct cryptop {
*/
int crp_flags;
#define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */
#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
#define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */
#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
#define CRYPTO_F_DONE 0x0020 /* Operation completed */
@ -341,12 +382,12 @@ struct cryptop {
struct bintime crp_tstamp; /* performance time stamp */
};
#define CRYPTO_BUF_CONTIG 0x0
#define CRYPTO_BUF_IOV 0x1
#define CRYPTO_BUF_MBUF 0x2
#define CRYPTO_BUF_CONTIG 0x0
#define CRYPTO_BUF_IOV 0x1
#define CRYPTO_BUF_MBUF 0x2
#define CRYPTO_OP_DECRYPT 0x0
#define CRYPTO_OP_ENCRYPT 0x1
#define CRYPTO_OP_DECRYPT 0x0
#define CRYPTO_OP_ENCRYPT 0x1
/*
* Hints passed to process methods.
@ -381,9 +422,9 @@ MALLOC_DECLARE(M_CRYPTO_DATA);
extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
extern int crypto_freesession(u_int64_t sid);
#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
extern int32_t crypto_get_driverid(device_t dev, int flags);
extern int crypto_find_driver(const char *);
extern device_t crypto_find_device_byhid(int hid);
@ -418,10 +459,15 @@ extern int crypto_devallowsoft; /* only use hardware crypto */
struct uio;
extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
extern int cuio_getptr(struct uio *uio, int loc, int *off);
extern int cuio_apply(struct uio *uio, int off, int len,
int (*f)(void *, void *, u_int), void *arg);
struct mbuf;
struct iovec;
extern void crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr,
int *cnt, int *allocated);
extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
caddr_t in);
extern void crypto_copydata(int flags, caddr_t buf, int off, int size,

View File

@ -9,6 +9,12 @@
* supported the development of this code.
*
* Copyright (c) 2000, 2001 Angelos D. Keromytis
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Permission to use, copy, and modify this software with or without fee
* is hereby granted, provided that this entire notice is included in
@ -37,6 +43,8 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
@ -64,6 +72,7 @@ u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authenc(struct cryptop *crp);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_locked(device_t dev, u_int64_t tid);
@ -76,36 +85,48 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
int flags)
{
unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
struct enc_xform *exf;
int i, k, j, blks;
int i, j, k, blks, ind, count, ivlen;
struct uio *uio, uiolcl;
struct iovec iovlcl[4];
struct iovec *iov;
int iovcnt, iovalloc;
int error;
error = 0;
exf = sw->sw_exf;
blks = exf->blocksize;
ivlen = exf->ivsize;
/* Check for non-padded data */
if (crd->crd_len % blks)
return EINVAL;
if (crd->crd_alg == CRYPTO_AES_ICM &&
(crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
return (EINVAL);
/* Initialize the IV */
if (crd->crd_flags & CRD_F_ENCRYPT) {
/* IV explicitly provided ? */
if (crd->crd_flags & CRD_F_IV_EXPLICIT)
bcopy(crd->crd_iv, iv, blks);
bcopy(crd->crd_iv, iv, ivlen);
else
arc4rand(iv, blks, 0);
arc4rand(iv, ivlen, 0);
/* Do we need to write the IV */
if (!(crd->crd_flags & CRD_F_IV_PRESENT))
crypto_copyback(flags, buf, crd->crd_inject, blks, iv);
crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
} else { /* Decryption */
/* IV explicitly provided ? */
if (crd->crd_flags & CRD_F_IV_EXPLICIT)
bcopy(crd->crd_iv, iv, blks);
bcopy(crd->crd_iv, iv, ivlen);
else {
/* Get IV off buf */
crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
}
}
@ -114,341 +135,184 @@ swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
if (sw->sw_kschedule)
exf->zerokey(&(sw->sw_kschedule));
error = exf->setkey(&sw->sw_kschedule,
crd->crd_key, crd->crd_klen / 8);
if (error)
return (error);
}
ivp = iv;
/*
* xforms that provide a reinit method perform all IV
* handling themselves.
*/
if (exf->reinit)
exf->reinit(sw->sw_kschedule, iv);
if (flags & CRYPTO_F_IMBUF) {
struct mbuf *m = (struct mbuf *) buf;
/* Find beginning of data */
m = m_getptr(m, crd->crd_skip, &k);
if (m == NULL)
return EINVAL;
i = crd->crd_len;
while (i > 0) {
/*
* If there's insufficient data at the end of
* an mbuf, we have to do some copying.
*/
if (m->m_len < k + blks && m->m_len != k) {
m_copydata(m, k, blks, blk);
/* Actual encryption/decryption */
if (exf->reinit) {
if (crd->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(sw->sw_kschedule,
blk);
} else {
exf->decrypt(sw->sw_kschedule,
blk);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
exf->encrypt(sw->sw_kschedule, blk);
/*
* Keep encrypted block for XOR'ing
* with next block
*/
bcopy(blk, iv, blks);
ivp = iv;
} else { /* decrypt */
/*
* Keep encrypted block for XOR'ing
* with next block
*/
if (ivp == iv)
bcopy(blk, piv, blks);
else
bcopy(blk, iv, blks);
exf->decrypt(sw->sw_kschedule, blk);
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
if (ivp == iv)
bcopy(piv, iv, blks);
else
ivp = iv;
}
/* Copy back decrypted block */
m_copyback(m, k, blks, blk);
/* Advance pointer */
m = m_getptr(m, k + blks, &k);
if (m == NULL)
return EINVAL;
i -= blks;
/* Could be done... */
if (i == 0)
break;
}
/* Skip possibly empty mbufs */
if (k == m->m_len) {
for (m = m->m_next; m && m->m_len == 0;
m = m->m_next)
;
k = 0;
}
/* Sanity check */
if (m == NULL)
return EINVAL;
/*
* Warning: idat may point to garbage here, but
* we only use it in the while() loop, only if
* there are indeed enough data.
*/
idat = mtod(m, unsigned char *) + k;
while (m->m_len >= k + blks && i > 0) {
if (exf->reinit) {
if (crd->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(sw->sw_kschedule,
idat);
} else {
exf->decrypt(sw->sw_kschedule,
idat);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
exf->encrypt(sw->sw_kschedule, idat);
ivp = idat;
} else { /* decrypt */
/*
* Keep encrypted block to be used
* in next block's processing.
*/
if (ivp == iv)
bcopy(idat, piv, blks);
else
bcopy(idat, iv, blks);
exf->decrypt(sw->sw_kschedule, idat);
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
if (ivp == iv)
bcopy(piv, iv, blks);
else
ivp = iv;
}
idat += blks;
k += blks;
i -= blks;
}
}
return 0; /* Done with mbuf encryption/decryption */
} else if (flags & CRYPTO_F_IOV) {
struct uio *uio = (struct uio *) buf;
struct iovec *iov;
/* Find beginning of data */
iov = cuio_getptr(uio, crd->crd_skip, &k);
if (iov == NULL)
return EINVAL;
i = crd->crd_len;
while (i > 0) {
/*
* If there's insufficient data at the end of
* an iovec, we have to do some copying.
*/
if (iov->iov_len < k + blks && iov->iov_len != k) {
cuio_copydata(uio, k, blks, blk);
/* Actual encryption/decryption */
if (exf->reinit) {
if (crd->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(sw->sw_kschedule,
blk);
} else {
exf->decrypt(sw->sw_kschedule,
blk);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
exf->encrypt(sw->sw_kschedule, blk);
/*
* Keep encrypted block for XOR'ing
* with next block
*/
bcopy(blk, iv, blks);
ivp = iv;
} else { /* decrypt */
/*
* Keep encrypted block for XOR'ing
* with next block
*/
if (ivp == iv)
bcopy(blk, piv, blks);
else
bcopy(blk, iv, blks);
exf->decrypt(sw->sw_kschedule, blk);
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
if (ivp == iv)
bcopy(piv, iv, blks);
else
ivp = iv;
}
/* Copy back decrypted block */
cuio_copyback(uio, k, blks, blk);
/* Advance pointer */
iov = cuio_getptr(uio, k + blks, &k);
if (iov == NULL)
return EINVAL;
i -= blks;
/* Could be done... */
if (i == 0)
break;
}
/*
* Warning: idat may point to garbage here, but
* we only use it in the while() loop, only if
* there are indeed enough data.
*/
idat = (char *)iov->iov_base + k;
while (iov->iov_len >= k + blks && i > 0) {
if (exf->reinit) {
if (crd->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(sw->sw_kschedule,
idat);
} else {
exf->decrypt(sw->sw_kschedule,
idat);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
exf->encrypt(sw->sw_kschedule, idat);
ivp = idat;
} else { /* decrypt */
/*
* Keep encrypted block to be used
* in next block's processing.
*/
if (ivp == iv)
bcopy(idat, piv, blks);
else
bcopy(idat, iv, blks);
exf->decrypt(sw->sw_kschedule, idat);
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
if (ivp == iv)
bcopy(piv, iv, blks);
else
ivp = iv;
}
idat += blks;
k += blks;
i -= blks;
}
if (k == iov->iov_len) {
iov++;
k = 0;
}
}
return 0; /* Done with iovec encryption/decryption */
} else { /* contiguous buffer */
if (exf->reinit) {
for (i = crd->crd_skip;
i < crd->crd_skip + crd->crd_len; i += blks) {
if (crd->crd_flags & CRD_F_ENCRYPT)
exf->encrypt(sw->sw_kschedule, buf + i);
else
exf->decrypt(sw->sw_kschedule, buf + i);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
for (i = crd->crd_skip;
i < crd->crd_skip + crd->crd_len; i += blks) {
/* XOR with the IV/previous block, as appropriate. */
if (i == crd->crd_skip)
for (k = 0; k < blks; k++)
buf[i + k] ^= ivp[k];
else
for (k = 0; k < blks; k++)
buf[i + k] ^= buf[i + k - blks];
exf->encrypt(sw->sw_kschedule, buf + i);
}
} else { /* Decrypt */
/*
* Start at the end, so we don't need to keep the encrypted
* block as the IV for the next block.
*/
for (i = crd->crd_skip + crd->crd_len - blks;
i >= crd->crd_skip; i -= blks) {
exf->decrypt(sw->sw_kschedule, buf + i);
/* XOR with the IV/previous block, as appropriate */
if (i == crd->crd_skip)
for (k = 0; k < blks; k++)
buf[i + k] ^= ivp[k];
else
for (k = 0; k < blks; k++)
buf[i + k] ^= buf[i + k - blks];
}
}
return 0; /* Done with contiguous buffer encryption/decryption */
iov = iovlcl;
iovcnt = nitems(iovlcl);
iovalloc = 0;
uio = &uiolcl;
if ((flags & CRYPTO_F_IMBUF) != 0) {
crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
&iovalloc);
uio->uio_iov = iov;
uio->uio_iovcnt = iovcnt;
} else if ((flags & CRYPTO_F_IOV) != 0)
uio = (struct uio *)buf;
else {
iov[0].iov_base = buf;
iov[0].iov_len = crd->crd_skip + crd->crd_len;
uio->uio_iov = iov;
uio->uio_iovcnt = 1;
}
/* Unreachable */
return EINVAL;
ivp = iv;
if (exf->reinit) {
/*
* xforms that provide a reinit method perform all IV
* handling themselves.
*/
exf->reinit(sw->sw_kschedule, iv);
}
count = crd->crd_skip;
ind = cuio_getptr(uio, count, &k);
if (ind == -1) {
error = EINVAL;
goto out;
}
i = crd->crd_len;
while (i > 0) {
/*
* If there's insufficient data at the end of
* an iovec, we have to do some copying.
*/
if (uio->uio_iov[ind].iov_len < k + blks &&
uio->uio_iov[ind].iov_len != k) {
cuio_copydata(uio, count, blks, blk);
/* Actual encryption/decryption */
if (exf->reinit) {
if (crd->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(sw->sw_kschedule,
blk);
} else {
exf->decrypt(sw->sw_kschedule,
blk);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
exf->encrypt(sw->sw_kschedule, blk);
/*
* Keep encrypted block for XOR'ing
* with next block
*/
bcopy(blk, iv, blks);
ivp = iv;
} else { /* decrypt */
/*
* Keep encrypted block for XOR'ing
* with next block
*/
nivp = (ivp == iv) ? iv2 : iv;
bcopy(blk, nivp, blks);
exf->decrypt(sw->sw_kschedule, blk);
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
ivp = nivp;
}
/* Copy back decrypted block */
cuio_copyback(uio, count, blks, blk);
count += blks;
/* Advance pointer */
ind = cuio_getptr(uio, count, &k);
if (ind == -1) {
error = EINVAL;
goto out;
}
i -= blks;
/* Could be done... */
if (i == 0)
break;
}
/*
* Warning: idat may point to garbage here, but
* we only use it in the while() loop, only if
* there are indeed enough data.
*/
idat = (char *)uio->uio_iov[ind].iov_base + k;
while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
if (exf->reinit) {
if (crd->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(sw->sw_kschedule,
idat);
} else {
exf->decrypt(sw->sw_kschedule,
idat);
}
} else if (crd->crd_flags & CRD_F_ENCRYPT) {
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
exf->encrypt(sw->sw_kschedule, idat);
ivp = idat;
} else { /* decrypt */
/*
* Keep encrypted block to be used
* in next block's processing.
*/
nivp = (ivp == iv) ? iv2 : iv;
bcopy(idat, nivp, blks);
exf->decrypt(sw->sw_kschedule, idat);
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
ivp = nivp;
}
idat += blks;
count += blks;
k += blks;
i -= blks;
}
/*
* Advance to the next iov if the end of the current iov
* is aligned with the end of a cipher block.
* Note that the code is equivalent to calling:
* ind = cuio_getptr(uio, count, &k);
*/
if (i > 0 && k == uio->uio_iov[ind].iov_len) {
k = 0;
ind++;
if (ind >= uio->uio_iovcnt) {
error = EINVAL;
goto out;
}
}
}
out:
if (iovalloc)
free(iov, M_CRYPTO_DATA);
return (error);
}
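The rewrite above also retires the old piv copy-back juggling: in-place CBC decryption needs block i's ciphertext as the chaining value for block i+1, but decrypt() overwrites it, so the loop now ping-pongs between iv and iv2 via nivp. A minimal standalone sketch of the idiom, with a no-op cipher standing in for exf->decrypt (an illustration, not the committed code):
#include <string.h>

#define	BLKSZ	16

static void
toy_decrypt(unsigned char *blk)
{
	/* stand-in for exf->decrypt */
}

static void
cbc_decrypt_inplace(unsigned char *data, size_t len, const unsigned char *iv0)
{
	unsigned char iv[BLKSZ], iv2[BLKSZ], *ivp, *nivp;
	size_t off;
	int j;

	memcpy(iv, iv0, BLKSZ);
	ivp = iv;
	for (off = 0; off + BLKSZ <= len; off += BLKSZ) {
		/* Save this ciphertext block in whichever buffer is free. */
		nivp = (ivp == iv) ? iv2 : iv;
		memcpy(nivp, data + off, BLKSZ);
		toy_decrypt(data + off);
		/* XOR with the previous ciphertext block (or the IV). */
		for (j = 0; j < BLKSZ; j++)
			data[off + j] ^= ivp[j];
		/* Swap roles for the next block. */
		ivp = nivp;
	}
}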
static void
@ -583,6 +447,181 @@ swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
return 0;
}
CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */
/*
* Apply a combined encryption-authentication transformation
*/
static int
swcr_authenc(struct cryptop *crp)
{
uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
u_char *blk = (u_char *)blkbuf;
u_char aalg[AALG_MAX_RESULT_LEN];
u_char uaalg[AALG_MAX_RESULT_LEN];
u_char iv[EALG_MAX_BLOCK_LEN];
union authctx ctx;
struct cryptodesc *crd, *crda = NULL, *crde = NULL;
struct swcr_data *sw, *swa, *swe = NULL;
struct auth_hash *axf = NULL;
struct enc_xform *exf = NULL;
caddr_t buf = (caddr_t)crp->crp_buf;
uint32_t *blkp;
int aadlen, blksz, i, ivlen, len, iskip, oskip, r;
ivlen = blksz = iskip = oskip = 0;
for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
sw && sw->sw_alg != crd->crd_alg;
sw = sw->sw_next)
;
if (sw == NULL)
return (EINVAL);
switch (sw->sw_alg) {
case CRYPTO_AES_NIST_GCM_16:
case CRYPTO_AES_NIST_GMAC:
swe = sw;
crde = crd;
exf = swe->sw_exf;
ivlen = 12;
break;
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
swa = sw;
crda = crd;
axf = swa->sw_axf;
if (swa->sw_ictx == 0)
return (EINVAL);
bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
blksz = axf->blocksize;
break;
default:
return (EINVAL);
}
}
if (crde == NULL || crda == NULL)
return (EINVAL);
if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
(crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
return (EINVAL);
if (crde->crd_klen != crda->crd_klen)
return (EINVAL);
/* Initialize the IV */
if (crde->crd_flags & CRD_F_ENCRYPT) {
/* IV explicitly provided ? */
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
bcopy(crde->crd_iv, iv, ivlen);
else
arc4rand(iv, ivlen, 0);
/* Do we need to write the IV */
if (!(crde->crd_flags & CRD_F_IV_PRESENT))
crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
ivlen, iv);
} else { /* Decryption */
/* IV explicitly provided ? */
if (crde->crd_flags & CRD_F_IV_EXPLICIT)
bcopy(crde->crd_iv, iv, ivlen);
else {
/* Get IV off buf */
crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
ivlen, iv);
}
}
/* Supply MAC with IV */
if (axf->Reinit)
axf->Reinit(&ctx, iv, ivlen);
/* Supply MAC with AAD */
aadlen = crda->crd_len;
for (i = iskip; i < crda->crd_len; i += blksz) {
len = MIN(crda->crd_len - i, blksz - oskip);
crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
blk + oskip);
bzero(blk + len + oskip, blksz - len - oskip);
axf->Update(&ctx, blk, blksz);
oskip = 0; /* reset initial output offset */
}
if (exf->reinit)
exf->reinit(swe->sw_kschedule, iv);
/* Do encryption/decryption with MAC */
for (i = 0; i < crde->crd_len; i += blksz) {
len = MIN(crde->crd_len - i, blksz);
if (len < blksz)
bzero(blk, blksz);
crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
blk);
if (crde->crd_flags & CRD_F_ENCRYPT) {
exf->encrypt(swe->sw_kschedule, blk);
axf->Update(&ctx, blk, len);
crypto_copyback(crp->crp_flags, buf,
crde->crd_skip + i, len, blk);
} else {
axf->Update(&ctx, blk, len);
}
}
/* Do any required special finalization */
switch (crda->crd_alg) {
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
/* length block */
bzero(blk, blksz);
blkp = (uint32_t *)blk + 1;
*blkp = htobe32(aadlen * 8);
blkp = (uint32_t *)blk + 3;
*blkp = htobe32(crde->crd_len * 8);
axf->Update(&ctx, blk, blksz);
break;
}
/* Finalize MAC */
axf->Final(aalg, &ctx);
/* Validate tag */
if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
axf->hashsize, uaalg);
r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
if (r == 0) {
/* tag matches, decrypt data */
for (i = 0; i < crde->crd_len; i += blksz) {
len = MIN(crde->crd_len - i, blksz);
if (len < blksz)
bzero(blk, blksz);
crypto_copydata(crp->crp_flags, buf,
crde->crd_skip + i, len, blk);
if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
exf->decrypt(swe->sw_kschedule, blk);
}
crypto_copyback(crp->crp_flags, buf,
crde->crd_skip + i, len, blk);
}
} else
return (EBADMSG);
} else {
/* Inject the authentication data */
crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
axf->hashsize, aalg);
}
return (0);
}
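The "length block" assembled in the GMAC finalization above is GHASH's trailing len(A) || len(C) block: two 64-bit big-endian bit counts. Storing only the second and fourth 32-bit words suffices because the CTASSERTs above bound both lengths well below 2^32 bits. An equivalent userspace construction (an illustration; htonl() stands in for htobe32()):
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void
ghash_lenblock(uint8_t blk[16], uint32_t aadbits, uint32_t ctbits)
{
	uint32_t w;

	memset(blk, 0, 16);
	w = htonl(aadbits);
	memcpy(blk + 4, &w, 4);		/* low half of 64-bit BE len(A) */
	w = htonl(ctbits);
	memcpy(blk + 12, &w, 4);	/* low half of 64-bit BE len(C) */
}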
/*
* Apply a compression/decompression algorithm
*/
@ -747,6 +786,16 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
case CRYPTO_AES_XTS:
txf = &enc_xform_aes_xts;
goto enccommon;
case CRYPTO_AES_ICM:
txf = &enc_xform_aes_icm;
goto enccommon;
case CRYPTO_AES_NIST_GCM_16:
txf = &enc_xform_aes_nist_gcm;
goto enccommon;
case CRYPTO_AES_NIST_GMAC:
txf = &enc_xform_aes_nist_gmac;
(*swd)->sw_exf = txf;
break;
case CRYPTO_CAMELLIA_CBC:
txf = &enc_xform_camellia;
goto enccommon;
@ -865,6 +914,31 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
(*swd)->sw_axf = axf;
break;
#endif
case CRYPTO_AES_128_NIST_GMAC:
axf = &auth_hash_nist_gmac_aes_128;
goto auth4common;
case CRYPTO_AES_192_NIST_GMAC:
axf = &auth_hash_nist_gmac_aes_192;
goto auth4common;
case CRYPTO_AES_256_NIST_GMAC:
axf = &auth_hash_nist_gmac_aes_256;
auth4common:
(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
M_NOWAIT);
if ((*swd)->sw_ictx == NULL) {
swcr_freesession_locked(dev, i);
rw_runlock(&swcr_sessions_lock);
return ENOBUFS;
}
axf->Init((*swd)->sw_ictx);
axf->Setkey((*swd)->sw_ictx, cri->cri_key,
cri->cri_klen / 8);
(*swd)->sw_axf = axf;
break;
case CRYPTO_DEFLATE_COMP:
cxf = &comp_algo_deflate;
(*swd)->sw_cxf = cxf;
@ -925,6 +999,9 @@ swcr_freesession_locked(device_t dev, u_int64_t tid)
case CRYPTO_SKIPJACK_CBC:
case CRYPTO_RIJNDAEL128_CBC:
case CRYPTO_AES_XTS:
case CRYPTO_AES_ICM:
case CRYPTO_AES_NIST_GCM_16:
case CRYPTO_AES_NIST_GMAC:
case CRYPTO_CAMELLIA_CBC:
case CRYPTO_NULL_CBC:
txf = swd->sw_exf;
@ -1050,6 +1127,7 @@ swcr_process(device_t dev, struct cryptop *crp, int hint)
case CRYPTO_SKIPJACK_CBC:
case CRYPTO_RIJNDAEL128_CBC:
case CRYPTO_AES_XTS:
case CRYPTO_AES_ICM:
case CRYPTO_CAMELLIA_CBC:
if ((crp->crp_etype = swcr_encdec(crd, sw,
crp->crp_buf, crp->crp_flags)) != 0)
@ -1074,6 +1152,14 @@ swcr_process(device_t dev, struct cryptop *crp, int hint)
goto done;
break;
case CRYPTO_AES_NIST_GCM_16:
case CRYPTO_AES_NIST_GMAC:
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
crp->crp_etype = swcr_authenc(crp);
goto done;
case CRYPTO_DEFLATE_COMP:
if ((crp->crp_etype = swcr_compdec(crd, sw,
crp->crp_buf, crp->crp_flags)) != 0)
@ -1144,6 +1230,12 @@ swcr_attach(device_t dev)
REGISTER(CRYPTO_SHA1);
REGISTER(CRYPTO_RIJNDAEL128_CBC);
REGISTER(CRYPTO_AES_XTS);
REGISTER(CRYPTO_AES_ICM);
REGISTER(CRYPTO_AES_NIST_GCM_16);
REGISTER(CRYPTO_AES_NIST_GMAC);
REGISTER(CRYPTO_AES_128_NIST_GMAC);
REGISTER(CRYPTO_AES_192_NIST_GMAC);
REGISTER(CRYPTO_AES_256_NIST_GMAC);
REGISTER(CRYPTO_CAMELLIA_CBC);
REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER
275
sys/opencrypto/gfmult.c Normal file
View File
@ -0,0 +1,275 @@
/*-
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by John-Mark Gurney under
* the sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#include "gfmult.h"
#define REV_POLY_REDUCT 0xe1 /* 0x87 bit reversed */
/* reverse the bits of a nibble */
static const uint8_t nib_rev[] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf,
};
/* calculate v * 2 */
static inline struct gf128
gf128_mulalpha(struct gf128 v)
{
uint64_t mask;
mask = !!(v.v[1] & 1);
mask = ~(mask - 1);
v.v[1] = (v.v[1] >> 1) | ((v.v[0] & 1) << 63);
v.v[0] = (v.v[0] >> 1) ^ ((mask & REV_POLY_REDUCT) << 56);
return v;
}
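The two-step mask is a branchless conditional: !! collapses the shifted-out bit to 0 or 1, and ~(mask - 1) widens that to all-zeros or all-ones, so REV_POLY_REDUCT is folded in only when needed, without a data-dependent branch. A standalone check of the idiom:
#include <assert.h>
#include <stdint.h>

/* All ones if the low bit of x is set, all zeros otherwise; no branch. */
static uint64_t
all_ones_if_lsb(uint64_t x)
{
	uint64_t mask;

	mask = !!(x & 1);	/* 0 or 1 */
	return (~(mask - 1));	/* 1 -> ~0 = all ones; 0 -> ~0xff..ff = 0 */
}

int
main(void)
{
	assert(all_ones_if_lsb(3) == ~(uint64_t)0);
	assert(all_ones_if_lsb(2) == 0);
	return (0);
}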
/*
* Generate a table for 0-16 * h. Store the results in the table w/ indexes
* bit reversed, and the words striped across the values.
*/
void
gf128_genmultable(struct gf128 h, struct gf128table *t)
{
struct gf128 tbl[16];
int i;
tbl[0] = MAKE_GF128(0, 0);
tbl[1] = h;
for (i = 2; i < 16; i += 2) {
tbl[i] = gf128_mulalpha(tbl[i / 2]);
tbl[i + 1] = gf128_add(tbl[i], h);
}
for (i = 0; i < 16; i++) {
t->a[nib_rev[i]] = tbl[i].v[0] >> 32;
t->b[nib_rev[i]] = tbl[i].v[0];
t->c[nib_rev[i]] = tbl[i].v[1] >> 32;
t->d[nib_rev[i]] = tbl[i].v[1];
}
}
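The loop needs only one doubling plus one addition per pair of entries: with XOR as field addition, the invariant is tbl[2i] = x * tbl[i] and tbl[2i+1] = tbl[2i] ^ h, which by induction yields tbl[n] = n * h for every nibble n in [0, 15], reading n as a degree-3 polynomial over GF(2).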
/*
* Generate tables containing h, h^2, h^3 and h^4, starting at 0.
*/
void
gf128_genmultable4(struct gf128 h, struct gf128table4 *t)
{
struct gf128 h2, h3, h4;
gf128_genmultable(h, &t->tbls[0]);
h2 = gf128_mul(h, &t->tbls[0]);
gf128_genmultable(h2, &t->tbls[1]);
h3 = gf128_mul(h, &t->tbls[1]);
gf128_genmultable(h3, &t->tbls[2]);
h4 = gf128_mul(h2, &t->tbls[1]);
gf128_genmultable(h4, &t->tbls[3]);
}
/*
* Read a row from the table.
*/
static inline struct gf128
readrow(struct gf128table *tbl, unsigned bits)
{
struct gf128 r;
bits = bits % 16;
r.v[0] = ((uint64_t)tbl->a[bits] << 32) | tbl->b[bits];
r.v[1] = ((uint64_t)tbl->c[bits] << 32) | tbl->d[bits];
return r;
}
/*
* These are the reduction values.  Since we are dealing with the bit
* reversed version, the values need to be bit reversed, AND the indexes
* are also bit reversed to make lookups quicker.
*/
static uint16_t reduction[] = {
0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
};
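These constants are not magic: each entry is the GF(2)-linear combination of 0xe100 (REV_POLY_REDUCT shifted into the top byte) shifted right once per set bit, stored under a bit-reversed index. A small userspace self-check, duplicating the two tables on purpose so it compiles standalone:
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	static const uint8_t nr[16] = {
		0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
		0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf,
	};
	static const uint16_t red[16] = {
		0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
		0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
	};
	uint16_t v;
	unsigned b, i;

	for (b = 0; b < 16; b++) {
		v = 0;
		for (i = 0; i < 4; i++)
			if (b & (1u << i))
				v ^= (uint16_t)(0xe100 >> i);
		assert(red[nr[b]] == v);	/* values at bit-reversed indexes */
	}
	return (0);
}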
/*
* Calculate:
* (x*2^4 + word[3,0]*h) *
* 2^4 + word[7,4]*h) *
* ...
* 2^4 + word[63,60]*h
*/
static struct gf128
gfmultword(uint64_t word, struct gf128 x, struct gf128table *tbl)
{
struct gf128 row;
unsigned bits;
unsigned redbits;
int i;
for (i = 0; i < 64; i += 4) {
bits = word % 16;
/* fetch row */
row = readrow(tbl, bits);
/* x * 2^4 */
redbits = x.v[1] % 16;
x.v[1] = (x.v[1] >> 4) | (x.v[0] % 16) << 60;
x.v[0] >>= 4;
x.v[0] ^= (uint64_t)reduction[redbits] << (64 - 16);
word >>= 4;
x = gf128_add(x, row);
}
return x;
}
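Spelled out, each pass is one step of a nibble-wise Horner evaluation: x <- x*2^4 ^ (w mod 16)*h, then w <- w >> 4, repeated sixteen times per 64-bit word; the multiply by 2^4 in the bit-reversed representation is exactly the 4-bit right shift plus the table-driven fold of the shifted-out nibble shown above.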
/*
* Calculate
* (x*2^4 + worda[3,0]*h^4+wordb[3,0]*h^3+...+wordd[3,0]*h) *
* ...
* 2^4 + worda[63,60]*h^4+ ... + wordd[63,60]*h
*
* Passing/returning struct is .5% faster than passing in via pointer on
* amd64.
*/
static struct gf128
gfmultword4(uint64_t worda, uint64_t wordb, uint64_t wordc, uint64_t wordd,
struct gf128 x, struct gf128table4 *tbl)
{
struct gf128 rowa, rowb, rowc, rowd;
unsigned bitsa, bitsb, bitsc, bitsd;
unsigned redbits;
int i;
/*
* XXX - nibble reverse words to save a shift? probably not as
* nibble reverse would take 20 ops (5 * 4) versus 16
*/
for (i = 0; i < 64; i += 4) {
bitsa = worda % 16;
bitsb = wordb % 16;
bitsc = wordc % 16;
bitsd = wordd % 16;
/* fetch row */
rowa = readrow(&tbl->tbls[3], bitsa);
rowb = readrow(&tbl->tbls[2], bitsb);
rowc = readrow(&tbl->tbls[1], bitsc);
rowd = readrow(&tbl->tbls[0], bitsd);
/* x * 2^4 */
redbits = x.v[1] % 16;
x.v[1] = (x.v[1] >> 4) | (x.v[0] % 16) << 60;
x.v[0] >>= 4;
x.v[0] ^= (uint64_t)reduction[redbits] << (64 - 16);
worda >>= 4;
wordb >>= 4;
wordc >>= 4;
wordd >>= 4;
x = gf128_add(x, gf128_add(rowa, gf128_add(rowb,
gf128_add(rowc, rowd))));
}
return x;
}
struct gf128
gf128_mul(struct gf128 v, struct gf128table *tbl)
{
struct gf128 ret;
ret = MAKE_GF128(0, 0);
ret = gfmultword(v.v[1], ret, tbl);
ret = gfmultword(v.v[0], ret, tbl);
return ret;
}
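A hypothetical userspace driver for the API so far (assuming gfmult.c is built outside the kernel, which the portability #ifdefs in gfmult.h below already anticipate); the operand values are arbitrary:
#include <stdint.h>
#include <stdio.h>

#include "gfmult.h"

int
main(void)
{
	struct gf128table tbl;
	struct gf128 h, x, r;

	/* Arbitrary operands, already in the bit-reversed representation. */
	h = MAKE_GF128(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
	x = MAKE_GF128(0x2ULL, 0x3ULL);

	gf128_genmultable(h, &tbl);
	r = gf128_mul(x, &tbl);		/* r = x * h in GF(2^128) */

	printf("%016jx%016jx\n", (uintmax_t)r.v[0], (uintmax_t)r.v[1]);
	return (0);
}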
/*
* Calculate a*h^4 + b*h^3 + c*h^2 + d*h, or:
* (((a*h+b)*h+c)*h+d)*h
*/
struct gf128
gf128_mul4(struct gf128 a, struct gf128 b, struct gf128 c, struct gf128 d,
struct gf128table4 *tbl)
{
struct gf128 tmp;
tmp = MAKE_GF128(0, 0);
tmp = gfmultword4(a.v[1], b.v[1], c.v[1], d.v[1], tmp, tbl);
tmp = gfmultword4(a.v[0], b.v[0], c.v[0], d.v[0], tmp, tbl);
return tmp;
}
/*
* a = data[0..15] + r
* b = data[16..31]
* c = data[32..47]
* d = data[48..63]
*
* Calculate a*h^4 + b*h^3 + c*h^2 + d*h, or:
* (((a*h+b)*h+c)*h+d)*h
*/
struct gf128
gf128_mul4b(struct gf128 r, const uint8_t *v, struct gf128table4 *tbl)
{
struct gf128 a, b, c, d;
struct gf128 tmp;
tmp = MAKE_GF128(0, 0);
a = gf128_add(r, gf128_read(&v[0*16]));
b = gf128_read(&v[1*16]);
c = gf128_read(&v[2*16]);
d = gf128_read(&v[3*16]);
tmp = gfmultword4(a.v[1], b.v[1], c.v[1], d.v[1], tmp, tbl);
tmp = gfmultword4(a.v[0], b.v[0], c.v[0], d.v[0], tmp, tbl);
return tmp;
}
128
sys/opencrypto/gfmult.h Normal file
View File
@ -0,0 +1,128 @@
/*-
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by John-Mark Gurney under
* the sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _GFMULT_H_
#define _GFMULT_H_
#ifdef __APPLE__
#define __aligned(x) __attribute__((__aligned__(x)))
#define be64dec(buf) __builtin_bswap64(*(uint64_t *)buf)
#define be64enc(buf, x) (*(uint64_t *)buf = __builtin_bswap64(x))
#else
#include <sys/endian.h>
#endif
#ifdef _KERNEL
#include <sys/types.h>
#else
#include <stdint.h>
#include <strings.h>
#endif
#define REQ_ALIGN (16 * 4)
/*
* The rows are striped across cache lines. Note that the indexes
* are bit reversed to make accesses quicker.
*/
struct gf128table {
uint32_t a[16] __aligned(REQ_ALIGN); /* bits 0 - 31 */
uint32_t b[16] __aligned(REQ_ALIGN); /* bits 63 - 32 */
uint32_t c[16] __aligned(REQ_ALIGN); /* bits 95 - 64 */
uint32_t d[16] __aligned(REQ_ALIGN); /* bits 127 - 96 */
} __aligned(REQ_ALIGN);
/*
* A set of tables that contain h, h^2, h^3, h^4. To be used w/ gf128_mul4.
*/
struct gf128table4 {
struct gf128table tbls[4];
};
/*
* GCM per spec is bit reversed in memory. So byte 0 is really bit reversed
* and contains bits 0-7. We can deal w/ this by using right shifts and
* related math instead of having to bit reverse everything. This means that
* the low bits are in v[0] (bits 0-63) and reverse order, while the high
* bits are in v[1] (bits 64-127) and reverse order. The high bit of v[0] is
* bit 0, and the low bit of v[1] is bit 127.
*/
struct gf128 {
uint64_t v[2];
};
/* Note that we don't bit reverse in MAKE_GF128. */
#define MAKE_GF128(a, b) ((struct gf128){.v = { (a), (b) } })
#define GF128_EQ(a, b) ((((a).v[0] ^ (b).v[0]) | \
((a).v[1] ^ (b).v[1])) == 0)
static inline struct gf128
gf128_read(const uint8_t *buf)
{
struct gf128 r;
r.v[0] = be64dec(buf);
buf += sizeof(uint64_t);
r.v[1] = be64dec(buf);
return r;
}
static inline void
gf128_write(struct gf128 v, uint8_t *buf)
{
uint64_t tmp;
be64enc(buf, v.v[0]);
buf += sizeof tmp;
be64enc(buf, v.v[1]);
}
static inline struct gf128 __pure /* XXX - __pure2 instead */
gf128_add(struct gf128 a, struct gf128 b)
{
a.v[0] ^= b.v[0];
a.v[1] ^= b.v[1];
return a;
}
void gf128_genmultable(struct gf128 h, struct gf128table *t);
void gf128_genmultable4(struct gf128 h, struct gf128table4 *t);
struct gf128 gf128_mul(struct gf128 v, struct gf128table *tbl);
struct gf128 gf128_mul4(struct gf128 a, struct gf128 b, struct gf128 c,
struct gf128 d, struct gf128table4 *tbl);
struct gf128 gf128_mul4b(struct gf128 r, const uint8_t *v,
struct gf128table4 *tbl);
#endif /* _GFMULT_H_ */
119
sys/opencrypto/gmac.c Normal file
View File
@ -0,0 +1,119 @@
/*-
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by John-Mark Gurney under
* the sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#include <sys/types.h>
#include <sys/systm.h>
#include <opencrypto/gfmult.h>
#include <opencrypto/gmac.h>
void
AES_GMAC_Init(struct aes_gmac_ctx *agc)
{
bzero(agc, sizeof *agc);
}
void
AES_GMAC_Setkey(struct aes_gmac_ctx *agc, const uint8_t *key, uint16_t klen)
{
const uint8_t zeros[GMAC_BLOCK_LEN] = {};
struct gf128 h;
uint8_t hbuf[GMAC_BLOCK_LEN];
agc->rounds = rijndaelKeySetupEnc(agc->keysched, key, klen * 8);
rijndaelEncrypt(agc->keysched, agc->rounds, zeros, hbuf);
h = gf128_read(hbuf);
gf128_genmultable4(h, &agc->ghashtbl);
explicit_bzero(&h, sizeof h);
explicit_bzero(hbuf, sizeof hbuf);
}
void
AES_GMAC_Reinit(struct aes_gmac_ctx *agc, const uint8_t *iv, uint16_t ivlen)
{
KASSERT(ivlen <= sizeof agc->counter, ("passed ivlen too large!"));
bcopy(iv, agc->counter, ivlen);
}
int
AES_GMAC_Update(struct aes_gmac_ctx *agc, const uint8_t *data, uint16_t len)
{
struct gf128 v;
uint8_t buf[GMAC_BLOCK_LEN] = {};
int i;
v = agc->hash;
while (len > 0) {
if (len >= 4*GMAC_BLOCK_LEN) {
i = 4*GMAC_BLOCK_LEN;
v = gf128_mul4b(v, data, &agc->ghashtbl);
} else if (len >= GMAC_BLOCK_LEN) {
i = GMAC_BLOCK_LEN;
v = gf128_add(v, gf128_read(data));
v = gf128_mul(v, &agc->ghashtbl.tbls[0]);
} else {
i = len;
bcopy(data, buf, i);
v = gf128_add(v, gf128_read(&buf[0]));
v = gf128_mul(v, &agc->ghashtbl.tbls[0]);
explicit_bzero(buf, sizeof buf);
}
len -= i;
data += i;
}
agc->hash = v;
explicit_bzero(&v, sizeof v);
return (0);
}
void
AES_GMAC_Final(uint8_t digest[GMAC_DIGEST_LEN], struct aes_gmac_ctx *agc)
{
uint8_t enccntr[GMAC_BLOCK_LEN];
struct gf128 a;
/* XXX - zero additional bytes? */
agc->counter[GMAC_BLOCK_LEN - 1] = 1;
rijndaelEncrypt(agc->keysched, agc->rounds, agc->counter, enccntr);
a = gf128_add(agc->hash, gf128_read(enccntr));
gf128_write(a, digest);
explicit_bzero(enccntr, sizeof enccntr);
}
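Taken together, the five entry points compute tag = GHASH_H(input) ^ E_K(IV || 1), the GMAC construction, and are what swcr_authenc() drives through the auth_hash vtable. A minimal direct-use sketch for AES-128 (untested; assumes a 96-bit IV and that the caller also feeds the GHASH length block, as swcr_authenc() does):
#include <opencrypto/gmac.h>

static void
gmac_tag(const uint8_t key[16], const uint8_t iv[12], const uint8_t *data,
    uint16_t len, uint8_t tag[GMAC_DIGEST_LEN])
{
	struct aes_gmac_ctx agc;

	AES_GMAC_Init(&agc);
	AES_GMAC_Setkey(&agc, key, 16);		/* key length in bytes */
	AES_GMAC_Reinit(&agc, iv, 12);		/* 96-bit IV */
	AES_GMAC_Update(&agc, data, len);	/* short tail is zero padded */
	AES_GMAC_Final(tag, &agc);
}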
55
sys/opencrypto/gmac.h Normal file
View File
@ -0,0 +1,55 @@
/*-
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by John-Mark Gurney under
* the sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _GMAC_H_
#include "gfmult.h"
#include <crypto/rijndael/rijndael.h>
#define GMAC_BLOCK_LEN 16
#define GMAC_DIGEST_LEN 16
struct aes_gmac_ctx {
struct gf128table4 ghashtbl;
struct gf128 hash;
uint32_t keysched[4*(RIJNDAEL_MAXNR + 1)];
uint8_t counter[GMAC_BLOCK_LEN];
int rounds;
};
void AES_GMAC_Init(struct aes_gmac_ctx *);
void AES_GMAC_Setkey(struct aes_gmac_ctx *, const uint8_t *, uint16_t);
void AES_GMAC_Reinit(struct aes_gmac_ctx *, const uint8_t *, uint16_t);
int AES_GMAC_Update(struct aes_gmac_ctx *, const uint8_t *, uint16_t);
void AES_GMAC_Final(uint8_t [GMAC_DIGEST_LEN], struct aes_gmac_ctx *);
#endif /* _GMAC_H_ */
View File
@ -24,6 +24,12 @@
* Copyright (C) 2001, Angelos D. Keromytis.
*
* Copyright (C) 2008, Damien Miller
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Permission to use, copy, and modify this software with or without fee
* is hereby granted, provided that this entire notice is included in
@ -76,6 +82,7 @@ static int blf_setkey(u_int8_t **, u_int8_t *, int);
static int cast5_setkey(u_int8_t **, u_int8_t *, int);
static int skipjack_setkey(u_int8_t **, u_int8_t *, int);
static int rijndael128_setkey(u_int8_t **, u_int8_t *, int);
static int aes_icm_setkey(u_int8_t **, u_int8_t *, int);
static int aes_xts_setkey(u_int8_t **, u_int8_t *, int);
static int cml_setkey(u_int8_t **, u_int8_t *, int);
@ -99,6 +106,8 @@ static void rijndael128_decrypt(caddr_t, u_int8_t *);
static void aes_xts_decrypt(caddr_t, u_int8_t *);
static void cml_decrypt(caddr_t, u_int8_t *);
static void aes_icm_crypt(caddr_t, u_int8_t *);
static void null_zerokey(u_int8_t **);
static void des1_zerokey(u_int8_t **);
static void des3_zerokey(u_int8_t **);
@ -106,103 +115,145 @@ static void blf_zerokey(u_int8_t **);
static void cast5_zerokey(u_int8_t **);
static void skipjack_zerokey(u_int8_t **);
static void rijndael128_zerokey(u_int8_t **);
static void aes_icm_zerokey(u_int8_t **);
static void aes_xts_zerokey(u_int8_t **);
static void cml_zerokey(u_int8_t **);
static void aes_icm_reinit(caddr_t, u_int8_t *);
static void aes_xts_reinit(caddr_t, u_int8_t *);
static void aes_gcm_reinit(caddr_t, u_int8_t *);
static void null_init(void *);
static int null_update(void *, u_int8_t *, u_int16_t);
static void null_reinit(void *ctx, const u_int8_t *buf, u_int16_t len);
static int null_update(void *, const u_int8_t *, u_int16_t);
static void null_final(u_int8_t *, void *);
static int MD5Update_int(void *, u_int8_t *, u_int16_t);
static int MD5Update_int(void *, const u_int8_t *, u_int16_t);
static void SHA1Init_int(void *);
static int SHA1Update_int(void *, u_int8_t *, u_int16_t);
static int SHA1Update_int(void *, const u_int8_t *, u_int16_t);
static void SHA1Final_int(u_int8_t *, void *);
static int RMD160Update_int(void *, u_int8_t *, u_int16_t);
static int SHA256Update_int(void *, u_int8_t *, u_int16_t);
static int SHA384Update_int(void *, u_int8_t *, u_int16_t);
static int SHA512Update_int(void *, u_int8_t *, u_int16_t);
static int RMD160Update_int(void *, const u_int8_t *, u_int16_t);
static int SHA256Update_int(void *, const u_int8_t *, u_int16_t);
static int SHA384Update_int(void *, const u_int8_t *, u_int16_t);
static int SHA512Update_int(void *, const u_int8_t *, u_int16_t);
static u_int32_t deflate_compress(u_int8_t *, u_int32_t, u_int8_t **);
static u_int32_t deflate_decompress(u_int8_t *, u_int32_t, u_int8_t **);
#define AESICM_BLOCKSIZE 16
struct aes_icm_ctx {
u_int32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)];
/* ac_block is initialized to IV */
u_int8_t ac_block[AESICM_BLOCKSIZE];
int ac_nr;
};
MALLOC_DEFINE(M_XDATA, "xform", "xform data buffers");
/* Encryption instances */
struct enc_xform enc_xform_null = {
CRYPTO_NULL_CBC, "NULL",
/* NB: blocksize of 4 is to generate a properly aligned ESP header */
NULL_BLOCK_LEN, 0, 256, /* 2048 bits, max key */
NULL_BLOCK_LEN, NULL_BLOCK_LEN, 0, 256, /* 2048 bits, max key */
null_encrypt,
null_decrypt,
null_setkey,
null_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_des = {
CRYPTO_DES_CBC, "DES",
DES_BLOCK_LEN, 8, 8,
DES_BLOCK_LEN, DES_BLOCK_LEN, 8, 8,
des1_encrypt,
des1_decrypt,
des1_setkey,
des1_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_3des = {
CRYPTO_3DES_CBC, "3DES",
DES3_BLOCK_LEN, 24, 24,
DES3_BLOCK_LEN, DES3_BLOCK_LEN, 24, 24,
des3_encrypt,
des3_decrypt,
des3_setkey,
des3_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_blf = {
CRYPTO_BLF_CBC, "Blowfish",
BLOWFISH_BLOCK_LEN, 5, 56 /* 448 bits, max key */,
BLOWFISH_BLOCK_LEN, BLOWFISH_BLOCK_LEN, 5, 56 /* 448 bits, max key */,
blf_encrypt,
blf_decrypt,
blf_setkey,
blf_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_cast5 = {
CRYPTO_CAST_CBC, "CAST-128",
CAST128_BLOCK_LEN, 5, 16,
CAST128_BLOCK_LEN, CAST128_BLOCK_LEN, 5, 16,
cast5_encrypt,
cast5_decrypt,
cast5_setkey,
cast5_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_skipjack = {
CRYPTO_SKIPJACK_CBC, "Skipjack",
SKIPJACK_BLOCK_LEN, 10, 10,
SKIPJACK_BLOCK_LEN, SKIPJACK_BLOCK_LEN, 10, 10,
skipjack_encrypt,
skipjack_decrypt,
skipjack_setkey,
skipjack_decrypt, skipjack_setkey,
skipjack_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_rijndael128 = {
CRYPTO_RIJNDAEL128_CBC, "Rijndael-128/AES",
RIJNDAEL128_BLOCK_LEN, 8, 32,
RIJNDAEL128_BLOCK_LEN, RIJNDAEL128_BLOCK_LEN, 16, 32,
rijndael128_encrypt,
rijndael128_decrypt,
rijndael128_setkey,
rijndael128_zerokey,
NULL
NULL,
};
struct enc_xform enc_xform_aes_icm = {
CRYPTO_AES_ICM, "AES-ICM",
RIJNDAEL128_BLOCK_LEN, RIJNDAEL128_BLOCK_LEN, 16, 32,
aes_icm_crypt,
aes_icm_crypt,
aes_icm_setkey,
rijndael128_zerokey,
aes_icm_reinit,
};
struct enc_xform enc_xform_aes_nist_gcm = {
CRYPTO_AES_NIST_GCM_16, "AES-GCM",
1, 12, 16, 32,
aes_icm_crypt,
aes_icm_crypt,
aes_icm_setkey,
aes_icm_zerokey,
aes_gcm_reinit,
};
struct enc_xform enc_xform_aes_nist_gmac = {
CRYPTO_AES_NIST_GMAC, "AES-GMAC",
1, 12, 16, 32,
NULL,
NULL,
NULL,
NULL,
NULL,
};
struct enc_xform enc_xform_aes_xts = {
CRYPTO_AES_XTS, "AES-XTS",
RIJNDAEL128_BLOCK_LEN, 32, 64,
RIJNDAEL128_BLOCK_LEN, 8, 32, 64,
aes_xts_encrypt,
aes_xts_decrypt,
aes_xts_setkey,
@ -212,85 +263,115 @@ struct enc_xform enc_xform_aes_xts = {
struct enc_xform enc_xform_arc4 = {
CRYPTO_ARC4, "ARC4",
1, 1, 32,
1, 1, 1, 32,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL
};
struct enc_xform enc_xform_camellia = {
CRYPTO_CAMELLIA_CBC, "Camellia",
CAMELLIA_BLOCK_LEN, 8, 32,
CAMELLIA_BLOCK_LEN, CAMELLIA_BLOCK_LEN, 8, 32,
cml_encrypt,
cml_decrypt,
cml_setkey,
cml_zerokey,
NULL
NULL,
};
/* Authentication instances */
struct auth_hash auth_hash_null = {
struct auth_hash auth_hash_null = { /* NB: context isn't used */
CRYPTO_NULL_HMAC, "NULL-HMAC",
0, NULL_HASH_LEN, NULL_HMAC_BLOCK_LEN, sizeof(int), /* NB: context isn't used */
null_init, null_update, null_final
0, NULL_HASH_LEN, sizeof(int), NULL_HMAC_BLOCK_LEN,
null_init, null_reinit, null_reinit, null_update, null_final
};
struct auth_hash auth_hash_hmac_md5 = {
CRYPTO_MD5_HMAC, "HMAC-MD5",
16, MD5_HASH_LEN, MD5_HMAC_BLOCK_LEN, sizeof(MD5_CTX),
(void (*) (void *)) MD5Init, MD5Update_int,
16, MD5_HASH_LEN, sizeof(MD5_CTX), MD5_HMAC_BLOCK_LEN,
(void (*) (void *)) MD5Init, NULL, NULL, MD5Update_int,
(void (*) (u_int8_t *, void *)) MD5Final
};
struct auth_hash auth_hash_hmac_sha1 = {
CRYPTO_SHA1_HMAC, "HMAC-SHA1",
20, SHA1_HASH_LEN, SHA1_HMAC_BLOCK_LEN, sizeof(SHA1_CTX),
SHA1Init_int, SHA1Update_int, SHA1Final_int
20, SHA1_HASH_LEN, sizeof(SHA1_CTX), SHA1_HMAC_BLOCK_LEN,
SHA1Init_int, NULL, NULL, SHA1Update_int, SHA1Final_int
};
struct auth_hash auth_hash_hmac_ripemd_160 = {
CRYPTO_RIPEMD160_HMAC, "HMAC-RIPEMD-160",
20, RIPEMD160_HASH_LEN, RIPEMD160_HMAC_BLOCK_LEN, sizeof(RMD160_CTX),
(void (*)(void *)) RMD160Init, RMD160Update_int,
20, RIPEMD160_HASH_LEN, sizeof(RMD160_CTX), RIPEMD160_HMAC_BLOCK_LEN,
(void (*)(void *)) RMD160Init, NULL, NULL, RMD160Update_int,
(void (*)(u_int8_t *, void *)) RMD160Final
};
struct auth_hash auth_hash_key_md5 = {
CRYPTO_MD5_KPDK, "Keyed MD5",
0, MD5_KPDK_HASH_LEN, 0, sizeof(MD5_CTX),
(void (*)(void *)) MD5Init, MD5Update_int,
0, MD5_KPDK_HASH_LEN, sizeof(MD5_CTX), 0,
(void (*)(void *)) MD5Init, NULL, NULL, MD5Update_int,
(void (*)(u_int8_t *, void *)) MD5Final
};
struct auth_hash auth_hash_key_sha1 = {
CRYPTO_SHA1_KPDK, "Keyed SHA1",
0, SHA1_KPDK_HASH_LEN, 0, sizeof(SHA1_CTX),
SHA1Init_int, SHA1Update_int, SHA1Final_int
0, SHA1_KPDK_HASH_LEN, sizeof(SHA1_CTX), 0,
SHA1Init_int, NULL, NULL, SHA1Update_int, SHA1Final_int
};
struct auth_hash auth_hash_hmac_sha2_256 = {
CRYPTO_SHA2_256_HMAC, "HMAC-SHA2-256",
32, SHA2_256_HASH_LEN, SHA2_256_HMAC_BLOCK_LEN, sizeof(SHA256_CTX),
(void (*)(void *)) SHA256_Init, SHA256Update_int,
32, SHA2_256_HASH_LEN, sizeof(SHA256_CTX), SHA2_256_HMAC_BLOCK_LEN,
(void (*)(void *)) SHA256_Init, NULL, NULL, SHA256Update_int,
(void (*)(u_int8_t *, void *)) SHA256_Final
};
struct auth_hash auth_hash_hmac_sha2_384 = {
CRYPTO_SHA2_384_HMAC, "HMAC-SHA2-384",
48, SHA2_384_HASH_LEN, SHA2_384_HMAC_BLOCK_LEN, sizeof(SHA384_CTX),
(void (*)(void *)) SHA384_Init, SHA384Update_int,
48, SHA2_384_HASH_LEN, sizeof(SHA384_CTX), SHA2_384_HMAC_BLOCK_LEN,
(void (*)(void *)) SHA384_Init, NULL, NULL, SHA384Update_int,
(void (*)(u_int8_t *, void *)) SHA384_Final
};
struct auth_hash auth_hash_hmac_sha2_512 = {
CRYPTO_SHA2_512_HMAC, "HMAC-SHA2-512",
64, SHA2_512_HASH_LEN, SHA2_512_HMAC_BLOCK_LEN, sizeof(SHA512_CTX),
(void (*)(void *)) SHA512_Init, SHA512Update_int,
64, SHA2_512_HASH_LEN, sizeof(SHA512_CTX), SHA2_512_HMAC_BLOCK_LEN,
(void (*)(void *)) SHA512_Init, NULL, NULL, SHA512Update_int,
(void (*)(u_int8_t *, void *)) SHA512_Final
};
struct auth_hash auth_hash_nist_gmac_aes_128 = {
CRYPTO_AES_128_NIST_GMAC, "GMAC-AES-128",
16, 16, sizeof(struct aes_gmac_ctx), GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Setkey,
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Reinit,
(int (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Update,
(void (*)(u_int8_t *, void *)) AES_GMAC_Final
};
struct auth_hash auth_hash_nist_gmac_aes_192 = {
CRYPTO_AES_192_NIST_GMAC, "GMAC-AES-192",
24, 16, sizeof(struct aes_gmac_ctx), GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Setkey,
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Reinit,
(int (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Update,
(void (*)(u_int8_t *, void *)) AES_GMAC_Final
};
struct auth_hash auth_hash_nist_gmac_aes_256 = {
CRYPTO_AES_256_NIST_GMAC, "GMAC-AES-256",
32, 16, sizeof(struct aes_gmac_ctx), GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Setkey,
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Reinit,
(int (*)(void *, const u_int8_t *, u_int16_t)) AES_GMAC_Update,
(void (*)(u_int8_t *, void *)) AES_GMAC_Final
};
/* Compression instance */
struct comp_algo comp_algo_deflate = {
CRYPTO_DEFLATE_COMP, "Deflate",
@ -579,6 +660,74 @@ rijndael128_zerokey(u_int8_t **sched)
*sched = NULL;
}
void
aes_icm_reinit(caddr_t key, u_int8_t *iv)
{
struct aes_icm_ctx *ctx;
ctx = (struct aes_icm_ctx *)key;
bcopy(iv, ctx->ac_block, AESICM_BLOCKSIZE);
}
void
aes_gcm_reinit(caddr_t key, u_int8_t *iv)
{
struct aes_icm_ctx *ctx;
aes_icm_reinit(key, iv);
ctx = (struct aes_icm_ctx *)key;
/* GCM starts with 2 as counter 1 is used for final xor of tag. */
bzero(&ctx->ac_block[AESICM_BLOCKSIZE - 4], 4);
ctx->ac_block[AESICM_BLOCKSIZE - 1] = 2;
}
void
aes_icm_crypt(caddr_t key, u_int8_t *data)
{
struct aes_icm_ctx *ctx;
u_int8_t keystream[AESICM_BLOCKSIZE];
int i;
ctx = (struct aes_icm_ctx *)key;
rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream);
for (i = 0; i < AESICM_BLOCKSIZE; i++)
data[i] ^= keystream[i];
explicit_bzero(keystream, sizeof(keystream));
/* increment counter */
for (i = AESICM_BLOCKSIZE - 1;
i >= 0; i--)
if (++ctx->ac_block[i]) /* continue on overflow */
break;
}
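Since ICM/CTR only XORs a keystream into the data, encryption and decryption are the same operation, which is why enc_xform_aes_icm above lists aes_icm_crypt for both. The counter bump at the end is a big-endian increment with carry; a standalone rendering:
#include <assert.h>
#include <stdint.h>

/* Big-endian increment: bump the last byte, carry left while it wraps. */
static void
ctr_inc(uint8_t *blk, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--)
		if (++blk[i])		/* non-zero result: no carry needed */
			break;
}

int
main(void)
{
	uint8_t b[4] = { 0x00, 0x00, 0x01, 0xff };

	ctr_inc(b, 4);
	assert(b[2] == 0x02 && b[3] == 0x00);
	return (0);
}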
int
aes_icm_setkey(u_int8_t **sched, u_int8_t *key, int len)
{
struct aes_icm_ctx *ctx;
*sched = malloc(sizeof(struct aes_icm_ctx), M_CRYPTO_DATA,
M_NOWAIT | M_ZERO);
if (*sched == NULL)
return ENOMEM;
ctx = (struct aes_icm_ctx *)*sched;
ctx->ac_nr = rijndaelKeySetupEnc(ctx->ac_ek, (u_char *)key, len * 8);
if (ctx->ac_nr == 0)
return EINVAL;
return 0;
}
void
aes_icm_zerokey(u_int8_t **sched)
{
bzero(*sched, sizeof(struct aes_icm_ctx));
free(*sched, M_CRYPTO_DATA);
*sched = NULL;
}
#define AES_XTS_BLOCKSIZE 16
#define AES_XTS_IVSIZE 8
#define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */
@ -728,8 +877,13 @@ null_init(void *ctx)
{
}
static void
null_reinit(void *ctx, const u_int8_t *buf, u_int16_t len)
{
}
static int
null_update(void *ctx, u_int8_t *buf, u_int16_t len)
null_update(void *ctx, const u_int8_t *buf, u_int16_t len)
{
return 0;
}
@ -742,14 +896,14 @@ null_final(u_int8_t *buf, void *ctx)
}
static int
RMD160Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
RMD160Update_int(void *ctx, const u_int8_t *buf, u_int16_t len)
{
RMD160Update(ctx, buf, len);
return 0;
}
static int
MD5Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
MD5Update_int(void *ctx, const u_int8_t *buf, u_int16_t len)
{
MD5Update(ctx, buf, len);
return 0;
@ -762,7 +916,7 @@ SHA1Init_int(void *ctx)
}
static int
SHA1Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
SHA1Update_int(void *ctx, const u_int8_t *buf, u_int16_t len)
{
SHA1Update(ctx, buf, len);
return 0;
@ -775,21 +929,21 @@ SHA1Final_int(u_int8_t *blk, void *ctx)
}
static int
SHA256Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
SHA256Update_int(void *ctx, const u_int8_t *buf, u_int16_t len)
{
SHA256_Update(ctx, buf, len);
return 0;
}
static int
SHA384Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
SHA384Update_int(void *ctx, const u_int8_t *buf, u_int16_t len)
{
SHA384_Update(ctx, buf, len);
return 0;
}
static int
SHA512Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
SHA512Update_int(void *ctx, const u_int8_t *buf, u_int16_t len)
{
SHA512_Update(ctx, buf, len);
return 0;
View File
@ -9,6 +9,12 @@
* supported the development of this code.
*
* Copyright (c) 2000 Angelos D. Keromytis
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* Permission to use, copy, and modify this software without fee
* is hereby granted, provided that this entire notice is included in
@ -29,6 +35,7 @@
#include <crypto/sha1.h>
#include <crypto/sha2/sha2.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/gmac.h>
/* Declarations */
struct auth_hash {
@ -36,10 +43,12 @@ struct auth_hash {
char *name;
u_int16_t keysize;
u_int16_t hashsize;
u_int16_t blocksize;
u_int16_t ctxsize;
u_int16_t blocksize;
void (*Init) (void *);
int (*Update) (void *, u_int8_t *, u_int16_t);
void (*Setkey) (void *, const u_int8_t *, u_int16_t);
void (*Reinit) (void *, const u_int8_t *, u_int16_t);
int (*Update) (void *, const u_int8_t *, u_int16_t);
void (*Final) (u_int8_t *, void *);
};
@ -50,6 +59,7 @@ struct enc_xform {
int type;
char *name;
u_int16_t blocksize;
u_int16_t ivsize;
u_int16_t minkey, maxkey;
void (*encrypt) (caddr_t, u_int8_t *);
void (*decrypt) (caddr_t, u_int8_t *);
@ -73,6 +83,7 @@ union authctx {
SHA256_CTX sha256ctx;
SHA384_CTX sha384ctx;
SHA512_CTX sha512ctx;
struct aes_gmac_ctx aes_gmac_ctx;
};
extern struct enc_xform enc_xform_null;
@ -82,6 +93,9 @@ extern struct enc_xform enc_xform_blf;
extern struct enc_xform enc_xform_cast5;
extern struct enc_xform enc_xform_skipjack;
extern struct enc_xform enc_xform_rijndael128;
extern struct enc_xform enc_xform_aes_icm;
extern struct enc_xform enc_xform_aes_nist_gcm;
extern struct enc_xform enc_xform_aes_nist_gmac;
extern struct enc_xform enc_xform_aes_xts;
extern struct enc_xform enc_xform_arc4;
extern struct enc_xform enc_xform_camellia;
@ -95,6 +109,9 @@ extern struct auth_hash auth_hash_hmac_ripemd_160;
extern struct auth_hash auth_hash_hmac_sha2_256;
extern struct auth_hash auth_hash_hmac_sha2_384;
extern struct auth_hash auth_hash_hmac_sha2_512;
extern struct auth_hash auth_hash_nist_gmac_aes_128;
extern struct auth_hash auth_hash_nist_gmac_aes_192;
extern struct auth_hash auth_hash_nist_gmac_aes_256;
extern struct comp_algo comp_algo_deflate;
View File
@ -1190,7 +1190,8 @@ svc_run_internal(SVCGROUP *grp, bool_t ismaster)
mtx_unlock(&grp->sg_lock);
p = curproc;
PROC_LOCK(p);
if (P_SHOULDSTOP(p)) {
if (P_SHOULDSTOP(p) ||
(p->p_flag & P_TOTAL_STOP) != 0) {
thread_suspend_check(0);
PROC_UNLOCK(p);
mtx_lock(&grp->sg_lock);
View File
@ -112,6 +112,7 @@ struct bufobj {
*/
#define BO_ONWORKLST (1 << 0) /* On syncer work-list */
#define BO_WWAIT (1 << 1) /* Wait for output to complete */
#define BO_DEAD (1 << 2) /* Dead; only with INVARIANTS */
#define BO_LOCKPTR(bo) (&(bo)->bo_lock)
#define BO_LOCK(bo) rw_wlock(BO_LOCKPTR((bo)))
View File
@ -80,6 +80,7 @@ struct malloc_type;
uint32_t arc4random(void);
void arc4rand(void *ptr, u_int len, int reseed);
int bcmp(const void *, const void *, size_t);
int timingsafe_bcmp(const void *, const void *, size_t);
void *bsearch(const void *, const void *, size_t,
size_t, int (*)(const void *, const void *));
#ifndef HAVE_INLINE_FFS
View File
@ -361,7 +361,7 @@ do { \
#define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */
#define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */
#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
#define TDF_UNUSED09 0x00000200 /* --available-- */
#define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */
#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
#define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */
@ -652,7 +652,7 @@ struct proc {
#define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
#define P_HWPMC 0x800000 /* Process is using HWPMCs */
#define P_JAILED 0x1000000 /* Process is in jail. */
#define P_UNUSED1 0x2000000
#define P_TOTAL_STOP 0x2000000 /* Stopped in proc_stop_total. */
#define P_INEXEC 0x4000000 /* Process is in execve(). */
#define P_STATCHILD 0x8000000 /* Child process stopped or exited. */
#define P_INMEM 0x10000000 /* Loaded into memory. */
@ -713,6 +713,7 @@ struct proc {
#define SINGLE_NO_EXIT 0
#define SINGLE_EXIT 1
#define SINGLE_BOUNDARY 2
#define SINGLE_ALLPROC 3
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
@ -839,6 +840,7 @@ extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;
extern struct sx allproc_lock;
extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0; /* Process slot for swapper. */
@ -962,8 +964,8 @@ void thread_exit(void) __dead2;
void thread_free(struct thread *td);
void thread_link(struct thread *td, struct proc *p);
void thread_reap(void);
int thread_single(int how);
void thread_single_end(void);
int thread_single(struct proc *p, int how);
void thread_single_end(struct proc *p, int how);
void thread_stash(struct thread *td);
void thread_stopped(struct proc *p);
void childproc_stopped(struct proc *child, int reason);
@ -971,14 +973,17 @@ void childproc_continued(struct proc *child);
void childproc_exited(struct proc *child);
int thread_suspend_check(int how);
bool thread_suspend_check_needed(void);
void thread_suspend_switch(struct thread *);
void thread_suspend_switch(struct thread *, struct proc *p);
void thread_suspend_one(struct thread *td);
void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
int thread_unsuspend_one(struct thread *td);
int thread_unsuspend_one(struct thread *td, struct proc *p);
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
void stop_all_proc(void);
void resume_all_proc(void);
static __inline int
curthread_pflags_set(int flags)
{
View File
@ -6,6 +6,7 @@ TESTSDIR= ${TESTSBASE}/sys
TESTS_SUBDIRS+= kern
TESTS_SUBDIRS+= netinet
TESTS_SUBDIRS+= opencrypto
# Items not integrated into kyua runs by default
SUBDIR+= pjdfstest
View File
@ -0,0 +1,14 @@
# $FreeBSD$
TESTSDIR= ${TESTSBASE}/sys/opencrypto
BINDIR= ${TESTSDIR}
PLAIN_TESTS_SH= runtests
TEST_METADATA.foo+=required_programs="python"
PYMODULES= cryptodev.py cryptodevh.py cryptotest.py dpkt.py
FILESDIR= ${TESTSDIR}
FILES= ${PYMODULES}
.include <bsd.test.mk>
View File
@ -0,0 +1,561 @@
#!/usr/bin/env python
#
# Copyright (c) 2014 The FreeBSD Foundation
# Copyright 2014 John-Mark Gurney
# All rights reserved.
#
# This software was developed by John-Mark Gurney under
# the sponsorship from the FreeBSD Foundation.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
import array
import dpkt
from fcntl import ioctl
import os
import signal
from struct import pack as _pack
from cryptodevh import *
__all__ = [ 'Crypto', 'MismatchError', ]
class FindOp(dpkt.Packet):
__byte_order__ = '@'
__hdr__ = ( ('crid', 'i', 0),
('name', '32s', 0),
)
class SessionOp(dpkt.Packet):
__byte_order__ = '@'
__hdr__ = ( ('cipher', 'I', 0),
('mac', 'I', 0),
('keylen', 'I', 0),
('key', 'P', 0),
('mackeylen', 'i', 0),
('mackey', 'P', 0),
('ses', 'I', 0),
)
class SessionOp2(dpkt.Packet):
__byte_order__ = '@'
__hdr__ = ( ('cipher', 'I', 0),
('mac', 'I', 0),
('keylen', 'I', 0),
('key', 'P', 0),
('mackeylen', 'i', 0),
('mackey', 'P', 0),
('ses', 'I', 0),
('crid', 'i', 0),
('pad0', 'i', 0),
('pad1', 'i', 0),
('pad2', 'i', 0),
('pad3', 'i', 0),
)
class CryptOp(dpkt.Packet):
__byte_order__ = '@'
__hdr__ = ( ('ses', 'I', 0),
('op', 'H', 0),
('flags', 'H', 0),
('len', 'I', 0),
('src', 'P', 0),
('dst', 'P', 0),
('mac', 'P', 0),
('iv', 'P', 0),
)
class CryptAEAD(dpkt.Packet):
__byte_order__ = '@'
__hdr__ = (
('ses', 'I', 0),
('op', 'H', 0),
('flags', 'H', 0),
('len', 'I', 0),
('aadlen', 'I', 0),
('ivlen', 'I', 0),
('src', 'P', 0),
('dst', 'P', 0),
('aad', 'P', 0),
('tag', 'P', 0),
('iv', 'P', 0),
)
# h2py.py can't handle multiarg macros
CRIOGET = 3221513060
CIOCGSESSION = 3224396645
CIOCGSESSION2 = 3225445226
CIOCFSESSION = 2147771238
CIOCCRYPT = 3224396647
CIOCKEY = 3230688104
CIOCASYMFEAT = 1074029417
CIOCKEY2 = 3230688107
CIOCFINDDEV = 3223610220
CIOCCRYPTAEAD = 3225445229
def _getdev():
fd = os.open('/dev/crypto', os.O_RDWR)
buf = array.array('I', [0])
ioctl(fd, CRIOGET, buf, 1)
os.close(fd)
return buf[0]
_cryptodev = _getdev()
def _findop(crid, name):
fop = FindOp()
fop.crid = crid
fop.name = name
s = array.array('B', fop.pack_hdr())
ioctl(_cryptodev, CIOCFINDDEV, s, 1)
fop.unpack(s)
try:
idx = fop.name.index('\x00')
name = fop.name[:idx]
except ValueError:
name = fop.name
return fop.crid, name
class Crypto:
@staticmethod
def findcrid(name):
return _findop(-1, name)[0]
@staticmethod
def getcridname(crid):
return _findop(crid, '')[1]
def __init__(self, cipher=0, key=None, mac=0, mackey=None,
crid=CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE):
self._ses = None
ses = SessionOp2()
ses.cipher = cipher
ses.mac = mac
if key is not None:
ses.keylen = len(key)
k = array.array('B', key)
ses.key = k.buffer_info()[0]
else:
self.key = None
if mackey is not None:
ses.mackeylen = len(mackey)
mk = array.array('B', mackey)
ses.mackey = mk.buffer_info()[0]
self._maclen = 16 # parameterize?
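# NB: 16 matches GCM tags, but e.g. SHA1 HMAC produces 20 bytes, so
# other MAC algorithms would need this parameterized.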
else:
self._maclen = None
if not cipher and not mac:
raise ValueError('one of cipher or mac MUST be specified.')
ses.crid = crid
s = array.array('B', ses.pack_hdr())
ioctl(_cryptodev, CIOCGSESSION2, s, 1)
ses.unpack(s)
self._ses = ses.ses
def __del__(self):
if self._ses is None:
return
try:
ioctl(_cryptodev, CIOCFSESSION, _pack('I', self._ses))
except TypeError:
pass
self._ses = None
def _doop(self, op, src, iv):
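# Issue one CIOCCRYPT: src and dst share a buffer (in-place), and the
# digest is returned alongside the data when a MAC session is active.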
cop = CryptOp()
cop.ses = self._ses
cop.op = op
cop.flags = 0
cop.len = len(src)
s = array.array('B', src)
cop.src = cop.dst = s.buffer_info()[0]
if self._maclen is not None:
m = array.array('B', [0] * self._maclen)
cop.mac = m.buffer_info()[0]
ivbuf = array.array('B', iv)
cop.iv = ivbuf.buffer_info()[0]
#print 'cop:', `cop`
ioctl(_cryptodev, CIOCCRYPT, str(cop))
s = s.tostring()
if self._maclen is not None:
return s, m.tostring()
return s
def _doaead(self, op, src, aad, iv, tag=None):
caead = CryptAEAD()
caead.ses = self._ses
caead.op = op
caead.flags = 0
caead.len = len(src)
s = array.array('B', src)
caead.src = caead.dst = s.buffer_info()[0]
caead.aadlen = len(aad)
saad = array.array('B', aad)
caead.aad = saad.buffer_info()[0]
if self._maclen is None:
raise ValueError('must have a tag length')
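# On encrypt, tag is None and the kernel writes the computed tag into
# the zeroed buffer; on decrypt the supplied tag is verified by the
# kernel, which fails the ioctl (EBADMSG) on mismatch.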
if tag is None:
tag = array.array('B', [0] * self._maclen)
else:
assert len(tag) == self._maclen, `len(tag), self._maclen`
tag = array.array('B', tag)
caead.tag = tag.buffer_info()[0]
ivbuf = array.array('B', iv)
caead.ivlen = len(iv)
caead.iv = ivbuf.buffer_info()[0]
ioctl(_cryptodev, CIOCCRYPTAEAD, str(caead))
s = s.tostring()
return s, tag.tostring()
def perftest(self, op, size, timeo=3):
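# Rough throughput test: run CIOCCRYPT over a random buffer of the
# given size in a tight loop until SIGALRM fires, then report MB/sec.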
import random
import time
inp = array.array('B', (random.randint(0, 255) for x in xrange(size)))
out = array.array('B', inp)
# prep ioctl
cop = CryptOp()
cop.ses = self._ses
cop.op = op
cop.flags = 0
cop.len = len(inp)
s = array.array('B', inp)
cop.src = s.buffer_info()[0]
cop.dst = out.buffer_info()[0]
if self._maclen is not None:
m = array.array('B', [0] * self._maclen)
cop.mac = m.buffer_info()[0]
ivbuf = array.array('B', (random.randint(0, 255) for x in xrange(16)))
cop.iv = ivbuf.buffer_info()[0]
exit = [ False ]
def alarmhandle(a, b, exit=exit):
exit[0] = True
oldalarm = signal.signal(signal.SIGALRM, alarmhandle)
signal.alarm(timeo)
start = time.time()
reps = 0
while not exit[0]:
ioctl(_cryptodev, CIOCCRYPT, str(cop))
reps += 1
end = time.time()
signal.signal(signal.SIGALRM, oldalarm)
print 'time:', end - start
print 'perf MB/sec:', (reps * size) / (end - start) / 1024 / 1024
def encrypt(self, data, iv='', aad=None):
if aad is None:
return self._doop(COP_ENCRYPT, data, iv)
else:
return self._doaead(COP_ENCRYPT, data, aad,
iv)
def decrypt(self, data, iv='', aad=None, tag=None):
if aad is None:
return self._doop(COP_DECRYPT, data, iv)
else:
return self._doaead(COP_DECRYPT, data, aad,
iv, tag=tag)
class MismatchError(Exception):
pass
class KATParser:
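# Iterates NIST CAVP/KAT .rsp files: '[SECTION]' lines introduce a
# group, blank-line-separated 'Field = value' blocks form individual
# vectors, and '#' lines are comments.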
def __init__(self, fname, fields):
self.fp = open(fname)
self.fields = set(fields)
self._pending = None
def __iter__(self):
while True:
didread = False
if self._pending is not None:
i = self._pending
self._pending = None
else:
i = self.fp.readline()
didread = True
if didread and not i:
return
if (i and i[0] == '#') or not i.strip():
continue
if i[0] == '[':
yield i[1:].split(']', 1)[0], self.fielditer()
else:
raise ValueError('unknown line: %s' % `i`)
def eatblanks(self):
while True:
line = self.fp.readline()
if line == '':
break
line = line.strip()
if line:
break
return line
def fielditer(self):
while True:
values = {}
line = self.eatblanks()
if not line or line[0] == '[':
self._pending = line
return
while True:
try:
f, v = line.split(' =')
except ValueError:
if line == 'FAIL':
f, v = 'FAIL', ''
else:
print 'line:', `line`
raise
v = v.strip()
if f in values:
raise ValueError('already present: %s' % `f`)
values[f] = v
line = self.fp.readline().strip()
if not line:
break
# we should have everything
remain = self.fields.copy() - set(values.keys())
# XXX - special case GCM decrypt
if remain and not ('FAIL' in values and 'PT' in remain):
raise ValueError('not all fields found: %s' % `remain`)
yield values
def _spdechex(s):
return ''.join(s.split()).decode('hex')
if __name__ == '__main__':
if True:
try:
crid = Crypto.findcrid('aesni0')
print 'aesni:', crid
except IOError:
print 'aesni0 not found'
for i in xrange(10):
try:
name = Crypto.getcridname(i)
print '%2d: %s' % (i, `name`)
except IOError:
pass
elif False:
kp = KATParser('/usr/home/jmg/aesni.testing/format tweak value input - data unit seq no/XTSGenAES128.rsp', [ 'COUNT', 'DataUnitLen', 'Key', 'DataUnitSeqNumber', 'PT', 'CT' ])
for mode, ni in kp:
print `mode`, `ni`
for j in ni:
print `j`
elif False:
key = _spdechex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = _spdechex('00000000000000000000000000000001')
pt = _spdechex('ab3cabed693a32946055524052afe3c9cb49664f09fc8b7da824d924006b7496353b8c1657c5dec564d8f38d7432e1de35aae9d95590e66278d4acce883e51abaf94977fcd3679660109a92bf7b2973ccd547f065ec6cee4cb4a72a5e9f45e615d920d76cb34cba482467b3e21422a7242e7d931330c0fbf465c3a3a46fae943029fd899626dda542750a1eee253df323c6ef1573f1c8c156613e2ea0a6cdbf2ae9701020be2d6a83ecb7f3f9d8e')
#pt = _spdechex('00000000000000000000000000000000')
ct = _spdechex('f42c33853ecc5ce2949865fdb83de3bff1089e9360c94f830baebfaff72836ab5236f77212f1e7396c8c54ac73d81986375a6e9e299cfeca5ba051ed25e8d1affa5beaf6c1d2b45e90802408f2ced21663497e906de5f29341e5e52ddfea5363d628b3eb7806835e17bae051b3a6da3f8e2941fe44384eac17a9d298d2c331ca8320c775b5d53263a5e905059d891b21dede2d8110fd427c7bd5a9a274ddb47b1945ee79522203b6e297d0e399ef')
c = Crypto(CRYPTO_AES_ICM, key)
enc = c.encrypt(pt, iv)
print 'enc:', enc.encode('hex')
print ' ct:', ct.encode('hex')
assert ct == enc
dec = c.decrypt(ct, iv)
print 'dec:', dec.encode('hex')
print ' pt:', pt.encode('hex')
assert pt == dec
elif False:
key = _spdechex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = _spdechex('00000000000000000000000000000001')
pt = _spdechex('ab3cabed693a32946055524052afe3c9cb49664f09fc8b7da824d924006b7496353b8c1657c5dec564d8f38d7432e1de35aae9d95590e66278d4acce883e51abaf94977fcd3679660109a92bf7b2973ccd547f065ec6cee4cb4a72a5e9f45e615d920d76cb34cba482467b3e21422a7242e7d931330c0fbf465c3a3a46fae943029fd899626dda542750a1eee253df323c6ef1573f1c8c156613e2ea0a6cdbf2ae9701020be2d6a83ecb7f3f9d8e0a3f')
#pt = _spdechex('00000000000000000000000000000000')
ct = _spdechex('f42c33853ecc5ce2949865fdb83de3bff1089e9360c94f830baebfaff72836ab5236f77212f1e7396c8c54ac73d81986375a6e9e299cfeca5ba051ed25e8d1affa5beaf6c1d2b45e90802408f2ced21663497e906de5f29341e5e52ddfea5363d628b3eb7806835e17bae051b3a6da3f8e2941fe44384eac17a9d298d2c331ca8320c775b5d53263a5e905059d891b21dede2d8110fd427c7bd5a9a274ddb47b1945ee79522203b6e297d0e399ef3768')
c = Crypto(CRYPTO_AES_ICM, key)
enc = c.encrypt(pt, iv)
print 'enc:', enc.encode('hex')
print ' ct:', ct.encode('hex')
assert ct == enc
dec = c.decrypt(ct, iv)
print 'dec:', dec.encode('hex')
print ' pt:', pt.encode('hex')
assert pt == dec
elif False:
key = _spdechex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = _spdechex('6eba2716ec0bd6fa5cdef5e6d3a795bc')
pt = _spdechex('ab3cabed693a32946055524052afe3c9cb49664f09fc8b7da824d924006b7496353b8c1657c5dec564d8f38d7432e1de35aae9d95590e66278d4acce883e51abaf94977fcd3679660109a92bf7b2973ccd547f065ec6cee4cb4a72a5e9f45e615d920d76cb34cba482467b3e21422a7242e7d931330c0fbf465c3a3a46fae943029fd899626dda542750a1eee253df323c6ef1573f1c8c156613e2ea0a6cdbf2ae9701020be2d6a83ecb7f3f9d8e0a3f')
ct = _spdechex('f1f81f12e72e992dbdc304032705dc75dc3e4180eff8ee4819906af6aee876d5b00b7c36d282a445ce3620327be481e8e53a8e5a8e5ca9abfeb2281be88d12ffa8f46d958d8224738c1f7eea48bda03edbf9adeb900985f4fa25648b406d13a886c25e70cfdecdde0ad0f2991420eb48a61c64fd797237cf2798c2675b9bb744360b0a3f329ac53bbceb4e3e7456e6514f1a9d2f06c236c31d0f080b79c15dce1096357416602520daa098b17d1af427')
c = Crypto(CRYPTO_AES_CBC, key)
enc = c.encrypt(pt, iv)
print 'enc:', enc.encode('hex')
print ' ct:', ct.encode('hex')
assert ct == enc
dec = c.decrypt(ct, iv)
print 'dec:', dec.encode('hex')
print ' pt:', pt.encode('hex')
assert pt == dec
elif False:
key = _spdechex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = _spdechex('b3d8cc017cbb89b39e0f67e2')
pt = _spdechex('c3b3c41f113a31b73d9a5cd4321030')
aad = _spdechex('24825602bd12a984e0092d3e448eda5f')
ct = _spdechex('93fe7d9e9bfd10348a5606e5cafa7354')
ct = _spdechex('93fe7d9e9bfd10348a5606e5cafa73')
tag = _spdechex('0032a1dc85f1c9786925a2e71d8272dd')
tag = _spdechex('8d11a0929cb3fbe1fef01a4a38d5f8ea')
c = Crypto(CRYPTO_AES_NIST_GCM_16, key,
mac=CRYPTO_AES_128_NIST_GMAC, mackey=key)
enc, enctag = c.encrypt(pt, iv, aad=aad)
print 'enc:', enc.encode('hex')
print ' ct:', ct.encode('hex')
assert enc == ct
print 'etg:', enctag.encode('hex')
print 'tag:', tag.encode('hex')
assert enctag == tag
# Make sure we get EBADMSG
#enctag = enctag[:-1] + 'a'
dec, dectag = c.decrypt(ct, iv, aad=aad, tag=enctag)
print 'dec:', dec.encode('hex')
print ' pt:', pt.encode('hex')
assert dec == pt
print 'dtg:', dectag.encode('hex')
print 'tag:', tag.encode('hex')
assert dectag == tag
elif False:
key = _spdechex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = _spdechex('b3d8cc017cbb89b39e0f67e2')
key = key + iv[:4]
iv = iv[4:]
pt = _spdechex('c3b3c41f113a31b73d9a5cd432103069')
aad = _spdechex('24825602bd12a984e0092d3e448eda5f')
ct = _spdechex('93fe7d9e9bfd10348a5606e5cafa7354')
tag = _spdechex('0032a1dc85f1c9786925a2e71d8272dd')
c = Crypto(CRYPTO_AES_GCM_16, key, mac=CRYPTO_AES_128_GMAC, mackey=key)
enc, enctag = c.encrypt(pt, iv, aad=aad)
print 'enc:', enc.encode('hex')
print ' ct:', ct.encode('hex')
assert enc == ct
print 'etg:', enctag.encode('hex')
print 'tag:', tag.encode('hex')
assert enctag == tag
elif False:
for i in xrange(100000):
c = Crypto(CRYPTO_AES_XTS, '1bbfeadf539daedcae33ced497343f3ca1f2474ad932b903997d44707db41382'.decode('hex'))
data = '52a42bca4e9425a25bbc8c8bf6129dec'.decode('hex')
ct = '517e602becd066b65fa4f4f56ddfe240'.decode('hex')
iv = _pack('QQ', 71, 0)
enc = c.encrypt(data, iv)
assert enc == ct
elif True:
c = Crypto(CRYPTO_AES_XTS, '1bbfeadf539daedcae33ced497343f3ca1f2474ad932b903997d44707db41382'.decode('hex'))
data = '52a42bca4e9425a25bbc8c8bf6129dec'.decode('hex')
ct = '517e602becd066b65fa4f4f56ddfe240'.decode('hex')
iv = _pack('QQ', 71, 0)
enc = c.encrypt(data, iv)
assert enc == ct
dec = c.decrypt(enc, iv)
assert dec == data
#c.perftest(COP_ENCRYPT, 192*1024, reps=30000)
else:
key = '1bbfeadf539daedcae33ced497343f3ca1f2474ad932b903997d44707db41382'.decode('hex')
print 'XTS %d testing:' % (len(key) * 8)
c = Crypto(CRYPTO_AES_XTS, key)
for i in [ 8192, 192*1024]:
print 'block size: %d' % i
c.perftest(COP_ENCRYPT, i)
c.perftest(COP_DECRYPT, i)

@ -0,0 +1,250 @@
# $FreeBSD$
# Generated by h2py from stdin
# Included from sys/ioccom.h
IOCPARM_SHIFT = 13
IOCPARM_MASK = ((1 << IOCPARM_SHIFT) - 1)
def IOCPARM_LEN(x): return (((x) >> 16) & IOCPARM_MASK)
def IOCBASECMD(x): return ((x) & ~(IOCPARM_MASK << 16))
def IOCGROUP(x): return (((x) >> 8) & 0xff)
IOCPARM_MAX = (1 << IOCPARM_SHIFT)
IOC_VOID = 0x20000000
IOC_OUT = 0x40000000
IOC_IN = 0x80000000
IOC_INOUT = (IOC_IN|IOC_OUT)
IOC_DIRMASK = (IOC_VOID|IOC_OUT|IOC_IN)
# Included from sys/cdefs.h
def __has_feature(x): return 0
def __has_include(x): return 0
def __has_builtin(x): return 0
__GNUCLIKE_ASM = 3
__GNUCLIKE_ASM = 2
__GNUCLIKE___TYPEOF = 1
__GNUCLIKE___OFFSETOF = 1
__GNUCLIKE___SECTION = 1
__GNUCLIKE_CTOR_SECTION_HANDLING = 1
__GNUCLIKE_BUILTIN_CONSTANT_P = 1
__GNUCLIKE_BUILTIN_VARARGS = 1
__GNUCLIKE_BUILTIN_STDARG = 1
__GNUCLIKE_BUILTIN_VAALIST = 1
__GNUC_VA_LIST_COMPATIBILITY = 1
__GNUCLIKE_BUILTIN_NEXT_ARG = 1
__GNUCLIKE_BUILTIN_MEMCPY = 1
__CC_SUPPORTS_INLINE = 1
__CC_SUPPORTS___INLINE = 1
__CC_SUPPORTS___INLINE__ = 1
__CC_SUPPORTS___FUNC__ = 1
__CC_SUPPORTS_WARNING = 1
__CC_SUPPORTS_VARADIC_XXX = 1
__CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def _Alignas(x): return alignas(x)
def _Alignas(x): return __aligned(x)
def _Alignof(x): return alignof(x)
def _Alignof(x): return __alignof(x)
def __nonnull(x): return __attribute__((__nonnull__(x)))
def __predict_true(exp): return __builtin_expect((exp), 1)
def __predict_false(exp): return __builtin_expect((exp), 0)
def __predict_true(exp): return (exp)
def __predict_false(exp): return (exp)
def __format_arg(fmtarg): return __attribute__((__format_arg__ (fmtarg)))
def __GLOBL(sym): return __GLOBL1(sym)
def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
_POSIX_C_SOURCE = 199009
_POSIX_C_SOURCE = 199209
__XSI_VISIBLE = 700
_POSIX_C_SOURCE = 200809
__XSI_VISIBLE = 600
_POSIX_C_SOURCE = 200112
__XSI_VISIBLE = 500
_POSIX_C_SOURCE = 199506
_POSIX_C_SOURCE = 198808
__POSIX_VISIBLE = 200809
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 200112
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 199506
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199309
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199209
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199009
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 198808
__ISO_C_VISIBLE = 0
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 2011
__POSIX_VISIBLE = 200809
__XSI_VISIBLE = 700
__BSD_VISIBLE = 1
__ISO_C_VISIBLE = 2011
__NO_TLS = 1
CRYPTO_DRIVERS_INITIAL = 4
CRYPTO_SW_SESSIONS = 32
NULL_HASH_LEN = 16
MD5_HASH_LEN = 16
SHA1_HASH_LEN = 20
RIPEMD160_HASH_LEN = 20
SHA2_256_HASH_LEN = 32
SHA2_384_HASH_LEN = 48
SHA2_512_HASH_LEN = 64
MD5_KPDK_HASH_LEN = 16
SHA1_KPDK_HASH_LEN = 20
HASH_MAX_LEN = SHA2_512_HASH_LEN
NULL_HMAC_BLOCK_LEN = 64
MD5_HMAC_BLOCK_LEN = 64
SHA1_HMAC_BLOCK_LEN = 64
RIPEMD160_HMAC_BLOCK_LEN = 64
SHA2_256_HMAC_BLOCK_LEN = 64
SHA2_384_HMAC_BLOCK_LEN = 128
SHA2_512_HMAC_BLOCK_LEN = 128
HMAC_MAX_BLOCK_LEN = SHA2_512_HMAC_BLOCK_LEN
HMAC_IPAD_VAL = 0x36
HMAC_OPAD_VAL = 0x5C
NULL_BLOCK_LEN = 4
DES_BLOCK_LEN = 8
DES3_BLOCK_LEN = 8
BLOWFISH_BLOCK_LEN = 8
SKIPJACK_BLOCK_LEN = 8
CAST128_BLOCK_LEN = 8
RIJNDAEL128_BLOCK_LEN = 16
AES_BLOCK_LEN = RIJNDAEL128_BLOCK_LEN
CAMELLIA_BLOCK_LEN = 16
EALG_MAX_BLOCK_LEN = AES_BLOCK_LEN
AALG_MAX_RESULT_LEN = 64
CRYPTO_ALGORITHM_MIN = 1
CRYPTO_DES_CBC = 1
CRYPTO_3DES_CBC = 2
CRYPTO_BLF_CBC = 3
CRYPTO_CAST_CBC = 4
CRYPTO_SKIPJACK_CBC = 5
CRYPTO_MD5_HMAC = 6
CRYPTO_SHA1_HMAC = 7
CRYPTO_RIPEMD160_HMAC = 8
CRYPTO_MD5_KPDK = 9
CRYPTO_SHA1_KPDK = 10
CRYPTO_RIJNDAEL128_CBC = 11
CRYPTO_AES_CBC = 11
CRYPTO_ARC4 = 12
CRYPTO_MD5 = 13
CRYPTO_SHA1 = 14
CRYPTO_NULL_HMAC = 15
CRYPTO_NULL_CBC = 16
CRYPTO_DEFLATE_COMP = 17
CRYPTO_SHA2_256_HMAC = 18
CRYPTO_SHA2_384_HMAC = 19
CRYPTO_SHA2_512_HMAC = 20
CRYPTO_CAMELLIA_CBC = 21
CRYPTO_AES_XTS = 22
CRYPTO_AES_ICM = 23
CRYPTO_AES_NIST_GMAC = 24
CRYPTO_AES_NIST_GCM_16 = 25
CRYPTO_AES_128_NIST_GMAC = 26
CRYPTO_AES_192_NIST_GMAC = 27
CRYPTO_AES_256_NIST_GMAC = 28
CRYPTO_ALGORITHM_MAX = 28
CRYPTO_ALG_FLAG_SUPPORTED = 0x01
CRYPTO_ALG_FLAG_RNG_ENABLE = 0x02
CRYPTO_ALG_FLAG_DSA_SHA = 0x04
CRYPTO_FLAG_HARDWARE = 0x01000000
CRYPTO_FLAG_SOFTWARE = 0x02000000
COP_ENCRYPT = 1
COP_DECRYPT = 2
COP_F_BATCH = 0x0008
CRK_MAXPARAM = 8
CRK_ALGORITM_MIN = 0
CRK_MOD_EXP = 0
CRK_MOD_EXP_CRT = 1
CRK_DSA_SIGN = 2
CRK_DSA_VERIFY = 3
CRK_DH_COMPUTE_KEY = 4
CRK_ALGORITHM_MAX = 4
CRF_MOD_EXP = (1 << CRK_MOD_EXP)
CRF_MOD_EXP_CRT = (1 << CRK_MOD_EXP_CRT)
CRF_DSA_SIGN = (1 << CRK_DSA_SIGN)
CRF_DSA_VERIFY = (1 << CRK_DSA_VERIFY)
CRF_DH_COMPUTE_KEY = (1 << CRK_DH_COMPUTE_KEY)
CRD_F_ENCRYPT = 0x01
CRD_F_IV_PRESENT = 0x02
CRD_F_IV_EXPLICIT = 0x04
CRD_F_DSA_SHA_NEEDED = 0x08
CRD_F_COMP = 0x0f
CRD_F_KEY_EXPLICIT = 0x10
CRYPTO_F_IMBUF = 0x0001
CRYPTO_F_IOV = 0x0002
CRYPTO_F_BATCH = 0x0008
CRYPTO_F_CBIMM = 0x0010
CRYPTO_F_DONE = 0x0020
CRYPTO_F_CBIFSYNC = 0x0040
CRYPTO_BUF_CONTIG = 0x0
CRYPTO_BUF_IOV = 0x1
CRYPTO_BUF_MBUF = 0x2
CRYPTO_OP_DECRYPT = 0x0
CRYPTO_OP_ENCRYPT = 0x1
CRYPTO_HINT_MORE = 0x1
def CRYPTO_SESID2HID(_sid): return (((_sid) >> 32) & 0x00ffffff)
def CRYPTO_SESID2CAPS(_sid): return (((_sid) >> 32) & 0xff000000)
def CRYPTO_SESID2LID(_sid): return (((u_int32_t) (_sid)) & 0xffffffff)
CRYPTOCAP_F_HARDWARE = CRYPTO_FLAG_HARDWARE
CRYPTOCAP_F_SOFTWARE = CRYPTO_FLAG_SOFTWARE
CRYPTOCAP_F_SYNC = 0x04000000
CRYPTO_SYMQ = 0x1
CRYPTO_ASYMQ = 0x2

@ -0,0 +1,265 @@
#!/usr/bin/env python
#
# Copyright (c) 2014 The FreeBSD Foundation
# All rights reserved.
#
# This software was developed by John-Mark Gurney under
# the sponsorship from the FreeBSD Foundation.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
import cryptodev
import itertools
import os
import struct
import unittest
from cryptodev import *
from glob import iglob
katdir = '/usr/local/share/nist-kat'
def katg(base, glob):
return iglob(os.path.join(katdir, base, glob))
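# KAT vectors come from the nist-kat package under
# /usr/local/share/nist-kat; runtests.sh skips the suite if it is missing.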
aesmodules = [ 'cryptosoft0', 'aesni0', ]
desmodules = [ 'cryptosoft0', ]
shamodules = [ 'cryptosoft0', ]
def GenTestCase(cname):
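# Class factory: build a TestCase bound to one driver name, or return
# None when that driver is absent so its tests are never generated.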
try:
crid = cryptodev.Crypto.findcrid(cname)
except IOError:
return None
class GendCryptoTestCase(unittest.TestCase):
###############
##### AES #####
###############
@unittest.skipIf(cname not in aesmodules, 'skipping AES on %s' % `cname`)
def test_xts(self):
for i in katg('XTSTestVectors/format tweak value input - data unit seq no', '*.rsp'):
self.runXTS(i, cryptodev.CRYPTO_AES_XTS)
def test_cbc(self):
for i in katg('KAT_AES', 'CBC[GKV]*.rsp'):
self.runCBC(i)
def test_gcm(self):
for i in katg('gcmtestvectors', 'gcmEncrypt*'):
self.runGCM(i, 'ENCRYPT')
for i in katg('gcmtestvectors', 'gcmDecrypt*'):
self.runGCM(i, 'DECRYPT')
_gmacsizes = { 32: cryptodev.CRYPTO_AES_256_NIST_GMAC,
24: cryptodev.CRYPTO_AES_192_NIST_GMAC,
16: cryptodev.CRYPTO_AES_128_NIST_GMAC,
}
def runGCM(self, fname, mode):
curfun = None
if mode == 'ENCRYPT':
swapptct = False
curfun = Crypto.encrypt
elif mode == 'DECRYPT':
swapptct = True
curfun = Crypto.decrypt
else:
raise RuntimeError('unknown mode: %s' % `mode`)
for bogusmode, lines in cryptodev.KATParser(fname,
[ 'Count', 'Key', 'IV', 'CT', 'AAD', 'Tag', 'PT', ]):
for data in lines:
curcnt = int(data['Count'])
cipherkey = data['Key'].decode('hex')
iv = data['IV'].decode('hex')
aad = data['AAD'].decode('hex')
tag = data['Tag'].decode('hex')
ct = data['CT'].decode('hex')
if 'FAIL' not in data:
pt = data['PT'].decode('hex')
if len(iv) != 12:
# XXX - IVs other than 96 bits aren't supported
continue
c = Crypto(cryptodev.CRYPTO_AES_NIST_GCM_16,
cipherkey,
mac=self._gmacsizes[len(cipherkey)],
mackey=cipherkey, crid=crid)
if mode == 'ENCRYPT':
rct, rtag = c.encrypt(pt, iv, aad)
rtag = rtag[:len(tag)]
data['rct'] = rct.encode('hex')
data['rtag'] = rtag.encode('hex')
self.assertEqual(rct, ct, `data`)
self.assertEqual(rtag, tag, `data`)
else:
if len(tag) != 16:
continue
args = (ct, iv, aad, tag)
if 'FAIL' in data:
self.assertRaises(IOError,
c.decrypt, *args)
else:
rpt, rtag = c.decrypt(*args)
data['rpt'] = rpt.encode('hex')
data['rtag'] = rtag.encode('hex')
self.assertEqual(rpt, pt,
`data`)
def runCBC(self, fname):
curfun = None
for mode, lines in cryptodev.KATParser(fname,
[ 'COUNT', 'KEY', 'IV', 'PLAINTEXT', 'CIPHERTEXT', ]):
if mode == 'ENCRYPT':
swapptct = False
curfun = Crypto.encrypt
elif mode == 'DECRYPT':
swapptct = True
curfun = Crypto.decrypt
else:
raise RuntimeError('unknown mode: %s' % `mode`)
for data in lines:
curcnt = int(data['COUNT'])
cipherkey = data['KEY'].decode('hex')
iv = data['IV'].decode('hex')
pt = data['PLAINTEXT'].decode('hex')
ct = data['CIPHERTEXT'].decode('hex')
if swapptct:
pt, ct = ct, pt
# run the fun
c = Crypto(cryptodev.CRYPTO_AES_CBC, cipherkey, crid=crid)
r = curfun(c, pt, iv)
self.assertEqual(r, ct)
def runXTS(self, fname, meth):
curfun = None
for mode, lines in cryptodev.KATParser(fname,
[ 'COUNT', 'DataUnitLen', 'Key', 'DataUnitSeqNumber', 'PT',
'CT' ]):
if mode == 'ENCRYPT':
swapptct = False
curfun = Crypto.encrypt
elif mode == 'DECRYPT':
swapptct = True
curfun = Crypto.decrypt
else:
raise RuntimeError('unknown mode: %s' % `mode`)
for data in lines:
curcnt = int(data['COUNT'])
nbits = int(data['DataUnitLen'])
cipherkey = data['Key'].decode('hex')
iv = struct.pack('QQ', int(data['DataUnitSeqNumber']), 0)
pt = data['PT'].decode('hex')
ct = data['CT'].decode('hex')
if nbits % 128 != 0:
# XXX - mark as skipped
continue
if swapptct:
pt, ct = ct, pt
# run the fun
c = Crypto(meth, cipherkey, crid=crid)
r = curfun(c, pt, iv)
self.assertEqual(r, ct)
###############
##### DES #####
###############
@unittest.skipIf(cname not in desmodules, 'skipping DES on %s' % `cname`)
def test_tdes(self):
for i in katg('KAT_TDES', 'TCBC[a-z]*.rsp'):
self.runTDES(i)
def runTDES(self, fname):
curfun = None
for mode, lines in cryptodev.KATParser(fname,
[ 'COUNT', 'KEYs', 'IV', 'PLAINTEXT', 'CIPHERTEXT', ]):
if mode == 'ENCRYPT':
swapptct = False
curfun = Crypto.encrypt
elif mode == 'DECRYPT':
swapptct = True
curfun = Crypto.decrypt
else:
raise RuntimeError('unknown mode: %s' % `mode`)
for data in lines:
curcnt = int(data['COUNT'])
key = data['KEYs'] * 3
cipherkey = key.decode('hex')
iv = data['IV'].decode('hex')
pt = data['PLAINTEXT'].decode('hex')
ct = data['CIPHERTEXT'].decode('hex')
if swapptct:
pt, ct = ct, pt
# run the fun
c = Crypto(cryptodev.CRYPTO_3DES_CBC, cipherkey, crid=crid)
r = curfun(c, pt, iv)
self.assertEqual(r, ct)
###############
##### SHA #####
###############
@unittest.skipIf(cname not in shamodules, 'skipping SHA on %s' % `cname`)
def test_sha(self):
# SHA not available in software
pass
#for i in iglob('SHA1*'):
# self.runSHA(i)
def test_sha1hmac(self):
for i in katg('hmactestvectors', 'HMAC.rsp'):
self.runSHA1HMAC(i)
def runSHA1HMAC(self, fname):
for bogusmode, lines in cryptodev.KATParser(fname,
[ 'Count', 'Klen', 'Tlen', 'Key', 'Msg', 'Mac' ]):
for data in lines:
key = data['Key'].decode('hex')
msg = data['Msg'].decode('hex')
mac = data['Mac'].decode('hex')
if len(key) != 20:
# XXX - implementation bug
continue
c = Crypto(mac=cryptodev.CRYPTO_SHA1_HMAC,
mackey=key, crid=crid)
_, r = c.encrypt(msg)
self.assertEqual(r, mac, `data`)
return GendCryptoTestCase
cryptosoft = GenTestCase('cryptosoft0')
aesni = GenTestCase('aesni0')
if __name__ == '__main__':
unittest.main()

@ -0,0 +1,160 @@
# $FreeBSD$
# $Id: dpkt.py 114 2005-09-11 15:15:12Z dugsong $
"""fast, simple packet creation / parsing, with definitions for the
basic TCP/IP protocols.
"""
__author__ = 'Dug Song <dugsong@monkey.org>'
__copyright__ = 'Copyright (c) 2004 Dug Song'
__license__ = 'BSD'
__url__ = 'http://monkey.org/~dugsong/dpkt/'
__version__ = '1.2'
try:
from itertools import izip as _it_izip
except ImportError:
_it_izip = zip
from struct import calcsize as _st_calcsize, \
pack as _st_pack, unpack as _st_unpack, error as _st_error
from re import compile as _re_compile
intchr = _re_compile(r"(?P<int>[0-9]+)(?P<chr>.)")
class MetaPacket(type):
def __new__(cls, clsname, clsbases, clsdict):
if '__hdr__' in clsdict:
st = clsdict['__hdr__']
clsdict['__hdr_fields__'] = [ x[0] for x in st ]
clsdict['__hdr_fmt__'] = clsdict.get('__byte_order__', '>') + \
''.join([ x[1] for x in st ])
clsdict['__hdr_len__'] = _st_calcsize(clsdict['__hdr_fmt__'])
clsdict['__hdr_defaults__'] = \
dict(zip(clsdict['__hdr_fields__'], [ x[2] for x in st ]))
clsdict['__slots__'] = clsdict['__hdr_fields__']
return type.__new__(cls, clsname, clsbases, clsdict)
class Packet(object):
"""Packet class
__hdr__ should be defined as a list of (name, structfmt, default) tuples
__byte_order__ can be set to override the default ('>')
"""
__metaclass__ = MetaPacket
data = ''
def __init__(self, *args, **kwargs):
"""Packet constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- packet buffer to unpack
Optional keyword arguments correspond to packet field names.
"""
if args:
self.unpack(args[0])
else:
for k in self.__hdr_fields__:
setattr(self, k, self.__hdr_defaults__[k])
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __repr__(self):
l = [ '%s=%r' % (k, getattr(self, k))
for k in self.__hdr_defaults__
if getattr(self, k) != self.__hdr_defaults__[k] ]
if self.data:
l.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return self.pack_hdr() + str(self.data)
def pack_hdr(self):
"""Return packed header string."""
try:
return _st_pack(self.__hdr_fmt__,
*[ getattr(self, k) for k in self.__hdr_fields__ ])
except _st_error:
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
return _st_pack(self.__hdr_fmt__, *vals)
def unpack(self, buf):
"""Unpack packet header fields from buf, and set self.data."""
res = list(_st_unpack(self.__hdr_fmt__, buf[:self.__hdr_len__]))
for e, k in enumerate(self.__slots__):
sfmt = self.__hdr__[e][1]
mat = intchr.match(sfmt)
if mat and mat.group('chr') != 's':
cnt = int(mat.group('int'))
setattr(self, k, list(res[:cnt]))
del res[:cnt]
else:
if sfmt[-1] == 's':
i = res[0].find('\x00')
if i != -1:
res[0] = res[0][:i]
setattr(self, k, res[0])
del res[0]
assert len(res) == 0
self.data = buf[self.__hdr_len__:]
# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
__vis_filter = """................................ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~................................................................................................................................."""
def hexdump(buf, length=16):
"""Return a hexdump output string of the given buffer."""
n = 0
res = []
while buf:
line, buf = buf[:length], buf[length:]
hexa = ' '.join(['%02x' % ord(x) for x in line])
line = line.translate(__vis_filter)
res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
n += length
return '\n'.join(res)
def in_cksum_add(s, buf):
"""in_cksum_add(cksum, buf) -> cksum
Return accumulated Internet checksum.
"""
nleft = len(buf)
i = 0
while nleft > 1:
s += ord(buf[i]) * 256 + ord(buf[i+1])
i += 2
nleft -= 2
if nleft:
s += ord(buf[i]) * 256
return s
def in_cksum_done(s):
"""Fold and return Internet checksum."""
while (s >> 16):
s = (s >> 16) + (s & 0xffff)
return (~s & 0xffff)
def in_cksum(buf):
"""Return computed Internet checksum."""
return in_cksum_done(in_cksum_add(0, buf))
try:
import psyco
psyco.bind(in_cksum)
psyco.bind(Packet)
except ImportError:
pass

@ -0,0 +1,60 @@
#!/bin/sh -
#
# Copyright (c) 2014 The FreeBSD Foundation
# All rights reserved.
#
# This software was developed by John-Mark Gurney under
# the sponsorship from the FreeBSD Foundation.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
set -e
if [ ! -d /usr/local/share/nist-kat ]; then
echo 'Skipping, nist-kat package not installed for test vectors.'
exit 0
fi
if kldload aesni 2>/dev/null; then
unloadaesni=1
fi
if kldload cryptodev 2>/dev/null; then
unloadcdev=1
fi
# Run software crypto test
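# kern.cryptodevallowsoft permits /dev/crypto to hand requests to the
# software driver (cryptosoft0); save the old value so it can be restored.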
oldcdas=$(sysctl -e kern.cryptodevallowsoft)
sysctl kern.cryptodevallowsoft=1
python "$(dirname "$0")"/cryptotest.py
sysctl "$oldcdas"
if [ x"$unloadcdev" = x"1" ]; then
kldunload cryptodev
fi
if [ x"$unloadaesni" = x"1" ]; then
kldunload aesni
fi

@ -6,7 +6,7 @@ PROG= mkimg
SRCS= format.c image.c mkimg.c scheme.c
MAN= mkimg.1
MKIMG_VERSION=20141003
MKIMG_VERSION=20141211
mkimg.o: Makefile
CFLAGS+=-DMKIMG_VERSION=${MKIMG_VERSION}

@ -71,7 +71,7 @@ struct qcow_header {
uint32_t l1_entries;
uint64_t l1_offset;
uint64_t refcnt_offset;
uint32_t refcnt_entries;
uint32_t refcnt_clstrs;
uint32_t snapshot_count;
uint64_t snapshot_offset;
} v2;
@ -139,7 +139,7 @@ qcow_write(int fd, u_int version)
uint64_t n, imagesz, nclstrs, ofs, ofsflags;
lba_t blk, blkofs, blk_imgsz;
u_int l1clno, l2clno, rcclno;
u_int blk_clstrsz;
u_int blk_clstrsz, refcnt_clstrs;
u_int clstrsz, l1idx, l2idx;
int error;
@ -199,14 +199,15 @@ qcow_write(int fd, u_int version)
be32enc(&hdr->u.v2.l1_entries, clstr_l2tbls);
be64enc(&hdr->u.v2.l1_offset, clstrsz * l1clno);
be64enc(&hdr->u.v2.refcnt_offset, clstrsz * rcclno);
be32enc(&hdr->u.v2.refcnt_entries, clstr_rcblks);
refcnt_clstrs = round_clstr(clstr_rcblks * 8) >> clstr_log2sz;
be32enc(&hdr->u.v2.refcnt_clstrs, refcnt_clstrs);
break;
default:
return (EDOOFUS);
}
if (sparse_write(fd, hdr, clstrsz) < 0) {
error = errno;
error = errno;
goto out;
}

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 20, 2014
.Dd December 13, 2014
.Dt AUTOUNMOUNTD 8
.Os
.Sh NAME
@ -36,8 +36,8 @@
.Sh SYNOPSIS
.Nm
.Op Fl d
.Op Fl r time
.Op Fl t time
.Op Fl r Ar time
.Op Fl t Ar time
.Op Fl v
.Sh DESCRIPTION
The

@ -1542,7 +1542,7 @@ init(int signo)
struct filed *f, *next, **nextp;
char *p;
char cline[LINE_MAX];
char prog[NAME_MAX+1];
char prog[LINE_MAX];
char host[MAXHOSTNAMELEN];
char oldLocalHostName[MAXHOSTNAMELEN];
char hostMsg[2*MAXHOSTNAMELEN+40];
@ -1664,7 +1664,7 @@ init(int signo)
(void)strlcpy(prog, "*", sizeof(prog));
continue;
}
for (i = 0; i < NAME_MAX; i++) {
for (i = 0; i < LINE_MAX - 1; i++) {
if (!isprint(p[i]) || isspace(p[i]))
break;
prog[i] = p[i];