attilio 2011-11-12 17:12:33 +00:00
parent 6e9e854884
commit f44dbaa16a
165 changed files with 13638 additions and 1783 deletions

View File

@ -131,11 +131,11 @@ static arith_t do_binop(int op, arith_t a, arith_t b)
yyerror("divide error");
return op == ARITH_REM ? a % b : a / b;
case ARITH_MUL:
return a * b;
return (uintmax_t)a * (uintmax_t)b;
case ARITH_ADD:
return a + b;
return (uintmax_t)a + (uintmax_t)b;
case ARITH_SUB:
return a - b;
return (uintmax_t)a - (uintmax_t)b;
case ARITH_LSHIFT:
return a << b;
case ARITH_RSHIFT:

View File

@ -3217,7 +3217,8 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
gcc_assert (!no_new_pseudos);
if (arm_pic_register != INVALID_REGNUM)
{
cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
if (!cfun->machine->pic_reg)
cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
/* Play games to avoid marking the function as needing pic
if we are being called as part of the cost-estimation
@ -3229,7 +3230,8 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
rtx seq;
cfun->machine->pic_reg = gen_reg_rtx (Pmode);
if (!cfun->machine->pic_reg)
cfun->machine->pic_reg = gen_reg_rtx (Pmode);
/* Play games to avoid marking the function as needing pic
if we are being called as part of the cost-estimation

View File

@ -247,6 +247,7 @@ static_atm()
static_ipx()
{
:
}
ropts_init()

View File

@ -99,7 +99,7 @@ syscons_configure_keyboard()
*)
sc_init
echo -n ' keychange'
set - ${keychange}
set -- ${keychange}
while [ $# -gt 0 ]; do
kbdcontrol <${kbddev} -f "$1" "$2"
shift; shift

View File

@ -1638,7 +1638,7 @@ find_local_scripts_old () {
continue
zlist="$zlist $file"
done
for file in ${dir}/[^0-9]*.sh; do
for file in ${dir}/[!0-9]*.sh; do
grep '^# PROVIDE:' $file >/dev/null 2>&1 &&
continue
slist="$slist $file"

View File

@ -81,10 +81,10 @@ extern "C" {
#endif
/* === regcomp.c === */
static void p_ere(struct parse *p, wint_t stop);
static void p_ere(struct parse *p, int stop);
static void p_ere_exp(struct parse *p);
static void p_str(struct parse *p);
static void p_bre(struct parse *p, wint_t end1, wint_t end2);
static void p_bre(struct parse *p, int end1, int end2);
static int p_simp_re(struct parse *p, int starordinary);
static int p_count(struct parse *p);
static void p_bracket(struct parse *p);
@ -109,7 +109,7 @@ static sopno dupl(struct parse *p, sopno start, sopno finish);
static void doemit(struct parse *p, sop op, size_t opnd);
static void doinsert(struct parse *p, sop op, size_t opnd, sopno pos);
static void dofwd(struct parse *p, sopno pos, sop value);
static void enlarge(struct parse *p, sopno size);
static int enlarge(struct parse *p, sopno size);
static void stripsnug(struct parse *p, struct re_guts *g);
static void findmust(struct parse *p, struct re_guts *g);
static int altoffset(sop *scan, int offset);
@ -285,7 +285,7 @@ regcomp(regex_t * __restrict preg,
/*
- p_ere - ERE parser top level, concatenation and alternation
== static void p_ere(struct parse *p, int stop);
== static void p_ere(struct parse *p, int_t stop);
*/
static void
p_ere(struct parse *p,
@ -493,7 +493,7 @@ p_str(struct parse *p)
/*
- p_bre - BRE parser top level, anchoring and concatenation
== static void p_bre(struct parse *p, int end1, \
== static void p_bre(struct parse *p, int end1, \
== int end2);
* Giving end1 as OUT essentially eliminates the end1/end2 check.
*
@ -840,7 +840,7 @@ p_b_eclass(struct parse *p, cset *cs)
/*
- p_b_symbol - parse a character or [..]ed multicharacter collating symbol
== static char p_b_symbol(struct parse *p);
== static wint_t p_b_symbol(struct parse *p);
*/
static wint_t /* value of symbol */
p_b_symbol(struct parse *p)
@ -859,7 +859,7 @@ p_b_symbol(struct parse *p)
/*
- p_b_coll_elem - parse a collating-element name and look it up
== static char p_b_coll_elem(struct parse *p, int endc);
== static wint_t p_b_coll_elem(struct parse *p, wint_t endc);
*/
static wint_t /* value of collating element */
p_b_coll_elem(struct parse *p,
@ -894,7 +894,7 @@ p_b_coll_elem(struct parse *p,
/*
- othercase - return the case counterpart of an alphabetic
== static char othercase(int ch);
== static wint_t othercase(wint_t ch);
*/
static wint_t /* if no counterpart, return ch */
othercase(wint_t ch)
@ -910,7 +910,7 @@ othercase(wint_t ch)
/*
- bothcases - emit a dualcase version of a two-case character
== static void bothcases(struct parse *p, int ch);
== static void bothcases(struct parse *p, wint_t ch);
*
* Boy, is this implementation ever a kludge...
*/
@ -939,7 +939,7 @@ bothcases(struct parse *p, wint_t ch)
/*
- ordinary - emit an ordinary character
== static void ordinary(struct parse *p, int ch);
== static void ordinary(struct parse *p, wint_t ch);
*/
static void
ordinary(struct parse *p, wint_t ch)
@ -1246,8 +1246,8 @@ dupl(struct parse *p,
assert(finish >= start);
if (len == 0)
return(ret);
enlarge(p, p->ssize + len); /* this many unexpected additions */
assert(p->ssize >= p->slen + len);
if (!enlarge(p, p->ssize + len)) /* this many unexpected additions */
return(ret);
(void) memcpy((char *)(p->strip + p->slen),
(char *)(p->strip + start), (size_t)len*sizeof(sop));
p->slen += len;
@ -1274,8 +1274,8 @@ doemit(struct parse *p, sop op, size_t opnd)
/* deal with undersized strip */
if (p->slen >= p->ssize)
enlarge(p, (p->ssize+1) / 2 * 3); /* +50% */
assert(p->slen < p->ssize);
if (!enlarge(p, (p->ssize+1) / 2 * 3)) /* +50% */
return;
/* finally, it's all reduced to the easy case */
p->strip[p->slen++] = SOP(op, opnd);
@ -1334,23 +1334,24 @@ dofwd(struct parse *p, sopno pos, sop value)
/*
- enlarge - enlarge the strip
== static void enlarge(struct parse *p, sopno size);
== static int enlarge(struct parse *p, sopno size);
*/
static void
static int
enlarge(struct parse *p, sopno size)
{
sop *sp;
if (p->ssize >= size)
return;
return 1;
sp = (sop *)realloc(p->strip, size*sizeof(sop));
if (sp == NULL) {
SETERROR(REG_ESPACE);
return;
return 0;
}
p->strip = sp;
p->ssize = size;
return 1;
}
/*

View File

@ -28,7 +28,7 @@
.\" @(#)flock.2 8.2 (Berkeley) 12/11/93
.\" $FreeBSD$
.\"
.Dd January 22, 2008
.Dd November 9, 2011
.Dt FLOCK 2
.Os
.Sh NAME
@ -154,6 +154,8 @@ refers to an object other than a file.
The argument
.Fa fd
refers to an object that does not support file locking.
.It Bq Er ENOLCK
A lock was requested, but no locks are available.
.El
.Sh SEE ALSO
.Xr close 2 ,

View File

@ -74,6 +74,10 @@ MLINKS += libusb.3 libusb_get_config_descriptor.3
MLINKS += libusb.3 libusb_get_config_descriptor_by_value.3
MLINKS += libusb.3 libusb_free_config_descriptor.3
MLINKS += libusb.3 libusb_get_string_descriptor_ascii.3
MLINKS += libusb.3 libusb_parse_ss_endpoint_comp.3
MLINKS += libusb.3 libusb_free_ss_endpoint_comp.3
MLINKS += libusb.3 libusb_parse_bos_descriptor.3
MLINKS += libusb.3 libusb_free_bos_descriptor.3
MLINKS += libusb.3 libusb_alloc_transfer.3
MLINKS += libusb.3 libusb_free_transfer.3
MLINKS += libusb.3 libusb_submit_transfer.3

View File

@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd August 16, 2011
.Dd November 9, 2011
.Dt LIBUSB 3
.Os
.Sh NAME
@ -316,6 +316,40 @@ Retrieve a string descriptor in C style ASCII.
Returns the positive number of bytes in the resulting ASCII string
on success and a LIBUSB_ERROR code on failure.
.Pp
.Ft int
.Fn libusb_parse_ss_endpoint_comp "const void *buf" "int len" "libusb_ss_endpoint_companion_descriptor **ep_comp"
This function parses the USB 3.0 endpoint companion descriptor pointed to by
.Fa buf
and having a length of
.Fa len
into host endian format.
Typically these arguments are the extra and extra_length fields of the
endpoint descriptor.
On success the pointer to the resulting descriptor is stored at the location
given by
.Fa ep_comp .
Returns zero on success and a LIBUSB_ERROR code on failure.
On success the parsed USB 3.0 endpoint companion descriptor must be
freed using the libusb_free_ss_endpoint_comp function.
.Pp
.Ft void
.Fn libusb_free_ss_endpoint_comp "libusb_ss_endpoint_companion_descriptor *ep_comp"
This function is NULL safe and frees a parsed USB 3.0 endpoint companion descriptor.
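A minimal usage sketch follows; it assumes
.Va ep
points to a previously parsed endpoint descriptor whose extra and
extra_length fields are available:
.Bd -literal -offset indent
struct libusb_ss_endpoint_companion_descriptor *ep_comp;
int rc;

/* ep is assumed to point to a parsed libusb_endpoint_descriptor */
rc = libusb_parse_ss_endpoint_comp(ep->extra, ep->extra_length, &ep_comp);
if (rc == 0) {
	/* inspect ep_comp->bMaxBurst, ep_comp->wBytesPerInterval, ... */
	libusb_free_ss_endpoint_comp(ep_comp);
}
.Ed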
.Pp
.Ft int
.Fn libusb_parse_bos_descriptor "const void *buf" "int len" "libusb_bos_descriptor **bos"
This function parses a Binary Object Store (BOS) descriptor pointed to by
.Fa buf
and having a length of
.Fa len
into host endian format.
On success the pointer to the resulting descriptor is stored at the location
given by
.Fa bos .
Returns zero on success and a LIBUSB_ERROR code on failure.
On success the parsed BOS descriptor must be freed using the
libusb_free_bos_descriptor function.
.Pp
.Ft void
.Fn libusb_free_bos_descriptor "libusb_bos_descriptor *bos"
This function is NULL safe and frees a parsed BOS descriptor.
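A minimal usage sketch follows; it assumes
.Va devh
is an open device handle:
.Bd -literal -offset indent
uint8_t buf[256];
struct libusb_bos_descriptor *bos;
int len;

/* devh is assumed to be an open libusb_device_handle */
len = libusb_get_descriptor(devh, LIBUSB_DT_BOS, 0, buf, sizeof(buf));
if (len > 0 && libusb_parse_bos_descriptor(buf, len, &bos) == 0) {
	if (bos->ss_usb_cap != NULL) {
		/* inspect bos->ss_usb_cap->wSpeedSupported, ... */
	}
	libusb_free_bos_descriptor(bos);
}
.Ed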
.Pp
.Sh USB ASYNCHRONOUS I/O
.Pp
.Ft struct libusb_transfer *

View File

@ -63,6 +63,16 @@ enum libusb_descriptor_type {
LIBUSB_DT_REPORT = 0x22,
LIBUSB_DT_PHYSICAL = 0x23,
LIBUSB_DT_HUB = 0x29,
LIBUSB_DT_BOS = 0x0f,
LIBUSB_DT_DEVICE_CAPABILITY = 0x10,
LIBUSB_DT_SS_ENDPOINT_COMPANION = 0x30,
};
enum libusb_device_capability_type {
LIBUSB_WIRELESS_USB_DEVICE_CAPABILITY = 0x1,
LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY = 0x2,
LIBUSB_SS_USB_DEVICE_CAPABILITY = 0x3,
LIBUSB_CONTAINER_ID_DEVICE_CAPABILITY = 0x4,
};
#define LIBUSB_DT_DEVICE_SIZE 18
@ -71,6 +81,10 @@ enum libusb_descriptor_type {
#define LIBUSB_DT_ENDPOINT_SIZE 7
#define LIBUSB_DT_ENDPOINT_AUDIO_SIZE 9
#define LIBUSB_DT_HUB_NONVAR_SIZE 7
#define LIBUSB_DT_SS_ENDPOINT_COMPANION_SIZE 6
#define LIBUSB_DT_BOS_SIZE 5
#define LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE 7
#define LIBUSB_SS_USB_DEVICE_CAPABILITY_SIZE 10
#define LIBUSB_ENDPOINT_ADDRESS_MASK 0x0f
#define LIBUSB_ENDPOINT_DIR_MASK 0x80
@ -230,6 +244,14 @@ typedef struct libusb_endpoint_descriptor {
int extra_length;
} libusb_endpoint_descriptor __aligned(sizeof(void *));
typedef struct libusb_ss_endpoint_companion_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bMaxBurst;
uint8_t bmAttributes;
uint16_t wBytesPerInterval;
} libusb_ss_endpoint_companion_descriptor __aligned(sizeof(void *));
typedef struct libusb_interface_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
@ -264,6 +286,39 @@ typedef struct libusb_config_descriptor {
int extra_length;
} libusb_config_descriptor __aligned(sizeof(void *));
typedef struct libusb_usb_2_0_device_capability_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bDevCapabilityType;
uint32_t bmAttributes;
#define LIBUSB_USB_2_0_CAPABILITY_LPM_SUPPORT (1 << 1)
} libusb_usb_2_0_device_capability_descriptor __aligned(sizeof(void *));
typedef struct libusb_ss_usb_device_capability_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint8_t bDevCapabilityType;
uint8_t bmAttributes;
#define LIBUSB_SS_USB_CAPABILITY_LPM_SUPPORT (1 << 1)
uint16_t wSpeedSupported;
#define LIBUSB_CAPABILITY_LOW_SPEED_OPERATION (1)
#define LIBUSB_CAPABILITY_FULL_SPEED_OPERATION (1 << 1)
#define LIBUSB_CAPABILITY_HIGH_SPEED_OPERATION (1 << 2)
#define LIBUSB_CAPABILITY_5GBPS_OPERATION (1 << 3)
uint8_t bFunctionalitySupport;
uint8_t bU1DevExitLat;
uint16_t wU2DevExitLat;
} libusb_ss_usb_device_capability_descriptor __aligned(sizeof(void *));
typedef struct libusb_bos_descriptor {
uint8_t bLength;
uint8_t bDescriptorType;
uint16_t wTotalLength;
uint8_t bNumDeviceCapabilities;
struct libusb_usb_2_0_device_capability_descriptor *usb_2_0_ext_cap;
struct libusb_ss_usb_device_capability_descriptor *ss_usb_cap;
} libusb_bos_descriptor __aligned(sizeof(void *));
typedef struct libusb_control_setup {
uint8_t bmRequestType;
uint8_t bRequest;
@ -345,6 +400,10 @@ int libusb_get_config_descriptor_by_value(libusb_device * dev, uint8_t bConfigur
void libusb_free_config_descriptor(struct libusb_config_descriptor *config);
int libusb_get_string_descriptor_ascii(libusb_device_handle * devh, uint8_t desc_index, uint8_t *data, int length);
int libusb_get_descriptor(libusb_device_handle * devh, uint8_t desc_type, uint8_t desc_index, uint8_t *data, int length);
int libusb_parse_ss_endpoint_comp(const void *buf, int len, struct libusb_ss_endpoint_companion_descriptor **ep_comp);
void libusb_free_ss_endpoint_comp(struct libusb_ss_endpoint_companion_descriptor *ep_comp);
int libusb_parse_bos_descriptor(const void *buf, int len, struct libusb_bos_descriptor **bos);
void libusb_free_bos_descriptor(struct libusb_bos_descriptor *bos);
/* Asynchronous device I/O */

View File

@ -298,7 +298,7 @@ libusb_get_string_descriptor_ascii(libusb_device_handle *pdev,
uint8_t desc_index, unsigned char *data, int length)
{
if (pdev == NULL || data == NULL || length < 1)
return (LIBUSB20_ERROR_INVALID_PARAM);
return (LIBUSB_ERROR_INVALID_PARAM);
if (length > 65535)
length = 65535;
@ -318,7 +318,7 @@ libusb_get_descriptor(libusb_device_handle * devh, uint8_t desc_type,
uint8_t desc_index, uint8_t *data, int length)
{
if (devh == NULL || data == NULL || length < 1)
return (LIBUSB20_ERROR_INVALID_PARAM);
return (LIBUSB_ERROR_INVALID_PARAM);
if (length > 65535)
length = 65535;
@ -327,3 +327,172 @@ libusb_get_descriptor(libusb_device_handle * devh, uint8_t desc_type,
LIBUSB_REQUEST_GET_DESCRIPTOR, (desc_type << 8) | desc_index, 0, data,
length, 1000));
}
int
libusb_parse_ss_endpoint_comp(const void *buf, int len,
struct libusb_ss_endpoint_companion_descriptor **ep_comp)
{
if (buf == NULL || ep_comp == NULL || len < 1)
return (LIBUSB_ERROR_INVALID_PARAM);
if (len > 65535)
len = 65535;
*ep_comp = NULL;
while (len != 0) {
uint8_t dlen;
uint8_t dtype;
dlen = ((const uint8_t *)buf)[0];
dtype = ((const uint8_t *)buf)[1];
if (dlen < 2 || dlen > len)
break;
if (dlen >= LIBUSB_DT_SS_ENDPOINT_COMPANION_SIZE &&
dtype == LIBUSB_DT_SS_ENDPOINT_COMPANION) {
struct libusb_ss_endpoint_companion_descriptor *ptr;
ptr = malloc(sizeof(*ptr));
if (ptr == NULL)
return (LIBUSB_ERROR_NO_MEM);
ptr->bLength = LIBUSB_DT_SS_ENDPOINT_COMPANION_SIZE;
ptr->bDescriptorType = dtype;
ptr->bMaxBurst = ((const uint8_t *)buf)[2];
ptr->bmAttributes = ((const uint8_t *)buf)[3];
ptr->wBytesPerInterval = ((const uint8_t *)buf)[4] |
(((const uint8_t *)buf)[5] << 8);
*ep_comp = ptr;
return (0); /* success */
}
buf = ((const uint8_t *)buf) + dlen;
len -= dlen;
}
return (LIBUSB_ERROR_IO);
}
void
libusb_free_ss_endpoint_comp(struct libusb_ss_endpoint_companion_descriptor *ep_comp)
{
if (ep_comp == NULL)
return;
free(ep_comp);
}
int
libusb_parse_bos_descriptor(const void *buf, int len,
struct libusb_bos_descriptor **bos)
{
struct libusb_bos_descriptor *ptr;
struct libusb_usb_2_0_device_capability_descriptor *dcap_20;
struct libusb_ss_usb_device_capability_descriptor *ss_cap;
if (buf == NULL || bos == NULL || len < 1)
return (LIBUSB_ERROR_INVALID_PARAM);
if (len > 65535)
len = 65535;
*bos = ptr = NULL;
while (len != 0) {
uint8_t dlen;
uint8_t dtype;
dlen = ((const uint8_t *)buf)[0];
dtype = ((const uint8_t *)buf)[1];
if (dlen < 2 || dlen > len)
break;
if (dlen >= LIBUSB_DT_BOS_SIZE &&
dtype == LIBUSB_DT_BOS) {
ptr = malloc(sizeof(*ptr) + sizeof(*dcap_20) +
sizeof(*ss_cap));
if (ptr == NULL)
return (LIBUSB_ERROR_NO_MEM);
*bos = ptr;
ptr->bLength = LIBUSB_DT_BOS_SIZE;
ptr->bDescriptorType = dtype;
ptr->wTotalLength = ((const uint8_t *)buf)[2] |
(((const uint8_t *)buf)[3] << 8);
ptr->bNumDeviceCapabilities = ((const uint8_t *)buf)[4];
ptr->usb_2_0_ext_cap = NULL;
ptr->ss_usb_cap = NULL;
dcap_20 = (void *)(ptr + 1);
ss_cap = (void *)(dcap_20 + 1);
}
if (dlen >= 3 &&
ptr != NULL &&
dtype == LIBUSB_DT_DEVICE_CAPABILITY) {
switch (((const uint8_t *)buf)[2]) {
case LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY:
if (ptr->usb_2_0_ext_cap != NULL)
break;
if (dlen < LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE)
break;
ptr->usb_2_0_ext_cap = dcap_20;
dcap_20->bLength = LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE;
dcap_20->bDescriptorType = dtype;
dcap_20->bDevCapabilityType = ((const uint8_t *)buf)[2];
dcap_20->bmAttributes = ((const uint8_t *)buf)[3] |
(((const uint8_t *)buf)[4] << 8) |
(((const uint8_t *)buf)[5] << 16) |
(((const uint8_t *)buf)[6] << 24);
break;
case LIBUSB_SS_USB_DEVICE_CAPABILITY:
if (ptr->ss_usb_cap != NULL)
break;
if (dlen < LIBUSB_SS_USB_DEVICE_CAPABILITY_SIZE)
break;
ptr->ss_usb_cap = ss_cap;
ss_cap->bLength = LIBUSB_SS_USB_DEVICE_CAPABILITY_SIZE;
ss_cap->bDescriptorType = dtype;
ss_cap->bDevCapabilityType = ((const uint8_t *)buf)[2];
ss_cap->bmAttributes = ((const uint8_t *)buf)[3];
ss_cap->wSpeedSupported = ((const uint8_t *)buf)[4] |
(((const uint8_t *)buf)[5] << 8);
ss_cap->bFunctionalitySupport = ((const uint8_t *)buf)[6];
ss_cap->bU1DevExitLat = ((const uint8_t *)buf)[7];
ss_cap->wU2DevExitLat = ((const uint8_t *)buf)[8] |
(((const uint8_t *)buf)[9] << 8);
break;
default:
break;
}
}
buf = ((const uint8_t *)buf) + dlen;
len -= dlen;
}
if (ptr != NULL)
return (0); /* success */
return (LIBUSB_ERROR_IO);
}
void
libusb_free_bos_descriptor(struct libusb_bos_descriptor *bos)
{
if (bos == NULL)
return;
free(bos);
}

View File

@ -41,6 +41,10 @@ LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_ENDPOINT_DESC);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_INTERFACE_DESC);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_CONFIG_DESC);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_CONTROL_SETUP);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_SS_ENDPT_COMP_DESC);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_USB_20_DEVCAP_DESC);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_SS_USB_DEVCAP_DESC);
LIBUSB20_MAKE_STRUCT_FORMAT(LIBUSB20_BOS_DESCRIPTOR);
/*------------------------------------------------------------------------*
* libusb20_parse_config_desc

View File

@ -264,6 +264,43 @@ LIBUSB20_MAKE_STRUCT(LIBUSB20_CONFIG_DESC);
LIBUSB20_MAKE_STRUCT(LIBUSB20_CONTROL_SETUP);
#define LIBUSB20_SS_ENDPT_COMP_DESC(m,n) \
m(n, UINT8_T, bLength, ) \
m(n, UINT8_T, bDescriptorType, ) \
m(n, UINT8_T, bMaxBurst, ) \
m(n, UINT8_T, bmAttributes, ) \
m(n, UINT16_T, wBytesPerInterval, ) \
LIBUSB20_MAKE_STRUCT(LIBUSB20_SS_ENDPT_COMP_DESC);
#define LIBUSB20_USB_20_DEVCAP_DESC(m,n) \
m(n, UINT8_T, bLength, ) \
m(n, UINT8_T, bDescriptorType, ) \
m(n, UINT8_T, bDevCapabilityType, ) \
m(n, UINT32_T, bmAttributes, ) \
LIBUSB20_MAKE_STRUCT(LIBUSB20_USB_20_DEVCAP_DESC);
#define LIBUSB20_SS_USB_DEVCAP_DESC(m,n) \
m(n, UINT8_T, bLength, ) \
m(n, UINT8_T, bDescriptorType, ) \
m(n, UINT8_T, bDevCapabilityType, ) \
m(n, UINT8_T, bmAttributes, ) \
m(n, UINT16_T, wSpeedSupported, ) \
m(n, UINT8_T, bFunctionalitySupport, ) \
m(n, UINT8_T, bU1DevExitLat, ) \
m(n, UINT16_T, wU2DevExitLat, ) \
LIBUSB20_MAKE_STRUCT(LIBUSB20_SS_USB_DEVCAP_DESC);
#define LIBUSB20_BOS_DESCRIPTOR(m,n) \
m(n, UINT8_T, bLength, ) \
m(n, UINT8_T, bDescriptorType, ) \
m(n, UINT16_T, wTotalLength, ) \
m(n, UINT8_T, bNumDeviceCapabilities, ) \
LIBUSB20_MAKE_STRUCT(LIBUSB20_BOS_DESCRIPTOR);
/* standard USB stuff */
/** \ingroup desc
@ -333,6 +370,24 @@ enum libusb20_descriptor_type {
/** Hub descriptor */
LIBUSB20_DT_HUB = 0x29,
/** Binary Object Store, BOS */
LIBUSB20_DT_BOS = 0x0f,
/** Device Capability */
LIBUSB20_DT_DEVICE_CAPABILITY = 0x10,
/** SuperSpeed endpoint companion */
LIBUSB20_DT_SS_ENDPOINT_COMPANION = 0x30,
};
/** \ingroup desc
* Device capability types as defined by the USB specification. */
enum libusb20_device_capability_type {
LIBUSB20_WIRELESS_USB_DEVICE_CAPABILITY = 0x1,
LIBUSB20_USB_2_0_EXTENSION_DEVICE_CAPABILITY = 0x2,
LIBUSB20_SS_USB_DEVICE_CAPABILITY = 0x3,
LIBUSB20_CONTAINER_ID_DEVICE_CAPABILITY = 0x4,
};
/* Descriptor sizes per descriptor type */
@ -342,6 +397,10 @@ enum libusb20_descriptor_type {
#define LIBUSB20_DT_ENDPOINT_SIZE 7
#define LIBUSB20_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */
#define LIBUSB20_DT_HUB_NONVAR_SIZE 7
#define LIBUSB20_DT_SS_ENDPOINT_COMPANION_SIZE 6
#define LIBUSB20_DT_BOS_SIZE 5
#define LIBUSB20_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE 7
#define LIBUSB20_SS_USB_DEVICE_CAPABILITY_SIZE 10
#define LIBUSB20_ENDPOINT_ADDRESS_MASK 0x0f /* in bEndpointAddress */
#define LIBUSB20_ENDPOINT_DIR_MASK 0x80

View File

@ -798,8 +798,12 @@
&hwlist.mly;
&hwlist.mps;
&hwlist.mpt;
&hwlist.mvs;
&hwlist.ncr;
&hwlist.ncv;

View File

@ -15,5 +15,5 @@ WARNS?= 1
CFLAGS+=-DFS_DEBUG -I${GROWFS}
DPADD= ${LIBUFS}
LDADD= -lufs
.include <bsd.prog.mk>
.include <bsd.prog.mk>

View File

@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <strings.h>
#include <assert.h>
#include <libgeom.h>
#include <unistd.h>
#include <uuid.h>
#include <geom/multipath/g_multipath.h>
@ -48,31 +49,58 @@ uint32_t version = G_MULTIPATH_VERSION;
static void mp_main(struct gctl_req *, unsigned int);
static void mp_label(struct gctl_req *);
static void mp_clear(struct gctl_req *);
static void mp_add(struct gctl_req *);
struct g_command class_commands[] = {
{
"label", G_FLAG_VERBOSE | G_FLAG_LOADKLD, mp_main, G_NULL_OPTS,
"[-v] name prov ..."
"create", G_FLAG_VERBOSE | G_FLAG_LOADKLD, NULL,
{
{ 'A', "active_active", NULL, G_TYPE_BOOL },
G_OPT_SENTINEL
},
"[-vA] name prov ..."
},
{
"add", G_FLAG_VERBOSE | G_FLAG_LOADKLD, mp_main, G_NULL_OPTS,
"[-v] name prov ..."
"label", G_FLAG_VERBOSE | G_FLAG_LOADKLD, mp_main,
{
{ 'A', "active_active", NULL, G_TYPE_BOOL },
G_OPT_SENTINEL
},
"[-vA] name prov ..."
},
{
"destroy", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] prov ..."
"add", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name prov"
},
{
"clear", G_FLAG_VERBOSE, mp_main, G_NULL_OPTS,
"[-v] prov ..."
"remove", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name prov"
},
{
"fail", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name prov"
},
{
"restore", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name prov"
},
{
"rotate", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] prov ..."
"[-v] name"
},
{
"getactive", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name"
},
{
"destroy", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name"
},
{
"stop", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
"[-v] name"
},
{
"clear", G_FLAG_VERBOSE, mp_main, G_NULL_OPTS,
"[-v] prov ..."
},
G_CMD_SENTINEL
@ -90,8 +118,6 @@ mp_main(struct gctl_req *req, unsigned int flags __unused)
}
if (strcmp(name, "label") == 0) {
mp_label(req);
} else if (strcmp(name, "add") == 0) {
mp_add(req);
} else if (strcmp(name, "clear") == 0) {
mp_clear(req);
} else {
@ -103,13 +129,13 @@ static void
mp_label(struct gctl_req *req)
{
struct g_multipath_metadata md;
off_t disksiz = 0, msize;
uint8_t *sector;
off_t disksize = 0, msize;
uint8_t *sector, *rsector;
char *ptr;
uuid_t uuid;
uint32_t secsize = 0, ssize, status;
const char *name, *mpname;
int error, i, nargs;
const char *name, *name2, *mpname;
int error, i, nargs, fd;
nargs = gctl_get_int(req, "nargs");
if (nargs < 2) {
@ -132,14 +158,14 @@ mp_label(struct gctl_req *req)
}
if (i == 1) {
secsize = ssize;
disksiz = msize;
disksize = msize;
} else {
if (secsize != ssize) {
gctl_error(req, "%s sector size %u different.",
name, ssize);
return;
}
if (disksiz != msize) {
if (disksize != msize) {
gctl_error(req, "%s media size %ju different.",
name, (intmax_t)msize);
return;
@ -155,7 +181,7 @@ mp_label(struct gctl_req *req)
md.md_version = G_MULTIPATH_VERSION;
mpname = gctl_get_ascii(req, "arg0");
strlcpy(md.md_name, mpname, sizeof(md.md_name));
md.md_size = disksiz;
md.md_size = disksize;
md.md_sectorsize = secsize;
uuid_create(&uuid, &status);
if (status != uuid_s_ok) {
@ -168,18 +194,9 @@ mp_label(struct gctl_req *req)
return;
}
strlcpy(md.md_uuid, ptr, sizeof (md.md_uuid));
md.md_active_active = gctl_get_int(req, "active_active");
free(ptr);
/*
* Clear metadata on initial provider first.
*/
name = gctl_get_ascii(req, "arg1");
error = g_metadata_clear(name, NULL);
if (error != 0) {
gctl_error(req, "cannot clear metadata on %s: %s.", name, strerror(error));
return;
}
/*
* Allocate a sector to write as metadata.
*/
@ -189,6 +206,12 @@ mp_label(struct gctl_req *req)
return;
}
memset(sector, 0, secsize);
rsector = malloc(secsize);
if (rsector == NULL) {
free(sector);
gctl_error(req, "unable to allocate metadata buffer");
return;
}
/*
* encode the metadata
@ -198,6 +221,7 @@ mp_label(struct gctl_req *req)
/*
* Store metadata on the initial provider.
*/
name = gctl_get_ascii(req, "arg1");
error = g_metadata_store(name, sector, secsize);
if (error != 0) {
gctl_error(req, "cannot store metadata on %s: %s.", name, strerror(error));
@ -205,20 +229,29 @@ mp_label(struct gctl_req *req)
}
/*
* Now add the rest of the providers.
* Now touch the rest of the providers to hint retaste.
*/
error = gctl_change_param(req, "verb", -1, "add");
if (error) {
gctl_error(req, "unable to change verb to \"add\": %s.", strerror(error));
return;
}
for (i = 2; i < nargs; i++) {
error = gctl_change_param(req, "arg1", -1, gctl_get_ascii(req, "arg%d", i));
if (error) {
gctl_error(req, "unable to add %s to %s: %s.", gctl_get_ascii(req, "arg%d", i), mpname, strerror(error));
name2 = gctl_get_ascii(req, "arg%d", i);
fd = g_open(name2, 1);
if (fd < 0) {
fprintf(stderr, "Unable to open %s: %s.\n",
name2, strerror(errno));
continue;
}
mp_add(req);
if (pread(fd, rsector, secsize, disksize - secsize) !=
secsize) {
fprintf(stderr, "Unable to read metadata from %s: %s.\n",
name2, strerror(errno));
g_close(fd);
continue;
}
g_close(fd);
if (memcmp(sector, rsector, secsize)) {
fprintf(stderr, "No metadata found on %s."
" It is not a path of %s.\n",
name2, name);
}
}
}
@ -247,13 +280,3 @@ mp_clear(struct gctl_req *req)
}
}
static void
mp_add(struct gctl_req *req)
{
const char *errstr;
errstr = gctl_issue(req);
if (errstr != NULL && errstr[0] != '\0') {
gctl_error(req, "%s", errstr);
}
}

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd February 26, 2007
.Dd October 31, 2011
.Dt GMULTIPATH 8
.Os
.Sh NAME
@ -32,11 +32,48 @@
.Nd "disk multipath control utility"
.Sh SYNOPSIS
.Nm
.Cm label
.Op Fl hv
.Cm create
.Op Fl Av
.Ar name
.Ar prov ...
.Nm
.Cm label
.Op Fl Av
.Ar name
.Ar prov ...
.Nm
.Cm add
.Op Fl v
.Ar name prov
.Nm
.Cm remove
.Op Fl v
.Ar name prov
.Nm
.Cm fail
.Op Fl v
.Ar name prov
.Nm
.Cm restore
.Op Fl v
.Ar name prov
.Nm
.Cm rotate
.Op Fl v
.Ar name
.Nm
.Cm getactive
.Op Fl v
.Ar name
.Nm
.Cm destroy
.Op Fl v
.Ar name
.Nm
.Cm stop
.Op Fl v
.Ar name
.Nm
.Cm clear
.Op Fl v
.Ar prov ...
@ -53,27 +90,79 @@ The
.Nm
utility is used for device multipath configuration.
.Pp
Only automatic configuration is supported at the present time via the
.Cm label
command.
This operation writes a label on the last sector of the underlying
disk device with a contained name and UUID.
The UUID guarantees uniqueness
in a shared storage environment but is in general too cumbersome to use.
The multipath device can be configured using two different methods:
.Dq manual
or
.Dq automatic .
When using the
.Dq manual
method, no metadata are stored on the devices, so the multipath
device has to be configured by hand every time it is needed.
Additional device paths will also not be detected automatically.
The
.Dq automatic
method uses on-disk metadata to detect the device and all of its paths.
The metadata are stored in the last sector of the underlying disk device and
include the device name and UUID.
The UUID guarantees uniqueness in a shared storage environment
but is in general too cumbersome to use.
The name is what is exported via the device interface.
.Pp
The first argument to
.Nm
indicates an action to be performed:
.Bl -tag -width ".Cm destroy"
.It Cm create
Create a multipath device with the
.Dq manual
method, without writing any on-disk metadata.
It is up to the administrator to properly identify the device paths.
The kernel will only check that all given providers have the same media
and sector sizes.
.Pp
The
.Fl A
option enables Active/Active mode; otherwise Active/Passive mode is used
by default.
.It Cm label
Label the given underlying device with the specified
Create a multipath device with the
.Dq automatic
method.
Label the first given provider with on-disk metadata using the specified
.Ar name .
The kernel module
.Pa geom_multipath.ko
will be loaded if it is not loaded already.
The rest of the given providers will be retasted to detect this metadata.
This reliably protects against specifying unrelated providers.
Providers on which no matching metadata is detected will not be added to the device.
.Pp
The
.Fl A
option enables Active/Active mode; otherwise Active/Passive mode is used
by default.
.It Cm add
Add the given provider as a path to the given multipath device.
This should normally be used only for devices created with the
.Dq manual
method, unless you know what you are doing (you are sure that it is another
path to the device, but tasting its metadata in the regular
.Dq automatic
way is not possible).
.It Cm remove
Remove the given provider as a path from the given multipath device.
If the last path is removed, the multipath device will be destroyed.
.It Cm fail
Mark the specified provider, a path of the specified multipath device, as failed.
If there are other paths present, new requests will be forwarded there.
.It Cm restore
Mark specified provider as a path of the specified multipath device as
operational, allowing it to handle requests.
.It Cm rotate
Change the active provider/path in Active/Passive mode.
.It Cm getactive
Get the currently active provider(s)/path(s).
.It Cm destroy
Destroy the given multipath device, clearing its metadata.
.It Cm stop
Stop the given multipath device without clearing its metadata.
.It Cm clear
Clear metadata on the given device.
Clear metadata on the given provider.
.It Cm list
See
.Xr geom 8 .
@ -101,14 +190,15 @@ Debug level of the
GEOM class.
This can be set to 0 (default) or 1 to disable or enable various
forms of chattiness.
.It Va kern.geom.multipath.exclusive : No 1
Open the underlying providers exclusively, preventing individual access to the paths.
.El
.Sh EXIT STATUS
Exit status is 0 on success, and 1 if the command fails.
.Sh MULTIPATH ARCHITECTURE
.Pp
This is an active/passive
multiple path architecture with no device knowledge or presumptions other
than size matching built in.
This is a multiple path architecture with no device knowledge or
presumptions other than size matching built in.
Therefore the user must exercise some care
in selecting providers that do indeed represent multiple paths to the
same underlying disk device.
@ -133,15 +223,16 @@ of multiple pathnames refer to the same device should be left to the
system operator who will use tools and knowledge of their own storage
subsystem to make the correct configuration selection.
.Pp
As an active/passive architecture, only one path has I/O moving on it
Both Active/Passive and Active/Active operation modes are supported.
In Active/Passive mode only one path has I/O moving on it
at any point in time.
This I/O continues until an I/O is returned with
a generic I/O error or a "Nonexistent Device" error.
When this occurs,
the active device is kicked out of the
.Nm MULTIPATH
GEOM class and the next in a list is selected, the failed I/O reissued
and the system proceeds.
When this occurs, that path is marked FAIL, the next path
in the list is selected as active, and the failed I/O is reissued.
In Active/Active mode all paths not marked FAIL may handle I/O at the same time.
Requests are distributed between paths to equalize the load.
For capable devices this allows the bandwidth of all paths to be utilized.
.Pp
When new devices are added to the system the
.Nm MULTIPATH
@ -149,9 +240,9 @@ GEOM class is given an opportunity to taste these new devices.
If a new
device has a
.Nm MULTIPATH
label, the device is used to either create a new
on-disk metadata label, the device is used to either create a new
.Nm MULTIPATH
GEOM, or to attach to the end of the list of devices for an existing
GEOM, or be added to the list of paths for an existing
.Nm MULTIPATH
GEOM.
.Pp
@ -176,7 +267,7 @@ of an RSCN event from the Fabric Domain Controller), they can cause
a rescan to occur and cause the attachment and configuration of any
(now) new devices to occur, causing the taste event described above.
.Pp
This means that this active/passive architecture is not a one-shot path
This means that this multipath architecture is not a one-shot path
failover, but can be considered to be steady state as long as failed
paths are repaired (automatically or otherwise).
.Pp
@ -184,7 +275,7 @@ Automatic rescanning is not a requirement.
Nor is Fibre Channel.
The
same failover mechanisms work equally well for traditional "Parallel"
SCSI but require manual intervention with
SCSI but may require manual intervention with
.Xr camcontrol 8
to cause the reattachment of repaired device links.
.Sh EXAMPLES
@ -226,9 +317,9 @@ mount /dev/multipath/FREDa /mnt....
.Pp
The resultant console output looks something like:
.Bd -literal -offset indent
GEOM_MULTIPATH: adding da0 to Fred/b631385f-c61c-11db-b884-0011116ae789
GEOM_MULTIPATH: da0 now active path in Fred
GEOM_MULTIPATH: adding da2 to Fred/b631385f-c61c-11db-b884-0011116ae789
GEOM_MULTIPATH: da0 added to FRED
GEOM_MULTIPATH: da0 is now active path in FRED
GEOM_MULTIPATH: da2 added to FRED
.Ed
.Sh SEE ALSO
.Xr geom 4 ,
@ -240,24 +331,6 @@ GEOM_MULTIPATH: adding da2 to Fred/b631385f-c61c-11db-b884-0011116ae789
.Xr mount 8 ,
.Xr newfs 8 ,
.Xr sysctl 8
.Sh BUGS
The
.Nm
should allow for a manual method of pairing disks.
.Pp
There is currently no way for
.Pa geom_multipath.ko
to distinguish between various label instances of the same provider.
That
is devices such as
.Ar da0
and
.Ar da0c
can be tasted and instantiated as multiple paths for the same device.
Technically, this is correct, but pretty useless.
This will be fixed soon
(I hope), but to avoid this it is a good idea to destroy any label on
the disk object prior to labelling it with
.Nm .
.Sh AUTHOR
.An Matthew Jacob Aq mjacob@FreeBSD.org
.An Alexander Motin Aq mav@FreeBSD.org

View File

@ -1336,6 +1336,36 @@ set80211pureg(const char *val, int d, int s, const struct afswtch *rafp)
set80211(s, IEEE80211_IOC_PUREG, d, 0, NULL);
}
static void
set80211quiet(const char *val, int d, int s, const struct afswtch *rafp)
{
set80211(s, IEEE80211_IOC_QUIET, d, 0, NULL);
}
static
DECL_CMD_FUNC(set80211quietperiod, val, d)
{
set80211(s, IEEE80211_IOC_QUIET_PERIOD, atoi(val), 0, NULL);
}
static
DECL_CMD_FUNC(set80211quietcount, val, d)
{
set80211(s, IEEE80211_IOC_QUIET_COUNT, atoi(val), 0, NULL);
}
static
DECL_CMD_FUNC(set80211quietduration, val, d)
{
set80211(s, IEEE80211_IOC_QUIET_DUR, atoi(val), 0, NULL);
}
static
DECL_CMD_FUNC(set80211quietoffset, val, d)
{
set80211(s, IEEE80211_IOC_QUIET_OFFSET, atoi(val), 0, NULL);
}
static void
set80211bgscan(const char *val, int d, int s, const struct afswtch *rafp)
{
@ -5161,6 +5191,12 @@ static struct cmd ieee80211_cmds[] = {
DEF_CMD_ARG("bgscanidle", set80211bgscanidle),
DEF_CMD_ARG("bgscanintvl", set80211bgscanintvl),
DEF_CMD_ARG("scanvalid", set80211scanvalid),
DEF_CMD("quiet", 1, set80211quiet),
DEF_CMD("-quiet", 0, set80211quiet),
DEF_CMD_ARG("quiet_count", set80211quietcount),
DEF_CMD_ARG("quiet_period", set80211quietperiod),
DEF_CMD_ARG("quiet_dur", set80211quietduration),
DEF_CMD_ARG("quiet_offset", set80211quietoffset),
DEF_CMD_ARG("roam:rssi", set80211roamrssi),
DEF_CMD_ARG("roam:rate", set80211roamrate),
DEF_CMD_ARG("mcastrate", set80211mcastrate),

View File

@ -1,7 +1,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd August 20, 2011
.Dd November 10, 2011
.Dt IPFW 8
.Os
.Sh NAME
@ -769,7 +769,7 @@ To enable
.Cm fwd
a custom kernel needs to be compiled with the option
.Cd "options IPFIREWALL_FORWARD" .
.It Cm nat Ar nat_nr
.It Cm nat Ar nat_nr | tablearg
Pass packet to a
nat instance
(for network address translation, address redirect, etc.):

View File

@ -99,7 +99,7 @@ static const char rcsid[] =
struct bs {
u_int8_t bsJump[3]; /* bootstrap entry point */
u_int8_t bsOemName[8]; /* OEM name and version */
};
} __packed;
struct bsbpb {
u_int8_t bpbBytesPerSec[2]; /* bytes per sector */
@ -114,7 +114,7 @@ struct bsbpb {
u_int8_t bpbHeads[2]; /* drive heads */
u_int8_t bpbHiddenSecs[4]; /* hidden sectors */
u_int8_t bpbHugeSectors[4]; /* big total sectors */
};
} __packed;
struct bsxbpb {
u_int8_t bpbBigFATsecs[4]; /* big sectors per FAT */
@ -124,7 +124,7 @@ struct bsxbpb {
u_int8_t bpbFSInfo[2]; /* file system info sector */
u_int8_t bpbBackup[2]; /* backup boot sector */
u_int8_t bpbReserved[12]; /* reserved */
};
} __packed;
struct bsx {
u_int8_t exDriveNumber; /* drive number */
@ -133,7 +133,7 @@ struct bsx {
u_int8_t exVolumeID[4]; /* volume ID number */
u_int8_t exVolumeLabel[11]; /* volume label */
u_int8_t exFileSysType[8]; /* file system type */
};
} __packed;
struct de {
u_int8_t deName[11]; /* name and extension */
@ -143,7 +143,7 @@ struct de {
u_int8_t deMDate[2]; /* creation date */
u_int8_t deStartCluster[2]; /* starting cluster */
u_int8_t deFileSize[4]; /* size */
};
} __packed;
struct bpb {
u_int bpbBytesPerSec; /* bytes per sector */

View File

@ -205,7 +205,7 @@ section for information on the required format.
.IP "\fB\-infiles\fR" 4
.IX Item "-infiles"
if present this should be the last option, all subsequent arguments
are assumed to the names of files containing certificate requests.
are assumed to be the names of files containing certificate requests.
.IP "\fB\-out filename\fR" 4
.IX Item "-out filename"
the output file to output certificates to. The default is standard

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 6, 2011
.Dd November 8, 2011
.Dt ALTQ 4
.Os
.Sh NAME
@ -161,6 +161,7 @@ They have been applied to the following hardware drivers:
.Xr sk 4 ,
.Xr ste 4 ,
.Xr stge 4 ,
.Xr ti 4 ,
.Xr txp 4 ,
.Xr udav 4 ,
.Xr ural 4 ,

View File

@ -60,9 +60,9 @@ driver provides support for LSI Logic Fusion-MPT 2
.Tn SAS
controllers.
.Sh HARDWARE
The following controllers are supported by the
The
.Nm
driver
driver supports the following controllers:
.Pp
.Bl -bullet -compact
.It

View File

@ -35,7 +35,7 @@
.\" @(#)mtio.4 8.1 (Berkeley) 6/5/93
.\" $FreeBSD$
.\"
.Dd February 11, 1996
.Dd November 11, 2011
.Dt MTIO 4
.Os
.Sh NAME
@ -97,7 +97,7 @@ The following definitions are from
/* structure for MTIOCTOP - mag tape op command */
struct mtop {
short mt_op; /* operations defined below */
daddr_t mt_count; /* how many of them */
int32_t mt_count; /* how many of them */
};
/* operations */
@ -165,19 +165,25 @@ struct mtget {
short mt_dsreg; /* ``drive status'' register */
short mt_erreg; /* ``error'' register */
/* end device-dependent registers */
/*
* Note that the residual count, while maintained, may
* be nonsense because the size of the residual may (greatly)
* exceed 32 K-bytes. Use the MTIOCERRSTAT ioctl to get a
* more accurate count.
*/
short mt_resid; /* residual count */
#if defined (__FreeBSD__)
daddr_t mt_blksiz; /* presently operating blocksize */
daddr_t mt_density; /* presently operating density */
int32_t mt_blksiz; /* presently operating blocksize */
int32_t mt_density; /* presently operating density */
u_int32_t mt_comp; /* presently operating compression */
daddr_t mt_blksiz0; /* blocksize for mode 0 */
daddr_t mt_blksiz1; /* blocksize for mode 1 */
daddr_t mt_blksiz2; /* blocksize for mode 2 */
daddr_t mt_blksiz3; /* blocksize for mode 3 */
daddr_t mt_density0; /* density for mode 0 */
daddr_t mt_density1; /* density for mode 1 */
daddr_t mt_density2; /* density for mode 2 */
daddr_t mt_density3; /* density for mode 3 */
int32_t mt_blksiz0; /* blocksize for mode 0 */
int32_t mt_blksiz1; /* blocksize for mode 1 */
int32_t mt_blksiz2; /* blocksize for mode 2 */
int32_t mt_blksiz3; /* blocksize for mode 3 */
int32_t mt_density0; /* density for mode 0 */
int32_t mt_density1; /* density for mode 1 */
int32_t mt_density2; /* density for mode 2 */
int32_t mt_density3; /* density for mode 3 */
/* the following are not yet implemented */
u_int32_t mt_comp0; /* compression type for mode 0 */
u_int32_t mt_comp1; /* compression type for mode 1 */
@ -185,8 +191,8 @@ struct mtget {
u_int32_t mt_comp3; /* compression type for mode 3 */
/* end not yet implemented */
#endif
daddr_t mt_fileno; /* relative file number of current position */
daddr_t mt_blkno; /* relative block number of current position */
int32_t mt_fileno; /* relative file number of current position */
int32_t mt_blkno; /* relative block number of current position */
};
/* structure for MTIOCERRSTAT - tape get error status command */
@ -198,10 +204,10 @@ struct scsi_tape_errors {
* of issuing an MTIOCERRSTAT unlatches and clears them.
*/
u_int8_t io_sense[32]; /* Last Sense Data For Data I/O */
u_int32_t io_resid; /* residual count from last Data I/O */
int32_t io_resid; /* residual count from last Data I/O */
u_int8_t io_cdb[16]; /* Command that Caused the Last Data Sense */
u_int8_t ctl_sense[32]; /* Last Sense Data For Control I/O */
u_int32_t ctl_resid; /* residual count from last Control I/O */
int32_t ctl_resid; /* residual count from last Control I/O */
u_int8_t ctl_cdb[16]; /* Command that Caused the Last Control Sense */
/*
* These are the read and write cumulative error counters.
@ -276,20 +282,6 @@ union mterrstat {
#define DEFTAPE "/dev/nsa0"
#endif
#ifdef _KERNEL
/*
* minor device number
*/
#define T_UNIT 003 /* unit selection */
#define T_NOREWIND 004 /* no rewind on close */
#define T_DENSEL 030 /* density select */
#define T_800BPI 000 /* select 800 bpi */
#define T_1600BPI 010 /* select 1600 bpi */
#define T_6250BPI 020 /* select 6250 bpi */
#define T_BADBPI 030 /* undefined selection */
#endif
#endif /* !_SYS_MTIO_H_ */
.Ed
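A minimal
.Dv MTIOCTOP
usage sketch follows; it assumes a tape device such as
.Pa /dev/nsa0
is already open on descriptor
.Va fd :
.Bd -literal -offset indent
struct mtop mt;

mt.mt_op = MTFSF;	/* forward space one file mark */
mt.mt_count = 1;
if (ioctl(fd, MTIOCTOP, &mt) == -1)
	err(1, "MTIOCTOP");
.Ed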
.Sh FILES

View File

@ -109,7 +109,8 @@ hardware command queues (up to 31 command per port),
Native Command Queuing, SATA interface Power Management, device hot-plug
and Message Signaled Interrupts.
.Pp
Same hardware is also supported by atamarvell and ataadaptec drivers from
The same hardware is also supported by the atamarvell and ataadaptec
drivers from the
.Xr ata 4
subsystem.
If both drivers are loaded at the same time, this one will be
@ -118,6 +119,7 @@ given precedence as the more functional of the two.
The
.Nm
driver supports the following controllers:
.Pp
.Bl -tag -compact
.It Gen-I (SATA 1.5Gbps):
.Bl -bullet -compact
@ -155,10 +157,10 @@ driver supports the following controllers:
MV78100 SoC
.El
.El
Note, that this hardware supports command queueing and FIS-based switching
only for ATA DMA commands. ATAPI and non-DMA ATA commands executed one by one
for each port.
.Pp
Note that this hardware supports command queueing and FIS-based switching
only for ATA DMA commands.
ATAPI and non-DMA ATA commands are executed one by one for each port.
.Sh SEE ALSO
.Xr ada 4 ,
.Xr ata 4 ,
@ -172,4 +174,4 @@ The
driver first appeared in
.Fx 8.1 .
.Sh AUTHORS
.An Alexander Motin Aq mav@FreeBSD.org .
.An Alexander Motin Aq mav@FreeBSD.org

View File

@ -30,7 +30,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 16, 2005
.Dd November 8, 2011
.Dt TI 4
.Os
.Sh NAME
@ -42,7 +42,7 @@ place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device ti"
.Cd "options TI_PRIVATE_JUMBOS"
.Cd "options TI_SF_BUF_JUMBO"
.Cd "options TI_JUMBO_HDRSPLIT"
.Ed
.Pp
@ -97,13 +97,14 @@ for more discussion on zero copy receive and header splitting.
.Pp
The
.Nm
driver normally uses jumbo receive buffers allocated by the
driver uses UMA-backed jumbo receive buffers, but can be configured
to use the
.Xr sendfile 2
buffer allocator, but can be configured to use its own private pool of
jumbo buffers that are contiguous instead of buffers from the jumbo
allocator, which are made up of multiple page sized chunks.
To turn on private jumbos, use the
.Dv TI_PRIVATE_JUMBOS
buffer allocator.
To turn on the
.Xr sendfile 2
buffer allocator, use the
.Dv TI_SF_BUF_JUMBO
option.
.Pp
Support for vlans is also available using the
@ -311,6 +312,7 @@ the network connection (cable).
.El
.Sh SEE ALSO
.Xr sendfile 2 ,
.Xr altq 4 ,
.Xr arp 4 ,
.Xr netintro 4 ,
.Xr ng_ether 4 ,

View File

@ -156,6 +156,7 @@ pgollucci [label="Philip M. Gollucci\npgollucci@FreeBSD.org\n2008/07/21"]
rafan [label="Rong-En Fan\nrafan@FreeBSD.org\n2006/06/23"]
rakuco [label="Raphael Kubo da Costa\nrakuco@FreeBSD.org\n2011/08/22"]
rene [label="Rene Ladan\nrene@FreeBSD.org\n2010/04/11"]
rm [label="Ruslan Mahmatkhanov\nrm@FreeBSD.org\n2011/11/06"]
rnoland [label="Robert Noland\nrnoland@FreeBSD.org\n2008/07/21"]
romain [label="Romain Tartiere\nromain@FreeBSD.org\n2010/01/24"]
sahil [label="Sahil Tandon\nsahil@FreeBSD.org\n2010/04/11"]
@ -366,6 +367,7 @@ nork -> ale
novel -> alexbl
novel -> ehaupt
novel -> rm
obrien -> mharo
obrien -> gerald

View File

@ -167,6 +167,7 @@ jls [label="Jordan Sissel\njls@FreeBSD.org\n2006/12/06"]
joerg [label="Joerg Wunsch\njoerg@FreeBSD.org\n1993/11/14"]
jon [label="Jonathan Chen\njon@FreeBSD.org\n2000/10/17"]
jonathan [label="Jonathan Anderson\njonathan@FreeBSD.org\n2010/10/07"]
jpaetzel [label="Josh Paetzel\njpaetzel@FreeBSD.org\n2011/01/21"]
julian [label="Julian Elischer\njulian@FreeBSD.org\n1993/??/??"]
kaiw [label="Kai Wang\nkaiw@FreeBSD.org\n2007/09/26"]
kan [label="Alexander Kabaev\nkan@FreeBSD.org\n2002/07/21"]
@ -237,6 +238,7 @@ stas [label="Stanislav Sedov\nstas@FreeBSD.org\n2008/08/22"]
suz [label="SUZUKI Shinsuke\nsuz@FreeBSD.org\n2002/03/26"]
syrinx [label="Shteryana Shopova\nsyrinx@FreeBSD.org\n2006/10/07"]
takawata [label="Takanori Watanabe\ntakawata@FreeBSD.org\n2000/07/06"]
theraven [label="David Chisnall\ntheraven@FreeBSD.org\n2011/11/11"]
thompsa [label="Andrew Thompson\nthompsa@FreeBSD.org\n2005/05/25"]
ticso [label="Bernd Walter\nticso@FreeBSD.org\n2002/01/31"]
tijl [label="Tijl Coosemans\ntijl@FreeBSD.org\n2010/07/16"]
@ -303,6 +305,7 @@ brian -> joe
brooks -> bushman
brooks -> jamie
brooks -> theraven
bz -> anchie
bz -> jamie
@ -332,6 +335,8 @@ dds -> versus
dfr -> zml
dim -> theraven
dwmalone -> fanf
dwmalone -> peadar
dwmalone -> snb
@ -443,6 +448,7 @@ kan -> kib
kib -> ae
kib -> dchagin
kib -> jpaetzel
kib -> lulf
kib -> melifaro
kib -> pho

View File

@ -18,9 +18,9 @@ CNY
# negative_sign
-
# int_frac_digits
0
2
# frac_digits
0
2
# p_cs_precedes
1
# p_sep_by_space

View File

@ -2047,7 +2047,8 @@ int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
KASSERT(td == curthread || TD_IS_SUSPENDED(td),
KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
P_SHOULDSTOP(td->td_proc),
("not suspended thread %p", td));
fpugetregs(td);
fill_fpregs_xmm(&td->td_pcb->pcb_user_save, fpregs);

View File

@ -900,9 +900,9 @@ cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
#include "../../kern/subr_syscall.c"
/*
* syscall - system call request C handler
*
* A system call is essentially treated as a trap.
* System call handler for native binaries. The trap frame is already
* set up by the assembler trampoline and a pointer to it is saved in
* td_frame.
*/
void
amd64_syscall(struct thread *td, int traced)

View File

@ -46,19 +46,15 @@ typedef long sig_atomic_t;
#if __BSD_VISIBLE
#include <machine/trap.h> /* codes for SIGILL, SIGFPE */
/*
* Only the kernel should need these old type definitions.
*/
/*
* Information pushed on stack when a signal is delivered.
* This is used by the kernel to restore state following
* execution of the signal handler. It is also made available
* to the handler to allow it to restore state properly if
* a non-standard exit is performed.
*/
/*
* The sequence of the fields/registers in struct sigcontext should match
* those in mcontext_t.
*
* The sequence of the fields/registers after sc_mask in struct
* sigcontext must match those in mcontext_t and struct trapframe.
*/
struct sigcontext {
struct __sigset sc_mask; /* signal mask to restore */
@ -93,8 +89,8 @@ struct sigcontext {
long sc_ss;
long sc_len; /* sizeof(mcontext_t) */
/*
* XXX - See <machine/ucontext.h> and <machine/fpu.h> for
* the following fields.
* See <machine/ucontext.h> and <machine/fpu.h> for the following
* fields.
*/
long sc_fpformat;
long sc_ownedfp;

View File

@ -41,12 +41,13 @@
typedef struct __mcontext {
/*
* The first 24 fields must match the definition of
* sigcontext. So that we can support sigcontext
* and ucontext_t at the same time.
* The definition of mcontext_t must match the layout of
* struct sigcontext after the sc_mask member. This is so
* that we can support sigcontext and ucontext_t at the same
* time.
*/
__register_t mc_onstack; /* XXX - sigcontext compat. */
__register_t mc_rdi; /* machine state (struct trapframe) */
__register_t mc_onstack; /* XXX - sigcontext compat. */
__register_t mc_rdi; /* machine state (struct trapframe) */
__register_t mc_rsi;
__register_t mc_rdx;
__register_t mc_rcx;

File diff suppressed because it is too large

View File

@ -117,7 +117,6 @@ bc_bios2unit(int biosdev)
int i;
DEBUG("looking for bios device 0x%x", biosdev);
printf("looking for bios device 0x%x, nbcinfo=%d\n", biosdev, nbcinfo);
for (i = 0; i < nbcinfo; i++) {
DEBUG("bc unit %d is BIOS device 0x%x", i, bcinfo[i].bc_unit);
if (bcinfo[i].bc_unit == biosdev)
@ -149,7 +148,6 @@ bc_init(void)
int
bc_add(int biosdev)
{
printf("bc_add(%d)\n", biosdev);
if (nbcinfo >= MAXBCDEV)
return (-1);
@ -161,10 +159,8 @@ bc_add(int biosdev)
v86.ds = VTOPSEG(&bcinfo[nbcinfo].bc_sp);
v86.esi = VTOPOFF(&bcinfo[nbcinfo].bc_sp);
v86int();
if ((v86.eax & 0xff00) != 0) {
printf("CD probe failed, eax=0x%08x\n", v86.eax);
if ((v86.eax & 0xff00) != 0)
return (-1);
}
printf("BIOS CD is cd%d\n", nbcinfo);
nbcinfo++;

View File

@ -125,11 +125,11 @@ invop_leave:
movl 8(%esp), %eax /* load calling EIP */
incl %eax /* increment over LOCK prefix */
movl %eax, -8(%ebx) /* store calling EIP */
movl %ebx, -4(%esp) /* temporarily store new %esp */
subl $8, %ebx /* adjust for three pushes, one pop */
movl %ebx, 8(%esp) /* temporarily store new %esp */
popl %ebx /* pop off temp */
popl %eax /* pop off temp */
movl -12(%esp), %esp /* set stack pointer */
subl $8, %esp /* adjust for three pushes, one pop */
movl (%esp), %esp /* set stack pointer */
iret /* return from interrupt */
invop_nop:
/*

View File

@ -213,6 +213,7 @@ systrace_probe(u_int32_t id, int sysnum, struct sysent *sysent, void *params,
/* Process the probe using the converted argments. */
dtrace_probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]);
}
#endif
static void
@ -220,8 +221,12 @@ systrace_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *des
{
int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg);
systrace_setargdesc(sysnum, desc->dtargd_ndx, desc->dtargd_native,
sizeof(desc->dtargd_native));
if (SYSTRACE_ISENTRY((uintptr_t)parg))
systrace_entry_setargdesc(sysnum, desc->dtargd_ndx,
desc->dtargd_native, sizeof(desc->dtargd_native));
else
systrace_return_setargdesc(sysnum, desc->dtargd_ndx,
desc->dtargd_native, sizeof(desc->dtargd_native));
if (desc->dtargd_native[0] == '\0')
desc->dtargd_ndx = DTRACE_ARGNONE;

View File

@ -52,8 +52,8 @@ struct timeval32 {
} while (0)
struct timespec32 {
u_int32_t tv_sec;
u_int32_t tv_nsec;
int32_t tv_sec;
int32_t tv_nsec;
};
#define TS_CP(src,dst,fld) do { \
CP((src).fld,(dst).fld,tv_sec); \

File diff suppressed because it is too large

View File

@ -2142,12 +2142,12 @@ device malo # Marvell Libertas wireless NICs.
device mwl # Marvell 88W8363 802.11n wireless NICs.
device ral # Ralink Technology RT2500 wireless NICs.
# Use "private" jumbo buffers allocated exclusively for the ti(4) driver.
# This option is incompatible with the TI_JUMBO_HDRSPLIT option below.
#options TI_PRIVATE_JUMBOS
# Use sf_buf(9) interface for jumbo buffers on ti(4) controllers.
#options TI_SF_BUF_JUMBO
# Turn on the header splitting option for the ti(4) driver firmware. This
# only works for Tigon II chips, and has no effect for Tigon I chips.
options TI_JUMBO_HDRSPLIT
# This option requires the TI_SF_BUF_JUMBO option above.
#options TI_JUMBO_HDRSPLIT
#
# Use header splitting feature on bce(4) adapters.

View File

@ -526,7 +526,7 @@ NGATM_CCATM opt_netgraph.h
DRM_DEBUG opt_drm.h
ZERO_COPY_SOCKETS opt_zero.h
TI_PRIVATE_JUMBOS opt_ti.h
TI_SF_BUF_JUMBO opt_ti.h
TI_JUMBO_HDRSPLIT opt_ti.h
BCE_JUMBO_HDRSPLIT opt_bce.h

View File

@ -1431,7 +1431,7 @@ ae_tx_avail_size(ae_softc_t *sc)
else
avail = sc->txd_ack - sc->txd_cur;
return (avail - 4); /* 4-byte header. */
return (avail);
}
static int
@ -1448,7 +1448,7 @@ ae_encap(ae_softc_t *sc, struct mbuf **m_head)
len = m0->m_pkthdr.len;
if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
ae_tx_avail_size(sc) < len) {
len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
if_printf(sc->ifp, "No free Tx available.\n");
#endif
@ -1457,11 +1457,10 @@ ae_encap(ae_softc_t *sc, struct mbuf **m_head)
hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
bzero(hdr, sizeof(*hdr));
sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT; /* Header
size. */
to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur; /* Space available to
* the end of the ring
*/
/* Skip header size. */
sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
/* Space available to the end of the ring */
to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
if (to_end >= len) {
m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
} else {
@ -1840,8 +1839,8 @@ ae_tx_intr(ae_softc_t *sc)
/*
* Move txd ack and align on 4-byte boundary.
*/
sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
AE_TXD_BUFSIZE_DEFAULT;
sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
if ((flags & AE_TXS_SUCCESS) != 0)
ifp->if_opackets++;

View File

@ -38,6 +38,8 @@
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/stdarg.h>
@ -59,6 +61,17 @@
#define BUSTAG(ah) ((ah)->ah_st)
#endif
/*
* This lock is used to serialise register access for chips which have
* problems w/ SMP CPUs issuing concurrent PCI transactions.
*
* XXX This is a global lock for now; it should be pushed to
* a per-device lock in some platform-independent fashion.
*/
struct mtx ah_regser_mtx;
MTX_SYSINIT(ah_regser, &ah_regser_mtx, "Atheros register access mutex",
MTX_SPIN);
extern void ath_hal_printf(struct ath_hal *, const char*, ...)
__printflike(2,3);
extern void ath_hal_vprintf(struct ath_hal *, const char*, __va_list)
@ -250,12 +263,16 @@ ath_hal_reg_write(struct ath_hal *ah, u_int32_t reg, u_int32_t val)
alq_post(ath_hal_alq, ale);
}
}
if (ah->ah_config.ah_serialise_reg_war)
mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
bus_space_write_4(tag, h, reg, val);
else
#endif
bus_space_write_stream_4(tag, h, reg, val);
if (ah->ah_config.ah_serialise_reg_war)
mtx_unlock_spin(&ah_regser_mtx);
}
u_int32_t
@ -265,12 +282,16 @@ ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
bus_space_handle_t h = ah->ah_sh;
u_int32_t val;
if (ah->ah_config.ah_serialise_reg_war)
mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
val = bus_space_read_4(tag, h, reg);
else
#endif
val = bus_space_read_stream_4(tag, h, reg);
if (ah->ah_config.ah_serialise_reg_war)
mtx_unlock_spin(&ah_regser_mtx);
if (ath_hal_alq) {
struct ale *ale = ath_hal_alq_get(ah);
if (ale) {
@ -316,12 +337,16 @@ ath_hal_reg_write(struct ath_hal *ah, u_int32_t reg, u_int32_t val)
bus_space_tag_t tag = BUSTAG(ah);
bus_space_handle_t h = ah->ah_sh;
if (ah->ah_config.ah_serialise_reg_war)
mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
bus_space_write_4(tag, h, reg, val);
else
#endif
bus_space_write_stream_4(tag, h, reg, val);
if (ah->ah_config.ah_serialise_reg_war)
mtx_unlock_spin(&ah_regser_mtx);
}
u_int32_t
@ -331,12 +356,16 @@ ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
bus_space_handle_t h = ah->ah_sh;
u_int32_t val;
if (ah->ah_config.ah_serialise_reg_war)
mtx_lock_spin(&ah_regser_mtx);
#if _BYTE_ORDER == _BIG_ENDIAN
if (OS_REG_UNSWAPPED(reg))
val = bus_space_read_4(tag, h, reg);
else
#endif
val = bus_space_read_stream_4(tag, h, reg);
if (ah->ah_config.ah_serialise_reg_war)
mtx_unlock_spin(&ah_regser_mtx);
return val;
}
#endif /* AH_DEBUG || AH_REGOPS_FUNC */

View File

@ -114,11 +114,15 @@ ath_hal_mac_name(struct ath_hal *ah)
case AR_XSREV_VERSION_SOWL:
return "9160";
case AR_XSREV_VERSION_MERLIN:
return "9280";
if (AH_PRIVATE(ah)->ah_ispcie)
return "9280";
return "9220";
case AR_XSREV_VERSION_KITE:
return "9285";
case AR_XSREV_VERSION_KIWI:
return "9287";
if (AH_PRIVATE(ah)->ah_ispcie)
return "9287";
return "9227";
}
return "????";
}
@ -661,6 +665,8 @@ ath_hal_getcapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
return pCap->halHasLongRxDescTsf ? HAL_OK : HAL_ENOTSUPP;
case HAL_CAP_BB_READ_WAR: /* Baseband read WAR */
return pCap->halHasBBReadWar? HAL_OK : HAL_ENOTSUPP;
case HAL_CAP_SERIALISE_WAR: /* PCI register serialisation */
return pCap->halSerialiseRegWar ? HAL_OK : HAL_ENOTSUPP;
default:
return HAL_EINVAL;
}

View File

@ -150,6 +150,7 @@ typedef enum {
HAL_CAP_RXDESC_SELFLINK = 242, /* support a self-linked tail RX descriptor */
HAL_CAP_LONG_RXDESC_TSF = 243, /* hardware supports 32bit TSF in RX descriptor */
HAL_CAP_BB_READ_WAR = 244, /* baseband read WAR */
HAL_CAP_SERIALISE_WAR = 245, /* serialise register access on PCI */
} HAL_CAPABILITY_TYPE;
/*
@ -780,6 +781,8 @@ typedef struct
int ah_dma_beacon_response_time;/* in TU's */
int ah_sw_beacon_response_time; /* in TU's */
int ah_additional_swba_backoff; /* in TU's */
int ah_force_full_reset; /* force full chip reset rather than warm reset */
int ah_serialise_reg_war; /* force serialisation of register IO */
} HAL_OPS_CONFIG;
/*
@ -810,6 +813,8 @@ struct ath_hal {
uint16_t *ah_eepromdata; /* eeprom buffer, if needed */
uint32_t ah_intrstate[8]; /* last int state */
HAL_OPS_CONFIG ah_config;
const HAL_RATE_TABLE *__ahdecl(*ah_getRateTable)(struct ath_hal *,
u_int mode);
@ -1023,6 +1028,9 @@ struct ath_hal {
struct ath_desc *);
void __ahdecl(*ah_set11nBurstDuration)(struct ath_hal *,
struct ath_desc *, u_int);
uint32_t __ahdecl(*ah_get_mib_cycle_counts_pct) (struct ath_hal *,
uint32_t *, uint32_t *, uint32_t *, uint32_t *);
uint32_t __ahdecl(*ah_get11nExtBusy)(struct ath_hal *);
void __ahdecl(*ah_set11nMac2040)(struct ath_hal *,
HAL_HT_MACMODE);


@ -73,11 +73,11 @@
/* AR5416 compatible devid's */
#define AR5416_DEVID_PCI 0x0023 /* AR5416 PCI (MB/CB) Owl */
#define AR5416_DEVID_PCIE 0x0024 /* AR5416 PCI-E (XB) Owl */
#define AR5416_DEVID_PCIE 0x0024 /* AR5418 PCI-E (XB) Owl */
#define AR5416_AR9130_DEVID 0x000b /* AR9130 SoC WiMAC */
#define AR9160_DEVID_PCI 0x0027 /* AR9160 PCI Sowl */
#define AR9280_DEVID_PCI 0x0029 /* AR9280 PCI Merlin */
#define AR9280_DEVID_PCIE 0x002a /* AR9280 PCI-E Merlin */
#define AR9280_DEVID_PCIE 0x002a /* AR9220 PCI-E Merlin */
#define AR9285_DEVID_PCIE 0x002b /* AR9285 PCI-E Kite */
#define AR2427_DEVID_PCIE 0x002c /* AR2427 PCI-E w/ 802.11n bonded out */
#define AR9287_DEVID_PCI 0x002d /* AR9227 PCI Kiwi */


@ -210,7 +210,8 @@ typedef struct {
halHasRxSelfLinkedTail : 1,
halSupportsFastClock5GHz : 1, /* Hardware supports 5ghz fast clock; check eeprom/channel before using */
halHasLongRxDescTsf : 1,
halHasBBReadWar : 1;
halHasBBReadWar : 1,
halSerialiseRegWar : 1;
uint32_t halWirelessModes;
uint16_t halTotalQueues;
uint16_t halKeyCacheSize;


@ -112,11 +112,13 @@ struct ath_hal_5416 {
int ah_hangs; /* h/w hangs state */
uint8_t ah_keytype[AR5416_KEYTABLE_SIZE];
/*
* Extension Channel Rx Clear State
* Primary/Extension Channel Tx, Rx, Rx Clear State
*/
uint32_t ah_cycleCount;
uint32_t ah_ctlBusy;
uint32_t ah_extBusy;
uint32_t ah_rxBusy;
uint32_t ah_txBusy;
uint32_t ah_rx_chainmask;
uint32_t ah_tx_chainmask;
@ -194,6 +196,9 @@ extern uint32_t ar5416GetCurRssi(struct ath_hal *ah);
extern HAL_BOOL ar5416SetAntennaSwitch(struct ath_hal *, HAL_ANT_SETTING);
extern HAL_BOOL ar5416SetDecompMask(struct ath_hal *, uint16_t, int);
extern void ar5416SetCoverageClass(struct ath_hal *, uint8_t, int);
extern uint32_t ar5416GetMibCycleCountsPct(struct ath_hal *ah,
uint32_t *rxc_pcnt, uint32_t *rxextc_pcnt, uint32_t *rxf_pcnt,
uint32_t *txf_pcnt);
extern uint32_t ar5416Get11nExtBusy(struct ath_hal *ah);
extern void ar5416Set11nMac2040(struct ath_hal *ah, HAL_HT_MACMODE mode);
extern HAL_HT_RXCLEAR ar5416Get11nRxClear(struct ath_hal *ah);


@ -227,7 +227,7 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
u_int level = param;
HALDEBUG(ah, HAL_DEBUG_ANI, "%s: HAL_ANI_NOISE_IMMUNITY_LEVEL: set level = %d\n", __func__, level);
if (level >= params->maxNoiseImmunityLevel) {
if (level > params->maxNoiseImmunityLevel) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: immunity level out of range (%u > %u)\n",
__func__, level, params->maxNoiseImmunityLevel);
@ -314,7 +314,7 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
u_int level = param;
HALDEBUG(ah, HAL_DEBUG_ANI, "%s: HAL_ANI_FIRSTEP_LEVEL: level = %d\n", __func__, level);
if (level >= params->maxFirstepLevel) {
if (level > params->maxFirstepLevel) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: firstep level out of range (%u > %u)\n",
__func__, level, params->maxFirstepLevel);
@ -333,7 +333,7 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
u_int level = param;
HALDEBUG(ah, HAL_DEBUG_ANI, "%s: HAL_ANI_SPUR_IMMUNITY_LEVEL: level = %d\n", __func__, level);
if (level >= params->maxSpurImmunityLevel) {
if (level > params->maxSpurImmunityLevel) {
HALDEBUG(ah, HAL_DEBUG_ANI,
"%s: spur immunity level out of range (%u > %u)\n",
__func__, level, params->maxSpurImmunityLevel);
@ -342,11 +342,6 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
OS_REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1, params->cycPwrThr1[level]);
/* Only set the ext channel cycpwr_thr1 field for ht/40 */
if (IEEE80211_IS_CHAN_HT40(AH_PRIVATE(ah)->ah_curchan))
OS_REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, params->cycPwrThr1[level]);
if (level > aniState->spurImmunityLevel)
ahp->ah_stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
@ -384,20 +379,30 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
aniState = ahp->ah_curani;
params = aniState->params;
/* First, raise noise immunity level, up to max */
if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_NOISE_IMMUNITY_LEVEL)) &&
(aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel)) {
ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
aniState->noiseImmunityLevel + 1);
return;
if (aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel) {
if (ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
aniState->noiseImmunityLevel + 1))
return;
}
/* then, raise spur immunity level, up to max */
if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_SPUR_IMMUNITY_LEVEL)) &&
(aniState->spurImmunityLevel+1 < params->maxSpurImmunityLevel)) {
ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
aniState->spurImmunityLevel + 1);
return;
if (aniState->spurImmunityLevel+1 < params->maxSpurImmunityLevel) {
if (ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
aniState->spurImmunityLevel + 1))
return;
}
/*
* In the case of AP mode operation, we cannot bucketize beacons
* according to RSSI. Instead, raise Firstep level, up to max, and
* simply return.
*/
if (AH_PRIVATE(ah)->ah_opmode == HAL_M_HOSTAP) {
if (aniState->firstepLevel < params->maxFirstepLevel) {
if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel + 1))
return;
}
}
if (ANI_ENA_RSSI(ah)) {
int32_t rssi = BEACON_RSSI(ahp);
if (rssi > params->rssiThrHigh) {
@ -418,9 +423,9 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
* raise firstep level
*/
if (aniState->firstepLevel+1 < params->maxFirstepLevel) {
ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel + 1);
return;
if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel + 1))
return;
}
} else if (rssi > params->rssiThrLow) {
/*
@ -432,9 +437,9 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
AH_TRUE);
if (aniState->firstepLevel+1 < params->maxFirstepLevel)
ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel + 1);
return;
if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel + 1))
return;
} else {
/*
* Beacon rssi is low, if in 11b/g mode, turn off ofdm
@ -447,9 +452,9 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
AH_FALSE);
if (aniState->firstepLevel > 0)
ar5416AniControl(ah,
HAL_ANI_FIRSTEP_LEVEL, 0);
return;
if (ar5416AniControl(ah,
HAL_ANI_FIRSTEP_LEVEL, 0))
return;
}
}
}
@ -582,6 +587,16 @@ ar5416AniReset(struct ath_hal *ah, const struct ieee80211_channel *chan,
goto finish;
}
/*
* Use a restrictive set of ANI parameters for hostap mode.
*/
if (opmode == HAL_M_HOSTAP) {
if (IEEE80211_IS_CHAN_2GHZ(chan))
AH5416(ah)->ah_ani_function =
HAL_ANI_SPUR_IMMUNITY_LEVEL | HAL_ANI_FIRSTEP_LEVEL;
else
AH5416(ah)->ah_ani_function = 0;
}
/*
* Automatic processing is done only in station mode right now.
@ -611,7 +626,7 @@ ar5416AniReset(struct ath_hal *ah, const struct ieee80211_channel *chan,
ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL, 0);
ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL, 0);
ar5416AniControl(ah, HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
AH_TRUE);
AH_FALSE);
ar5416AniControl(ah, HAL_ANI_CCK_WEAK_SIGNAL_THR, AH_FALSE);
ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL, 0);
ichan->privFlags |= CHANNEL_ANI_SETUP;
@ -715,6 +730,19 @@ ar5416AniLowerImmunity(struct ath_hal *ah)
aniState = ahp->ah_curani;
params = aniState->params;
/*
* In the case of AP mode operation, we cannot bucketize beacons
* according to RSSI. Instead, lower Firstep level, down to min, and
* simply return.
*/
if (AH_PRIVATE(ah)->ah_opmode == HAL_M_HOSTAP) {
if (aniState->firstepLevel > 0) {
if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel - 1))
return;
}
}
if (ANI_ENA_RSSI(ah)) {
int32_t rssi = BEACON_RSSI(ahp);
if (rssi > params->rssiThrHigh) {
@ -729,41 +757,41 @@ ar5416AniLowerImmunity(struct ath_hal *ah)
* detection or lower firstep level.
*/
if (aniState->ofdmWeakSigDetectOff) {
ar5416AniControl(ah,
if (ar5416AniControl(ah,
HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION,
AH_TRUE);
return;
AH_TRUE))
return;
}
if (aniState->firstepLevel > 0) {
ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel - 1);
return;
if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel - 1))
return;
}
} else {
/*
* Beacon rssi is low, reduce firstep level.
*/
if (aniState->firstepLevel > 0) {
ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel - 1);
return;
if (ar5416AniControl(ah, HAL_ANI_FIRSTEP_LEVEL,
aniState->firstepLevel - 1))
return;
}
}
}
/* then lower spur immunity level, down to zero */
if (aniState->spurImmunityLevel > 0) {
ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
aniState->spurImmunityLevel - 1);
return;
if (ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
aniState->spurImmunityLevel - 1))
return;
}
/*
* if all else fails, lower noise immunity level down to a min value
* zero for now
*/
if (aniState->noiseImmunityLevel > 0) {
ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
aniState->noiseImmunityLevel - 1);
return;
if (ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
aniState->noiseImmunityLevel - 1))
return;
}
}
@ -782,15 +810,15 @@ ar5416AniGetListenTime(struct ath_hal *ah)
{
struct ath_hal_5212 *ahp = AH5212(ah);
struct ar5212AniState *aniState;
uint32_t txFrameCount, rxFrameCount, cycleCount;
uint32_t rxc_pct, extc_pct, rxf_pct, txf_pct;
int32_t listenTime;
int good;
txFrameCount = OS_REG_READ(ah, AR_TFCNT);
rxFrameCount = OS_REG_READ(ah, AR_RFCNT);
cycleCount = OS_REG_READ(ah, AR_CCCNT);
good = ar5416GetMibCycleCountsPct(ah,
&rxc_pct, &extc_pct, &rxf_pct, &txf_pct);
aniState = ahp->ah_curani;
if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
if (good == 0) {
/*
* Cycle counter wrap (or initial call); it's not possible
* to accurately calculate a value because the registers
@ -799,14 +827,18 @@ ar5416AniGetListenTime(struct ath_hal *ah)
listenTime = 0;
ahp->ah_stats.ast_ani_lzero++;
} else {
int32_t ccdelta = cycleCount - aniState->cycleCount;
int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
int32_t tfdelta = txFrameCount - aniState->txFrameCount;
int32_t ccdelta = AH5416(ah)->ah_cycleCount - aniState->cycleCount;
int32_t rfdelta = AH5416(ah)->ah_rxBusy - aniState->rxFrameCount;
int32_t tfdelta = AH5416(ah)->ah_txBusy - aniState->txFrameCount;
listenTime = (ccdelta - rfdelta - tfdelta) / CLOCK_RATE;
}
aniState->cycleCount = cycleCount;
aniState->txFrameCount = txFrameCount;
aniState->rxFrameCount = rxFrameCount;
aniState->cycleCount = AH5416(ah)->ah_cycleCount;
aniState->txFrameCount = AH5416(ah)->ah_rxBusy;
aniState->rxFrameCount = AH5416(ah)->ah_txBusy;
HALDEBUG(ah, HAL_DEBUG_ANI, "rxc=%d, extc=%d, rxf=%d, txf=%d\n",
rxc_pct, extc_pct, rxf_pct, txf_pct);
return listenTime;
}
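
A minimal standalone sketch of the listen-time arithmetic above: listen time is the number of cycles spent neither receiving nor transmitting frames, scaled by the MAC clock rate. The constant and helper names below are hypothetical, and 44000 cycles per millisecond is only an assumed clock rate; the HAL's real CLOCK_RATE is defined elsewhere.

#include <stdint.h>
#include <stdio.h>

#define EX_CLOCK_RATE	44000	/* assumed: MAC cycles per millisecond */

/* Listen time (ms) from cycle/rx-frame/tx-frame counter deltas. */
static int32_t
ex_listen_time(int32_t ccdelta, int32_t rfdelta, int32_t tfdelta)
{
	return (ccdelta - rfdelta - tfdelta) / EX_CLOCK_RATE;
}

int
main(void)
{
	/* 44M cycles elapsed; 4.4M spent on RX frames, 2.2M on TX frames. */
	printf("listen time: %d ms\n",
	    (int)ex_listen_time(44000000, 4400000, 2200000));	/* 850 ms */
	return (0);
}
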
@ -873,10 +905,13 @@ ar5416AniPoll(struct ath_hal *ah, const struct ieee80211_channel *chan)
/* XXX can aniState be null? */
if (aniState == AH_NULL)
return;
/* Always update from the MIB, for statistics gathering */
listenTime = ar5416AniGetListenTime(ah);
if (!ANI_ENA(ah))
return;
listenTime = ar5416AniGetListenTime(ah);
if (listenTime < 0) {
ahp->ah_stats.ast_ani_lneg++;
/* restart ANI period if listenTime is invalid */


@ -908,17 +908,36 @@ ar5416FillCapabilityInfo(struct ath_hal *ah)
pCap->halRfSilentSupport = AH_TRUE;
}
/*
* The MAC will mark frames as RXed if there's a descriptor
* to write them to. So if it hits a self-linked final descriptor,
* it'll keep ACKing frames even though they're being silently
* dropped. Thus, this particular feature of the driver can't
* be used for 802.11n devices.
*/
ahpriv->ah_rxornIsFatal = AH_FALSE;
/*
* If it's a PCI NIC, ask the HAL OS layer to serialise
* register access, or SMP machines may cause the hardware
* to hang. This is applicable to AR5416 and AR9220; I'm not
* sure about AR9160 or AR9227.
*/
if (! AH_PRIVATE(ah)->ah_ispcie)
pCap->halSerialiseRegWar = 1;
return AH_TRUE;
}
static const char*
ar5416Probe(uint16_t vendorid, uint16_t devid)
{
if (vendorid == ATHEROS_VENDOR_ID &&
(devid == AR5416_DEVID_PCI || devid == AR5416_DEVID_PCIE))
return "Atheros 5416";
if (vendorid == ATHEROS_VENDOR_ID) {
if (devid == AR5416_DEVID_PCI)
return "Atheros 5416";
if (devid == AR5416_DEVID_PCIE)
return "Atheros 5418";
}
return AH_NULL;
}
AH_CHIP(AR5416, ar5416Probe, ar5416Attach);


@ -642,15 +642,7 @@ ar5416LoadNF(struct ath_hal *ah, const struct ieee80211_channel *chan)
OS_REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
/* Wait for load to complete, should be fast, a few 10s of us. */
/*
* XXX For now, don't be so aggressive in waiting for the NF
* XXX load to complete. A very busy 11n RX load will cause this
* XXX to always fail; so just leave it.
* XXX Later on we may wish to split longcal into two parts - one to do
* XXX the initial longcal, and one to load in an updated NF value
* XXX once it's finished - say, by checking it every 500ms.
*/
if (! ar5212WaitNFCalComplete(ah, 5)) {
if (! ar5212WaitNFCalComplete(ah, 1000)) {
/*
* We timed out waiting for the noisefloor to load, probably due to an
* in-progress rx. Simply return here and allow the load plenty of time


@ -70,6 +70,13 @@ ar5416GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
uint32_t isr, isr0, isr1, sync_cause = 0;
HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
#ifdef AH_INTERRUPT_DEBUGGING
/*
* Blank the interrupt debugging area regardless.
*/
bzero(&ah->ah_intrstate, sizeof(ah->ah_intrstate));
#endif
/*
* Verify there's a mac interrupt and the RTC is on.
*/
@ -90,6 +97,16 @@ ar5416GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
return AH_FALSE;
}
#ifdef AH_INTERRUPT_DEBUGGING
ah->ah_intrstate[0] = isr;
ah->ah_intrstate[1] = OS_REG_READ(ah, AR_ISR_S0);
ah->ah_intrstate[2] = OS_REG_READ(ah, AR_ISR_S1);
ah->ah_intrstate[3] = OS_REG_READ(ah, AR_ISR_S2);
ah->ah_intrstate[4] = OS_REG_READ(ah, AR_ISR_S3);
ah->ah_intrstate[5] = OS_REG_READ(ah, AR_ISR_S4);
ah->ah_intrstate[6] = OS_REG_READ(ah, AR_ISR_S5);
#endif
if (isr != 0) {
struct ath_hal_5212 *ahp = AH5212(ah);
uint32_t mask2;


@ -171,6 +171,57 @@ ar5416SetCoverageClass(struct ath_hal *ah, uint8_t coverageclass, int now)
AH_PRIVATE(ah)->ah_coverageClass = coverageclass;
}
/*
* Return the busy for rx_frame, rx_clear, and tx_frame
*/
uint32_t
ar5416GetMibCycleCountsPct(struct ath_hal *ah, uint32_t *rxc_pcnt,
uint32_t *extc_pcnt, uint32_t *rxf_pcnt, uint32_t *txf_pcnt)
{
struct ath_hal_5416 *ahp = AH5416(ah);
u_int32_t good = 1;
/* XXX freeze/unfreeze mib counters */
uint32_t rc = OS_REG_READ(ah, AR_RCCNT);
uint32_t ec = OS_REG_READ(ah, AR_EXTRCCNT);
uint32_t rf = OS_REG_READ(ah, AR_RFCNT);
uint32_t tf = OS_REG_READ(ah, AR_TFCNT);
uint32_t cc = OS_REG_READ(ah, AR_CCCNT); /* read cycles last */
if (ahp->ah_cycleCount == 0 || ahp->ah_cycleCount > cc) {
/*
* Cycle counter wrap (or initial call); it's not possible
* to accurately calculate a value because the registers
* right shift rather than wrap--so punt and return 0.
*/
HALDEBUG(ah, HAL_DEBUG_ANY,
"%s: cycle counter wrap. ExtBusy = 0\n", __func__);
good = 0;
} else {
uint32_t cc_d = cc - ahp->ah_cycleCount;
uint32_t rc_d = rc - ahp->ah_ctlBusy;
uint32_t ec_d = ec - ahp->ah_extBusy;
uint32_t rf_d = rf - ahp->ah_rxBusy;
uint32_t tf_d = tf - ahp->ah_txBusy;
if (cc_d != 0) {
*rxc_pcnt = rc_d * 100 / cc_d;
*rxf_pcnt = rf_d * 100 / cc_d;
*txf_pcnt = tf_d * 100 / cc_d;
*extc_pcnt = ec_d * 100 / cc_d;
} else {
good = 0;
}
}
ahp->ah_cycleCount = cc;
ahp->ah_rxBusy = rf;
ahp->ah_ctlBusy = rc;
ahp->ah_txBusy = tf;
ahp->ah_extBusy = ec;
return good;
}
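
ar5416GetMibCycleCountsPct() above follows a snapshot/delta pattern: read the raw MIB counters, subtract the values saved on the previous call, express each delta as a percentage of the elapsed cycle count, and return 0 (sample unusable) on the first call or when the counters have wrapped. A minimal sketch of that pattern for a single counter, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Counter values saved from the previous call. */
struct ex_snap {
	uint32_t cycles;
	uint32_t rx_clear;
};

/* Return 1 and fill *pct on success; 0 if the counters wrapped or reset. */
static int
ex_rx_clear_pct(struct ex_snap *prev, uint32_t cc, uint32_t rc, uint32_t *pct)
{
	int good = 1;

	if (prev->cycles == 0 || prev->cycles > cc) {
		good = 0;	/* first call or wrap: no usable delta */
	} else {
		uint32_t cc_d = cc - prev->cycles;
		uint32_t rc_d = rc - prev->rx_clear;

		if (cc_d != 0)
			*pct = rc_d * 100 / cc_d;
		else
			good = 0;
	}
	prev->cycles = cc;
	prev->rx_clear = rc;
	return (good);
}

int
main(void)
{
	struct ex_snap s = { 0, 0 };
	uint32_t pct;

	ex_rx_clear_pct(&s, 1000, 100, &pct);		/* first call: discarded */
	if (ex_rx_clear_pct(&s, 5000, 2100, &pct))	/* 4000 cycles, 2000 busy */
		printf("rx_clear busy: %u%%\n", pct);	/* 50% */
	return (0);
}
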
/*
* Return approximation of extension channel busy over a time interval
* 0% (clear) -> 100% (busy)


@ -146,7 +146,9 @@ ar5416Reset(struct ath_hal *ah, HAL_OPMODE opmode,
/* For chips on which the RTC reset is done, save TSF before it gets cleared */
if (AR_SREV_HOWL(ah) ||
(AR_SREV_MERLIN(ah) && ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)))
(AR_SREV_MERLIN(ah) &&
ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) ||
(ah->ah_config.ah_force_full_reset))
tsf = ar5416GetTsf64(ah);
/* Mark PHY as inactive; marked active in ar5416InitBB() */
@ -733,12 +735,15 @@ ar5416ChipReset(struct ath_hal *ah, const struct ieee80211_channel *chan)
{
OS_MARK(ah, AH_MARK_CHIPRESET, chan ? chan->ic_freq : 0);
/*
* Warm reset is optimistic.
* Warm reset is optimistic for open-loop TX power control.
*/
if (AR_SREV_MERLIN(ah) &&
ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) {
if (!ar5416SetResetReg(ah, HAL_RESET_POWER_ON))
return AH_FALSE;
} else if (ah->ah_config.ah_force_full_reset) {
if (!ar5416SetResetReg(ah, HAL_RESET_POWER_ON))
return AH_FALSE;
} else {
if (!ar5416SetResetReg(ah, HAL_RESET_WARM))
return AH_FALSE;
@ -1209,6 +1214,12 @@ ar5416PhyDisable(struct ath_hal *ah)
HAL_BOOL
ar5416SetResetReg(struct ath_hal *ah, uint32_t type)
{
/*
* Set force wake
*/
OS_REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
switch (type) {
case HAL_RESET_POWER_ON:
return ar5416SetResetPowerOn(ah);
@ -1239,10 +1250,15 @@ ar5416SetResetPowerOn(struct ath_hal *ah)
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
/*
* RTC reset and clear
* PowerOn reset can be used in open loop power control or failure recovery.
* If we do RTC reset while DMA is still running, hardware may corrupt memory.
* Therefore, we need to reset AHB first to stop DMA.
*/
if (! AR_SREV_HOWL(ah))
OS_REG_WRITE(ah, AR_RC, AR_RC_AHB);
/*
* RTC reset and clear
*/
OS_REG_WRITE(ah, AR_RTC_RESET, 0);
OS_DELAY(20);
@ -1293,6 +1309,11 @@ ar5416SetReset(struct ath_hal *ah, int type)
#endif /* AH_SUPPORT_AR9130 */
/*
* Reset AHB
*
* (In case the last interrupt source was a bus timeout.)
* XXX TODO: this is not the way to do it! It should be recorded
* XXX by the interrupt handler and passed _into_ the
* XXX reset path routine so this occurs.
*/
tmpReg = OS_REG_READ(ah, AR_INTR_SYNC_CAUSE);
if (tmpReg & (AR_INTR_SYNC_LOCAL_TIMEOUT|AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
@ -2608,7 +2629,7 @@ ar5416OverrideIni(struct ath_hal *ah, const struct ieee80211_channel *chan)
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
if (AR_SREV_KIWI_11_OR_LATER(ah))
if (AR_SREV_KIWI_10_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
OS_REG_WRITE(ah, AR_PCU_MISC_MODE2, val);


@ -121,12 +121,6 @@
#define AR_PHY_EXT_MINCCA_PWR_S 23
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
/*
* This duplicates AR_PHY_EXT_CCA_CYCPWR_THR1; it reads more like
* an ANI register this way.
*/
#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00
#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR9280_PHY_EXT_MINCCA_PWR_S 16


@ -612,10 +612,10 @@
#define AR_XSREV_REVISION_KITE_11 1 /* Kite 1.1 */
#define AR_XSREV_REVISION_KITE_12 2 /* Kite 1.2 */
#define AR_XSREV_VERSION_KIWI 0x180 /* Kiwi (AR9287) */
#define AR_XSREV_REVISION_KIWI_10 0
#define AR_XSREV_REVISION_KIWI_11 1
#define AR_XSREV_REVISION_KIWI_12 2
#define AR_XSREV_REVISION_KIWI_13 3
#define AR_XSREV_REVISION_KIWI_10 0 /* Kiwi 1.0 */
#define AR_XSREV_REVISION_KIWI_11 1 /* Kiwi 1.1 */
#define AR_XSREV_REVISION_KIWI_12 2 /* Kiwi 1.2 */
#define AR_XSREV_REVISION_KIWI_13 3 /* Kiwi 1.3 */
/* Owl (AR5416) */
#define AR_SREV_OWL(_ah) \
@ -701,6 +701,10 @@
#define AR_SREV_KIWI(_ah) \
(AH_PRIVATE((_ah))->ah_macVersion == AR_XSREV_VERSION_KIWI)
#define AR_SREV_KIWI_10_OR_LATER(_ah) \
(AH_PRIVATE((_ah))->ah_macVersion >= AR_XSREV_VERSION_KIWI)
/* XXX TODO: make these handle macVersion > Kiwi */
#define AR_SREV_KIWI_11_OR_LATER(_ah) \
(AR_SREV_KIWI(_ah) && \
AH_PRIVATE((_ah))->ah_macRev >= AR_XSREV_REVISION_KIWI_11)


@ -892,9 +892,12 @@ ar9280SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
static const char*
ar9280Probe(uint16_t vendorid, uint16_t devid)
{
if (vendorid == ATHEROS_VENDOR_ID &&
(devid == AR9280_DEVID_PCI || devid == AR9280_DEVID_PCIE))
return "Atheros 9280";
if (vendorid == ATHEROS_VENDOR_ID) {
if (devid == AR9280_DEVID_PCI)
return "Atheros 9220";
if (devid == AR9280_DEVID_PCIE)
return "Atheros 9280";
}
return AH_NULL;
}
AH_CHIP(AR9280, ar9280Probe, ar9280Attach);


@ -76,7 +76,7 @@ ar9287AniSetup(struct ath_hal *ah)
/*
* These are the parameters from the AR5416 ANI code;
* they likely need quite a bit of adjustment for the
* AR9280.
* AR9287.
*/
static const struct ar5212AniParams aniparams = {
.maxNoiseImmunityLevel = 4, /* levels 0..4 */
@ -402,13 +402,6 @@ ar9287WriteIni(struct ath_hal *ah, const struct ieee80211_channel *chan)
regWrites = ath_hal_ini_write(ah, &AH5212(ah)->ah_ini_common, 1, regWrites);
}
#define AR_BASE_FREQ_2GHZ 2300
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
/*
* Fill all software cached or static hardware state information.
* Return failure if capabilities are to come from EEPROM and
@ -460,7 +453,7 @@ ar9287FillCapabilityInfo(struct ath_hal *ah)
* This has been disabled - having the HAL flip chainmasks on/off
* when attempting to implement 11n disrupts things. For now, just
* leave this flipped off and worry about implementing TX diversity
* for legacy and MCS0-7 when 11n is fully functioning.
* for legacy and MCS0-15 when 11n is fully functioning.
*/
HAL_BOOL
ar9287SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
@ -471,9 +464,12 @@ ar9287SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
static const char*
ar9287Probe(uint16_t vendorid, uint16_t devid)
{
if (vendorid == ATHEROS_VENDOR_ID &&
(devid == AR9287_DEVID_PCI || devid == AR9287_DEVID_PCIE))
return "Atheros 9287";
if (vendorid == ATHEROS_VENDOR_ID) {
if (devid == AR9287_DEVID_PCI)
return "Atheros 9227";
if (devid == AR9287_DEVID_PCIE)
return "Atheros 9287";
}
return AH_NULL;
}
AH_CHIP(AR9287, ar9287Probe, ar9287Attach);


@ -122,19 +122,21 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
uint8_t rix0, uint8_t *rix, uint8_t *try)
uint8_t rix0, struct ath_rc_series *rc)
{
struct amrr_node *amn = ATH_NODE_AMRR(an);
/* rix[0] = amn->amn_tx_rate0; */
rix[1] = amn->amn_tx_rate1;
rix[2] = amn->amn_tx_rate2;
rix[3] = amn->amn_tx_rate3;
rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
try[0] = amn->amn_tx_try0;
try[1] = amn->amn_tx_try1;
try[2] = amn->amn_tx_try2;
try[3] = amn->amn_tx_try3;
rc[0].rix = amn->amn_tx_rate0;
rc[1].rix = amn->amn_tx_rate1;
rc[2].rix = amn->amn_tx_rate2;
rc[3].rix = amn->amn_tx_rate3;
rc[0].tries = amn->amn_tx_try0;
rc[1].tries = amn->amn_tx_try1;
rc[2].tries = amn->amn_tx_try2;
rc[3].tries = amn->amn_tx_try3;
}
@ -153,10 +155,10 @@ ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
const struct ath_buf *bf)
const struct ath_rc_series *rc, const struct ath_tx_status *ts,
int frame_size, int nframes, int nbad)
{
struct amrr_node *amn = ATH_NODE_AMRR(an);
const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
int sr = ts->ts_shortretry;
int lr = ts->ts_longretry;
int retry_count = sr + lr;


@ -130,19 +130,21 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
uint8_t rix0, uint8_t *rix, uint8_t *try)
uint8_t rix0, struct ath_rc_series *rc)
{
struct onoe_node *on = ATH_NODE_ONOE(an);
/* rix[0] = on->on_tx_rate0; */
rix[1] = on->on_tx_rate1;
rix[2] = on->on_tx_rate2;
rix[3] = on->on_tx_rate3;
rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
try[0] = on->on_tx_try0;
try[1] = 2;
try[2] = 2;
try[3] = 2;
rc[0].rix = on->on_tx_rate0;
rc[1].rix = on->on_tx_rate1;
rc[2].rix = on->on_tx_rate2;
rc[3].rix = on->on_tx_rate3;
rc[0].tries = on->on_tx_try0;
rc[1].tries = 2;
rc[2].tries = 2;
rc[3].tries = 2;
}
void
@ -160,10 +162,10 @@ ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
const struct ath_buf *bf)
const struct ath_rc_series *rc, const struct ath_tx_status *ts,
int frame_size, int nframes, int nbad)
{
struct onoe_node *on = ATH_NODE_ONOE(an);
const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
if (ts->ts_status == 0)
on->on_tx_ok++;


@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
*/
#include "opt_inet.h"
#include "opt_wlan.h"
#include "opt_ah.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -146,6 +147,8 @@ ath_rate_node_cleanup(struct ath_softc *sc, struct ath_node *an)
static int
dot11rate(const HAL_RATE_TABLE *rt, int rix)
{
if (rix < 0)
return -1;
return rt->info[rix].phy == IEEE80211_T_HT ?
rt->info[rix].dot11Rate : (rt->info[rix].dot11Rate & IEEE80211_RATE_VAL) / 2;
}
@ -153,6 +156,8 @@ dot11rate(const HAL_RATE_TABLE *rt, int rix)
static const char *
dot11rate_label(const HAL_RATE_TABLE *rt, int rix)
{
if (rix < 0)
return "";
return rt->info[rix].phy == IEEE80211_T_HT ? "MCS" : "Mb ";
}
@ -165,12 +170,13 @@ pick_best_rate(struct ath_node *an, const HAL_RATE_TABLE *rt,
int size_bin, int require_acked_before)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
int best_rate_rix, best_rate_tt;
int best_rate_rix, best_rate_tt, best_rate_pct;
uint32_t mask;
int rix, tt;
int rix, tt, pct;
best_rate_rix = 0;
best_rate_tt = 0;
best_rate_pct = 0;
for (mask = sn->ratemask, rix = 0; mask != 0; mask >>= 1, rix++) {
if ((mask & 1) == 0) /* not a supported rate */
continue;
@ -187,13 +193,54 @@ pick_best_rate(struct ath_node *an, const HAL_RATE_TABLE *rt,
!sn->stats[size_bin][rix].packets_acked))
continue;
/* Calculate percentage if possible */
if (sn->stats[size_bin][rix].total_packets > 0) {
pct = sn->stats[size_bin][rix].ewma_pct;
} else {
/* XXX for now, assume 95% ok */
pct = 95;
}
/* don't use a bit-rate that has been failing */
if (sn->stats[size_bin][rix].successive_failures > 3)
continue;
if (best_rate_tt == 0 || tt < best_rate_tt) {
best_rate_tt = tt;
best_rate_rix = rix;
/*
* For HT, don't use a bit rate that is much more
* lossy than the best.
*
* XXX this isn't optimal; it's just designed to
* eliminate rates that are going to be obviously
* worse.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
if (best_rate_pct > (pct + 50))
continue;
}
/*
* For non-MCS rates, use the current average txtime for
* comparison.
*/
if (! (an->an_node.ni_flags & IEEE80211_NODE_HT)) {
if (best_rate_tt == 0 || tt <= best_rate_tt) {
best_rate_tt = tt;
best_rate_rix = rix;
best_rate_pct = pct;
}
}
/*
* Since 2 stream rates have slightly higher TX times,
* allow a little bit of leeway. This should later
* be abstracted out and properly handled.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
if (best_rate_tt == 0 || (tt * 8 <= best_rate_tt * 10)) {
best_rate_tt = tt;
best_rate_rix = rix;
best_rate_pct = pct;
}
}
}
return (best_rate_tt ? best_rate_rix : -1);
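/*
 * Worked example of the HT comparisons above (illustrative numbers
 * only): a candidate replaces the current best when tt * 8 <=
 * best_rate_tt * 10, i.e. when its average tx time is no more than
 * 25% above the best seen so far.  With best_rate_tt = 400us, a
 * 500us candidate still qualifies (500 * 8 = 4000 <= 400 * 10 = 4000)
 * while a 510us candidate does not (4080 > 4000).  The earlier
 * best_rate_pct > (pct + 50) test likewise skips candidates whose
 * EWMA success figure trails the current best by more than 50 on
 * the scale ewma_pct is kept in.
 */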
@ -252,6 +299,28 @@ pick_sample_rate(struct sample_softc *ssc , struct ath_node *an,
goto nextrate;
}
/*
* When doing aggregation, successive failures don't happen
* as often, as sometimes some of the sub-frames get through.
*
* If the sample rix average tx time is greater than the
* average tx time of the current rix, don't immediately use
* the rate for sampling.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
if ((sn->stats[size_bin][rix].average_tx_time * 10 >
sn->stats[size_bin][current_rix].average_tx_time * 9) &&
(ticks - sn->stats[size_bin][rix].last_tx < ssc->stale_failure_timeout)) {
mask &= ~(1<<rix);
goto nextrate;
}
}
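/*
 * Illustrative numbers for the 10/9 gate above: with the current
 * rate's average_tx_time at 900us, a sample rate whose own
 * average_tx_time exceeds 810us (900 * 9 / 10) is skipped while it
 * has been used within stale_failure_timeout; e.g. 820us is deferred
 * (820 * 10 = 8200 > 900 * 9 = 8100) while 800us remains eligible
 * for sampling.
 */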
/*
* XXX TODO
* For HT, limit sample somehow?
*/
/* Don't sample more than 2 rates higher for rates > 11M for non-HT rates */
if (! (an->an_node.ni_flags & IEEE80211_NODE_HT)) {
if (DOT11RATE(rix) > 2*11 && rix > current_rix + 2) {
@ -315,6 +384,96 @@ ath_rate_update_static_rix(struct ath_softc *sc, struct ieee80211_node *ni)
}
}
/*
* Pick a non-HT rate to begin using.
*/
static int
ath_rate_pick_seed_rate_legacy(struct ath_softc *sc, struct ath_node *an,
int frameLen)
{
#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
#define RATE(ix) (DOT11RATE(ix) / 2)
int rix = -1;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const int size_bin = size_to_bin(frameLen);
/* no packet has been sent successfully yet */
for (rix = rt->rateCount-1; rix > 0; rix--) {
if ((sn->ratemask & (1<<rix)) == 0)
continue;
/* Skip HT rates */
if (rt->info[rix].phy == IEEE80211_T_HT)
continue;
/*
* Pick the highest rate <= 36 Mbps
* that hasn't failed.
*/
if (DOT11RATE(rix) <= 72 &&
sn->stats[size_bin][rix].successive_failures == 0) {
break;
}
}
return rix;
#undef RATE
#undef MCS
#undef DOT11RATE
}
/*
* Pick a HT rate to begin using.
*
* Don't use any non-HT rates; only consider HT rates.
*/
static int
ath_rate_pick_seed_rate_ht(struct ath_softc *sc, struct ath_node *an,
int frameLen)
{
#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
#define RATE(ix) (DOT11RATE(ix) / 2)
int rix = -1, ht_rix = -1;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const int size_bin = size_to_bin(frameLen);
/* no packet has been sent successfully yet */
for (rix = rt->rateCount-1; rix > 0; rix--) {
/* Skip rates we can't use */
if ((sn->ratemask & (1<<rix)) == 0)
continue;
/* Keep a copy of the last seen HT rate index */
if (rt->info[rix].phy == IEEE80211_T_HT)
ht_rix = rix;
/* Skip non-HT rates */
if (rt->info[rix].phy != IEEE80211_T_HT)
continue;
/*
* Pick a medium-speed rate regardless of stream count
* which has not seen any failures. Higher rates may fail;
* we'll try them later.
*/
if (((MCS(rix) & 0x7) <= 4) &&
sn->stats[size_bin][rix].successive_failures == 0) {
break;
}
}
/*
* If all the MCS rates have successive failures, rix should be
* > 0; otherwise use the lowest MCS rix (hopefully MCS 0.)
*/
return MAX(rix, ht_rix);
#undef RATE
#undef MCS
#undef DOT11RATE
}
void
@ -358,9 +517,14 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
if (sn->sample_tt[size_bin] < average_tx_time * (sn->packets_since_sample[size_bin]*ssc->sample_rate/100)) {
rix = pick_sample_rate(ssc, an, rt, size_bin);
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node, "size %u sample rate %d current rate %d",
bin_to_size(size_bin), RATE(rix),
RATE(sn->current_rix[size_bin]));
&an->an_node, "att %d sample_tt %d size %u sample rate %d %s current rate %d %s",
average_tx_time,
sn->sample_tt[size_bin],
bin_to_size(size_bin),
dot11rate(rt, rix),
dot11rate_label(rt, rix),
dot11rate(rt, sn->current_rix[size_bin]),
dot11rate_label(rt, sn->current_rix[size_bin]));
if (rix != sn->current_rix[size_bin]) {
sn->current_sample_rix[size_bin] = rix;
} else {
@ -371,29 +535,58 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
change_rates = 0;
if (!sn->packets_sent[size_bin] || best_rix == -1) {
/* no packet has been sent successfully yet */
for (rix = rt->rateCount-1; rix > 0; rix--) {
if ((sn->ratemask & (1<<rix)) == 0)
continue;
/*
* Pick the highest rate <= 36 Mbps
* that hasn't failed.
*/
if (DOT11RATE(rix) <= 72 &&
sn->stats[size_bin][rix].successive_failures == 0) {
break;
}
}
change_rates = 1;
best_rix = rix;
if (an->an_node.ni_flags & IEEE80211_NODE_HT)
best_rix =
ath_rate_pick_seed_rate_ht(sc, an, frameLen);
else
best_rix =
ath_rate_pick_seed_rate_legacy(sc, an, frameLen);
} else if (sn->packets_sent[size_bin] < 20) {
/* let the bit-rate switch quickly during the first few packets */
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: switching quickly..", __func__);
change_rates = 1;
} else if (ticks - ssc->min_switch > sn->ticks_since_switch[size_bin]) {
/* min_switch seconds have gone by */
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: min_switch %d > ticks_since_switch %d..",
__func__, ticks - ssc->min_switch, sn->ticks_since_switch[size_bin]);
change_rates = 1;
} else if (2*average_tx_time < sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time) {
} else if ((! (an->an_node.ni_flags & IEEE80211_NODE_HT)) &&
(2*average_tx_time < sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time)) {
/* the current bit-rate is twice as slow as the best one */
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: 2x att (= %d) < cur_rix att %d",
__func__,
2 * average_tx_time, sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time);
change_rates = 1;
} else if ((an->an_node.ni_flags & IEEE80211_NODE_HT)) {
int cur_rix = sn->current_rix[size_bin];
int cur_att = sn->stats[size_bin][cur_rix].average_tx_time;
/*
* If the node is HT, upgrade it if the MCS rate is
* higher and the average tx time is within 20% of
* the current rate. It can fail a little.
*
* This is likely not optimal!
*/
#if 0
printf("cur rix/att %x/%d, best rix/att %x/%d\n",
MCS(cur_rix), cur_att, MCS(best_rix), average_tx_time);
#endif
if ((MCS(best_rix) > MCS(cur_rix)) &&
(average_tx_time * 8) <= (cur_att * 10)) {
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: HT: best_rix 0x%d > cur_rix 0x%x, average_tx_time %d, cur_att %d",
__func__,
MCS(best_rix), MCS(cur_rix), average_tx_time, cur_att);
change_rates = 1;
}
}
sn->packets_since_sample[size_bin]++;
@ -445,22 +638,24 @@ done:
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
uint8_t rix0, uint8_t *rix, uint8_t *try)
uint8_t rix0, struct ath_rc_series *rc)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const struct txschedule *sched = &sn->sched[rix0];
KASSERT(rix0 == sched->r0, ("rix0 (%x) != sched->r0 (%x)!\n", rix0, sched->r0));
/* rix[0] = sched->r0; */
rix[1] = sched->r1;
rix[2] = sched->r2;
rix[3] = sched->r3;
rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
try[0] = sched->t0;
try[1] = sched->t1;
try[2] = sched->t2;
try[3] = sched->t3;
rc[0].rix = sched->r0;
rc[1].rix = sched->r1;
rc[2].rix = sched->r2;
rc[3].rix = sched->r3;
rc[0].tries = sched->t0;
rc[1].tries = sched->t1;
rc[2].tries = sched->t2;
rc[3].tries = sched->t3;
}
void
@ -488,6 +683,71 @@ ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
s3code, sched->t3); /* series 3 */
}
/*
* Update the EWMA percentage.
*
* This is a simple hack to track an EWMA based on the current
* rate scenario. For the rate codes which failed, this will
* record 0% against them. For the rate code which succeeded,
* the EWMA will record the (nframes - nbad) * 100 / nframes
* success percentage against it.
*/
static void
update_ewma_stats(struct ath_softc *sc, struct ath_node *an,
int frame_size,
int rix0, int tries0,
int rix1, int tries1,
int rix2, int tries2,
int rix3, int tries3,
int short_tries, int tries, int status,
int nframes, int nbad)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
const int size_bin = size_to_bin(frame_size);
int tries_so_far;
int pct;
int rix = rix0;
/* Calculate percentage based on current rate */
if (nframes == 0)
nframes = nbad = 1;
pct = ((nframes - nbad) * 1000) / nframes;
/* Figure out which rate index succeeded */
tries_so_far = tries0;
if (tries1 && tries_so_far < tries) {
tries_so_far += tries1;
rix = rix1;
/* XXX bump ewma pct */
}
if (tries2 && tries_so_far < tries) {
tries_so_far += tries2;
rix = rix2;
/* XXX bump ewma pct */
}
if (tries3 && tries_so_far < tries) {
rix = rix3;
/* XXX bump ewma pct */
}
/* rix is the successful rate, update EWMA for final rix */
if (sn->stats[size_bin][rix].total_packets <
ssc->smoothing_minpackets) {
/* just average the first few packets */
int a_pct = (sn->stats[size_bin][rix].packets_acked * 1000) /
(sn->stats[size_bin][rix].total_packets);
sn->stats[size_bin][rix].ewma_pct = a_pct;
} else {
/* use a ewma */
sn->stats[size_bin][rix].ewma_pct =
((sn->stats[size_bin][rix].ewma_pct * ssc->smoothing_rate) +
(pct * (100 - ssc->smoothing_rate))) / 100;
}
}
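
The smoothing step above is a standard exponentially weighted moving average: the old value keeps smoothing_rate percent of its weight and the new sample contributes the remaining (100 - smoothing_rate) percent, with a plain average used until smoothing_minpackets packets have been seen. A minimal standalone sketch of one update, with hypothetical names and an assumed smoothing rate of 95; percentages are kept in tenths, as in the stats above.

#include <stdio.h>

#define EX_SMOOTHING_RATE	95	/* assumed: % of weight kept by the old value */

/* One EWMA step; both values are in tenths of a percent. */
static int
ex_ewma_step(int old_pct, int sample_pct)
{
	return ((old_pct * EX_SMOOTHING_RATE) +
	    (sample_pct * (100 - EX_SMOOTHING_RATE))) / 100;
}

int
main(void)
{
	int ewma = 1000;	/* 100.0% so far */

	/* An aggregate where 2 of 32 subframes failed: (32 - 2) * 1000 / 32 = 937. */
	ewma = ex_ewma_step(ewma, 937);
	printf("ewma = %d.%d%%\n", ewma / 10, ewma % 10);	/* 99.6% */
	return (0);
}
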
static void
update_stats(struct ath_softc *sc, struct ath_node *an,
int frame_size,
@ -495,10 +755,14 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
int rix1, int tries1,
int rix2, int tries2,
int rix3, int tries3,
int short_tries, int tries, int status)
int short_tries, int tries, int status,
int nframes, int nbad)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
#ifdef IEEE80211_DEBUG
const HAL_RATE_TABLE *rt = sc->sc_currates;
#endif
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt, tries_so_far;
@ -537,7 +801,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
/* just average the first few packets */
int avg_tx = sn->stats[size_bin][rix0].average_tx_time;
int packets = sn->stats[size_bin][rix0].total_packets;
sn->stats[size_bin][rix0].average_tx_time = (tt+(avg_tx*packets))/(packets+1);
sn->stats[size_bin][rix0].average_tx_time = (tt+(avg_tx*packets))/(packets+nframes);
} else {
/* use a ewma */
sn->stats[size_bin][rix0].average_tx_time =
@ -545,38 +809,50 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
(tt * (100 - ssc->smoothing_rate))) / 100;
}
if (status != 0) {
/*
* XXX Don't mark the higher bit rates as also having failed, as this
* unfortunately stops those rates from being tasted when trying to
* TX. This happens with 11n aggregation.
*/
if (nframes == nbad) {
#if 0
int y;
sn->stats[size_bin][rix0].successive_failures++;
#endif
sn->stats[size_bin][rix0].successive_failures += nbad;
#if 0
for (y = size_bin+1; y < NUM_PACKET_SIZE_BINS; y++) {
/*
* Also say larger packets failed since we
* assume if a small packet fails at a
* bit-rate then a larger one will also.
*/
sn->stats[y][rix0].successive_failures++;
sn->stats[y][rix0].successive_failures += nbad;
sn->stats[y][rix0].last_tx = ticks;
sn->stats[y][rix0].tries += tries;
sn->stats[y][rix0].total_packets++;
sn->stats[y][rix0].total_packets += nframes;
}
#endif
} else {
sn->stats[size_bin][rix0].packets_acked++;
sn->stats[size_bin][rix0].packets_acked += (nframes - nbad);
sn->stats[size_bin][rix0].successive_failures = 0;
}
sn->stats[size_bin][rix0].tries += tries;
sn->stats[size_bin][rix0].last_tx = ticks;
sn->stats[size_bin][rix0].total_packets++;
sn->stats[size_bin][rix0].total_packets += nframes;
if (rix0 == sn->current_sample_rix[size_bin]) {
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
"%s: size %d %s sample rate %d tries (%d/%d) tt %d avg_tt (%d/%d)",
"%s: size %d %s sample rate %d %s tries (%d/%d) tt %d avg_tt (%d/%d) nfrm %d nbad %d",
__func__,
size,
status ? "FAIL" : "OK",
rix0, short_tries, tries, tt,
dot11rate(rt, rix0),
dot11rate_label(rt, rix0),
short_tries, tries, tt,
sn->stats[size_bin][rix0].average_tx_time,
sn->stats[size_bin][rix0].perfect_tx_time);
sn->stats[size_bin][rix0].perfect_tx_time,
nframes, nbad);
sn->sample_tt[size_bin] = tt;
sn->current_sample_rix[size_bin] = -1;
}
@ -591,21 +867,21 @@ badrate(struct ifnet *ifp, int series, int hwrate, int tries, int status)
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
const struct ath_buf *bf)
const struct ath_rc_series *rc, const struct ath_tx_status *ts,
int frame_size, int nframes, int nbad)
{
struct ifnet *ifp = sc->sc_ifp;
struct ieee80211com *ic = ifp->if_l2com;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
const struct ath_desc *ds0 = &bf->bf_desc[0];
int final_rix, short_tries, long_tries, frame_size;
int final_rix, short_tries, long_tries;
const HAL_RATE_TABLE *rt = sc->sc_currates;
int status = ts->ts_status;
int mrr;
final_rix = rt->rateCodeToIndex[ts->ts_rate];
short_tries = ts->ts_shortretry;
long_tries = ts->ts_longretry + 1;
frame_size = ds0->ds_ctl0 & 0x0fff; /* low-order 12 bits of ds_ctl0 */
if (frame_size == 0) /* NB: should not happen */
frame_size = 1500;
@ -615,63 +891,73 @@ ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
"%s: size %d %s rate/try %d/%d no rates yet",
__func__,
bin_to_size(size_to_bin(frame_size)),
ts->ts_status ? "FAIL" : "OK",
status ? "FAIL" : "OK",
short_tries, long_tries);
return;
}
mrr = sc->sc_mrretry && !(ic->ic_flags & IEEE80211_F_USEPROT);
if (!mrr || ts->ts_finaltsi == 0) {
if (!IS_RATE_DEFINED(sn, final_rix)) {
badrate(ifp, 0, ts->ts_rate, long_tries, ts->ts_status);
badrate(ifp, 0, ts->ts_rate, long_tries, status);
return;
}
/*
* Only one rate was used; optimize work.
*/
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node, "%s: size %d (%d bytes) %s rate/try %d %s/%d/%d",
&an->an_node, "%s: size %d (%d bytes) %s rate/try %d %s/%d/%d nframes/nbad [%d/%d]",
__func__,
bin_to_size(size_to_bin(frame_size)),
frame_size,
ts->ts_status ? "FAIL" : "OK",
dot11rate(rt, final_rix), dot11rate_label(rt, final_rix), short_tries, long_tries);
status ? "FAIL" : "OK",
dot11rate(rt, final_rix), dot11rate_label(rt, final_rix),
short_tries, long_tries, nframes, nbad);
update_stats(sc, an, frame_size,
final_rix, long_tries,
0, 0,
0, 0,
0, 0,
short_tries, long_tries, ts->ts_status);
short_tries, long_tries, status,
nframes, nbad);
update_ewma_stats(sc, an, frame_size,
final_rix, long_tries,
0, 0,
0, 0,
0, 0,
short_tries, long_tries, status,
nframes, nbad);
} else {
int hwrates[4], tries[4], rix[4];
int finalTSIdx = ts->ts_finaltsi;
int i;
/*
* Process intermediate rates that failed.
*/
ath_hal_gettxcompletionrates(sc->sc_ah, ds0, hwrates, tries);
for (i = 0; i < 4; i++) {
rix[i] = rt->rateCodeToIndex[hwrates[i]];
}
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
"%s: size %d (%d bytes) finaltsidx %d tries %d %s rate/try [%d %s/%d %d %s/%d %d %s/%d %d %s/%d]",
"%s: size %d (%d bytes) finaltsidx %d tries %d %s rate/try [%d %s/%d %d %s/%d %d %s/%d %d %s/%d] nframes/nbad [%d/%d]",
__func__,
bin_to_size(size_to_bin(frame_size)),
frame_size,
finalTSIdx,
long_tries,
ts->ts_status ? "FAIL" : "OK",
dot11rate(rt, rix[0]), dot11rate_label(rt, rix[0]), tries[0],
dot11rate(rt, rix[1]), dot11rate_label(rt, rix[1]), tries[1],
dot11rate(rt, rix[2]), dot11rate_label(rt, rix[2]), tries[2],
dot11rate(rt, rix[3]), dot11rate_label(rt, rix[3]), tries[3]);
long_tries,
status ? "FAIL" : "OK",
dot11rate(rt, rc[0].rix),
dot11rate_label(rt, rc[0].rix), rc[0].tries,
dot11rate(rt, rc[1].rix),
dot11rate_label(rt, rc[1].rix), rc[1].tries,
dot11rate(rt, rc[2].rix),
dot11rate_label(rt, rc[2].rix), rc[2].tries,
dot11rate(rt, rc[3].rix),
dot11rate_label(rt, rc[3].rix), rc[3].tries,
nframes, nbad);
for (i = 0; i < 4; i++) {
if (tries[i] && !IS_RATE_DEFINED(sn, rix[i]))
badrate(ifp, 0, hwrates[i], tries[i], ts->ts_status);
if (rc[i].tries && !IS_RATE_DEFINED(sn, rc[i].rix))
badrate(ifp, 0, rc[i].ratecode, rc[i].tries,
status);
}
/*
@ -681,48 +967,62 @@ ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
* sample higher rates 1 try at a time doing so
* may unfairly penalize them.
*/
if (tries[0]) {
update_stats(sc, an, frame_size,
rix[0], tries[0],
rix[1], tries[1],
rix[2], tries[2],
rix[3], tries[3],
short_tries, long_tries,
long_tries > tries[0]);
long_tries -= tries[0];
if (rc[0].tries) {
update_stats(sc, an, frame_size,
rc[0].rix, rc[0].tries,
rc[1].rix, rc[1].tries,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
short_tries, long_tries,
long_tries > rc[0].tries,
nframes, nbad);
long_tries -= rc[0].tries;
}
if (tries[1] && finalTSIdx > 0) {
update_stats(sc, an, frame_size,
rix[1], tries[1],
rix[2], tries[2],
rix[3], tries[3],
0, 0,
short_tries, long_tries,
ts->ts_status);
long_tries -= tries[1];
if (rc[1].tries && finalTSIdx > 0) {
update_stats(sc, an, frame_size,
rc[1].rix, rc[1].tries,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
0, 0,
short_tries, long_tries,
status,
nframes, nbad);
long_tries -= rc[1].tries;
}
if (tries[2] && finalTSIdx > 1) {
update_stats(sc, an, frame_size,
rix[2], tries[2],
rix[3], tries[3],
if (rc[2].tries && finalTSIdx > 1) {
update_stats(sc, an, frame_size,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
0, 0,
0, 0,
short_tries, long_tries,
ts->ts_status);
long_tries -= tries[2];
short_tries, long_tries,
status,
nframes, nbad);
long_tries -= rc[2].tries;
}
if (tries[3] && finalTSIdx > 2) {
update_stats(sc, an, frame_size,
rix[3], tries[3],
if (rc[3].tries && finalTSIdx > 2) {
update_stats(sc, an, frame_size,
rc[3].rix, rc[3].tries,
0, 0,
0, 0,
0, 0,
short_tries, long_tries,
ts->ts_status);
short_tries, long_tries,
status,
nframes, nbad);
}
update_ewma_stats(sc, an, frame_size,
rc[0].rix, rc[0].tries,
rc[1].rix, rc[1].tries,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
short_tries, long_tries,
long_tries > rc[0].tries,
nframes, nbad);
}
}
@ -844,6 +1144,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
sn->stats[y][rix].total_packets = 0;
sn->stats[y][rix].packets_acked = 0;
sn->stats[y][rix].last_tx = 0;
sn->stats[y][rix].ewma_pct = 0;
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
@ -881,18 +1182,24 @@ sample_stats(void *arg, struct ieee80211_node *ni)
uint32_t mask;
int rix, y;
printf("\n[%s] refcnt %d static_rix %d ratemask 0x%x\n",
printf("\n[%s] refcnt %d static_rix (%d %s) ratemask 0x%x\n",
ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni),
sn->static_rix, sn->ratemask);
dot11rate(rt, sn->static_rix),
dot11rate_label(rt, sn->static_rix),
sn->ratemask);
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
printf("[%4u] cur rix %d (%d %s) since switch: packets %d ticks %u\n",
bin_to_size(y), sn->current_rix[y],
dot11rate(rt, sn->current_rix[y]),
dot11rate_label(rt, sn->current_rix[y]),
sn->packets_since_switch[y], sn->ticks_since_switch[y]);
printf("[%4u] last sample %d cur sample %d packets sent %d\n",
bin_to_size(y), sn->last_sample_rix[y],
sn->current_sample_rix[y], sn->packets_sent[y]);
printf("[%4u] last sample (%d %s) cur sample (%d %s) packets sent %d\n",
bin_to_size(y),
dot11rate(rt, sn->last_sample_rix[y]),
dot11rate_label(rt, sn->last_sample_rix[y]),
dot11rate(rt, sn->current_sample_rix[y]),
dot11rate_label(rt, sn->current_sample_rix[y]),
sn->packets_sent[y]);
printf("[%4u] packets since sample %d sample tt %u\n",
bin_to_size(y), sn->packets_since_sample[y],
sn->sample_tt[y]);
@ -903,13 +1210,16 @@ sample_stats(void *arg, struct ieee80211_node *ni)
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
if (sn->stats[y][rix].total_packets == 0)
continue;
printf("[%2u %s:%4u] %8d:%-8d (%3d%%) T %8d F %4d avg %5u last %u\n",
printf("[%2u %s:%4u] %8ju:%-8ju (%3d%%) (EWMA %3d.%1d%%) T %8ju F %4d avg %5u last %u\n",
dot11rate(rt, rix), dot11rate_label(rt, rix),
bin_to_size(y),
sn->stats[y][rix].total_packets,
sn->stats[y][rix].packets_acked,
(100*sn->stats[y][rix].packets_acked)/sn->stats[y][rix].total_packets,
sn->stats[y][rix].tries,
(uintmax_t) sn->stats[y][rix].total_packets,
(uintmax_t) sn->stats[y][rix].packets_acked,
(int) ((sn->stats[y][rix].packets_acked * 100ULL) /
sn->stats[y][rix].total_packets),
sn->stats[y][rix].ewma_pct / 10,
sn->stats[y][rix].ewma_pct % 10,
(uintmax_t) sn->stats[y][rix].tries,
sn->stats[y][rix].successive_failures,
sn->stats[y][rix].average_tx_time,
ticks - sn->stats[y][rix].last_tx);


@ -51,15 +51,17 @@ struct sample_softc {
int max_successive_failures;
int stale_failure_timeout; /* how long to honor max_successive_failures */
int min_switch; /* min time between rate changes */
int min_good_pct; /* min good percentage for a rate to be considered */
};
#define ATH_SOFTC_SAMPLE(sc) ((struct sample_softc *)sc->sc_rc)
struct rate_stats {
unsigned average_tx_time;
int successive_failures;
int tries;
int total_packets;
int packets_acked;
uint64_t tries;
uint64_t total_packets; /* pkts total since assoc */
uint64_t packets_acked; /* pkts acked since assoc */
int ewma_pct; /* EWMA percentage */
unsigned perfect_tx_time; /* transmit time for 0 retries */
int last_tx;
};

File diff suppressed because it is too large


@ -123,33 +123,44 @@ ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
}
void
ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *first_bf,
u_int qnum, u_int ix, int done)
{
const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
const struct ath_tx_status *ts = &first_bf->bf_last->bf_status.ds_txstat;
const struct ath_buf *bf = first_bf;
struct ath_hal *ah = sc->sc_ah;
const struct ath_desc *ds;
int i;
printf("Q%u[%3u]", qnum, ix);
for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
" %08x %08x %08x %08x %08x %08x\n",
ds, (const struct ath_desc *)bf->bf_daddr + i,
ds->ds_link, ds->ds_data, bf->bf_txflags,
!done ? "" : (ts->ts_status == 0) ? " *" : " !",
ds->ds_ctl0, ds->ds_ctl1,
ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
if (ah->ah_magic == 0x20065416) {
printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
ds->ds_hw[10],ds->ds_hw[11]);
printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
ds->ds_hw[18], ds->ds_hw[19]);
while (bf != NULL) {
for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:%04x%s\n"
" TXF: %04x Seq: %d swtry: %d ADDBAW?: %d DOBAW?: %d\n"
" %08x %08x %08x %08x %08x %08x\n",
ds, (const struct ath_desc *)bf->bf_daddr + i,
ds->ds_link, ds->ds_data, bf->bf_txflags,
!done ? "" : (ts->ts_status == 0) ? " *" : " !",
bf->bf_state.bfs_flags,
bf->bf_state.bfs_seqno,
bf->bf_state.bfs_retries,
bf->bf_state.bfs_addedbaw,
bf->bf_state.bfs_dobaw,
ds->ds_ctl0, ds->ds_ctl1,
ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
if (ah->ah_magic == 0x20065416) {
printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
ds->ds_hw[10],ds->ds_hw[11]);
printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
ds->ds_hw[18], ds->ds_hw[19]);
}
}
printf(" [end]\n");
bf = bf->bf_next;
}
}


@ -178,7 +178,8 @@ ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
* cache slots for TKIP with hardware MIC support.
*/
int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap,
const struct ieee80211_key *k,
struct ieee80211_node *bss)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
@ -212,7 +213,23 @@ ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
} else
hk.kv_type = HAL_CIPHER_CLR;
if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
/*
* XXX TODO: check this:
*
* Group keys on hardware that supports multicast frame
* key search should only be done in adhoc/hostap mode,
* not STA mode.
*
* XXX TODO: what about mesh, tdma?
*/
#if 0
if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS) &&
#else
if (
#endif
(k->wk_flags & IEEE80211_KEY_GROUP) &&
sc->sc_mcastkey) {
/*
* Group keys on hardware that supports multicast frame
* key search use a MAC that is the sender's address with
@ -493,5 +510,5 @@ ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
{
struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
return ath_keyset(sc, k, vap->iv_bss);
return ath_keyset(sc, vap, k, vap->iv_bss);
}


@ -37,7 +37,7 @@ extern int ath_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
extern int ath_key_delete(struct ieee80211vap *, const struct ieee80211_key *);
extern int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
const u_int8_t mac[IEEE80211_ADDR_LEN]);
extern int ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
struct ieee80211_node *bss);
extern int ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap,
const struct ieee80211_key *k, struct ieee80211_node *bss);
#endif


@ -52,7 +52,19 @@ extern int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate);
extern struct ath_buf * ath_getbuf(struct ath_softc *sc);
extern struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc);
extern struct ath_buf * ath_buf_clone(struct ath_softc *sc,
const struct ath_buf *bf);
extern void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf);
extern int ath_reset(struct ifnet *);
extern int ath_reset(struct ifnet *, ATH_RESET_TYPE);
extern void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf,
int fail);
extern void ath_tx_update_ratectrl(struct ath_softc *sc,
struct ieee80211_node *ni, struct ath_rc_series *rc,
struct ath_tx_status *ts, int frmlen, int nframes, int nbad);
extern void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf,
int status);
#endif


@ -78,8 +78,10 @@ struct ath_pci_softc {
static void
ath_pci_setup(device_t dev)
{
#ifdef ATH_PCI_LATENCY_WAR
/* Override the system latency timer */
pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
#endif
/* If a PCI NIC, force wakeup */
#ifdef ATH_PCI_WAKEUP_WAR


@ -263,7 +263,8 @@ ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
if (error || !req->newptr)
return error;
return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
(ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
(ifp->if_drv_flags & IFF_DRV_RUNNING) ?
ath_reset(ifp, ATH_RESET_NOLOSS) : 0;
}
static int
@ -295,7 +296,8 @@ ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
return 0;
if (!ath_hal_setrfkill(ah, rfkill))
return EINVAL;
return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
return (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
ath_reset(ifp, ATH_RESET_FULL) : 0;
}
static int
@ -346,7 +348,7 @@ ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
i = t = 0;
ATH_TXBUF_LOCK(sc);
STAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) {
TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) {
if (bf->bf_flags & ATH_BUF_BUSY) {
printf("Busy: %d\n", t);
i++;
@ -428,7 +430,7 @@ ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
* things in an inconsistent state.
*/
if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
ath_reset(sc->sc_ifp);
ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
return 0;
}
@ -891,4 +893,16 @@ ath_sysctl_hal_attach(struct ath_softc *sc)
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "swba_backoff", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_additional_swba_backoff, 0,
"Atheros HAL additional SWBA backoff time");
sc->sc_ah->ah_config.ah_force_full_reset = 0;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "force_full_reset", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_force_full_reset, 0,
"Force full chip reset rather than a warm reset");
/*
* This is initialised by the driver.
*/
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "serialise_reg_war", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_serialise_reg_war, 0,
"Force register access serialisation");
}
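
Both knobs added above are plain integer sysctls, so they can be flipped at runtime once the driver is attached. A hedged usage sketch via sysctlbyname(3); the OID path is an assumption here (HAL sysctls normally sit under the per-device tree, e.g. dev.ath.0.hal), so verify the exact name with sysctl(8) on the running system.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val = 1;

	/* Assumed OID path; adjust to what the target system exports. */
	if (sysctlbyname("dev.ath.0.hal.force_full_reset",
	    NULL, NULL, &val, sizeof(val)) != 0)
		perror("sysctlbyname");
	return (0);
}
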

File diff suppressed because it is too large


@ -31,7 +31,58 @@
#ifndef __IF_ATH_TX_H__
#define __IF_ATH_TX_H__
/*
* some general macros
*/
#define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1)
/*
* return block-ack bitmap index given sequence and starting sequence
*/
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_RANGE - 1))
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
/*
* How 'busy' to try and keep the hardware txq
*/
#define ATH_AGGR_MIN_QDEPTH 2
/*
* Watermark for scheduling TIDs in order to maximise aggregation.
*
* If hwq_depth is greater than this, don't schedule the TID
* for packet scheduling - the hardware is already busy servicing
* this TID.
*
* If hwq_depth is less than this, schedule the TID for packet
* scheduling in the completion handler.
*/
#define ATH_AGGR_SCHED_HIGH 4
#define ATH_AGGR_SCHED_LOW 2
/*
* return whether a bit at index _n in bitmap _bm is set
* _sz is the size of the bitmap
*/
#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
/* extracting the seqno from buffer seqno */
#define SEQNO(_a) ((_a) >> IEEE80211_SEQ_SEQ_SHIFT)
/*
* Whether the current sequence number is within the
* BAW.
*/
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
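/*
 * Worked example (illustrative values): sequence numbers live in a
 * 12-bit space, so the subtractions in ATH_BA_INDEX and BAW_WITHIN
 * are taken modulo 4096.  With a window starting at _start = 4090
 * and _bawsz = 64:
 *   seqno 4093 -> (4093 - 4090) & 4095 = 3   -> inside the window
 *   seqno 10   -> (10 - 4090) & 4095   = 16  -> inside (wrapped)
 *   seqno 200  -> (200 - 4090) & 4095  = 206 -> outside
 * ATH_BA_INDEX() yields the same offset for indexing the block-ack
 * bitmap, and ATH_BA_ISSET() then tests that bit of the bitmap.
 */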
extern void ath_txq_restart_dma(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_freetx(struct mbuf *m);
extern void ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an);
extern void ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_txfrag_cleanup(struct ath_softc *sc, ath_bufhead *frags,
struct ieee80211_node *ni);
extern int ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
@ -41,4 +92,36 @@ extern int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
extern int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params);
/* software queue stuff */
extern void ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_txq *txq, struct ath_buf *bf);
extern void ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an);
extern void ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid);
extern void ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid);
extern void ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf,
int fail);
extern void ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf,
int fail);
extern void ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *bf);
extern struct ieee80211_tx_ampdu * ath_tx_get_tx_tid(struct ath_node *an,
int tid);
/* TX addba handling */
extern int ath_addba_request(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap, int dialogtoken,
int baparamset, int batimeout);
extern int ath_addba_response(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap, int dialogtoken,
int code, int batimeout);
extern void ath_addba_stop(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap);
extern void ath_bar_response(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap, int status);
extern void ath_addba_response_timeout(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap);
#endif

View File

@ -86,17 +86,357 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#include <dev/ath/if_ath_tx.h> /* XXX for some support functions */
#include <dev/ath/if_ath_tx_ht.h>
#include <dev/ath/if_athrate.h>
#include <dev/ath/if_ath_debug.h>
/*
* XXX net80211?
*/
#define IEEE80211_AMPDU_SUBFRAME_DEFAULT 32
#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
#define ATH_AGGR_ENCRYPTDELIM 10 /* number of delimiters for encryption padding */
/*
* returns delimiter padding required given the packet length
*/
#define ATH_AGGR_GET_NDELIM(_len) \
(((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
(ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
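As a quick illustration of the two macros above, the following standalone program (packet lengths chosen arbitrarily) prints the extra delimiters required for sub-minimum-length frames and the 4-byte alignment padding:

/*
 * Small standalone check of ATH_AGGR_GET_NDELIM() and PADBYTES().
 * Frames shorter than ATH_AGGR_MINPLEN get extra delimiters; PADBYTES
 * rounds a length up to a 4-byte boundary.
 */
#include <stdio.h>

#define ATH_AGGR_DELIM_SZ	4
#define ATH_AGGR_MINPLEN	256
#define ATH_AGGR_GET_NDELIM(_len)					\
	(((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ?		\
	 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
#define PADBYTES(_len)		((4 - ((_len) % 4)) % 4)

int
main(void)
{
	int lens[] = { 60, 100, 255, 300 };
	int i;

	for (i = 0; i < 4; i++)
		printf("len=%3d ndelim=%2d pad=%d\n", lens[i],
		    ATH_AGGR_GET_NDELIM(lens[i]), PADBYTES(lens[i]));
	return (0);
}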
int ath_max_4ms_framelen[4][32] = {
[MCS_HT20] = {
3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
},
[MCS_HT20_SGI] = {
3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
},
[MCS_HT40] = {
6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
},
[MCS_HT40_SGI] = {
7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
}
};
/*
* XXX should be in net80211
*/
static int ieee80211_mpdudensity_map[] = {
0, /* IEEE80211_HTCAP_MPDUDENSITY_NA */
25, /* IEEE80211_HTCAP_MPDUDENSITY_025 */
50, /* IEEE80211_HTCAP_MPDUDENSITY_05 */
100, /* IEEE80211_HTCAP_MPDUDENSITY_1 */
200, /* IEEE80211_HTCAP_MPDUDENSITY_2 */
400, /* IEEE80211_HTCAP_MPDUDENSITY_4 */
800, /* IEEE80211_HTCAP_MPDUDENSITY_8 */
1600, /* IEEE80211_HTCAP_MPDUDENSITY_16 */
};
/*
* XXX should be in the HAL/net80211 ?
*/
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
#define HT_RC_2_MCS(_rc) ((_rc) & 0x7f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) // ns * 4 us
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) // ns * 3.6 us
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
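The symbol-time macros above encode the 4 us long-GI and 3.6 us short-GI HT symbol durations; a standalone check with arbitrary inputs:

/*
 * Quick check of the symbol-time macros above: a long-GI HT symbol is
 * 4 us and a short-GI symbol is 3.6 us, hence the 18/5 scaling instead
 * of a shift by 2.  Inputs are purely illustrative.
 */
#include <stdio.h>

#define SYMBOL_TIME(_ns)		((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)		(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec)	(_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

int
main(void)
{
	printf("10 symbols: %d us long GI, %d us half GI\n",
	    SYMBOL_TIME(10), SYMBOL_TIME_HALFGI(10));
	printf("72 us:      %d symbols long GI, %d symbols half GI\n",
	    NUM_SYMBOLS_PER_USEC(72), NUM_SYMBOLS_PER_USEC_HALFGI(72));
	return (0);
}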
const uint32_t bits_per_symbol[][2] = {
/* 20MHz 40MHz */
{ 26, 54 }, // 0: BPSK
{ 52, 108 }, // 1: QPSK 1/2
{ 78, 162 }, // 2: QPSK 3/4
{ 104, 216 }, // 3: 16-QAM 1/2
{ 156, 324 }, // 4: 16-QAM 3/4
{ 208, 432 }, // 5: 64-QAM 2/3
{ 234, 486 }, // 6: 64-QAM 3/4
{ 260, 540 }, // 7: 64-QAM 5/6
{ 52, 108 }, // 8: BPSK
{ 104, 216 }, // 9: QPSK 1/2
{ 156, 324 }, // 10: QPSK 3/4
{ 208, 432 }, // 11: 16-QAM 1/2
{ 312, 648 }, // 12: 16-QAM 3/4
{ 416, 864 }, // 13: 64-QAM 2/3
{ 468, 972 }, // 14: 64-QAM 3/4
{ 520, 1080 }, // 15: 64-QAM 5/6
{ 78, 162 }, // 16: BPSK
{ 156, 324 }, // 17: QPSK 1/2
{ 234, 486 }, // 18: QPSK 3/4
{ 312, 648 }, // 19: 16-QAM 1/2
{ 468, 972 }, // 20: 16-QAM 3/4
{ 624, 1296 }, // 21: 64-QAM 2/3
{ 702, 1458 }, // 22: 64-QAM 3/4
{ 780, 1620 }, // 23: 64-QAM 5/6
{ 104, 216 }, // 24: BPSK
{ 208, 432 }, // 25: QPSK 1/2
{ 312, 648 }, // 26: QPSK 3/4
{ 416, 864 }, // 27: 16-QAM 1/2
{ 624, 1296 }, // 28: 16-QAM 3/4
{ 832, 1728 }, // 29: 64-QAM 2/3
{ 936, 1944 }, // 30: 64-QAM 3/4
{ 1040, 2160 }, // 31: 64-QAM 5/6
};
/*
* Fill in the rate array information based on the current
* node configuration and the choices made by the rate
* selection code and ath_buf setup code.
*
* Later on, this may also end up being done by the
* rate control code, but for now it can live here.
*
* This needs to be called just before the packet is
* queued to the software queue or hardware queue,
* so all of the needed fields in bf_state are setup.
*/
void
ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_node *ni = bf->bf_node;
struct ieee80211com *ic = ni->ni_ic;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct ath_rc_series *rc = bf->bf_state.bfs_rc;
uint8_t rate;
int i;
for (i = 0; i < ATH_RC_NUM; i++) {
rc[i].flags = 0;
if (rc[i].tries == 0)
continue;
rate = rt->info[rc[i].rix].rateCode;
/*
* XXX only do this for legacy rates?
*/
if (bf->bf_state.bfs_shpream)
rate |= rt->info[rc[i].rix].shortPreamble;
/*
* Save this, used by the TX and completion code
*/
rc[i].ratecode = rate;
if (bf->bf_state.bfs_flags &
(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA))
rc[i].flags |= ATH_RC_RTSCTS_FLAG;
/* Only enable shortgi, 2040, dual-stream if HT is set */
if (IS_HT_RATE(rate)) {
rc[i].flags |= ATH_RC_HT_FLAG;
if (ni->ni_chw == 40)
rc[i].flags |= ATH_RC_CW40_FLAG;
if (ni->ni_chw == 40 &&
ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40 &&
ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
rc[i].flags |= ATH_RC_SGI_FLAG;
if (ni->ni_chw == 20 &&
ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20 &&
ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
rc[i].flags |= ATH_RC_SGI_FLAG;
/* XXX dual stream? and 3-stream? */
}
/*
* Calculate the maximum 4ms frame length based
* on the MCS rate, SGI and channel width flags.
*/
if ((rc[i].flags & ATH_RC_HT_FLAG) &&
(HT_RC_2_MCS(rate) < 32)) {
int j;
if (rc[i].flags & ATH_RC_CW40_FLAG) {
if (rc[i].flags & ATH_RC_SGI_FLAG)
j = MCS_HT40_SGI;
else
j = MCS_HT40;
} else {
if (rc[i].flags & ATH_RC_SGI_FLAG)
j = MCS_HT20_SGI;
else
j = MCS_HT20;
}
rc[i].max4msframelen =
ath_max_4ms_framelen[j][HT_RC_2_MCS(rate)];
} else
rc[i].max4msframelen = 0;
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: i=%d, rate=0x%x, flags=0x%x, max4ms=%d\n",
__func__, i, rate, rc[i].flags, rc[i].max4msframelen);
}
}
/*
* Return the number of delimiters to be added to
* meet the minimum required mpdudensity.
*
* Caller should make sure that the rate is HT.
*
* TODO: is this delimiter calculation supposed to be the
* total frame length, the hdr length, the data length (including
* delimiters, padding, CRC, etc) or ?
*
* TODO: this should ensure that the rate control information
* HAS been setup for the first rate.
*
* TODO: ensure this is only called for MCS rates.
*
* TODO: enforce MCS < 31
*/
static int
ath_compute_num_delims(struct ath_softc *sc, struct ath_buf *first_bf,
uint16_t pktlen)
{
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct ieee80211_node *ni = first_bf->bf_node;
struct ieee80211vap *vap = ni->ni_vap;
int ndelim, mindelim = 0;
int mpdudensity; /* in 1/100'th of a microsecond */
uint8_t rc, rix, flags;
int width, half_gi;
uint32_t nsymbits, nsymbols;
uint16_t minlen;
/*
* vap->iv_ampdu_density is an enumerated density code
* (IEEE80211_HTCAP_MPDUDENSITY_*), rather than the actual density.
*/
if (vap->iv_ampdu_density > IEEE80211_HTCAP_MPDUDENSITY_16)
mpdudensity = 1600; /* maximum density */
else
mpdudensity = ieee80211_mpdudensity_map[vap->iv_ampdu_density];
/* Select standard number of delimiters based on frame length */
ndelim = ATH_AGGR_GET_NDELIM(pktlen);
/*
* If encryption is enabled, add extra delimiters to let the
* crypto hardware catch up. This could be tuned per-MAC and
* per-rate, but for now we'll simply assume encryption is
* always enabled.
*/
ndelim += ATH_AGGR_ENCRYPTDELIM;
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: pktlen=%d, ndelim=%d, mpdudensity=%d\n",
__func__, pktlen, ndelim, mpdudensity);
/*
* If the MPDU density is 0, we can return here.
* Otherwise, we need to convert the desired mpdudensity
* into a byte length, based on the rate in the subframe.
*/
if (mpdudensity == 0)
return ndelim;
/*
* Convert desired mpdu density from microseconds to bytes based
* on highest rate in rate series (i.e. first rate) to determine
* required minimum length for subframe. Take into account
* whether high rate is 20 or 40 MHz and half or full GI.
*/
rix = first_bf->bf_state.bfs_rc[0].rix;
rc = rt->info[rix].rateCode;
flags = first_bf->bf_state.bfs_rc[0].flags;
width = !! (flags & ATH_RC_CW40_FLAG);
half_gi = !! (flags & ATH_RC_SGI_FLAG);
/*
* mpdudensity is in 1/100th of a usec, so divide by 100
*/
if (half_gi)
nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
else
nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
nsymbols /= 100;
if (nsymbols == 0)
nsymbols = 1;
nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
/*
* Min length is the minimum frame length for the
* required MPDU density.
*/
if (pktlen < minlen) {
mindelim = (minlen - pktlen) / ATH_AGGR_DELIM_SZ;
ndelim = MAX(mindelim, ndelim);
}
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: pktlen=%d, minlen=%d, rix=%x, rc=%x, width=%d, hgi=%d, ndelim=%d\n",
__func__, pktlen, minlen, rix, rc, width, half_gi, ndelim);
return ndelim;
}
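A worked run of the density-to-length conversion performed by ath_compute_num_delims() may make the arithmetic clearer. The inputs below (8 us MPDU density, MCS 7 at 20 MHz with long GI, a 48-byte subframe) are illustrative only; the constants are the ones defined earlier in this file.

/*
 * Standalone worked example of the minimum-subframe-length calculation
 * above.  All inputs are illustrative.
 */
#include <stdio.h>

#define BITS_PER_BYTE		8
#define ATH_AGGR_DELIM_SZ	4
#define NUM_SYMBOLS_PER_USEC(_usec)	(_usec >> 2)

int
main(void)
{
	int mpdudensity = 800;	/* 8 us, expressed in 1/100ths of a usec */
	int nsymbits = 260;	/* bits per symbol, MCS 7 at 20 MHz (table above) */
	int pktlen = 48;	/* arbitrary subframe length */
	int nsymbols, minlen, mindelim;

	nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity) / 100;
	if (nsymbols == 0)
		nsymbols = 1;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
	mindelim = (pktlen < minlen) ?
	    (minlen - pktlen) / ATH_AGGR_DELIM_SZ : 0;

	/* Prints: nsymbols=2 minlen=65 mindelim=4 */
	printf("nsymbols=%d minlen=%d mindelim=%d\n",
	    nsymbols, minlen, mindelim);
	return (0);
}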
/*
* Fetch the aggregation limit.
*
* It's the lowest 4ms frame length across the four rate series.
*/
static int
ath_get_aggr_limit(struct ath_softc *sc, struct ath_buf *bf)
{
int amin = 65530;
int i;
for (i = 0; i < 4; i++) {
if (bf->bf_state.bfs_rc[i].tries == 0)
continue;
amin = MIN(amin, bf->bf_state.bfs_rc[i].max4msframelen);
}
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: max frame len= %d\n",
__func__, amin);
return amin;
}
/*
* Setup a 11n rate series structure
*
* This should be called for both legacy and MCS rates.
*
* It, along with ath_buf_set_rate, must be called -after- a burst
* or aggregate is setup.
*/
static void
ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
HAL_11N_RATE_SERIES *series, unsigned int pktlen, uint8_t *rix,
uint8_t *try, int flags)
struct ath_buf *bf, HAL_11N_RATE_SERIES *series)
{
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
struct ieee80211com *ic = ni->ni_ic;
@ -104,18 +444,34 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
HAL_BOOL shortPreamble = AH_FALSE;
const HAL_RATE_TABLE *rt = sc->sc_currates;
int i;
int pktlen;
int flags = bf->bf_state.bfs_flags;
struct ath_rc_series *rc = bf->bf_state.bfs_rc;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE))
shortPreamble = AH_TRUE;
/*
* If this is the first frame in an aggregate series,
* use the aggregate length.
*/
if (bf->bf_state.bfs_aggr)
pktlen = bf->bf_state.bfs_al;
else
pktlen = bf->bf_state.bfs_pktlen;
/*
* XXX TODO: modify this routine to use the bfs_rc[x].flags
* XXX fields.
*/
memset(series, 0, sizeof(HAL_11N_RATE_SERIES) * 4);
for (i = 0; i < 4; i++) {
/* Only set flags for actual TX attempts */
if (try[i] == 0)
if (rc[i].tries == 0)
continue;
series[i].Tries = try[i];
series[i].Tries = rc[i].tries;
/*
* XXX this isn't strictly correct - sc_txchainmask
@ -154,7 +510,7 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
series[i].RateFlags |= HAL_RATESERIES_HALFGI;
series[i].Rate = rt->info[rix[i]].rateCode;
series[i].Rate = rt->info[rc[i].rix].rateCode;
/* PktDuration doesn't include slot, ACK, RTS, etc timing - it's just the packet duration */
if (series[i].Rate & IEEE80211_RATE_MCS) {
@ -166,9 +522,10 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
, series[i].RateFlags & HAL_RATESERIES_HALFGI);
} else {
if (shortPreamble)
series[i].Rate |= rt->info[rix[i]].shortPreamble;
series[i].Rate |=
rt->info[rc[i].rix].shortPreamble;
series[i].PktDuration = ath_hal_computetxtime(ah,
rt, pktlen, rix[i], shortPreamble);
rt, pktlen, rc[i].rix, shortPreamble);
}
}
#undef HT_RC_2_STREAMS
@ -200,25 +557,28 @@ ath_rateseries_print(HAL_11N_RATE_SERIES *series)
*/
void
ath_buf_set_rate(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
int pktlen, int flags, uint8_t ctsrate, int is_pspoll, uint8_t *rix, uint8_t *try)
ath_buf_set_rate(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf)
{
HAL_11N_RATE_SERIES series[4];
struct ath_desc *ds = bf->bf_desc;
struct ath_desc *lastds = NULL;
struct ath_hal *ah = sc->sc_ah;
int is_pspoll = (bf->bf_state.bfs_atype == HAL_PKT_TYPE_PSPOLL);
int ctsrate = bf->bf_state.bfs_ctsrate;
int flags = bf->bf_state.bfs_flags;
/* Setup rate scenario */
memset(&series, 0, sizeof(series));
ath_rateseries_setup(sc, ni, series, pktlen, rix, try, flags);
ath_rateseries_setup(sc, ni, bf, series);
/* Enforce AR5416 aggregate limit - can't do RTS w/ an agg frame > 8k */
/* Enforce RTS and CTS are mutually exclusive */
/* Get a pointer to the last tx descriptor in the list */
lastds = &bf->bf_desc[bf->bf_nseg - 1];
lastds = bf->bf_lastds;
#if 0
printf("pktlen: %d; flags 0x%x\n", pktlen, flags);
@ -238,6 +598,238 @@ ath_buf_set_rate(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf
ath_hal_setuplasttxdesc(ah, lastds, ds);
/* Set burst duration */
/* This should only be done if aggregate protection is enabled */
/*
* This is only required when doing 11n burst, not aggregation
* ie, if there's a second frame in a RIFS or A-MPDU burst
* w/ >1 A-MPDU frame bursting back to back.
* Normal A-MPDU doesn't do bursting -between- aggregates.
*
* .. and it's highly likely this won't ever be implemented
*/
//ath_hal_set11nburstduration(ah, ds, 8192);
}
/*
* Form an aggregate packet list.
*
* This function enforces the aggregate restrictions/requirements.
*
* These are:
*
* + The aggregate size maximum (64k for AR9160 and later, 8K for
* AR5416 when doing RTS frame protection.)
* + Maximum number of sub-frames for an aggregate
* + The aggregate delimiter size, giving MACs time to do whatever is
* needed before each frame
* + Enforce the BAW limit
*
* Each descriptor queued should have the DMA setup.
* The rate series, descriptor setup, linking, etc is all done
* externally. This routine simply chains them together.
* ath_tx_setds_11n() will take care of configuring the per-
* descriptor setup, and ath_buf_set_rate() will configure the
* rate control.
*
* Note that the TID lock is only grabbed when dequeuing packets from
* the TID queue. If some code in another thread adds to the head of this
* list, very strange behaviour will occur. Since retransmission is the
* only reason this will occur, and this routine is designed to be called
* from within the scheduler task, it won't ever clash with the completion
* task.
*
* So if you want to call this from an upper layer context (eg, to direct-
* dispatch aggregate frames to the hardware), please keep this in mind.
*/
ATH_AGGR_STATUS
ath_tx_form_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid,
ath_bufhead *bf_q)
{
//struct ieee80211_node *ni = &an->an_node;
struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
int nframes = 0;
uint16_t aggr_limit = 0, al = 0, bpad = 0, al_delta, h_baw;
struct ieee80211_tx_ampdu *tap;
int status = ATH_AGGR_DONE;
int prev_frames = 0; /* XXX for AR5416 burst, not done here */
int prev_al = 0; /* XXX also for AR5416 burst */
ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
tap = ath_tx_get_tx_tid(an, tid->tid);
if (tap == NULL) {
status = ATH_AGGR_ERROR;
goto finish;
}
h_baw = tap->txa_wnd / 2;
for (;;) {
bf = TAILQ_FIRST(&tid->axq_q);
if (bf_first == NULL)
bf_first = bf;
if (bf == NULL) {
status = ATH_AGGR_DONE;
break;
} else {
/*
* It's the first frame;
* set the aggregation limit based on the
* rate control decision that has been made.
*/
aggr_limit = ath_get_aggr_limit(sc, bf_first);
}
/* Set this early just so things don't get confused */
bf->bf_next = NULL;
/*
* Don't unlock the tid lock until we're sure we are going
* to queue this frame.
*/
/*
* If the frame doesn't have a sequence number that we're
* tracking in the BAW (eg NULL QOS data frame), we can't
* aggregate it. Stop the aggregation process; the sender
* can then TX what's in the list thus far and then
* TX the frame individually.
*/
if (! bf->bf_state.bfs_dobaw) {
status = ATH_AGGR_NONAGGR;
break;
}
/*
* If any of the rates are non-HT, this packet
* can't be aggregated.
* XXX TODO: add a bf_state flag which gets marked
* if any active rate is non-HT.
*/
/*
* If the packet has a sequence number, do not
* step outside of the block-ack window.
*/
if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
SEQNO(bf->bf_state.bfs_seqno))) {
status = ATH_AGGR_BAW_CLOSED;
break;
}
/*
* XXX TODO: AR5416 has an 8K aggregation size limit
* when RTS is enabled, and RTS is required for dual-stream
* rates.
*
* For now, limit all aggregates for the AR5416 to be 8K.
*/
/*
* do not exceed aggregation limit
*/
al_delta = ATH_AGGR_DELIM_SZ + bf->bf_state.bfs_pktlen;
if (nframes &&
(aggr_limit < (al + bpad + al_delta + prev_al))) {
status = ATH_AGGR_LIMITED;
break;
}
/*
* Do not exceed subframe limit.
*/
if ((nframes + prev_frames) >= MIN((h_baw),
IEEE80211_AMPDU_SUBFRAME_DEFAULT)) {
status = ATH_AGGR_LIMITED;
break;
}
/*
* this packet is part of an aggregate.
*/
ATH_TXQ_REMOVE(tid, bf, bf_list);
/* The TID lock is required for the BAW update */
ath_tx_addto_baw(sc, an, tid, bf);
bf->bf_state.bfs_addedbaw = 1;
/*
* XXX TODO: If any frame in the aggregate requires RTS/CTS,
* set the first frame.
*/
/*
* XXX enforce ACK for aggregate frames (this needs to be
* XXX handled more gracefully?)
*/
if (bf->bf_state.bfs_flags & HAL_TXDESC_NOACK) {
device_printf(sc->sc_dev,
"%s: HAL_TXDESC_NOACK set for an aggregate frame?\n",
__func__);
bf->bf_state.bfs_flags &= (~HAL_TXDESC_NOACK);
}
/*
* Add the now owned buffer (which isn't
* on the software TXQ any longer) to our
* aggregate frame list.
*/
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
nframes ++;
/* Completion handler */
bf->bf_comp = ath_tx_aggr_comp;
/*
* add padding for previous frame to aggregation length
*/
al += bpad + al_delta;
/*
* Calculate delimiters needed for the current frame
*/
bf->bf_state.bfs_ndelim =
ath_compute_num_delims(sc, bf_first,
bf->bf_state.bfs_pktlen);
/*
* Calculate the padding needed from this set of delimiters,
* used when calculating if the next frame will fit in
* the aggregate.
*/
bpad = PADBYTES(al_delta) + (bf->bf_state.bfs_ndelim << 2);
/*
* Chain the buffers together
*/
if (bf_prev)
bf_prev->bf_next = bf;
bf_prev = bf;
/*
* XXX TODO: if any sub-frames have RTS/CTS enabled;
* enable it for the entire aggregate.
*/
#if 0
/*
* terminate aggregation on a small packet boundary
*/
if (bf->bf_state.bfs_pktlen < ATH_AGGR_MINPLEN) {
status = ATH_AGGR_SHORTPKT;
break;
}
#endif
}
finish:
/*
* Just in case the list was empty when we tried to
* dequeue a packet ..
*/
if (bf_first) {
bf_first->bf_state.bfs_al = al;
bf_first->bf_state.bfs_nframes = nframes;
}
return status;
}

View File

@ -31,9 +31,32 @@
#ifndef __IF_ATH_TX_HT_H__
#define __IF_ATH_TX_HT_H__
enum {
MCS_HT20,
MCS_HT20_SGI,
MCS_HT40,
MCS_HT40_SGI,
};
typedef enum {
ATH_AGGR_DONE,
ATH_AGGR_BAW_CLOSED,
ATH_AGGR_LIMITED,
ATH_AGGR_SHORTPKT,
ATH_AGGR_8K_LIMITED,
ATH_AGGR_ERROR,
ATH_AGGR_NONAGGR,
} ATH_AGGR_STATUS;
extern int ath_max_4ms_framelen[4][32];
extern void ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf);
extern void ath_buf_set_rate(struct ath_softc *sc,
struct ieee80211_node *ni, struct ath_buf *bf,
int pktlen, int flags, uint8_t ctsrate, int is_pspoll,
uint8_t *rix, uint8_t *try);
struct ieee80211_node *ni, struct ath_buf *bf);
extern ATH_AGGR_STATUS
ath_tx_form_aggr(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, ath_bufhead *bf_q);
#endif

View File

@ -120,7 +120,7 @@ void ath_rate_newassoc(struct ath_softc *, struct ath_node *,
* Return the four TX rate index and try counts for the current data packet.
*/
void ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
uint8_t rix0, uint8_t *rix, uint8_t *try);
uint8_t rix0, struct ath_rc_series *rc);
/*
* Return the transmit info for a data packet. If multi-rate state
@ -142,8 +142,12 @@ void ath_rate_setupxtxdesc(struct ath_softc *, struct ath_node *,
* supplied transmit descriptor. The routine is invoked both
* for packets that were successfully sent and for those that
* failed (consult the descriptor for details).
*
* For A-MPDU frames, nframes and nbad indicate how many frames
* were in the aggregate, and how many failed.
*/
struct ath_buf;
void ath_rate_tx_complete(struct ath_softc *, struct ath_node *,
const struct ath_buf *);
const struct ath_rc_series *, const struct ath_tx_status *,
int pktlen, int nframes, int nbad);
#endif /* _ATH_RATECTRL_H_ */

View File

@ -170,7 +170,7 @@ struct ath_node {
#define ATH_RSSI(x) ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
struct ath_buf {
STAILQ_ENTRY(ath_buf) bf_list;
TAILQ_ENTRY(ath_buf) bf_list;
struct ath_buf * bf_next; /* next buffer in the aggregate */
int bf_nseg;
uint16_t bf_txflags; /* tx descriptor flags */
@ -239,7 +239,7 @@ struct ath_buf {
struct ath_rc_series bfs_rc[ATH_RC_NUM]; /* non-11n TX series */
} bf_state;
};
typedef STAILQ_HEAD(, ath_buf) ath_bufhead;
typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;
#define ATH_BUF_BUSY 0x00000002 /* (tx) desc owned by h/w */
@ -277,9 +277,10 @@ struct ath_txq {
u_int axq_aggr_depth; /* how many aggregates are queued */
u_int axq_intrcnt; /* interrupt count */
u_int32_t *axq_link; /* link ptr in last TX desc */
STAILQ_HEAD(, ath_buf) axq_q; /* transmit queue */
TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */
struct mtx axq_lock; /* lock on q and link */
char axq_name[12]; /* e.g. "ath0_txq4" */
/* Per-TID traffic queue for software -> hardware TX */
TAILQ_HEAD(axq_t_s,ath_tid) axq_tidq;
};
@ -299,18 +300,19 @@ struct ath_txq {
#define ATH_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->axq_lock, MA_OWNED)
#define ATH_TXQ_IS_LOCKED(_tq) mtx_owned(&(_tq)->axq_lock)
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
STAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
#define ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \
TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \
(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_REMOVE_HEAD(_tq, _field) do { \
STAILQ_REMOVE_HEAD(&(_tq)->axq_q, _field); \
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
TAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_REMOVE(_tq, _elm, _field) do { \
TAILQ_REMOVE(&(_tq)->axq_q, _elm, _field); \
(_tq)->axq_depth--; \
} while (0)
/* NB: this does not do the "head empty check" that STAILQ_LAST does */
#define ATH_TXQ_LAST(_tq) \
((struct ath_buf *)(void *) \
((char *)((_tq)->axq_q.stqh_last) - __offsetof(struct ath_buf, bf_list)))
#define ATH_TXQ_LAST(_tq, _field) TAILQ_LAST(&(_tq)->axq_q, _field)
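The STAILQ-to-TAILQ conversion here is what lets ATH_TXQ_REMOVE() unlink a buffer from the middle of the queue and lets ATH_TXQ_LAST() be a plain TAILQ_LAST(); the price is that TAILQ_LAST() needs a named head structure, which is why ath_bufhead gains the ath_bufhead_s tag. A minimal sketch, assuming a BSD-style <sys/queue.h> and a stand-in element type rather than the real struct ath_buf:

/*
 * Minimal TAILQ sketch showing middle removal and TAILQ_LAST(), the two
 * operations the STAILQ-based code could not do directly.
 */
#include <sys/queue.h>
#include <stdio.h>

struct demo_buf {
	int id;
	TAILQ_ENTRY(demo_buf) bf_list;
};
typedef TAILQ_HEAD(demo_bufhead_s, demo_buf) demo_bufhead;

int
main(void)
{
	demo_bufhead q = TAILQ_HEAD_INITIALIZER(q);
	struct demo_buf a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	TAILQ_INSERT_TAIL(&q, &a, bf_list);
	TAILQ_INSERT_TAIL(&q, &b, bf_list);
	TAILQ_INSERT_TAIL(&q, &c, bf_list);

	/* Remove from the middle of the queue. */
	TAILQ_REMOVE(&q, &b, bf_list);

	/* TAILQ_LAST needs the head struct name, hence the _s tag above. */
	printf("last id = %d\n", TAILQ_LAST(&q, demo_bufhead_s)->id);
	return (0);
}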
struct ath_vap {
struct ieee80211vap av_vap; /* base class */
@ -394,7 +396,6 @@ struct ath_softc {
sc_setcca : 1,/* set/clr CCA with TDMA */
sc_resetcal : 1,/* reset cal state next trip */
sc_rxslink : 1,/* do self-linked final descriptor */
sc_kickpcu : 1,/* kick PCU RX on next RX proc */
sc_rxtsf32 : 1;/* RX dec TSF is 32 bits */
uint32_t sc_eerd; /* regdomain from EEPROM */
uint32_t sc_eecc; /* country code from EEPROM */
@ -421,7 +422,19 @@ struct ath_softc {
u_int sc_fftxqmin; /* min frames before staging */
u_int sc_fftxqmax; /* max frames before drop */
u_int sc_txantenna; /* tx antenna (fixed or auto) */
HAL_INT sc_imask; /* interrupt mask copy */
/*
* These are modified in the interrupt handler as well as
* the task queues and other contexts. Thus these must be
* protected by a mutex, or they could clash.
*
* For now, access to these is behind the ATH_LOCK,
* just to save time.
*/
uint32_t sc_txq_active; /* bitmap of active TXQs */
uint32_t sc_kickpcu; /* whether to kick the PCU */
u_int sc_keymax; /* size of key cache */
u_int8_t sc_keymap[ATH_KEYBYTES];/* key use bit map */

View File

@ -254,6 +254,8 @@ mips24k_allocate_pmc(int cpu, int ri, struct pmc *pm,
config |= MIPS24K_PMC_USER_ENABLE;
if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
config |= MIPS24K_PMC_ENABLE;
if (caps & PMC_CAP_INTERRUPT)
config |= MIPS24K_PMC_INTERRUPT_ENABLE;
pm->pm_md.pm_mips24k.pm_mips24k_evsel = config;
@ -404,7 +406,65 @@ mips24k_release_pmc(int cpu, int ri, struct pmc *pmc)
static int
mips24k_intr(int cpu, struct trapframe *tf)
{
return 0;
int error;
int retval, ri;
struct pmc *pm;
struct mips24k_cpu *pc;
uint32_t r, r0, r2;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[mips24k,%d] CPU %d out of range", __LINE__, cpu));
retval = 0;
pc = mips24k_pcpu[cpu];
/* Stop PMCs without clearing the counter */
r0 = mips_rd_perfcnt0();
mips_wr_perfcnt0(r0 & ~(0x1f));
r2 = mips_rd_perfcnt2();
mips_wr_perfcnt2(r2 & ~(0x1f));
for (ri = 0; ri < mips24k_npmcs; ri++) {
pm = mips24k_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
if (pm == NULL)
continue;
if (! PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
continue;
r = mips24k_pmcn_read(ri);
/* If bit 31 is set, the counter has overflowed */
if ((r & 0x80000000) == 0)
continue;
retval = 1;
if (pm->pm_state != PMC_STATE_RUNNING)
continue;
error = pmc_process_interrupt(cpu, pm, tf,
TRAPF_USERMODE(tf));
if (error) {
/* Clear/disable the relevant counter */
if (ri == 0)
r0 = 0;
else if (ri == 1)
r2 = 0;
mips24k_stop_pmc(cpu, ri);
}
/* Reload sampling count */
mips24k_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
}
/*
* Re-enable the PMC counters where they left off.
*
* Any counter which overflowed will have its sample count
* reloaded in the loop above.
*/
mips_wr_perfcnt0(r0);
mips_wr_perfcnt2(r2);
return retval;
}
static int

View File

@ -327,7 +327,7 @@ here in clarifying some of this.
A successful execution of isp_init will lead to the driver 'registering'
itself with this platform's SCSI subsystem. One assumed action for this
is the registry of a function the SCSI subsystem for this platform
is the registry of a function that the SCSI subsystem for this platform
will call when it has a SCSI command to run.
The platform specific module function that receives this will do whatever

View File

@ -4945,7 +4945,9 @@ lmc_ifnet_detach(softc_t *sc)
/* Detach from the ifnet kernel interface. */
if_detach(sc->ifp);
# if (__FreeBSD_version >= 600000)
# if (defined(__FreeBSD__) && __FreeBSD_version >= 800082)
if_free(sc->ifp);
# elif (defined(__FreeBSD__) && __FreeBSD_version >= 600000)
if_free_type(sc->ifp, NSPPP ? IFT_PPP : IFT_OTHER);
# endif
}

View File

@ -932,6 +932,12 @@ mfi_intr(void *arg)
if (sc->mfi_check_clear_intr(sc))
return;
/*
* Do a dummy read to flush the interrupt ACK that we just performed,
* ensuring that everything is really, truly consistent.
*/
(void)sc->mfi_read_fw_status(sc);
pi = sc->mfi_comms->hw_pi;
ci = sc->mfi_comms->hw_ci;
mtx_lock(&sc->mfi_io_lock);

View File

@ -51,6 +51,7 @@ static puc_config_f puc_config_amc;
static puc_config_f puc_config_diva;
static puc_config_f puc_config_exar;
static puc_config_f puc_config_icbook;
static puc_config_f puc_config_moxa;
static puc_config_f puc_config_oxford_pcie;
static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
@ -518,12 +519,25 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_4S, 0x18, 0, 8,
},
{ 0x1393, 0x1042, 0xffff, 0,
"Moxa Technologies, Smartio CP-104JU/PCI",
DEFAULT_RCLK * 8,
PUC_PORT_4S, 0x18, 0, 8,
},
{ 0x1393, 0x1043, 0xffff, 0,
"Moxa Technologies, Smartio CP-104EL/PCIe",
DEFAULT_RCLK * 8,
PUC_PORT_4S, 0x18, 0, 8,
},
{ 0x1393, 0x1045, 0xffff, 0,
"Moxa Technologies, Smartio CP-104EL-A/PCIe",
DEFAULT_RCLK * 8,
PUC_PORT_4S, 0x14, 0, -1,
.config_function = puc_config_moxa
},
{ 0x1393, 0x1120, 0xffff, 0,
"Moxa Technologies, CP-112UL",
DEFAULT_RCLK * 8,
@ -1085,6 +1099,19 @@ puc_config_icbook(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
return (ENXIO);
}
static int
puc_config_moxa(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
intptr_t *res)
{
const struct puc_cfg *cfg = sc->sc_cfg;
if (cmd == PUC_CFG_GET_OFS && cfg->device == 0x1045) {
*res = ((port == 3) ? 7 : port) * 0x200;
return 0;
}
return (ENXIO);
}
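The offset table implied by puc_config_moxa() above is easy to verify in isolation; the ports are 0x200 apart except the fourth, which sits at 7 * 0x200:

/*
 * Standalone illustration of the BAR offset mapping used by
 * puc_config_moxa() for the CP-104EL-A (device id 0x1045).
 */
#include <stdio.h>

int
main(void)
{
	int port;

	for (port = 0; port < 4; port++)
		printf("port %d -> BAR offset 0x%04x\n", port,
		    ((port == 3) ? 7 : port) * 0x200);
	return (0);
}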
static int
puc_config_quatech(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
intptr_t *res)

File diff suppressed because it is too large

View File

@ -887,23 +887,6 @@ struct ti_event_desc {
#define TI_CLRBIT(sc, reg, x) \
CSR_WRITE_4((sc), (reg), (CSR_READ_4((sc), (reg)) & ~(x)))
/*
* Memory management stuff. Note: the SSLOTS, MSLOTS and JSLOTS
* values are tuneable. They control the actual amount of buffers
* allocated for the standard, mini and jumbo receive rings.
*/
#define TI_SSLOTS 256
#define TI_MSLOTS 256
#define TI_JSLOTS 256
#define TI_JRAWLEN (TI_JUMBO_FRAMELEN + ETHER_ALIGN)
#define TI_JLEN (TI_JRAWLEN + (sizeof(uint64_t) - \
(TI_JRAWLEN % sizeof(uint64_t))))
#define TI_JPAGESZ PAGE_SIZE
#define TI_RESID (TI_JPAGESZ - (TI_JLEN * TI_JSLOTS) % TI_JPAGESZ)
#define TI_JMEM ((TI_JLEN * TI_JSLOTS) + TI_RESID)
struct ti_txdesc {
struct mbuf *tx_m;
bus_dmamap_t tx_dmamap;
@ -920,7 +903,7 @@ STAILQ_HEAD(ti_txdq, ti_txdesc);
*/
struct ti_ring_data {
struct ti_rx_desc ti_rx_std_ring[TI_STD_RX_RING_CNT];
#ifdef TI_PRIVATE_JUMBOS
#ifndef TI_SF_BUF_JUMBO
struct ti_rx_desc ti_rx_jumbo_ring[TI_JUMBO_RX_RING_CNT];
#else
struct ti_rx_desc_ext ti_rx_jumbo_ring[TI_JUMBO_RX_RING_CNT];
@ -955,13 +938,13 @@ struct ti_chain_data {
struct ti_txdq ti_txbusyq;
struct mbuf *ti_rx_std_chain[TI_STD_RX_RING_CNT];
bus_dmamap_t ti_rx_std_maps[TI_STD_RX_RING_CNT];
bus_dmamap_t ti_rx_std_sparemap;
struct mbuf *ti_rx_jumbo_chain[TI_JUMBO_RX_RING_CNT];
bus_dmamap_t ti_rx_jumbo_maps[TI_JUMBO_RX_RING_CNT];
bus_dmamap_t ti_rx_jumbo_sparemap;
struct mbuf *ti_rx_mini_chain[TI_MINI_RX_RING_CNT];
bus_dmamap_t ti_rx_mini_maps[TI_MINI_RX_RING_CNT];
/* Stick the jumbo mem management stuff here too. */
caddr_t ti_jslots[TI_JSLOTS];
void *ti_jumbo_buf;
bus_dmamap_t ti_rx_mini_sparemap;
};
struct ti_type {
@ -980,11 +963,6 @@ struct ti_mc_entry {
SLIST_ENTRY(ti_mc_entry) mc_entries;
};
struct ti_jpool_entry {
int slot;
SLIST_ENTRY(ti_jpool_entry) jpool_entries;
};
typedef enum {
TI_FLAG_NONE = 0x00,
TI_FLAG_DEBUGING = 0x01,
@ -1006,7 +984,6 @@ struct ti_softc {
int ti_hdrsplit; /* enable header splitting */
bus_dma_tag_t ti_parent_dmat;
bus_dma_tag_t ti_jumbo_dmat;
bus_dmamap_t ti_jumbo_dmamap;
bus_dma_tag_t ti_mbuftx_dmat;
bus_dma_tag_t ti_mbufrx_dmat;
bus_dma_tag_t ti_rdata_dmat;
@ -1026,8 +1003,6 @@ struct ti_softc {
int ti_mini; /* current mini ring head */
int ti_jumbo; /* current jumbo ring head */
SLIST_HEAD(__ti_mchead, ti_mc_entry) ti_mc_listhead;
SLIST_HEAD(__ti_jfreehead, ti_jpool_entry) ti_jfree_listhead;
SLIST_HEAD(__ti_jinusehead, ti_jpool_entry) ti_jinuse_listhead;
uint32_t ti_stat_ticks;
uint32_t ti_rx_coal_ticks;
uint32_t ti_tx_coal_ticks;

View File

@ -2123,7 +2123,7 @@ tr_handle_get_port_status:
if (sc->sc_flags.status_vbus &&
sc->sc_flags.status_bus_reset) {
/* reset endpoint flags */
bzero(sc->sc_ep_flags, sizeof(sc->sc_ep_flags));
memset(sc->sc_ep_flags, 0, sizeof(sc->sc_ep_flags));
}
}
if (sc->sc_flags.change_suspend) {

View File

@ -415,12 +415,11 @@ repeat:
buf_res.length = count;
}
/* receive data */
bcopy(sc->physdata +
memcpy(buf_res.buffer, sc->physdata +
(AVR32_EPTSTA_CURRENT_BANK(temp) << td->bank_shift) +
(td->ep_no << 16) + (td->offset % td->max_packet_size),
buf_res.buffer, buf_res.length)
(td->ep_no << 16) + (td->offset % td->max_packet_size), buf_res.length);
/* update counters */
count -= buf_res.length;
count -= buf_res.length;
td->offset += buf_res.length;
td->remainder -= buf_res.length;
}
@ -491,12 +490,12 @@ repeat:
buf_res.length = count;
}
/* transmit data */
bcopy(buf_res.buffer, sc->physdata +
memcpy(sc->physdata +
(AVR32_EPTSTA_CURRENT_BANK(temp) << td->bank_shift) +
(td->ep_no << 16) + (td->offset % td->max_packet_size),
buf_res.length)
buf_res.buffer, buf_res.length);
/* update counters */
count -= buf_res.length;
count -= buf_res.length;
td->offset += buf_res.length;
td->remainder -= buf_res.length;
}
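This hunk, and several driver hunks below, replace bcopy() with memcpy(); the conversion is mechanical, but the argument order flips from bcopy(src, dst, len) to memcpy(dst, src, len). A trivial standalone check of the equivalence:

/*
 * bcopy(src, dst, len) and memcpy(dst, src, len) copy the same bytes;
 * only the argument order differs.
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>

int
main(void)
{
	char src[] = "payload";
	char dst1[sizeof(src)], dst2[sizeof(src)];

	bcopy(src, dst1, sizeof(src));		/* legacy: source first */
	memcpy(dst2, src, sizeof(src));		/* standard: destination first */
	printf("%s / %s\n", dst1, dst2);
	return (0);
}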

View File

@ -3369,7 +3369,7 @@ ehci_roothub_exec(struct usb_device *udev,
break;
case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
len = 16;
bzero(sc->sc_hub_desc.temp, 16);
memset(sc->sc_hub_desc.temp, 0, 16);
break;
case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
DPRINTFN(9, "get port status i=%d\n",

View File

@ -2347,7 +2347,7 @@ ohci_roothub_exec(struct usb_device *udev,
case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
len = 16;
bzero(sc->sc_hub_desc.temp, 16);
memset(sc->sc_hub_desc.temp, 0, 16);
break;
case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
DPRINTFN(9, "get port status i=%d\n",

View File

@ -2702,7 +2702,7 @@ uhci_roothub_exec(struct usb_device *udev,
break;
case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
len = 16;
bzero(sc->sc_hub_desc.temp, 16);
memset(sc->sc_hub_desc.temp, 0, 16);
break;
case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
if (index == 1)

View File

@ -2831,7 +2831,7 @@ struct xhci_bos_desc xhci_bosd = {
.bLength = sizeof(xhci_bosd.usb2extd),
.bDescriptorType = 1,
.bDevCapabilityType = 2,
.bmAttributes = 2,
.bmAttributes[0] = 2,
},
.usbdcd = {
.bLength = sizeof(xhci_bosd.usbdcd),
@ -2841,7 +2841,8 @@ struct xhci_bos_desc xhci_bosd = {
HSETW(.wSpeedsSupported, 0x000C),
.bFunctionalitySupport = 8,
.bU1DevExitLat = 255, /* dummy - not used */
.bU2DevExitLat = 255, /* dummy - not used */
.wU2DevExitLat[0] = 0x00,
.wU2DevExitLat[1] = 0x08,
},
.cidd = {
.bLength = sizeof(xhci_bosd.cidd),

View File

@ -380,8 +380,9 @@ kue_setmulti(struct usb_ether *ue)
*/
if (i == KUE_MCFILTCNT(sc))
break;
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
KUE_MCFILT(sc, i), ETHER_ADDR_LEN);
memcpy(KUE_MCFILT(sc, i),
LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
ETHER_ADDR_LEN);
i++;
}
if_maddr_runlock(ifp);

View File

@ -1153,7 +1153,7 @@ uhso_mux_read_callback(struct usb_xfer *xfer, usb_error_t error)
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
bzero(&req, sizeof(struct usb_device_request));
memset(&req, 0, sizeof(struct usb_device_request));
req.bmRequestType = UT_READ_CLASS_INTERFACE;
req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE;
USETW(req.wValue, 0);
@ -1206,7 +1206,7 @@ uhso_mux_write_callback(struct usb_xfer *xfer, usb_error_t error)
usbd_get_page(pc, 0, &res);
bzero(&req, sizeof(struct usb_device_request));
memset(&req, 0, sizeof(struct usb_device_request));
req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND;
USETW(req.wValue, 0);
@ -1731,7 +1731,7 @@ uhso_if_rxflush(void *arg)
* copy the IP-packet into it.
*/
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
bcopy(mtod(m0, uint8_t *), mtod(m, uint8_t *), iplen);
memcpy(mtod(m, uint8_t *), mtod(m0, uint8_t *), iplen);
m->m_pkthdr.len = m->m_len = iplen;
/* Adjust the size of the original mbuf */

View File

@ -438,7 +438,7 @@ ufoma_attach(device_t dev)
goto detach;
}
sc->sc_modetable[0] = (elements + 1);
bcopy(mad->bMode, &sc->sc_modetable[1], elements);
memcpy(&sc->sc_modetable[1], mad->bMode, elements);
sc->sc_currentmode = UMCPC_ACM_MODE_UNLINKED;
sc->sc_modetoactivate = mad->bMode[0];
@ -968,7 +968,7 @@ ufoma_cfg_param(struct ucom_softc *ucom, struct termios *t)
}
DPRINTF("\n");
bzero(&ls, sizeof(ls));
memset(&ls, 0, sizeof(ls));
USETDW(ls.dwDTERate, t->c_ospeed);

View File

@ -560,7 +560,7 @@ static int
uftdi_set_parm_soft(struct termios *t,
struct uftdi_param_config *cfg, uint8_t type)
{
bzero(cfg, sizeof(*cfg));
memset(cfg, 0, sizeof(*cfg));
switch (type) {
case UFTDI_TYPE_SIO:

View File

@ -540,7 +540,7 @@ umodem_cfg_param(struct ucom_softc *ucom, struct termios *t)
DPRINTF("sc=%p\n", sc);
bzero(&ls, sizeof(ls));
memset(&ls, 0, sizeof(ls));
USETDW(ls.dwDTERate, t->c_ospeed);

View File

@ -659,7 +659,7 @@ uplcom_cfg_param(struct ucom_softc *ucom, struct termios *t)
DPRINTF("sc = %p\n", sc);
bzero(&ls, sizeof(ls));
memset(&ls, 0, sizeof(ls));
USETDW(ls.dwDTERate, t->c_ospeed);

View File

@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usb_ioctl.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR uslcom_debug
@ -75,6 +76,7 @@ SYSCTL_INT(_hw_usb_uslcom, OID_AUTO, debug, CTLFLAG_RW,
#define USLCOM_CTRL 0x07
#define USLCOM_RCTRL 0x08
#define USLCOM_SET_FLOWCTRL 0x13
#define USLCOM_VENDOR_SPECIFIC 0xff
/* USLCOM_UART values */
#define USLCOM_UART_DISABLE 0x00
@ -100,20 +102,23 @@ SYSCTL_INT(_hw_usb_uslcom, OID_AUTO, debug, CTLFLAG_RW,
#define USLCOM_PARITY_ODD 0x10
#define USLCOM_PARITY_EVEN 0x20
#define USLCOM_PORT_NO 0xFFFF /* XXX think this should be 0 --hps */
#define USLCOM_PORT_NO 0x0000
/* USLCOM_BREAK values */
#define USLCOM_BREAK_OFF 0x00
#define USLCOM_BREAK_ON 0x01
/* USLCOM_SET_FLOWCTRL values - 1st word */
#define USLCOM_FLOW_DTR_ON 0x00000001
#define USLCOM_FLOW_DTR_ON 0x00000001 /* DTR static active */
#define USLCOM_FLOW_CTS_HS 0x00000008 /* CTS handshake */
#define USLCOM_FLOW_RESERVED 0xFFFFFF80
/* USLCOM_SET_FLOWCTRL values - 2nd word */
#define USLCOM_FLOW_RTS_ON 0x00000040
#define USLCOM_FLOW_RTS_ON 0x00000040 /* RTS static active */
#define USLCOM_FLOW_RTS_HS 0x00000080 /* RTS handshake */
/* USLCOM_VENDOR_SPECIFIC values */
#define USLCOM_WRITE_LATCH 0x37E1
#define USLCOM_READ_LATCH 0x00C2
enum {
USLCOM_BULK_DT_WR,
USLCOM_BULK_DT_RD,
@ -124,6 +129,7 @@ enum {
struct uslcom_softc {
struct ucom_super_softc sc_super_ucom;
struct ucom_softc sc_ucom;
struct usb_callout sc_watchdog;
struct usb_xfer *sc_xfer[USLCOM_N_TRANSFER];
struct usb_device *sc_udev;
@ -146,6 +152,8 @@ static void uslcom_close(struct ucom_softc *);
static void uslcom_set_dtr(struct ucom_softc *, uint8_t);
static void uslcom_set_rts(struct ucom_softc *, uint8_t);
static void uslcom_set_break(struct ucom_softc *, uint8_t);
static int uslcom_ioctl(struct ucom_softc *, uint32_t, caddr_t, int,
struct thread *);
static int uslcom_pre_param(struct ucom_softc *, struct termios *);
static void uslcom_param(struct ucom_softc *, struct termios *);
static void uslcom_get_status(struct ucom_softc *, uint8_t *, uint8_t *);
@ -178,7 +186,6 @@ static const struct usb_config uslcom_config[USLCOM_N_TRANSFER] = {
.type = UE_CONTROL,
.endpoint = 0x00,
.direction = UE_DIR_ANY,
.interval = 150, /* poll status every 150 ms */
.bufsize = sizeof(struct usb_device_request) + 8,
.flags = {.pipe_bof = 1,},
.callback = &uslcom_control_callback,
@ -193,6 +200,7 @@ static struct ucom_callback uslcom_callback = {
.ucom_cfg_set_dtr = &uslcom_set_dtr,
.ucom_cfg_set_rts = &uslcom_set_rts,
.ucom_cfg_set_break = &uslcom_set_break,
.ucom_ioctl = &uslcom_ioctl,
.ucom_cfg_param = &uslcom_param,
.ucom_pre_param = &uslcom_pre_param,
.ucom_start_read = &uslcom_start_read,
@ -309,6 +317,19 @@ MODULE_DEPEND(uslcom, ucom, 1, 1, 1);
MODULE_DEPEND(uslcom, usb, 1, 1, 1);
MODULE_VERSION(uslcom, 1);
static void
uslcom_watchdog(void *arg)
{
struct uslcom_softc *sc = arg;
mtx_assert(&sc->sc_mtx, MA_OWNED);
usbd_transfer_start(sc->sc_xfer[USLCOM_CTRL_DT_RD]);
usb_callout_reset(&sc->sc_watchdog,
hz / 4, &uslcom_watchdog, sc);
}
static int
uslcom_probe(device_t dev)
{
@ -339,6 +360,7 @@ uslcom_attach(device_t dev)
device_set_usb_desc(dev);
mtx_init(&sc->sc_mtx, "uslcom", NULL, MTX_DEF);
usb_callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
sc->sc_udev = uaa->device;
@ -379,6 +401,8 @@ uslcom_detach(device_t dev)
ucom_detach(&sc->sc_super_ucom, &sc->sc_ucom);
usbd_transfer_unsetup(sc->sc_xfer, USLCOM_N_TRANSFER);
usb_callout_drain(&sc->sc_watchdog);
mtx_destroy(&sc->sc_mtx);
return (0);
@ -400,8 +424,9 @@ uslcom_open(struct ucom_softc *ucom)
&req, NULL, 0, 1000)) {
DPRINTF("UART enable failed (ignored)\n");
}
/* Start polling status */
usbd_transfer_start(sc->sc_xfer[USLCOM_CTRL_DT_RD]);
/* start polling status */
uslcom_watchdog(sc);
}
static void
@ -410,8 +435,8 @@ uslcom_close(struct ucom_softc *ucom)
struct uslcom_softc *sc = ucom->sc_parent;
struct usb_device_request req;
/* Stop polling status */
usbd_transfer_stop(sc->sc_xfer[USLCOM_CTRL_DT_RD]);
/* stop polling status */
usb_callout_stop(&sc->sc_watchdog);
req.bmRequestType = USLCOM_WRITE;
req.bRequest = USLCOM_UART;
@ -540,14 +565,12 @@ uslcom_param(struct ucom_softc *ucom, struct termios *t)
}
if (t->c_cflag & CRTSCTS) {
flowctrl[0] = htole32(USLCOM_FLOW_RESERVED |
USLCOM_FLOW_DTR_ON | USLCOM_FLOW_CTS_HS);
flowctrl[0] = htole32(USLCOM_FLOW_DTR_ON | USLCOM_FLOW_CTS_HS);
flowctrl[1] = htole32(USLCOM_FLOW_RTS_HS);
flowctrl[2] = 0;
flowctrl[3] = 0;
} else {
flowctrl[0] = htole32(USLCOM_FLOW_RESERVED |
USLCOM_FLOW_DTR_ON);
flowctrl[0] = htole32(USLCOM_FLOW_DTR_ON);
flowctrl[1] = htole32(USLCOM_FLOW_RTS_ON);
flowctrl[2] = 0;
flowctrl[3] = 0;
@ -594,6 +617,55 @@ uslcom_set_break(struct ucom_softc *ucom, uint8_t onoff)
}
}
static int
uslcom_ioctl(struct ucom_softc *ucom, uint32_t cmd, caddr_t data,
int flag, struct thread *td)
{
struct uslcom_softc *sc = ucom->sc_parent;
struct usb_device_request req;
int error = 0;
uint8_t latch;
DPRINTF("cmd=0x%08x\n", cmd);
switch (cmd) {
case USB_GET_GPIO:
req.bmRequestType = USLCOM_READ;
req.bRequest = USLCOM_VENDOR_SPECIFIC;
USETW(req.wValue, USLCOM_READ_LATCH);
USETW(req.wIndex, 0);
USETW(req.wLength, sizeof(latch));
if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
&req, &latch, 0, 1000)) {
DPRINTF("Get LATCH failed\n");
error = EIO;
}
*(int *)data = latch;
break;
case USB_SET_GPIO:
req.bmRequestType = USLCOM_WRITE;
req.bRequest = USLCOM_VENDOR_SPECIFIC;
USETW(req.wValue, USLCOM_WRITE_LATCH);
USETW(req.wIndex, (*(int *)data));
USETW(req.wLength, 0);
if (ucom_cfg_do_request(sc->sc_udev, &sc->sc_ucom,
&req, NULL, 0, 1000)) {
DPRINTF("Set LATCH failed\n");
error = EIO;
}
break;
default:
DPRINTF("Unknown IOCTL\n");
error = ENOIOCTL;
break;
}
return (error);
}
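The new latch requests are reached through the generic USB_GET_GPIO/USB_SET_GPIO ioctls from <dev/usb/usb_ioctl.h> (hence the new include above). A hedged userland sketch; the tty device node used here is an assumption for illustration, and error handling is minimal:

/*
 * Hedged usage sketch for the GPIO latch ioctls handled by
 * uslcom_ioctl() above.  The device path is an assumption.
 */
#include <sys/ioctl.h>
#include <dev/usb/usb_ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, latch;

	fd = open("/dev/ttyU0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, USB_GET_GPIO, &latch) != 0) {
		perror("USB_GET_GPIO");
		close(fd);
		return (1);
	}
	printf("latch = 0x%02x\n", latch);
	latch ^= 0x01;				/* toggle GPIO 0 */
	if (ioctl(fd, USB_SET_GPIO, &latch) != 0)
		perror("USB_SET_GPIO");
	close(fd);
	return (0);
}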
static void
uslcom_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
@ -684,15 +756,13 @@ uslcom_control_callback(struct usb_xfer *xfer, usb_error_t error)
sc->sc_msr = msr;
ucom_status_change(&sc->sc_ucom);
}
/* FALLTHROUGH */
break;
case USB_ST_SETUP:
tr_setup:
req.bmRequestType = USLCOM_READ;
req.bRequest = USLCOM_RCTRL;
USETW(req.wValue, 0);
USETW(req.wIndex, 0);
USETW(req.wIndex, USLCOM_PORT_NO);
USETW(req.wLength, sizeof(buf));
usbd_xfer_set_frames(xfer, 2);
@ -705,10 +775,8 @@ tr_setup:
break;
default: /* error */
if (error != USB_ERR_CANCELLED) {
if (error != USB_ERR_CANCELLED)
DPRINTF("error=%s\n", usbd_errstr(error));
goto tr_setup;
}
break;
}
}

View File

@ -311,8 +311,9 @@ uvisor_attach(device_t dev)
int error;
DPRINTF("sc=%p\n", sc);
bcopy(uvisor_config, uvisor_config_copy,
memcpy(uvisor_config_copy, uvisor_config,
sizeof(uvisor_config_copy));
device_set_usb_desc(dev);
mtx_init(&sc->sc_mtx, "uvisor", NULL, MTX_DEF);

View File

@ -891,7 +891,7 @@ umass_attach(device_t dev)
int32_t err;
/*
* NOTE: the softc struct is bzero-ed in device_set_driver.
* NOTE: the softc struct is cleared in device_set_driver.
* We can safely call umass_detach without specifically
* initializing the struct.
*/
@ -1305,11 +1305,13 @@ umass_t_bbb_command_callback(struct usb_xfer *xfer, usb_error_t error)
}
sc->cbw.bCDBLength = sc->sc_transfer.cmd_len;
bcopy(sc->sc_transfer.cmd_data, sc->cbw.CBWCDB,
memcpy(sc->cbw.CBWCDB, sc->sc_transfer.cmd_data,
sc->sc_transfer.cmd_len);
bzero(sc->sc_transfer.cmd_data + sc->sc_transfer.cmd_len,
sizeof(sc->cbw.CBWCDB) - sc->sc_transfer.cmd_len);
memset(sc->sc_transfer.cmd_data +
sc->sc_transfer.cmd_len, 0,
sizeof(sc->cbw.CBWCDB) -
sc->sc_transfer.cmd_len);
DIF(UDMASS_BBB, umass_bbb_dump_cbw(sc, &sc->cbw));
@ -1480,9 +1482,9 @@ umass_t_bbb_status_callback(struct usb_xfer *xfer, usb_error_t error)
/* Zero missing parts of the CSW: */
if (actlen < sizeof(sc->csw)) {
bzero(&sc->csw, sizeof(sc->csw));
}
if (actlen < sizeof(sc->csw))
memset(&sc->csw, 0, sizeof(sc->csw));
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, 0, &sc->csw, actlen);
@ -2755,7 +2757,7 @@ umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
if (sc->sc_quirks & NO_TEST_UNIT_READY) {
DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
"to START_UNIT\n");
bzero(sc->sc_transfer.cmd_data, cmd_len);
memset(sc->sc_transfer.cmd_data, 0, cmd_len);
sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
sc->sc_transfer.cmd_data[4] = SSS_START;
return (1);
@ -2768,14 +2770,14 @@ umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
* information.
*/
if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
return (1);
}
break;
}
bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
return (1);
}
@ -2810,10 +2812,11 @@ umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len)
case REQUEST_SENSE:
case PREVENT_ALLOW:
bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
if ((sc->sc_quirks & RBC_PAD_TO_12) && (cmd_len < 12)) {
bzero(sc->sc_transfer.cmd_data + cmd_len, 12 - cmd_len);
memset(sc->sc_transfer.cmd_data + cmd_len,
0, 12 - cmd_len);
cmd_len = 12;
}
sc->sc_transfer.cmd_len = cmd_len;
@ -2841,7 +2844,7 @@ umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
sc->sc_transfer.cmd_len = UFI_COMMAND_LENGTH;
/* Zero the command data */
bzero(sc->sc_transfer.cmd_data, UFI_COMMAND_LENGTH);
memset(sc->sc_transfer.cmd_data, 0, UFI_COMMAND_LENGTH);
switch (cmd_ptr[0]) {
/*
@ -2898,7 +2901,7 @@ umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
return (0); /* failure */
}
bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
return (1); /* success */
}
@ -2919,7 +2922,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
sc->sc_transfer.cmd_len = ATAPI_COMMAND_LENGTH;
/* Zero the command data */
bzero(sc->sc_transfer.cmd_data, ATAPI_COMMAND_LENGTH);
memset(sc->sc_transfer.cmd_data, 0, ATAPI_COMMAND_LENGTH);
switch (cmd_ptr[0]) {
/*
@ -2933,7 +2936,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
* information.
*/
if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
return (1);
@ -2994,7 +2997,7 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
break;
}
bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
return (1); /* success */
}

View File

@ -440,7 +440,7 @@ urio_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
error = EPERM;
goto done;
}
bzero(&ur, sizeof(ur));
memset(&ur, 0, sizeof(ur));
rio_cmd = addr;
ur.ucr_request.bmRequestType =
rio_cmd->requesttype | UT_READ_VENDOR_DEVICE;
@ -451,7 +451,7 @@ urio_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
error = EPERM;
goto done;
}
bzero(&ur, sizeof(ur));
memset(&ur, 0, sizeof(ur));
rio_cmd = addr;
ur.ucr_request.bmRequestType =
rio_cmd->requesttype | UT_WRITE_VENDOR_DEVICE;

View File

@ -355,7 +355,7 @@ ustorage_fs_attach(device_t dev)
int unit;
/*
* NOTE: the softc struct is bzero-ed in device_set_driver.
* NOTE: the softc struct is cleared in device_set_driver.
* We can safely call ustorage_fs_detach without specifically
* initializing the struct.
*/
@ -364,6 +364,9 @@ ustorage_fs_attach(device_t dev)
sc->sc_udev = uaa->device;
unit = device_get_unit(dev);
/* enable power saving mode */
usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE);
if (unit == 0) {
if (ustorage_fs_ramdisk == NULL) {
/*
@ -371,7 +374,9 @@ ustorage_fs_attach(device_t dev)
* further
*/
ustorage_fs_ramdisk =
malloc(USTORAGE_FS_RAM_SECT << 9, M_USB, M_ZERO | M_WAITOK);
malloc(USTORAGE_FS_RAM_SECT << 9, M_USB,
M_ZERO | M_WAITOK);
if (ustorage_fs_ramdisk == NULL) {
return (ENOMEM);
}

View File

@ -913,7 +913,7 @@ usb_hw_ep_resolve(struct usb_device *udev,
}
ues = udev->bus->scratch[0].hw_ep_scratch;
bzero(ues, sizeof(*ues));
memset(ues, 0, sizeof(*ues));
ues->ep_max = ues->ep;
ues->cd = (void *)desc;
@ -1240,7 +1240,7 @@ usb_temp_setup(struct usb_device *udev,
}
uts = udev->bus->scratch[0].temp_setup;
bzero(uts, sizeof(*uts));
memset(uts, 0, sizeof(*uts));
uts->usb_speed = udev->speed;
uts->self_powered = udev->flags.self_powered;

Some files were not shown because too many files have changed in this diff