Merge svn+ssh://svn.freebsd.org/base/head@206063

This commit is contained in:
Marcel Moolenaar 2010-04-02 04:30:21 +00:00
commit 70f89834e6
92 changed files with 8775 additions and 3403 deletions

View File

@ -35,10 +35,10 @@ void
__assert(const char *func, const char *file, int line, const char *expression)
{
if (func == NULL)
printf("Assertion failed: (%s), file %s, line %d.\n",
panic("Assertion failed: (%s), file %s, line %d.\n",
expression, file, line);
else
printf("Assertion failed: (%s), function %s, file %s, line "
"%d.\n", expression, func, file, line);
exit();
panic(
"Assertion failed: (%s), function %s, file %s, line %d.\n",
expression, func, file, line);
}

View File

@ -104,6 +104,18 @@ IDTVEC(timerint)
MEXITCOUNT
jmp doreti
/*
* Local APIC error interrupt handler.
*/
.text
SUPERALIGN_TEXT
IDTVEC(errorint)
PUSH_FRAME
FAKE_MCOUNT(TF_RIP(%rsp))
call lapic_handle_error
MEXITCOUNT
jmp doreti
#ifdef SMP
/*
* Global address space TLB shootdown.

View File

@ -115,14 +115,12 @@ struct lapic {
int la_ioint_irqs[APIC_NUM_IOINTS + 1];
} static lapics[MAX_APIC_ID + 1];
/* XXX: should thermal be an NMI? */
/* Global defaults for local APIC LVT entries. */
static struct lvt lvts[LVT_MAX + 1] = {
{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 }, /* LINT0: masked ExtINT */
{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* LINT1: NMI */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT }, /* Timer */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 }, /* PMC */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT }, /* Thermal */
};
@ -225,7 +223,10 @@ lapic_init(vm_paddr_t addr)
/* Local APIC timer interrupt. */
setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_SYSIGT, SEL_KPL, 0);
/* XXX: error/thermal interrupts */
/* Local APIC error interrupt. */
setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_SYSIGT, SEL_KPL, 0);
/* XXX: Thermal interrupt */
}
/*
@ -278,7 +279,7 @@ lapic_dump(const char* str)
lapic->id, lapic->version, lapic->ldr, lapic->dfr);
printf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x pcm: 0x%08x\n",
printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x pmc: 0x%08x\n",
lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error,
lapic->lvt_pcint);
}
@ -326,7 +327,11 @@ lapic_setup(int boot)
lapic_timer_enable_intr();
}
/* XXX: Error and thermal LVTs */
/* Program error LVT and clear any existing errors. */
lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
lapic->esr = 0;
/* XXX: Thermal LVT */
intr_restore(eflags);
}
@ -725,18 +730,6 @@ lapic_eoi(void)
lapic->eoi = 0;
}
/*
* Read the contents of the error status register. We have to write
* to the register first before reading from it.
*/
u_int
lapic_error(void)
{
lapic->esr = 0;
return (lapic->esr);
}
void
lapic_handle_intr(int vector, struct trapframe *frame)
{
@ -863,6 +856,24 @@ lapic_timer_enable_intr(void)
lapic->lvt_timer = value;
}
void
lapic_handle_error(void)
{
u_int32_t esr;
/*
* Read the contents of the error status register. Write to
* the register first before reading from it to force the APIC
* to update its value to indicate any errors that have
* occurred since the previous write to the register.
*/
lapic->esr = 0;
esr = lapic->esr;
printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
lapic_eoi();
}
u_int
apic_cpuid(u_int apic_id)
{

View File

@ -179,7 +179,8 @@ struct apic_enumerator {
inthand_t
IDTVEC(apic_isr1), IDTVEC(apic_isr2), IDTVEC(apic_isr3),
IDTVEC(apic_isr4), IDTVEC(apic_isr5), IDTVEC(apic_isr6),
IDTVEC(apic_isr7), IDTVEC(spuriousint), IDTVEC(timerint);
IDTVEC(apic_isr7), IDTVEC(errorint), IDTVEC(spuriousint),
IDTVEC(timerint);
extern vm_paddr_t lapic_paddr;
extern int apic_cpuids[];
@ -211,13 +212,13 @@ void lapic_disable_pmc(void);
void lapic_dump(const char *str);
int lapic_enable_pmc(void);
void lapic_eoi(void);
u_int lapic_error(void);
int lapic_id(void);
void lapic_init(vm_paddr_t addr);
int lapic_intr_pending(u_int vector);
void lapic_ipi_raw(register_t icrlo, u_int dest);
void lapic_ipi_vectored(u_int vector, int dest);
int lapic_ipi_wait(int delay);
void lapic_handle_error(void);
void lapic_handle_intr(int vector, struct trapframe *frame);
void lapic_handle_timer(struct trapframe *frame);
void lapic_reenable_pmc(void);

View File

@ -109,7 +109,7 @@ struct dbreg {
#define DBREG_DR7_EXEC 0x00 /* break on execute */
#define DBREG_DR7_WRONLY 0x01 /* break on write */
#define DBREG_DR7_RDWR 0x03 /* break on read or write */
#define DBREG_DR7_MASK(i) ((u_long)0xf << ((i) * 4 + 16) | 0x3 << (i) * 2)
#define DBREG_DR7_MASK(i) (0xful << ((i) * 4 + 16) | 0x3 << (i) * 2)
#define DBREG_DR7_SET(i, len, access, enable) \
((u_long)((len) << 2 | (access)) << ((i) * 4 + 16) | (enable) << (i) * 2)
#define DBREG_DR7_GD 0x2000

View File

@ -24,7 +24,7 @@ include "../at91/std.kb920x"
# The AT91 platform doesn't use /boot/loader, so we have to statically wire
# hints.
hints "KB920X.hints"
makeoptions MODULES_OVERRIDE=""
#makeoptions MODULES_OVERRIDE=""
makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
options DDB

View File

@ -600,7 +600,12 @@ sata_channel_begin_transaction(struct ata_request *request)
crqb->crqb_ata_lba_mid = request->u.ata.lba >> 8;
crqb->crqb_ata_lba_high = request->u.ata.lba >> 16;
crqb->crqb_ata_device = ((request->u.ata.lba >> 24) & 0x0F) | (1 << 6);
crqb->crqb_ata_lba_low_p = request->u.ata.lba >> 24;
crqb->crqb_ata_lba_mid_p = request->u.ata.lba >> 32;
crqb->crqb_ata_lba_high_p = request->u.ata.lba >> 40;
crqb->crqb_ata_feature_p = request->u.ata.feature >> 8;
crqb->crqb_ata_count = request->u.ata.count;
crqb->crqb_ata_count_p = request->u.ata.count >> 8;
bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

View File

@ -2380,6 +2380,7 @@ xpt_action_default(union ccb *start_ccb)
if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
start_ccb->ataio.resid = 0;
}
/* FALLTHROUGH */
case XPT_RESET_DEV:
case XPT_ENG_EXEC:
{
@ -2888,6 +2889,9 @@ xpt_action_default(union ccb *start_ccb)
case XPT_ENG_INQ:
/* XXX Implement */
start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
xpt_done(start_ccb);
}
break;
}
}
@ -3930,7 +3934,7 @@ xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
struct cam_et *target, struct cam_ed *device,
void *async_arg)
{
printf("xpt_dev_async called\n");
printf("%s called\n", __func__);
}
u_int32_t
@ -4919,4 +4923,3 @@ camisr_runqueue(void *V_queue)
(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
}
}

View File

@ -170,6 +170,8 @@ struct scsi_mode_sense_6
#define SMS_PAGE_CODE 0x3F
#define SMS_VENDOR_SPECIFIC_PAGE 0x00
#define SMS_DISCONNECT_RECONNECT_PAGE 0x02
#define SMS_FORMAT_DEVICE_PAGE 0x03
#define SMS_GEOMETRY_PAGE 0x04
#define SMS_CACHE_PAGE 0x08
#define SMS_PERIPHERAL_DEVICE_PAGE 0x09
#define SMS_CONTROL_MODE_PAGE 0x0A

View File

@ -2091,6 +2091,7 @@ device ath_hal # pci/cardbus chip support
options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors
#device ath_ar9160 # AR9160 chips
#device ath_ar9280 # AR9280 chips
#device ath_ar9285 # AR9285 chips
device ath_rate_sample # SampleRate tx rate control for ath
device bwi # Broadcom BCM430* BCM431*
device bwn # Broadcom BCM43xx

View File

@ -912,6 +912,8 @@ dev/eisa/eisa_if.m standard
dev/eisa/eisaconf.c optional eisa
dev/e1000/if_em.c optional em inet \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/if_lem.c optional em inet \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/if_igb.c optional igb inet \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_80003es2lan.c optional em | igb \
@ -2320,7 +2322,7 @@ net/if_ethersubr.c optional ether \
net/if_faith.c optional faith
net/if_fddisubr.c optional fddi
net/if_fwsubr.c optional fwip
net/if_gif.c optional gif
net/if_gif.c optional gif | netgraph_gif
net/if_gre.c optional gre inet
net/if_iso88025subr.c optional token
net/if_lagg.c optional lagg
@ -2483,7 +2485,7 @@ netinet/if_ether.c optional inet ether
netinet/igmp.c optional inet
netinet/in.c optional inet
netinet/ip_carp.c optional inet carp | inet6 carp
netinet/in_gif.c optional gif inet
netinet/in_gif.c optional gif inet | netgraph_gif inet
netinet/ip_gre.c optional gre inet
netinet/ip_id.c optional inet
netinet/in_mcast.c optional inet
@ -2558,7 +2560,7 @@ netinet6/frag6.c optional inet6
netinet6/icmp6.c optional inet6
netinet6/in6.c optional inet6
netinet6/in6_cksum.c optional inet6
netinet6/in6_gif.c optional gif inet6
netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6
netinet6/in6_ifattach.c optional inet6
netinet6/in6_mcast.c optional inet6
netinet6/in6_pcb.c optional inet6

View File

@ -26,6 +26,8 @@
* $FreeBSD$
*/
#include "opt_ata.h"
#if 0
#define ATA_LEGACY_SUPPORT /* Enable obsolete features that break
* some modern devices */

View File

@ -1447,395 +1447,439 @@ static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
0xd803, 0x40aa,
0xd804, 0x401c,
0xd805, 0x401e,
0xd806, 0x2ff4,
0xd807, 0x3dc4,
0xd808, 0x2035,
0xd809, 0x3035,
0xd80a, 0x6524,
0xd80b, 0x2cb2,
0xd80c, 0x3012,
0xd80d, 0x1002,
0xd80e, 0x26e2,
0xd80f, 0x3022,
0xd810, 0x1002,
0xd811, 0x27d2,
0xd812, 0x3022,
0xd806, 0x20c5,
0xd807, 0x3c05,
0xd808, 0x6536,
0xd809, 0x2fe4,
0xd80a, 0x3dc4,
0xd80b, 0x6624,
0xd80c, 0x2ff4,
0xd80d, 0x3dc4,
0xd80e, 0x2035,
0xd80f, 0x30a5,
0xd810, 0x6524,
0xd811, 0x2ca2,
0xd812, 0x3012,
0xd813, 0x1002,
0xd814, 0x2822,
0xd815, 0x3012,
0xd814, 0x27e2,
0xd815, 0x3022,
0xd816, 0x1002,
0xd817, 0x2492,
0xd817, 0x28d2,
0xd818, 0x3022,
0xd819, 0x1002,
0xd81a, 0x2772,
0xd81a, 0x2892,
0xd81b, 0x3012,
0xd81c, 0x1002,
0xd81d, 0x23d2,
0xd81d, 0x24e2,
0xd81e, 0x3022,
0xd81f, 0x1002,
0xd820, 0x22cd,
0xd821, 0x301d,
0xd822, 0x27f2,
0xd823, 0x3022,
0xd824, 0x1002,
0xd825, 0x5553,
0xd826, 0x0307,
0xd827, 0x2522,
0xd828, 0x3022,
0xd829, 0x1002,
0xd82a, 0x2142,
0xd82b, 0x3012,
0xd82c, 0x1002,
0xd82d, 0x4016,
0xd82e, 0x5e63,
0xd82f, 0x0344,
0xd830, 0x2142,
0xd820, 0x27e2,
0xd821, 0x3012,
0xd822, 0x1002,
0xd823, 0x2422,
0xd824, 0x3022,
0xd825, 0x1002,
0xd826, 0x22cd,
0xd827, 0x301d,
0xd828, 0x28f2,
0xd829, 0x3022,
0xd82a, 0x1002,
0xd82b, 0x5553,
0xd82c, 0x0307,
0xd82d, 0x2572,
0xd82e, 0x3022,
0xd82f, 0x1002,
0xd830, 0x21a2,
0xd831, 0x3012,
0xd832, 0x1002,
0xd833, 0x400e,
0xd834, 0x2522,
0xd835, 0x3022,
0xd836, 0x1002,
0xd837, 0x2b52,
0xd838, 0x3012,
0xd839, 0x1002,
0xd83a, 0x2742,
0xd833, 0x4016,
0xd834, 0x5e63,
0xd835, 0x0344,
0xd836, 0x21a2,
0xd837, 0x3012,
0xd838, 0x1002,
0xd839, 0x400e,
0xd83a, 0x2572,
0xd83b, 0x3022,
0xd83c, 0x1002,
0xd83d, 0x25e2,
0xd83e, 0x3022,
0xd83d, 0x2b22,
0xd83e, 0x3012,
0xd83f, 0x1002,
0xd840, 0x2fa4,
0xd841, 0x3dc4,
0xd842, 0x6624,
0xd843, 0x414b,
0xd844, 0x56b3,
0xd845, 0x03c6,
0xd846, 0x866b,
0xd847, 0x400c,
0xd848, 0x2712,
0xd849, 0x3012,
0xd84a, 0x1002,
0xd84b, 0x2c4b,
0xd84c, 0x309b,
0xd84d, 0x56b3,
0xd84e, 0x03c3,
0xd84f, 0x866b,
0xd850, 0x400c,
0xd851, 0x2272,
0xd852, 0x3022,
0xd853, 0x1002,
0xd854, 0x2742,
0xd855, 0x3022,
0xd856, 0x1002,
0xd857, 0x25e2,
0xd858, 0x3022,
0xd859, 0x1002,
0xd85a, 0x2fb4,
0xd85b, 0x3dc4,
0xd85c, 0x6624,
0xd85d, 0x56b3,
0xd85e, 0x03c3,
0xd85f, 0x866b,
0xd860, 0x401c,
0xd861, 0x2c45,
0xd862, 0x3095,
0xd863, 0x5b53,
0xd864, 0x2372,
0xd865, 0x3012,
0xd866, 0x13c2,
0xd867, 0x5cc3,
0xd868, 0x2712,
0xd869, 0x3012,
0xd86a, 0x1312,
0xd86b, 0x2b52,
0xd840, 0x2842,
0xd841, 0x3022,
0xd842, 0x1002,
0xd843, 0x26e2,
0xd844, 0x3022,
0xd845, 0x1002,
0xd846, 0x2fa4,
0xd847, 0x3dc4,
0xd848, 0x6624,
0xd849, 0x2e8b,
0xd84a, 0x303b,
0xd84b, 0x56b3,
0xd84c, 0x03c6,
0xd84d, 0x866b,
0xd84e, 0x400c,
0xd84f, 0x2782,
0xd850, 0x3012,
0xd851, 0x1002,
0xd852, 0x2c4b,
0xd853, 0x309b,
0xd854, 0x56b3,
0xd855, 0x03c3,
0xd856, 0x866b,
0xd857, 0x400c,
0xd858, 0x22a2,
0xd859, 0x3022,
0xd85a, 0x1002,
0xd85b, 0x2842,
0xd85c, 0x3022,
0xd85d, 0x1002,
0xd85e, 0x26e2,
0xd85f, 0x3022,
0xd860, 0x1002,
0xd861, 0x2fb4,
0xd862, 0x3dc4,
0xd863, 0x6624,
0xd864, 0x56b3,
0xd865, 0x03c3,
0xd866, 0x866b,
0xd867, 0x401c,
0xd868, 0x2c45,
0xd869, 0x3095,
0xd86a, 0x5b53,
0xd86b, 0x23d2,
0xd86c, 0x3012,
0xd86d, 0x1002,
0xd86e, 0x2742,
0xd86f, 0x3022,
0xd870, 0x1002,
0xd871, 0x2582,
0xd872, 0x3022,
0xd873, 0x1002,
0xd874, 0x2142,
0xd875, 0x3012,
0xd876, 0x1002,
0xd877, 0x628f,
0xd878, 0x2985,
0xd879, 0x33a5,
0xd87a, 0x25e2,
0xd87b, 0x3022,
0xd87c, 0x1002,
0xd87d, 0x5653,
0xd87e, 0x03d2,
0xd87f, 0x401e,
0xd880, 0x6f72,
0xd881, 0x1002,
0xd882, 0x628f,
0xd883, 0x2304,
0xd884, 0x3c84,
0xd885, 0x6436,
0xd886, 0xdff4,
0xd887, 0x6436,
0xd888, 0x2ff5,
0xd889, 0x3005,
0xd88a, 0x8656,
0xd88b, 0xdfba,
0xd88c, 0x56a3,
0xd88d, 0xd05a,
0xd88e, 0x2972,
0xd88f, 0x3012,
0xd890, 0x1392,
0xd891, 0xd05a,
0xd892, 0x56a3,
0xd893, 0xdfba,
0xd894, 0x0383,
0xd895, 0x6f72,
0xd896, 0x1002,
0xd897, 0x2b45,
0xd898, 0x3005,
0xd899, 0x4178,
0xd89a, 0x5653,
0xd89b, 0x0384,
0xd89c, 0x2a62,
0xd89d, 0x3012,
0xd89e, 0x1002,
0xd89f, 0x2f05,
0xd8a0, 0x3005,
0xd8a1, 0x41c8,
0xd8a2, 0x5653,
0xd8a3, 0x0382,
0xd8a4, 0x0002,
0xd8a5, 0x4218,
0xd8a6, 0x2474,
0xd8a7, 0x3c84,
0xd8a8, 0x6437,
0xd8a9, 0xdff4,
0xd8aa, 0x6437,
0xd8ab, 0x2ff5,
0xd8ac, 0x3c05,
0xd8ad, 0x8757,
0xd8ae, 0xb888,
0xd8af, 0x9787,
0xd8b0, 0xdff4,
0xd8b1, 0x6724,
0xd8b2, 0x866a,
0xd8b3, 0x6f72,
0xd8b4, 0x1002,
0xd8b5, 0x2641,
0xd8b6, 0x3021,
0xd8b7, 0x1001,
0xd8b8, 0xc620,
0xd8b9, 0x0000,
0xd8ba, 0xc621,
0xd8bb, 0x0000,
0xd8bc, 0xc622,
0xd8bd, 0x00ce,
0xd8be, 0xc623,
0xd8bf, 0x007f,
0xd8c0, 0xc624,
0xd8c1, 0x0032,
0xd8c2, 0xc625,
0xd8c3, 0x0000,
0xd8c4, 0xc627,
0xd8c5, 0x0000,
0xd8c6, 0xc628,
0xd8c7, 0x0000,
0xd8c8, 0xc62c,
0xd86d, 0x13c2,
0xd86e, 0x5cc3,
0xd86f, 0x2782,
0xd870, 0x3012,
0xd871, 0x1312,
0xd872, 0x2b22,
0xd873, 0x3012,
0xd874, 0x1002,
0xd875, 0x2842,
0xd876, 0x3022,
0xd877, 0x1002,
0xd878, 0x2622,
0xd879, 0x3022,
0xd87a, 0x1002,
0xd87b, 0x21a2,
0xd87c, 0x3012,
0xd87d, 0x1002,
0xd87e, 0x628f,
0xd87f, 0x2985,
0xd880, 0x33a5,
0xd881, 0x26e2,
0xd882, 0x3022,
0xd883, 0x1002,
0xd884, 0x5653,
0xd885, 0x03d2,
0xd886, 0x401e,
0xd887, 0x6f72,
0xd888, 0x1002,
0xd889, 0x628f,
0xd88a, 0x2304,
0xd88b, 0x3c84,
0xd88c, 0x6436,
0xd88d, 0xdff4,
0xd88e, 0x6436,
0xd88f, 0x2ff5,
0xd890, 0x3005,
0xd891, 0x8656,
0xd892, 0xdfba,
0xd893, 0x56a3,
0xd894, 0xd05a,
0xd895, 0x29e2,
0xd896, 0x3012,
0xd897, 0x1392,
0xd898, 0xd05a,
0xd899, 0x56a3,
0xd89a, 0xdfba,
0xd89b, 0x0383,
0xd89c, 0x6f72,
0xd89d, 0x1002,
0xd89e, 0x2a64,
0xd89f, 0x3014,
0xd8a0, 0x2005,
0xd8a1, 0x3d75,
0xd8a2, 0xc451,
0xd8a3, 0x29a2,
0xd8a4, 0x3022,
0xd8a5, 0x1002,
0xd8a6, 0x178c,
0xd8a7, 0x1898,
0xd8a8, 0x19a4,
0xd8a9, 0x1ab0,
0xd8aa, 0x1bbc,
0xd8ab, 0x1cc8,
0xd8ac, 0x1dd3,
0xd8ad, 0x1ede,
0xd8ae, 0x1fe9,
0xd8af, 0x20f4,
0xd8b0, 0x21ff,
0xd8b1, 0x0000,
0xd8b2, 0x2741,
0xd8b3, 0x3021,
0xd8b4, 0x1001,
0xd8b5, 0xc620,
0xd8b6, 0x0000,
0xd8b7, 0xc621,
0xd8b8, 0x0000,
0xd8b9, 0xc622,
0xd8ba, 0x00e2,
0xd8bb, 0xc623,
0xd8bc, 0x007f,
0xd8bd, 0xc624,
0xd8be, 0x00ce,
0xd8bf, 0xc625,
0xd8c0, 0x0000,
0xd8c1, 0xc627,
0xd8c2, 0x0000,
0xd8c3, 0xc628,
0xd8c4, 0x0000,
0xd8c5, 0xc90a,
0xd8c6, 0x3a7c,
0xd8c7, 0xc62c,
0xd8c8, 0x0000,
0xd8c9, 0x0000,
0xd8ca, 0x0000,
0xd8cb, 0x2641,
0xd8cc, 0x3021,
0xd8cd, 0x1001,
0xd8ce, 0xc502,
0xd8cf, 0x53ac,
0xd8d0, 0xc503,
0xd8d1, 0x2cd3,
0xd8d2, 0xc600,
0xd8d3, 0x2a6e,
0xd8d4, 0xc601,
0xd8d5, 0x2a2c,
0xd8d6, 0xc605,
0xd8d7, 0x5557,
0xd8d8, 0xc60c,
0xd8d9, 0x5400,
0xd8da, 0xc710,
0xd8db, 0x0700,
0xd8dc, 0xc711,
0xd8dd, 0x0f06,
0xd8de, 0xc718,
0xd8df, 0x0700,
0xd8e0, 0xc719,
0xd8e1, 0x0f06,
0xd8e2, 0xc720,
0xd8e3, 0x4700,
0xd8e4, 0xc721,
0xd8e5, 0x0f06,
0xd8e6, 0xc728,
0xd8e7, 0x0700,
0xd8e8, 0xc729,
0xd8e9, 0x1207,
0xd8ea, 0xc801,
0xd8eb, 0x7f50,
0xd8ec, 0xc802,
0xd8ed, 0x7760,
0xd8ee, 0xc803,
0xd8ef, 0x7fce,
0xd8f0, 0xc804,
0xd8f1, 0x520e,
0xd8f2, 0xc805,
0xd8f3, 0x5c11,
0xd8f4, 0xc806,
0xd8f5, 0x3c51,
0xd8f6, 0xc807,
0xd8f7, 0x4061,
0xd8f8, 0xc808,
0xd8f9, 0x49c1,
0xd8fa, 0xc809,
0xd8fb, 0x3840,
0xd8fc, 0xc80a,
0xd8fd, 0x0000,
0xd8fe, 0xc821,
0xd8ff, 0x0002,
0xd900, 0xc822,
0xd901, 0x0046,
0xd902, 0xc844,
0xd903, 0x182f,
0xd904, 0xc013,
0xd905, 0xf341,
0xd906, 0xc084,
0xd907, 0x0030,
0xd908, 0xc904,
0xd909, 0x1401,
0xd90a, 0xcb0c,
0xd90b, 0x0004,
0xd90c, 0xcb0e,
0xd90d, 0xa00a,
0xd90e, 0xcb0f,
0xd90f, 0xc0c0,
0xd910, 0xcb10,
0xd911, 0xc0c0,
0xd912, 0xcb11,
0xd913, 0x00a0,
0xd914, 0xcb12,
0xd915, 0x0007,
0xd916, 0xc241,
0xd917, 0xa000,
0xd918, 0xc243,
0xd919, 0x7fe0,
0xd91a, 0xc604,
0xd91b, 0x000e,
0xd91c, 0xc609,
0xd91d, 0x00f5,
0xd91e, 0xc611,
0xd91f, 0x000e,
0xd920, 0xc660,
0xd921, 0x9600,
0xd922, 0xc687,
0xd923, 0x0004,
0xd924, 0xc60a,
0xd925, 0x04f5,
0xd926, 0x0000,
0xd927, 0x2641,
0xd928, 0x3021,
0xd929, 0x1001,
0xd92a, 0xc620,
0xd92b, 0x14e5,
0xd92c, 0xc621,
0xd92d, 0xc53d,
0xd92e, 0xc622,
0xd92f, 0x3cbe,
0xd930, 0xc623,
0xd931, 0x4452,
0xd932, 0xc624,
0xd933, 0xc5c5,
0xd934, 0xc625,
0xd935, 0xe01e,
0xd936, 0xc627,
0xd937, 0x0000,
0xd938, 0xc628,
0xd939, 0x0000,
0xd93a, 0xc62c,
0xd93b, 0x0000,
0xd8ca, 0x2741,
0xd8cb, 0x3021,
0xd8cc, 0x1001,
0xd8cd, 0xc502,
0xd8ce, 0x53ac,
0xd8cf, 0xc503,
0xd8d0, 0x2cd3,
0xd8d1, 0xc600,
0xd8d2, 0x2a6e,
0xd8d3, 0xc601,
0xd8d4, 0x2a2c,
0xd8d5, 0xc605,
0xd8d6, 0x5557,
0xd8d7, 0xc60c,
0xd8d8, 0x5400,
0xd8d9, 0xc710,
0xd8da, 0x0700,
0xd8db, 0xc711,
0xd8dc, 0x0f06,
0xd8dd, 0xc718,
0xd8de, 0x700,
0xd8df, 0xc719,
0xd8e0, 0x0f06,
0xd8e1, 0xc720,
0xd8e2, 0x4700,
0xd8e3, 0xc721,
0xd8e4, 0x0f06,
0xd8e5, 0xc728,
0xd8e6, 0x0700,
0xd8e7, 0xc729,
0xd8e8, 0x1207,
0xd8e9, 0xc801,
0xd8ea, 0x7f50,
0xd8eb, 0xc802,
0xd8ec, 0x7760,
0xd8ed, 0xc803,
0xd8ee, 0x7fce,
0xd8ef, 0xc804,
0xd8f0, 0x520e,
0xd8f1, 0xc805,
0xd8f2, 0x5c11,
0xd8f3, 0xc806,
0xd8f4, 0x3c51,
0xd8f5, 0xc807,
0xd8f6, 0x4061,
0xd8f7, 0xc808,
0xd8f8, 0x49c1,
0xd8f9, 0xc809,
0xd8fa, 0x3840,
0xd8fb, 0xc80a,
0xd8fc, 0x0000,
0xd8fd, 0xc821,
0xd8fe, 0x0002,
0xd8ff, 0xc822,
0xd900, 0x0046,
0xd901, 0xc844,
0xd902, 0x182f,
0xd903, 0xc849,
0xd904, 0x0400,
0xd905, 0xc84a,
0xd906, 0x0002,
0xd907, 0xc013,
0xd908, 0xf341,
0xd909, 0xc084,
0xd90a, 0x0030,
0xd90b, 0xc904,
0xd90c, 0x1401,
0xd90d, 0xcb0c,
0xd90e, 0x0004,
0xd90f, 0xcb0e,
0xd910, 0xa00a,
0xd911, 0xcb0f,
0xd912, 0xc0c0,
0xd913, 0xcb10,
0xd914, 0xc0c0,
0xd915, 0xcb11,
0xd916, 0x00a0,
0xd917, 0xcb12,
0xd918, 0x0007,
0xd919, 0xc241,
0xd91a, 0xa000,
0xd91b, 0xc243,
0xd91c, 0x7fe0,
0xd91d, 0xc604,
0xd91e, 0x000e,
0xd91f, 0xc609,
0xd920, 0x00f5,
0xd921, 0xc611,
0xd922, 0x000e,
0xd923, 0xc660,
0xd924, 0x9600,
0xd925, 0xc687,
0xd926, 0x0004,
0xd927, 0xc60a,
0xd928, 0x04f5,
0xd929, 0x0000,
0xd92a, 0x2741,
0xd92b, 0x3021,
0xd92c, 0x1001,
0xd92d, 0xc620,
0xd92e, 0x14e5,
0xd92f, 0xc621,
0xd930, 0xc53d,
0xd931, 0xc622,
0xd932, 0x3cbe,
0xd933, 0xc623,
0xd934, 0x4452,
0xd935, 0xc624,
0xd936, 0xc5c5,
0xd937, 0xc625,
0xd938, 0xe01e,
0xd939, 0xc627,
0xd93a, 0x0000,
0xd93b, 0xc628,
0xd93c, 0x0000,
0xd93d, 0x2b84,
0xd93e, 0x3c74,
0xd93f, 0x6435,
0xd940, 0xdff4,
0xd941, 0x6435,
0xd942, 0x2806,
0xd943, 0x3006,
0xd944, 0x8565,
0xd945, 0x2b24,
0xd946, 0x3c24,
0xd947, 0x6436,
0xd948, 0x1002,
0xd949, 0x2b24,
0xd94a, 0x3c24,
0xd94b, 0x6436,
0xd94c, 0x4045,
0xd94d, 0x8656,
0xd94e, 0x5663,
0xd94f, 0x0302,
0xd950, 0x401e,
0xd951, 0x1002,
0xd952, 0x2807,
0xd953, 0x31a7,
0xd954, 0x20c4,
0xd955, 0x3c24,
0xd956, 0x6724,
0xd957, 0x1002,
0xd958, 0x2807,
0xd959, 0x3187,
0xd95a, 0x20c4,
0xd95b, 0x3c24,
0xd95c, 0x6724,
0xd95d, 0x1002,
0xd95e, 0x24f4,
0xd95f, 0x3c64,
0xd960, 0x6436,
0xd961, 0xdff4,
0xd962, 0x6436,
0xd963, 0x1002,
0xd964, 0x2006,
0xd965, 0x3d76,
0xd966, 0xc161,
0xd967, 0x6134,
0xd968, 0x6135,
0xd969, 0x5443,
0xd96a, 0x0303,
0xd96b, 0x6524,
0xd96c, 0x00fb,
0xd93d, 0xc62c,
0xd93e, 0x0000,
0xd93f, 0xc90a,
0xd940, 0x3a7c,
0xd941, 0x0000,
0xd942, 0x2b84,
0xd943, 0x3c74,
0xd944, 0x6435,
0xd945, 0xdff4,
0xd946, 0x6435,
0xd947, 0x2806,
0xd948, 0x3006,
0xd949, 0x8565,
0xd94a, 0x2b24,
0xd94b, 0x3c24,
0xd94c, 0x6436,
0xd94d, 0x1002,
0xd94e, 0x2b24,
0xd94f, 0x3c24,
0xd950, 0x6436,
0xd951, 0x4045,
0xd952, 0x8656,
0xd953, 0x5663,
0xd954, 0x0302,
0xd955, 0x401e,
0xd956, 0x1002,
0xd957, 0x2807,
0xd958, 0x31a7,
0xd959, 0x20c4,
0xd95a, 0x3c24,
0xd95b, 0x6724,
0xd95c, 0x2ff7,
0xd95d, 0x30f7,
0xd95e, 0x20c4,
0xd95f, 0x3c04,
0xd960, 0x6724,
0xd961, 0x1002,
0xd962, 0x2807,
0xd963, 0x3187,
0xd964, 0x20c4,
0xd965, 0x3c24,
0xd966, 0x6724,
0xd967, 0x2fe4,
0xd968, 0x3dc4,
0xd969, 0x6437,
0xd96a, 0x20c4,
0xd96b, 0x3c04,
0xd96c, 0x6724,
0xd96d, 0x1002,
0xd96e, 0x20d4,
0xd96f, 0x3c24,
0xd970, 0x2025,
0xd971, 0x3005,
0xd972, 0x6524,
0xd96e, 0x24f4,
0xd96f, 0x3c64,
0xd970, 0x6436,
0xd971, 0xdff4,
0xd972, 0x6436,
0xd973, 0x1002,
0xd974, 0xd019,
0xd975, 0x2104,
0xd976, 0x3c24,
0xd977, 0x2105,
0xd978, 0x3805,
0xd979, 0x6524,
0xd97a, 0xdff4,
0xd97b, 0x4005,
0xd97c, 0x6524,
0xd97d, 0x2e8d,
0xd97e, 0x303d,
0xd97f, 0x2408,
0xd980, 0x35d8,
0xd981, 0x5dd3,
0xd982, 0x0307,
0xd983, 0x8887,
0xd984, 0x63a7,
0xd985, 0x8887,
0xd986, 0x63a7,
0xd987, 0xdffd,
0xd988, 0x00f9,
0xd989, 0x1002,
0xd98a, 0x0000,
0xd974, 0x2006,
0xd975, 0x3d76,
0xd976, 0xc161,
0xd977, 0x6134,
0xd978, 0x6135,
0xd979, 0x5443,
0xd97a, 0x0303,
0xd97b, 0x6524,
0xd97c, 0x00fb,
0xd97d, 0x1002,
0xd97e, 0x20d4,
0xd97f, 0x3c24,
0xd980, 0x2025,
0xd981, 0x3005,
0xd982, 0x6524,
0xd983, 0x1002,
0xd984, 0xd019,
0xd985, 0x2104,
0xd986, 0x3c24,
0xd987, 0x2105,
0xd988, 0x3805,
0xd989, 0x6524,
0xd98a, 0xdff4,
0xd98b, 0x4005,
0xd98c, 0x6524,
0xd98d, 0x2e8d,
0xd98e, 0x303d,
0xd98f, 0x2408,
0xd990, 0x35d8,
0xd991, 0x5dd3,
0xd992, 0x0307,
0xd993, 0x8887,
0xd994, 0x63a7,
0xd995, 0x8887,
0xd996, 0x63a7,
0xd997, 0xdffd,
0xd998, 0x00f9,
0xd999, 0x1002,
0xd99a, 0x866a,
0xd99b, 0x6138,
0xd99c, 0x5883,
0xd99d, 0x2aa2,
0xd99e, 0x3022,
0xd99f, 0x1302,
0xd9a0, 0x2ff7,
0xd9a1, 0x3007,
0xd9a2, 0x8785,
0xd9a3, 0xb887,
0xd9a4, 0x8786,
0xd9a5, 0xb8c6,
0xd9a6, 0x5a53,
0xd9a7, 0x29b2,
0xd9a8, 0x3022,
0xd9a9, 0x13c2,
0xd9aa, 0x2474,
0xd9ab, 0x3c84,
0xd9ac, 0x64d7,
0xd9ad, 0x64d7,
0xd9ae, 0x2ff5,
0xd9af, 0x3c05,
0xd9b0, 0x8757,
0xd9b1, 0xb886,
0xd9b2, 0x9767,
0xd9b3, 0x67c4,
0xd9b4, 0x6f72,
0xd9b5, 0x1002,
0xd9b6, 0x0000,
};
int i, err;
@ -1956,10 +2000,14 @@ static struct reg_val ael2020_reset_regs[] = {
{ MDIO_DEV_PMA_PMD, 0xcd40, 0xffff, 0x0001 },
{ MDIO_DEV_PMA_PMD, 0xca12, 0xffff, 0x0100 },
{ MDIO_DEV_PMA_PMD, 0xca22, 0xffff, 0x0100 },
{ MDIO_DEV_PMA_PMD, 0xca42, 0xffff, 0x0100 },
{ MDIO_DEV_PMA_PMD, 0xff02, 0xffff, 0x0023 },
{ MDIO_DEV_PMA_PMD, 0xff03, 0xffff, 0x0000 },
{ MDIO_DEV_PMA_PMD, 0xff04, 0xffff, 0x0000 },
{ MDIO_DEV_PMA_PMD, 0xc20d, 0xffff, 0x0002 },
/* end */
{ 0, 0, 0, 0 }
};
@ -1987,6 +2035,7 @@ static int ael2020_reset(struct cphy *phy, int wait)
err = set_phy_regs(phy, ael2020_reset_regs);
if (err)
return err;
msleep(100);
/* determine module type and perform appropriate initialization */
err = ael2020_get_module_type(phy, 0);
@ -2091,6 +2140,8 @@ int t3_ael2020_phy_prep(pinfo_t *pinfo, int phy_addr,
err = set_phy_regs(phy, ael2020_reset_regs);
if (err)
return err;
msleep(100);
err = ael2020_get_module_type(phy, 0);
if (err >= 0)
phy->modtype = err;

View File

@ -314,6 +314,7 @@ struct qset_params { /* SGE queue set parameters */
unsigned int rspq_size; /* # of entries in response queue */
unsigned int fl_size; /* # of entries in regular free list */
unsigned int jumbo_size; /* # of entries in jumbo free list */
unsigned int jumbo_buf_size; /* buffer size of jumbo entry */
unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
unsigned int cong_thres; /* FL congestion threshold */
unsigned int vector; /* Interrupt (line or vector) number */

View File

@ -4467,8 +4467,6 @@ int __devinit t3_prep_adapter(adapter_t *adapter,
if (reset && t3_reset_adapter(adapter))
return -1;
t3_sge_prep(adapter, &adapter->params.sge);
if (adapter->params.vpd.mclk) {
struct tp_params *p = &adapter->params.tp;
@ -4497,6 +4495,8 @@ int __devinit t3_prep_adapter(adapter_t *adapter,
t3_mc7_size(&adapter->pmtx) &&
t3_mc7_size(&adapter->cm);
t3_sge_prep(adapter, &adapter->params.sge);
if (is_offload(adapter)) {
adapter->params.mc5.nservers = DEFAULT_NSERVERS;
/* PR 6487. TOE and filtering are mutually exclusive */

View File

@ -141,6 +141,8 @@ enum {
#define JUMBO_Q_SIZE 1024
#define RSPQ_Q_SIZE 1024
#define TX_ETH_Q_SIZE 1024
#define TX_OFLD_Q_SIZE 1024
#define TX_CTRL_Q_SIZE 256
enum { TXQ_ETH = 0,
TXQ_OFLD = 1,
@ -248,7 +250,6 @@ struct sge_txq {
struct callout txq_timer;
struct callout txq_watchdog;
uint64_t txq_coalesced;
uint32_t txq_drops;
uint32_t txq_skipped;
uint32_t txq_enqueued;
uint32_t txq_dump_start;

View File

@ -218,9 +218,9 @@ TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
"update firmware even if up to date");
int cxgb_use_16k_clusters = 1;
int cxgb_use_16k_clusters = -1;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
&cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
/*
@ -378,17 +378,25 @@ upgrade_fw(adapter_t *sc)
{
const struct firmware *fw;
int status;
u32 vers;
if ((fw = firmware_get(FW_FNAME)) == NULL) {
device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
return (ENOENT);
} else
device_printf(sc->dev, "updating firmware on card\n");
device_printf(sc->dev, "installing firmware on card\n");
status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
device_printf(sc->dev, "firmware update returned %s %d\n",
status == 0 ? "success" : "fail", status);
if (status != 0) {
device_printf(sc->dev, "failed to install firmware: %d\n",
status);
} else {
t3_get_fw_version(sc, &vers);
snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
G_FW_VERSION_MICRO(vers));
}
firmware_put(fw, FIRMWARE_UNLOAD);
return (status);
@ -2415,6 +2423,7 @@ cxgb_tick_handler(void *arg, int count)
struct ifnet *ifp = pi->ifp;
struct cmac *mac = &pi->mac;
struct mac_stats *mstats = &mac->stats;
int drops, j;
if (!isset(&sc->open_device_map, pi->port_id))
continue;
@ -2423,34 +2432,20 @@ cxgb_tick_handler(void *arg, int count)
t3_mac_update_stats(mac);
PORT_UNLOCK(pi);
ifp->if_opackets =
mstats->tx_frames_64 +
mstats->tx_frames_65_127 +
mstats->tx_frames_128_255 +
mstats->tx_frames_256_511 +
mstats->tx_frames_512_1023 +
mstats->tx_frames_1024_1518 +
mstats->tx_frames_1519_max;
ifp->if_ipackets =
mstats->rx_frames_64 +
mstats->rx_frames_65_127 +
mstats->rx_frames_128_255 +
mstats->rx_frames_256_511 +
mstats->rx_frames_512_1023 +
mstats->rx_frames_1024_1518 +
mstats->rx_frames_1519_max;
ifp->if_opackets = mstats->tx_frames;
ifp->if_ipackets = mstats->rx_frames;
ifp->if_obytes = mstats->tx_octets;
ifp->if_ibytes = mstats->rx_octets;
ifp->if_omcasts = mstats->tx_mcast_frames;
ifp->if_imcasts = mstats->rx_mcast_frames;
ifp->if_collisions =
mstats->tx_total_collisions;
ifp->if_collisions = mstats->tx_total_collisions;
ifp->if_iqdrops = mstats->rx_cong_drops;
drops = 0;
for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; j++)
drops += sc->sge.qs[j].txq[TXQ_ETH].txq_mr->br_drops;
ifp->if_snd.ifq_drops = drops;
ifp->if_oerrors =
mstats->tx_excess_collisions +
mstats->tx_underrun +
@ -2707,7 +2702,9 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
t->cong_thres = q->cong_thres;
t->qnum = i;
if (sc->flags & USING_MSIX)
if ((sc->flags & FULL_INIT_DONE) == 0)
t->vector = 0;
else if (sc->flags & USING_MSIX)
t->vector = rman_get_start(sc->msix_irq_res[i]);
else
t->vector = rman_get_start(sc->irq_res);

View File

@ -30,6 +30,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@ -117,13 +119,9 @@ SYSCTL_UINT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
* we have an m_ext
*/
static int recycle_enable = 0;
int cxgb_ext_freed = 0;
int cxgb_ext_inited = 0;
int fl_q_size = 0;
int jumbo_q_size = 0;
extern int cxgb_use_16k_clusters;
extern int nmbjumbo4;
extern int nmbjumbop;
extern int nmbjumbo9;
extern int nmbjumbo16;
@ -530,21 +528,30 @@ t3_sge_err_intr_handler(adapter_t *adapter)
void
t3_sge_prep(adapter_t *adap, struct sge_params *p)
{
int i, nqsets;
int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
nqsets = min(SGE_QSETS, mp_ncpus*4);
nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
nqsets *= adap->params.nports;
fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
while (!powerof2(fl_q_size))
fl_q_size--;
use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
is_offload(adap);
#if __FreeBSD_version >= 700111
if (cxgb_use_16k_clusters)
if (use_16k) {
jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
else
jumbo_buf_size = MJUM16BYTES;
} else {
jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
jumbo_buf_size = MJUM9BYTES;
}
#else
jumbo_q_size = min(nmbjumbo4/(3*nqsets), JUMBO_Q_SIZE);
jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
jumbo_buf_size = MJUMPAGESIZE;
#endif
while (!powerof2(jumbo_q_size))
jumbo_q_size--;
@ -553,8 +560,7 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p)
device_printf(adap->dev,
"Insufficient clusters and/or jumbo buffers.\n");
/* XXX Does ETHER_ALIGN need to be accounted for here? */
p->max_pkt_size = adap->sge.qs[0].fl[1].buf_size - sizeof(struct cpl_rx_data);
p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
for (i = 0; i < SGE_QSETS; ++i) {
struct qset_params *q = p->qset + i;
@ -572,9 +578,10 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p)
q->rspq_size = RSPQ_Q_SIZE;
q->fl_size = fl_q_size;
q->jumbo_size = jumbo_q_size;
q->jumbo_buf_size = jumbo_buf_size;
q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
q->txq_size[TXQ_OFLD] = 1024;
q->txq_size[TXQ_CTRL] = 256;
q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
q->cong_thres = 0;
}
}
@ -1636,12 +1643,9 @@ cxgb_start_locked(struct sge_qset *qs)
{
struct mbuf *m_head = NULL;
struct sge_txq *txq = &qs->txq[TXQ_ETH];
int avail, txmax;
int in_use_init = txq->in_use;
struct port_info *pi = qs->port;
struct ifnet *ifp = pi->ifp;
avail = txq->size - txq->in_use - 4;
txmax = min(TX_START_MAX_DESC, avail);
if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
reclaim_completed_tx(qs, 0, TXQ_ETH);
@ -1651,12 +1655,14 @@ cxgb_start_locked(struct sge_qset *qs)
return;
}
TXQ_LOCK_ASSERT(qs);
while ((txq->in_use - in_use_init < txmax) &&
!TXQ_RING_EMPTY(qs) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING) &&
while ((txq->in_use - in_use_init < TX_START_MAX_DESC) &&
!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
pi->link_config.link_ok) {
reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
if (txq->size - txq->in_use <= TX_MAX_DESC)
break;
if ((m_head = cxgb_dequeue(qs)) == NULL)
break;
/*
@ -1695,7 +1701,7 @@ cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
* - there is space in hardware transmit queue
*/
if (check_pkt_coalesce(qs) == 0 &&
!TXQ_RING_NEEDS_ENQUEUE(qs) && avail > 4) {
!TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
if (t3_encap(qs, &m)) {
if (m != NULL &&
(error = drbr_enqueue(ifp, br, m)) != 0)
@ -2003,15 +2009,13 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
int i;
reclaim_completed_tx(q, 0, TXQ_ETH);
for (i = 0; i < SGE_TXQ_PER_SET; i++) {
if (q->txq[i].txq_mr != NULL)
buf_ring_free(q->txq[i].txq_mr, M_DEVBUF);
if (q->txq[i].txq_ifq != NULL) {
ifq_delete(q->txq[i].txq_ifq);
free(q->txq[i].txq_ifq, M_DEVBUF);
}
if (q->txq[TXQ_ETH].txq_mr != NULL)
buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
if (q->txq[TXQ_ETH].txq_ifq != NULL) {
ifq_delete(q->txq[TXQ_ETH].txq_ifq);
free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
}
for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
if (q->fl[i].desc) {
mtx_lock_spin(&sc->sge.reg_lock);
@ -2060,7 +2064,9 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
MTX_DESTROY(&q->rspq.lock);
}
#ifdef INET
tcp_lro_free(&q->lro.ctrl);
#endif
bzero(q, sizeof(*q));
}
@ -2546,25 +2552,22 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
q->port = pi;
for (i = 0; i < SGE_TXQ_PER_SET; i++) {
if ((q->txq[i].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
device_printf(sc->dev, "failed to allocate mbuf ring\n");
goto err;
}
if ((q->txq[i].txq_ifq =
malloc(sizeof(struct ifaltq), M_DEVBUF, M_NOWAIT|M_ZERO))
== NULL) {
device_printf(sc->dev, "failed to allocate ifq\n");
goto err;
}
ifq_init(q->txq[i].txq_ifq, pi->ifp);
callout_init(&q->txq[i].txq_timer, 1);
callout_init(&q->txq[i].txq_watchdog, 1);
q->txq[i].txq_timer.c_cpu = id % mp_ncpus;
q->txq[i].txq_watchdog.c_cpu = id % mp_ncpus;
if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
device_printf(sc->dev, "failed to allocate mbuf ring\n");
goto err;
}
if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
M_NOWAIT | M_ZERO)) == NULL) {
device_printf(sc->dev, "failed to allocate ifq\n");
goto err;
}
ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
init_qset_cntxt(q, id);
q->idx = id;
if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
@ -2629,29 +2632,32 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
q->fl[0].buf_size = MCLBYTES;
q->fl[0].zone = zone_pack;
q->fl[0].type = EXT_PACKET;
#if __FreeBSD_version > 800000
if (cxgb_use_16k_clusters) {
q->fl[1].buf_size = MJUM16BYTES;
if (p->jumbo_buf_size == MJUM16BYTES) {
q->fl[1].zone = zone_jumbo16;
q->fl[1].type = EXT_JUMBO16;
} else {
q->fl[1].buf_size = MJUM9BYTES;
} else if (p->jumbo_buf_size == MJUM9BYTES) {
q->fl[1].zone = zone_jumbo9;
q->fl[1].type = EXT_JUMBO9;
} else if (p->jumbo_buf_size == MJUMPAGESIZE) {
q->fl[1].zone = zone_jumbop;
q->fl[1].type = EXT_JUMBOP;
} else {
KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
ret = EDOOFUS;
goto err;
}
#else
q->fl[1].buf_size = MJUMPAGESIZE;
q->fl[1].zone = zone_jumbop;
q->fl[1].type = EXT_JUMBOP;
#endif
q->fl[1].buf_size = p->jumbo_buf_size;
/* Allocate and setup the lro_ctrl structure */
q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
#ifdef INET
ret = tcp_lro_init(&q->lro.ctrl);
if (ret) {
printf("error %d from tcp_lro_init\n", ret);
goto err;
}
#endif
q->lro.ctrl.ifp = pi->ifp;
mtx_lock_spin(&sc->sge.reg_lock);
@ -3059,8 +3065,11 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
*/
skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro &&
(tcp_lro_rx(lro_ctrl, m, 0) == 0)) {
if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
#ifdef INET
&& (tcp_lro_rx(lro_ctrl, m, 0) == 0)
#endif
) {
/* successfully queue'd for LRO */
} else {
/*
@ -3081,12 +3090,14 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered);
#ifdef INET
/* Flush LRO */
while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
tcp_lro_flush(lro_ctrl, queued);
}
#endif
if (sleeping)
check_ring_db(adap, qs, sleeping);
@ -3588,10 +3599,9 @@ t3_add_configured_sysctls(adapter_t *sc)
CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
0, t3_dump_rspq, "A", "dump of the response queue");
SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "dropped",
CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops,
0, "#tunneled packets dropped");
SYSCTL_ADD_QUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
"#tunneled packets dropped");
SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
0, "#tunneled packets waiting to be sent");

View File

@ -231,7 +231,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
/* Set if manageability features are enabled. */
/* FWSM register */
mac->has_fwsm = TRUE;
/* ARC supported; valid only if manageability features are enabled. */
mac->arc_subsystem_valid =
(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
? TRUE : FALSE;

View File

@ -313,10 +313,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid =
(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
? TRUE : FALSE;
/* Adaptive IFS supported */
mac->adaptive_ifs = TRUE;
@ -357,6 +353,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->ops.set_lan_id = e1000_set_lan_id_single_port;
mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
mac->ops.led_on = e1000_led_on_generic;
/* FWSM register */
mac->has_fwsm = TRUE;
/*
* ARC supported; valid only if manageability features are
* enabled.
*/
mac->arc_subsystem_valid =
(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
? TRUE : FALSE;
break;
case e1000_82574:
case e1000_82583:
@ -367,6 +373,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
default:
mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
mac->ops.led_on = e1000_led_on_generic;
/* FWSM register */
mac->has_fwsm = TRUE;
break;
}
@ -1076,9 +1085,10 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
e1000_enable_tx_pkt_filtering_generic(hw);
/* fall through */
case e1000_82574:
case e1000_82583:
e1000_enable_tx_pkt_filtering_generic(hw);
reg_data = E1000_READ_REG(hw, E1000_GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
E1000_WRITE_REG(hw, E1000_GCR, reg_data);
@ -1364,7 +1374,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
s32 ret_val;
DEBUGFUNC("e1000_setup_copper_link_82571");

View File

@ -289,7 +289,9 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
/* Set if manageability features are enabled. */
/* FWSM register */
mac->has_fwsm = TRUE;
/* ARC supported; valid only if manageability features are enabled. */
mac->arc_subsystem_valid =
(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
? TRUE : FALSE;
@ -1435,13 +1437,12 @@ static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
struct e1000_mac_info *mac = &hw->mac;
if (!(phy->ops.check_reset_block))
return;
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
return;
@ -1646,14 +1647,23 @@ out:
**/
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
u32 dtxswc;
switch (hw->mac.type) {
case e1000_82576:
dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
if (enable)
dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
else
dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
break;
default:
/* Currently no other hardware supports loopback */
break;
}
if (enable)
dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
else
dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
}
/**

View File

@ -49,8 +49,8 @@
* For 82576, there are an additional set of RARs that begin at an offset
* separate from the first set of RARs.
*/
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
#define E1000_RAR_ENTRIES_82580 24
#define E1000_SW_SYNCH_MB 0x00000100
#define E1000_STAT_DEV_RST_SET 0x00100000
@ -425,6 +425,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
#define E1000_VLVF_ARRAY_SIZE 32
#define E1000_VLVF_VLANID_MASK 0x00000FFF
#define E1000_VLVF_POOLSEL_SHIFT 12

View File

@ -314,6 +314,11 @@
#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
/* Receive Control */
#define E1000_RCTL_RST 0x00000001 /* Software reset */
#define E1000_RCTL_EN 0x00000002 /* enable */
@ -418,6 +423,8 @@
* PHYRST_N pin */
#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
* LINK_0 and LINK_1 pins */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
@ -953,6 +960,8 @@
#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
@ -1380,6 +1389,9 @@
#define PCI_HEADER_TYPE_MULTIFUNC 0x80
#define PCIE_LINK_WIDTH_MASK 0x3F0
#define PCIE_LINK_WIDTH_SHIFT 4
#define PCIE_LINK_SPEED_MASK 0x0F
#define PCIE_LINK_SPEED_2500 0x01
#define PCIE_LINK_SPEED_5000 0x02
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#ifndef ETH_ADDR_LEN
@ -1410,7 +1422,7 @@
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
#define I82580_I_PHY_ID 0x015403A0
#define I82580_I_PHY_ID 0x015403A0
#define IGP04E1000_E_PHY_ID 0x02A80391
#define M88_VENDOR 0x0141
@ -1507,6 +1519,7 @@
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000

View File

@ -122,6 +122,7 @@ struct e1000_hw;
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
@ -682,6 +683,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
bool adaptive_ifs;
bool has_fwsm;
bool arc_subsystem_valid;
bool asf_firmware_present;
bool autoneg;

View File

@ -177,6 +177,7 @@ union ich8_hws_flash_regacc {
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
u32 ctrl;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_init_phy_params_pchlan");
@ -199,6 +200,35 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
if ((hw->mac.type == e1000_pchlan) &&
(!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))) {
/*
* The MAC-PHY interconnect may still be in SMBus mode
* after Sx->S0. Toggle the LANPHYPC Value bit to force
* the interconnect to PCIe mode, but only if there is no
* firmware present otherwise firmware will have done it.
*/
ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
usec_delay(10);
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
msec_delay(50);
}
/*
* Reset the PHY before any acccess to it. Doing so, ensures that
* the PHY is in a known good state before we read/write PHY registers.
* The generic reset is sufficient here, because we haven't determined
* the PHY type yet.
*/
ret_val = e1000_phy_hw_reset_generic(hw);
if (ret_val)
goto out;
phy->id = e1000_phy_unknown;
ret_val = e1000_get_phy_id(hw);
if (ret_val)
@ -225,6 +255,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000_phy_sw_reset_generic;
break;
case e1000_phy_82578:
phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
@ -431,8 +462,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->rar_entry_count--;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = TRUE;
/* FWSM register */
mac->has_fwsm = TRUE;
/* ARC subsystem not supported */
mac->arc_subsystem_valid = FALSE;
/* Adaptive IFS supported */
mac->adaptive_ifs = TRUE;
@ -764,6 +797,9 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_reset_block_ich8lan");
if (hw->phy.reset_disable)
return E1000_BLK_PHY_RESET;
fwsm = E1000_READ_REG(hw, E1000_FWSM);
return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
@ -2684,6 +2720,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
DEBUGOUT("Auto Read Done did not complete\n");
}
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
if (hw->mac.type == e1000_pchlan)
hw->phy.ops.read_reg(hw, BM_WUC, &reg);
@ -2857,6 +2894,14 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_STATUS, reg);
}
/*
* work-around descriptor data corruption issue during nfs v2 udp
* traffic, just disable the nfs filtering capability
*/
reg = E1000_READ_REG(hw, E1000_RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
E1000_WRITE_REG(hw, E1000_RFCTL, reg);
return;
}

View File

@ -167,6 +167,9 @@
#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW 0x0400
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
/*
@ -192,7 +195,6 @@
#define E1000_RXDEXT_LINKSEC_ERROR_REPLAY_ERROR 0x40000000
#define E1000_RXDEXT_LINKSEC_ERROR_BAD_SIG 0x60000000
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);

View File

@ -225,17 +225,30 @@ s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
DEBUGFUNC("e1000_get_bus_info_pcie_generic");
bus->type = e1000_bus_type_pci_express;
bus->speed = e1000_bus_speed_2500;
ret_val = e1000_read_pcie_cap_reg(hw,
PCIE_LINK_STATUS,
&pcie_link_status);
if (ret_val)
if (ret_val) {
bus->width = e1000_bus_width_unknown;
else
bus->speed = e1000_bus_speed_unknown;
} else {
switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
case PCIE_LINK_SPEED_2500:
bus->speed = e1000_bus_speed_2500;
break;
case PCIE_LINK_SPEED_5000:
bus->speed = e1000_bus_speed_5000;
break;
default:
bus->speed = e1000_bus_speed_unknown;
break;
}
bus->width = (enum e1000_bus_width)((pcie_link_status &
PCIE_LINK_WIDTH_MASK) >>
PCIE_LINK_WIDTH_SHIFT);
}
mac->ops.set_lan_id(hw);

View File

@ -78,6 +78,12 @@ s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
DEBUGFUNC("e1000_mng_enable_host_if_generic");
if (!(hw->mac.arc_subsystem_valid)) {
DEBUGOUT("ARC subsystem not valid.\n");
ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
goto out;
}
/* Check that the host interface is enabled. */
hicr = E1000_READ_REG(hw, E1000_HICR);
if ((hicr & E1000_HICR_EN) == 0) {
@ -365,7 +371,7 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
if (!(manc & E1000_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {
if (hw->mac.has_fwsm) {
fwsm = E1000_READ_REG(hw, E1000_FWSM);
factps = E1000_READ_REG(hw, E1000_FACTPS);
@ -375,12 +381,23 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
ret_val = TRUE;
goto out;
}
} else {
if ((manc & E1000_MANC_SMBUS_EN) &&
!(manc & E1000_MANC_ASF_EN)) {
} else if ((hw->mac.type == e1000_82574) ||
(hw->mac.type == e1000_82583)) {
u16 data;
factps = E1000_READ_REG(hw, E1000_FACTPS);
e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
if (!(factps & E1000_FACTPS_MNGCG) &&
((data & E1000_NVM_INIT_CTRL2_MNGM) ==
(e1000_mng_mode_pt << 13))) {
ret_val = TRUE;
goto out;
}
} else if ((manc & E1000_MANC_SMBUS_EN) &&
!(manc & E1000_MANC_ASF_EN)) {
ret_val = TRUE;
goto out;
}
out:

View File

@ -3402,9 +3402,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
* e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
* @hw: pointer to the HW structure
*
* Calls the PHY setup function to force speed and duplex. Clears the
* auto-crossover to force MDI manually. Waits for link and returns
* successful if link up is successful, else -E1000_ERR_PHY (-2).
* Calls the PHY setup function to force speed and duplex.
**/
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
@ -3425,23 +3423,6 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
/*
* Clear Auto-Crossover to force MDI manually. 82577 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
if (ret_val)
goto out;
phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
if (ret_val)
goto out;
DEBUGOUT1("I82577_PHY_CTRL_2: %X\n", phy_data);
usec_delay(1);
if (phy->autoneg_wait_to_complete) {

View File

@ -65,7 +65,7 @@
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
#define E1000_SVCR 0x000F0
#define E1000_SVT 0x000F4
#define E1000_SVT 0x000F4
#define E1000_RCTL 0x00100 /* Rx Control - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
@ -282,6 +282,17 @@
#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
/* Virtualization statistical counters */
#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
#define E1000_LSECTXPKTP 0x04308 /* LinkSec Protected Tx Packet Count - OutPktsProtected */
@ -386,6 +397,7 @@
#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
#define E1000_MDPHYA 0x0003C /* PHY address - RW */
#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) /* Mngmt Decision Filters */
#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */

File diff suppressed because it is too large Load Diff

View File

@ -52,7 +52,6 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define EM_MIN_TXD 80
#define EM_MAX_TXD_82543 256
#define EM_MAX_TXD 4096
#define EM_DEFAULT_TXD 1024
@ -70,7 +69,6 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define EM_MIN_RXD 80
#define EM_MAX_RXD_82543 256
#define EM_MAX_RXD 4096
#define EM_DEFAULT_RXD 1024
@ -144,7 +142,6 @@
* transmit descriptors.
*/
#define EM_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
#define EM_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
/*
* This parameter controls whether or not autonegotation is enabled.
@ -182,7 +179,7 @@
#define EM_DEFAULT_PBA 0x00000030
#define EM_SMARTSPEED_DOWNSHIFT 3
#define EM_SMARTSPEED_MAX 15
#define EM_MAX_INTR 10
#define EM_MAX_LOOP 10
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define PCI_ANY_ID (~0U)
@ -191,11 +188,6 @@
#define EM_EEPROM_APME 0x400;
#define EM_82544_APME 0x0004;
/* Code compatilbility between 6 and 7 */
#ifndef ETHER_BPF_MTAP
#define ETHER_BPF_MTAP BPF_MTAP
#endif
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
@ -209,7 +201,6 @@
#define EM_BAR_TYPE(v) ((v) & EM_BAR_TYPE_MASK)
#define EM_BAR_TYPE_MASK 0x00000001
#define EM_BAR_TYPE_MMEM 0x00000000
#define EM_BAR_TYPE_IO 0x00000001
#define EM_BAR_TYPE_FLASH 0x0014
#define EM_BAR_MEM_TYPE(v) ((v) & EM_BAR_MEM_TYPE_MASK)
#define EM_BAR_MEM_TYPE_MASK 0x00000006
@ -237,6 +228,7 @@
#define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */
#define EM_MSIX_MASK 0x01F00000 /* For 82574 use */
#define EM_MSIX_LINK 0x01000000 /* For 82574 use */
#define ETH_ZLEN 60
#define ETH_ADDR_LEN 6
#define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */
@ -249,18 +241,6 @@
*/
#define EM_EIAC 0x000DC
/* Used in for 82547 10Mb Half workaround */
#define EM_PBA_BYTES_SHIFT 0xA
#define EM_TX_HEAD_ADDR_SHIFT 7
#define EM_PBA_TX_MASK 0xFFFF0000
#define EM_FIFO_HDR 0x10
#define EM_82547_PKT_THRESH 0x3e0
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_PORT 319 /* UDP port for the protocol */
/*
* Bus dma allocation structure used by
* e1000_dma_malloc and e1000_dma_free.
@ -282,56 +262,129 @@ struct em_int_delay_info {
int value; /* Current value in usecs */
};
/*
* The transmit ring, one per tx queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
char mtx_name[16];
u32 me;
u32 msix;
u32 ims;
bool watchdog_check;
int watchdog_time;
struct em_dma_alloc txdma;
struct e1000_tx_desc *tx_base;
struct task tx_task;
struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
struct em_buffer *tx_buffers;
volatile u16 tx_avail;
u32 tx_tso; /* last tx was tso */
u16 last_hw_offload;
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
/* Interrupt resources */
bus_dma_tag_t txtag;
void *tag;
struct resource *res;
unsigned long tx_irq;
unsigned long no_desc_avail;
};
/*
* The Receive ring, one per rx queue
*/
struct rx_ring {
struct adapter *adapter;
u32 me;
u32 msix;
u32 ims;
struct mtx rx_mtx;
char mtx_name[16];
u32 payload;
struct task rx_task;
struct taskqueue *tq;
struct e1000_rx_desc *rx_base;
struct em_dma_alloc rxdma;
u32 next_to_refresh;
u32 next_to_check;
struct em_buffer *rx_buffers;
struct mbuf *fmp;
struct mbuf *lmp;
/* Interrupt resources */
void *tag;
struct resource *res;
bus_dma_tag_t rxtag;
bus_dmamap_t rx_sparemap;
/* Soft stats */
unsigned long rx_irq;
unsigned long rx_packets;
unsigned long rx_bytes;
};
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
struct e1000_hw hw;
/* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
struct device *dev;
struct cdev *led_dev;
struct resource *memory;
struct resource *flash;
struct resource *msix;
struct resource *msix_mem;
struct resource *ioport;
int io_rid;
/* 82574 may use 3 int vectors */
struct resource *res[3];
void *tag[3];
int rid[3];
struct resource *res;
void *tag;
u32 linkvec;
u32 ivars;
struct ifmedia media;
struct callout timer;
struct callout tx_fifo_timer;
bool watchdog_check;
int watchdog_time;
int msi;
int msix;
int if_flags;
int max_frame_size;
int min_frame_size;
struct mtx core_mtx;
struct mtx tx_mtx;
struct mtx rx_mtx;
int em_insert_vlan_header;
u32 ims;
bool in_detach;
/* Task for FAST handling */
struct task link_task;
struct task rxtx_task;
struct task rx_task;
struct task tx_task;
struct task que_task;
struct taskqueue *tq; /* private task queue */
#if __FreeBSD_version >= 700029
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u32 num_vlans;
#endif
u16 num_vlans;
u16 num_queues;
/*
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
int num_tx_desc;
u32 txd_cmd;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
int num_rx_desc;
u32 rx_process_limit;
/* Management and WOL features */
u32 wol;
@ -348,96 +401,26 @@ struct adapter {
struct em_int_delay_info rx_int_delay;
struct em_int_delay_info rx_abs_int_delay;
/*
* Transmit definitions
*
* We have an array of num_tx_desc descriptors (handled
* by the controller) paired with an array of tx_buffers
* (at tx_buffer_area).
* The index of the next available descriptor is next_avail_tx_desc.
* The number of remaining tx_desc is num_tx_desc_avail.
*/
struct em_dma_alloc txdma; /* bus_dma glue for tx desc */
struct e1000_tx_desc *tx_desc_base;
uint32_t next_avail_tx_desc;
uint32_t next_tx_to_clean;
volatile uint16_t num_tx_desc_avail;
uint16_t num_tx_desc;
uint16_t last_hw_offload;
uint32_t txd_cmd;
struct em_buffer *tx_buffer_area;
bus_dma_tag_t txtag; /* dma tag for tx */
uint32_t tx_tso; /* last tx was tso */
/*
* Receive definitions
*
* we have an array of num_rx_desc rx_desc (handled by the
* controller), and paired with an array of rx_buffers
* (at rx_buffer_area).
* The next pair to check on receive is at offset next_rx_desc_to_check
*/
struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */
struct e1000_rx_desc *rx_desc_base;
uint32_t next_rx_desc_to_check;
uint32_t rx_buffer_len;
uint16_t num_rx_desc;
int rx_process_limit;
struct em_buffer *rx_buffer_area;
bus_dma_tag_t rxtag;
bus_dmamap_t rx_sparemap;
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
*/
struct mbuf *fmp;
struct mbuf *lmp;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_alloc_failed;
unsigned long mbuf_cluster_failed;
unsigned long no_tx_desc_avail1;
unsigned long no_tx_desc_avail2;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long rx_overruns;
unsigned long rx_irq;
unsigned long tx_irq;
unsigned long watchdog_events;
unsigned long link_irq;
/* 82547 workaround */
uint32_t tx_fifo_size;
uint32_t tx_fifo_head;
uint32_t tx_fifo_head_addr;
uint64_t tx_fifo_reset_cnt;
uint64_t tx_fifo_wrk_cnt;
uint32_t tx_head_addr;
/* For 82544 PCIX Workaround */
boolean_t pcix_82544;
boolean_t in_detach;
#ifdef EM_IEEE1588
/* IEEE 1588 precision time support */
struct cyclecounter cycles;
struct nettimer clock;
struct nettime_compare compare;
struct hwtstamp_ctrl hwtstamp;
#endif
struct e1000_hw_stats stats;
};
/* ******************************************************************************
/********************************************************************************
* vendor_info_array
*
* This array contains the list of Subvendor/Subdevice IDs on which the driver
* should load.
*
* ******************************************************************************/
********************************************************************************/
typedef struct _em_vendor_info_t {
unsigned int vendor_id;
unsigned int device_id;
@ -452,19 +435,6 @@ struct em_buffer {
bus_dmamap_t map; /* bus_dma map for packet */
};
/* For 82544 PCIX Workaround */
typedef struct _ADDRESS_LENGTH_PAIR
{
uint64_t address;
uint32_t length;
} ADDRESS_LENGTH_PAIR, *PADDRESS_LENGTH_PAIR;
typedef struct _DESCRIPTOR_PAIR
{
ADDRESS_LENGTH_PAIR descriptor[4];
uint32_t elements;
} DESC_ARRAY, *PDESC_ARRAY;
#define EM_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF)
#define EM_TX_LOCK_INIT(_sc, _name) \

View File

@ -63,10 +63,6 @@
#include <machine/bus.h>
#include <machine/resource.h>
#ifdef IGB_IEEE1588
#include <sys/ieee1588.h>
#endif
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
@ -87,6 +83,7 @@
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <dev/led/led.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
@ -102,7 +99,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
char igb_driver_version[] = "version - 1.9.1";
char igb_driver_version[] = "version - 1.9.3";
/*********************************************************************
@ -204,12 +201,11 @@ static void igb_disable_intr(struct adapter *);
static void igb_update_stats_counters(struct adapter *);
static bool igb_txeof(struct tx_ring *);
static __inline void igb_rx_discard(struct rx_ring *,
union e1000_adv_rx_desc *, int);
static __inline void igb_rx_discard(struct rx_ring *, int);
static __inline void igb_rx_input(struct rx_ring *,
struct ifnet *, struct mbuf *, u32);
static bool igb_rxeof(struct rx_ring *, int);
static bool igb_rxeof(struct igb_queue *, int);
static void igb_rx_checksum(u32, struct mbuf *, u32);
static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
@ -218,7 +214,7 @@ static void igb_disable_promisc(struct adapter *);
static void igb_set_multi(struct adapter *);
static void igb_print_hw_stats(struct adapter *);
static void igb_update_link_status(struct adapter *);
static int igb_get_buf(struct rx_ring *, int, u8);
static void igb_refresh_mbufs(struct rx_ring *, int);
static void igb_register_vlan(void *, struct ifnet *, u16);
static void igb_unregister_vlan(void *, struct ifnet *, u16);
@ -239,6 +235,7 @@ static void igb_release_manageability(struct adapter *);
static void igb_get_hw_control(struct adapter *);
static void igb_release_hw_control(struct adapter *);
static void igb_enable_wakeup(device_t);
static void igb_led_func(void *, int);
static int igb_irq_fast(void *);
static void igb_add_rx_process_limit(struct adapter *, const char *,
@ -590,6 +587,9 @@ igb_attach(device_t dev)
/* Tell the stack that the interface is not active */
adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
adapter->led_dev = led_create(igb_led_func, adapter,
device_get_nameunit(dev));
INIT_DEBUGOUT("igb_attach: end");
return (0);
@ -629,6 +629,9 @@ igb_detach(device_t dev)
return (EBUSY);
}
if (adapter->led_dev != NULL)
led_destroy(adapter->led_dev);
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING)
ether_poll_deregister(ifp);
@ -814,6 +817,9 @@ igb_mq_start(struct ifnet *ifp, struct mbuf *m)
/* Which queue to use */
if ((m->m_flags & M_FLOWID) != 0)
i = m->m_pkthdr.flowid % adapter->num_queues;
else
i = curcpu % adapter->num_queues;
txr = &adapter->tx_rings[i];
if (IGB_TX_TRYLOCK(txr)) {
@ -1192,15 +1198,15 @@ igb_init(void *arg)
static void
igb_handle_rxtx(void *context, int pending)
{
struct adapter *adapter = context;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
struct ifnet *ifp;
struct igb_queue *que = context;
struct adapter *adapter = que->adapter;
struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp;
ifp = adapter->ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (igb_rxeof(rxr, adapter->rx_process_limit))
if (igb_rxeof(que, adapter->rx_process_limit))
taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
IGB_TX_LOCK(txr);
igb_txeof(txr);
@ -1224,14 +1230,13 @@ igb_handle_que(void *context, int pending)
struct igb_queue *que = context;
struct adapter *adapter = que->adapter;
struct tx_ring *txr = que->txr;
struct rx_ring *rxr = que->rxr;
struct ifnet *ifp = adapter->ifp;
u32 loop = IGB_MAX_LOOP;
bool more;
/* RX first */
do {
more = igb_rxeof(rxr, -1);
more = igb_rxeof(que, -1);
} while (loop-- && more);
if (IGB_TX_TRYLOCK(txr)) {
@ -1311,7 +1316,8 @@ igb_irq_fast(void *arg)
#ifdef DEVICE_POLLING
/*********************************************************************
*
* Legacy polling routine
* Legacy polling routine : if using this code you MUST be sure that
* multiqueue is not defined, ie, set igb_num_queues to 1.
*
*********************************************************************/
#if __FreeBSD_version >= 800000
@ -1323,12 +1329,12 @@ static void
#endif
igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = adapter->rx_rings;
struct tx_ring *txr = adapter->tx_rings;
u32 reg_icr, rx_done = 0;
u32 loop = IGB_MAX_LOOP;
bool more;
struct adapter *adapter = ifp->if_softc;
struct igb_queue *que = adapter->queues;
struct tx_ring *txr = adapter->tx_rings;
u32 reg_icr, rx_done = 0;
u32 loop = IGB_MAX_LOOP;
bool more;
IGB_CORE_LOCK(adapter);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
@ -1348,7 +1354,7 @@ igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
IGB_CORE_UNLOCK(adapter);
/* TODO: rx_count */
rx_done = igb_rxeof(rxr, count) ? 1 : 0;
rx_done = igb_rxeof(que, count) ? 1 : 0;
IGB_TX_LOCK(txr);
do {
@ -1388,7 +1394,7 @@ igb_msix_que(void *arg)
more_tx = igb_txeof(txr);
IGB_TX_UNLOCK(txr);
more_rx = igb_rxeof(rxr, adapter->rx_process_limit);
more_rx = igb_rxeof(que, adapter->rx_process_limit);
if (igb_enable_aim == FALSE)
goto no_calc;
@ -1955,6 +1961,7 @@ igb_update_link_status(struct adapter *adapter)
"Full Duplex" : "Half Duplex"));
adapter->link_active = 1;
ifp->if_baudrate = adapter->link_speed * 1000000;
/* This can sleep */
if_link_state_change(ifp, LINK_STATE_UP);
} else if (!link_check && (adapter->link_active == 1)) {
ifp->if_baudrate = adapter->link_speed = 0;
@ -1962,6 +1969,7 @@ igb_update_link_status(struct adapter *adapter)
if (bootverbose)
device_printf(dev, "Link is Down\n");
adapter->link_active = 0;
/* This can sleep */
if_link_state_change(ifp, LINK_STATE_DOWN);
/* Turn off watchdogs */
for (int i = 0; i < adapter->num_queues; i++, txr++)
@ -2003,6 +2011,9 @@ igb_stop(void *arg)
e1000_reset_hw(&adapter->hw);
E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
e1000_led_off(&adapter->hw);
e1000_cleanup_led(&adapter->hw);
}
@ -2080,8 +2091,9 @@ igb_allocate_pci_resources(struct adapter *adapter)
static int
igb_allocate_legacy(struct adapter *adapter)
{
device_t dev = adapter->dev;
int error, rid = 0;
device_t dev = adapter->dev;
struct igb_queue *que = adapter->queues;
int error, rid = 0;
/* Turn off all interrupts */
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
@ -2103,7 +2115,7 @@ igb_allocate_legacy(struct adapter *adapter)
* Try allocating a fast interrupt and the associated deferred
* processing contexts.
*/
TASK_INIT(&adapter->rxtx_task, 0, igb_handle_rxtx, adapter);
TASK_INIT(&adapter->rxtx_task, 0, igb_handle_rxtx, que);
/* Make tasklet for deferred link handling */
TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter);
adapter->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
@ -2433,22 +2445,19 @@ igb_setup_msix(struct adapter *adapter)
/* Figure out a reasonable auto config value */
queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
/* Can have max of 4 queues on 82575 */
if (adapter->hw.mac.type == e1000_82575) {
if (queues > 4)
queues = 4;
if (igb_num_queues > 4)
igb_num_queues = 4;
}
/* Manual override */
if (igb_num_queues != 0)
queues = igb_num_queues;
if (igb_num_queues == 0)
igb_num_queues = queues;
/* Can have max of 4 queues on 82575 */
if ((adapter->hw.mac.type == e1000_82575) && (queues > 4))
queues = 4;
/*
** One vector (RX/TX pair) per queue
** plus an additional for Link interrupt
*/
want = igb_num_queues + 1;
want = queues + 1;
if (msgs >= want)
msgs = want;
else {
@ -2461,7 +2470,7 @@ igb_setup_msix(struct adapter *adapter)
if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
device_printf(adapter->dev,
"Using MSIX interrupts with %d vectors\n", msgs);
adapter->num_queues = igb_num_queues;
adapter->num_queues = queues;
return (msgs);
}
msi:
@ -2922,9 +2931,7 @@ err_tx_desc:
igb_dma_free(adapter, &txr->txdma);
free(adapter->rx_rings, M_DEVBUF);
rx_fail:
#if __FreeBSD_version >= 800000
buf_ring_free(txr->br, M_DEVBUF);
#endif
free(adapter->tx_rings, M_DEVBUF);
tx_fail:
free(adapter->queues, M_DEVBUF);
@ -3502,111 +3509,88 @@ igb_txeof(struct tx_ring *txr)
/*********************************************************************
*
* Setup descriptor buffer(s) from system mbuf buffer pools.
* i - designates the ring index
* clean - tells the function whether to update
* the header, the packet buffer, or both.
* Refresh mbuf buffers for RX descriptor rings
* - now keeps its own state so discards due to resource
* exhaustion are unnecessary, if an mbuf cannot be obtained
* it just returns, keeping its placeholder, thus it can simply
* be recalled to try again.
*
**********************************************************************/
static int
igb_get_buf(struct rx_ring *rxr, int i, u8 clean)
static void
igb_refresh_mbufs(struct rx_ring *rxr, int limit)
{
struct adapter *adapter = rxr->adapter;
struct igb_rx_buf *rxbuf;
struct mbuf *mh, *mp;
bus_dma_segment_t hseg[1];
bus_dma_segment_t pseg[1];
bus_dmamap_t map;
int nsegs, error;
struct igb_rx_buf *rxbuf;
struct mbuf *mh, *mp;
int i, nsegs, error, cleaned;
rxbuf = &rxr->rx_buffers[i];
mh = mp = NULL;
if ((clean & IGB_CLEAN_HEADER) != 0) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
if (mh == NULL) {
adapter->mbuf_header_failed++;
return (ENOBUFS);
}
mh->m_pkthdr.len = mh->m_len = MHLEN;
/*
* Because IGB_HDR_BUF size is less than MHLEN
* and we configure controller to split headers
* we can align mbuf on ETHER_ALIGN boundary.
*/
m_adj(mh, ETHER_ALIGN);
error = bus_dmamap_load_mbuf_sg(rxr->rx_htag,
rxr->rx_hspare_map, mh, hseg, &nsegs, 0);
if (error != 0) {
m_freem(mh);
return (error);
}
mh->m_flags &= ~M_PKTHDR;
}
if ((clean & IGB_CLEAN_PAYLOAD) != 0) {
mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
adapter->rx_mbuf_sz);
if (mp == NULL) {
if (mh != NULL) {
adapter->mbuf_packet_failed++;
bus_dmamap_unload(rxr->rx_htag,
rxbuf->head_map);
mh->m_flags |= M_PKTHDR;
m_freem(mh);
i = rxr->next_to_refresh;
cleaned = -1; /* Signify no completions */
while (i != limit) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head == NULL) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
if (mh == NULL)
goto update;
mh->m_pkthdr.len = mh->m_len = MHLEN;
mh->m_len = MHLEN;
mh->m_flags |= M_PKTHDR;
m_adj(mh, ETHER_ALIGN);
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->htag,
rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("GET BUF: dmamap load"
" failure - %d\n", error);
m_free(mh);
goto update;
}
return (ENOBUFS);
rxbuf->m_head = mh;
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.hdr_addr =
htole64(hseg[0].ds_addr);
}
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
error = bus_dmamap_load_mbuf_sg(rxr->rx_ptag,
rxr->rx_pspare_map, mp, pseg, &nsegs, 0);
if (error != 0) {
if (mh != NULL) {
bus_dmamap_unload(rxr->rx_htag,
rxbuf->head_map);
mh->m_flags |= M_PKTHDR;
m_freem(mh);
if (rxbuf->m_pack == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (mp == NULL)
goto update;
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->ptag,
rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("GET BUF: dmamap load"
" failure - %d\n", error);
m_free(mp);
goto update;
}
m_freem(mp);
return (error);
rxbuf->m_pack = mp;
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.pkt_addr =
htole64(pseg[0].ds_addr);
}
mp->m_flags &= ~M_PKTHDR;
}
/* Loading new DMA maps complete, unload maps for received buffers. */
if ((clean & IGB_CLEAN_HEADER) != 0 && rxbuf->m_head != NULL) {
bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->rx_htag, rxbuf->head_map);
cleaned = i;
/* Calculate next index */
if (++i == adapter->num_rx_desc)
i = 0;
/* This is the work marker for refresh */
rxr->next_to_refresh = i;
}
if ((clean & IGB_CLEAN_PAYLOAD) != 0 && rxbuf->m_pack != NULL) {
bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->rx_ptag, rxbuf->pack_map);
}
/* Reflect loaded dmamaps. */
if ((clean & IGB_CLEAN_HEADER) != 0) {
map = rxbuf->head_map;
rxbuf->head_map = rxr->rx_hspare_map;
rxr->rx_hspare_map = map;
rxbuf->m_head = mh;
bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr);
}
if ((clean & IGB_CLEAN_PAYLOAD) != 0) {
map = rxbuf->pack_map;
rxbuf->pack_map = rxr->rx_pspare_map;
rxr->rx_pspare_map = map;
rxbuf->m_pack = mp;
bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.pkt_addr = htole64(pseg[0].ds_addr);
}
return (0);
update:
if (cleaned != -1) /* If we refreshed some, bump tail */
E1000_WRITE_REG(&adapter->hw,
E1000_RDT(rxr->me), cleaned);
return;
}
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
@ -3643,7 +3627,7 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&rxr->rx_htag))) {
&rxr->htag))) {
device_printf(dev, "Unable to create RX DMA tag\n");
goto fail;
}
@ -3659,40 +3643,22 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&rxr->rx_ptag))) {
&rxr->ptag))) {
device_printf(dev, "Unable to create RX payload DMA tag\n");
goto fail;
}
/* Create the spare maps (used by getbuf) */
error = bus_dmamap_create(rxr->rx_htag, BUS_DMA_NOWAIT,
&rxr->rx_hspare_map);
if (error) {
device_printf(dev,
"%s: bus_dmamap_create header spare failed: %d\n",
__func__, error);
goto fail;
}
error = bus_dmamap_create(rxr->rx_ptag, BUS_DMA_NOWAIT,
&rxr->rx_pspare_map);
if (error) {
device_printf(dev,
"%s: bus_dmamap_create packet spare failed: %d\n",
__func__, error);
goto fail;
}
for (i = 0; i < adapter->num_rx_desc; i++) {
rxbuf = &rxr->rx_buffers[i];
error = bus_dmamap_create(rxr->rx_htag,
BUS_DMA_NOWAIT, &rxbuf->head_map);
error = bus_dmamap_create(rxr->htag,
BUS_DMA_NOWAIT, &rxbuf->hmap);
if (error) {
device_printf(dev,
"Unable to create RX head DMA maps\n");
goto fail;
}
error = bus_dmamap_create(rxr->rx_ptag,
BUS_DMA_NOWAIT, &rxbuf->pack_map);
error = bus_dmamap_create(rxr->ptag,
BUS_DMA_NOWAIT, &rxbuf->pmap);
if (error) {
device_printf(dev,
"Unable to create RX packet DMA maps\n");
@ -3720,16 +3686,16 @@ igb_free_receive_ring(struct rx_ring *rxr)
for (i = 0; i < adapter->num_rx_desc; i++) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head != NULL) {
bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->rx_htag, rxbuf->head_map);
bus_dmamap_unload(rxr->htag, rxbuf->hmap);
rxbuf->m_head->m_flags |= M_PKTHDR;
m_freem(rxbuf->m_head);
}
if (rxbuf->m_pack != NULL) {
bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->rx_ptag, rxbuf->pack_map);
bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
rxbuf->m_pack->m_flags |= M_PKTHDR;
m_freem(rxbuf->m_pack);
}
@ -3750,8 +3716,10 @@ igb_setup_receive_ring(struct rx_ring *rxr)
struct adapter *adapter;
struct ifnet *ifp;
device_t dev;
struct igb_rx_buf *rxbuf;
bus_dma_segment_t pseg[1], hseg[1];
struct lro_ctrl *lro = &rxr->lro;
int j, rsize, error = 0;
int rsize, nsegs, error = 0;
adapter = rxr->adapter;
dev = adapter->dev;
@ -3768,15 +3736,50 @@ igb_setup_receive_ring(struct rx_ring *rxr)
*/
igb_free_receive_ring(rxr);
/* Now replenish the ring mbufs */
for (j = 0; j < adapter->num_rx_desc; j++) {
if ((error = igb_get_buf(rxr, j, IGB_CLEAN_BOTH)) != 0)
goto fail;
}
/* Now replenish the ring mbufs */
for (int j = 0; j != adapter->num_rx_desc; ++j) {
struct mbuf *mh, *mp;
/* Setup our descriptor indices */
rxr->next_to_check = 0;
rxr->last_cleaned = 0;
rxbuf = &rxr->rx_buffers[j];
/* First the header */
rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
if (rxbuf->m_head == NULL)
goto fail;
m_adj(rxbuf->m_head, ETHER_ALIGN);
mh = rxbuf->m_head;
mh->m_len = mh->m_pkthdr.len = MHLEN;
mh->m_flags |= M_PKTHDR;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->htag,
rxbuf->hmap, rxbuf->m_head, hseg,
&nsegs, BUS_DMA_NOWAIT);
if (error != 0) /* Nothing elegant to do here */
goto fail;
bus_dmamap_sync(rxr->htag,
rxbuf->hmap, BUS_DMASYNC_PREREAD);
/* Update descriptor */
rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
/* Now the payload cluster */
rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (rxbuf->m_pack == NULL)
goto fail;
mp = rxbuf->m_pack;
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->ptag,
rxbuf->pmap, mp, pseg,
&nsegs, BUS_DMA_NOWAIT);
if (error != 0)
goto fail;
bus_dmamap_sync(rxr->ptag,
rxbuf->pmap, BUS_DMASYNC_PREREAD);
/* Update descriptor */
rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
}
rxr->next_to_refresh = 0;
rxr->lro_enabled = FALSE;
if (igb_header_split)
@ -4049,47 +4052,33 @@ igb_free_receive_buffers(struct rx_ring *rxr)
INIT_DEBUGOUT("free_receive_structures: begin");
if (rxr->rx_hspare_map != NULL) {
bus_dmamap_destroy(rxr->rx_htag, rxr->rx_hspare_map);
rxr->rx_hspare_map = NULL;
}
if (rxr->rx_hspare_map != NULL) {
bus_dmamap_destroy(rxr->rx_ptag, rxr->rx_pspare_map);
rxr->rx_pspare_map = NULL;
}
/* Cleanup any existing buffers */
if (rxr->rx_buffers != NULL) {
for (i = 0; i < adapter->num_rx_desc; i++) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head != NULL) {
bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->rx_htag,
rxbuf->head_map);
bus_dmamap_unload(rxr->htag, rxbuf->hmap);
rxbuf->m_head->m_flags |= M_PKTHDR;
m_freem(rxbuf->m_head);
}
if (rxbuf->m_pack != NULL) {
bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->rx_ptag,
rxbuf->pack_map);
bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
rxbuf->m_pack->m_flags |= M_PKTHDR;
m_freem(rxbuf->m_pack);
}
rxbuf->m_head = NULL;
rxbuf->m_pack = NULL;
if (rxbuf->head_map != NULL) {
bus_dmamap_destroy(rxr->rx_htag,
rxbuf->head_map);
rxbuf->head_map = NULL;
if (rxbuf->hmap != NULL) {
bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
rxbuf->hmap = NULL;
}
if (rxbuf->pack_map != NULL) {
bus_dmamap_destroy(rxr->rx_ptag,
rxbuf->pack_map);
rxbuf->pack_map = NULL;
if (rxbuf->pmap != NULL) {
bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
rxbuf->pmap = NULL;
}
}
if (rxr->rx_buffers != NULL) {
@ -4098,26 +4087,43 @@ igb_free_receive_buffers(struct rx_ring *rxr)
}
}
if (rxr->rx_htag != NULL) {
bus_dma_tag_destroy(rxr->rx_htag);
rxr->rx_htag = NULL;
if (rxr->htag != NULL) {
bus_dma_tag_destroy(rxr->htag);
rxr->htag = NULL;
}
if (rxr->rx_ptag != NULL) {
bus_dma_tag_destroy(rxr->rx_ptag);
rxr->rx_ptag = NULL;
if (rxr->ptag != NULL) {
bus_dma_tag_destroy(rxr->ptag);
rxr->ptag = NULL;
}
}
static __inline void
igb_rx_discard(struct rx_ring *rxr, union e1000_adv_rx_desc *cur, int i)
igb_rx_discard(struct rx_ring *rxr, int i)
{
struct adapter *adapter = rxr->adapter;
struct igb_rx_buf *rbuf;
struct mbuf *mh, *mp;
rbuf = &rxr->rx_buffers[i];
if (rxr->fmp != NULL) {
rxr->fmp->m_flags |= M_PKTHDR;
m_freem(rxr->fmp);
rxr->fmp = NULL;
rxr->lmp = NULL;
}
mh = rbuf->m_head;
mp = rbuf->m_pack;
/* Reuse loaded DMA map and just update mbuf chain */
mh->m_len = MHLEN;
mh->m_flags |= M_PKTHDR;
mh->m_next = NULL;
mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
mp->m_data = mp->m_ext.ext_buf;
mp->m_next = NULL;
return;
}
static __inline void
@ -4161,28 +4167,29 @@ igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
* Return TRUE if more to clean, FALSE otherwise
*********************************************************************/
static bool
igb_rxeof(struct rx_ring *rxr, int count)
igb_rxeof(struct igb_queue *que, int count)
{
struct adapter *adapter = rxr->adapter;
struct adapter *adapter = que->adapter;
struct rx_ring *rxr = que->rxr;
struct ifnet *ifp = adapter->ifp;
struct lro_ctrl *lro = &rxr->lro;
struct lro_entry *queued;
int i, prog = 0;
int i, processed = 0;
u32 ptype, staterr = 0;
union e1000_adv_rx_desc *cur;
IGB_RX_LOCK(rxr);
/* Sync the ring. */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Main clean loop */
for (i = rxr->next_to_check; count > 0; prog++) {
struct mbuf *sendmp, *mh, *mp;
u16 hlen, plen, hdr, vtag;
bool eop = FALSE;
u8 dopayload;
for (i = rxr->next_to_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
struct igb_rx_buf *rxbuf;
u16 hlen, plen, hdr, vtag;
bool eop = FALSE;
/* Sync the ring. */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
cur = &rxr->rx_base[i];
staterr = le32toh(cur->wb.upper.status_error);
if ((staterr & E1000_RXD_STAT_DD) == 0)
@ -4192,8 +4199,10 @@ igb_rxeof(struct rx_ring *rxr, int count)
count--;
sendmp = mh = mp = NULL;
cur->wb.upper.status_error = 0;
rxbuf = &rxr->rx_buffers[i];
plen = le16toh(cur->wb.upper.length);
ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
vtag = le16toh(cur->wb.upper.vlan);
hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
@ -4206,7 +4215,7 @@ igb_rxeof(struct rx_ring *rxr, int count)
rxr->discard = TRUE;
else
rxr->discard = FALSE;
igb_rx_discard(rxr, cur, i);
igb_rx_discard(rxr, i);
goto next_desc;
}
@ -4229,7 +4238,8 @@ igb_rxeof(struct rx_ring *rxr, int count)
/* Handle the header mbuf */
mh = rxr->rx_buffers[i].m_head;
mh->m_len = hlen;
dopayload = IGB_CLEAN_HEADER;
/* clear buf info for refresh */
rxbuf->m_head = NULL;
/*
** Get the payload length, this
** could be zero if its a small
@ -4239,7 +4249,8 @@ igb_rxeof(struct rx_ring *rxr, int count)
mp = rxr->rx_buffers[i].m_pack;
mp->m_len = plen;
mh->m_next = mp;
dopayload = IGB_CLEAN_BOTH;
/* clear buf info for refresh */
rxbuf->m_pack = NULL;
rxr->rx_split_packets++;
}
} else {
@ -4250,26 +4261,11 @@ igb_rxeof(struct rx_ring *rxr, int count)
*/
mh = rxr->rx_buffers[i].m_pack;
mh->m_len = plen;
dopayload = IGB_CLEAN_PAYLOAD;
/* clear buf info for refresh */
rxbuf->m_pack = NULL;
}
/*
** get_buf will overwrite the writeback
** descriptor so save the VLAN tag now.
*/
vtag = le16toh(cur->wb.upper.vlan);
if (igb_get_buf(rxr, i, dopayload) != 0) {
ifp->if_iqdrops++;
/*
* We've dropped a frame due to lack of resources
* so we should drop entire multi-segmented
* frames until we encounter EOP.
*/
if ((staterr & E1000_RXD_STAT_EOP) != 0)
rxr->discard = TRUE;
igb_rx_discard(rxr, cur, i);
goto next_desc;
}
++processed; /* So we know when to refresh */
/* Initial frame - setup */
if (rxr->fmp == NULL) {
@ -4300,14 +4296,14 @@ igb_rxeof(struct rx_ring *rxr, int count)
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
igb_rx_checksum(staterr, rxr->fmp, ptype);
/* XXX igb(4) always strips VLAN. */
if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
(staterr & E1000_RXD_STAT_VP) != 0) {
rxr->fmp->m_pkthdr.ether_vtag = vtag;
rxr->fmp->m_flags |= M_VLANTAG;
}
#if __FreeBSD_version >= 800000
rxr->fmp->m_pkthdr.flowid = curcpu;
rxr->fmp->m_pkthdr.flowid = que->msix;
rxr->fmp->m_flags |= M_FLOWID;
#endif
sendmp = rxr->fmp;
@ -4321,31 +4317,30 @@ next_desc:
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
rxr->last_cleaned = i; /* For updating tail */
/* Advance our pointers to the next descriptor. */
if (++i == adapter->num_rx_desc)
i = 0;
/*
** Note that we hold the RX lock thru
** the following call so this ring's
** next_to_check is not gonna change.
** Send to the stack or LRO
*/
if (sendmp != NULL)
igb_rx_input(rxr, ifp, sendmp, ptype);
/* Every 8 descriptors we go to refresh mbufs */
if (processed == 8) {
igb_refresh_mbufs(rxr, i);
processed = 0;
}
}
if (prog == 0) {
IGB_RX_UNLOCK(rxr);
return (FALSE);
/* Catch any remainders */
if (processed != 0) {
igb_refresh_mbufs(rxr, i);
processed = 0;
}
rxr->next_to_check = i;
/* Advance the E1000's Receive Queue "Tail Pointer". */
E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->last_cleaned);
/*
* Flush any outstanding LRO work
*/
@ -4630,7 +4625,7 @@ igb_is_valid_ether_addr(uint8_t *addr)
/*
* Enable PCI Wake On Lan capability
*/
void
static void
igb_enable_wakeup(device_t dev)
{
u16 cap, status;
@ -4651,6 +4646,21 @@ igb_enable_wakeup(device_t dev)
return;
}
static void
igb_led_func(void *arg, int onoff)
{
struct adapter *adapter = arg;
IGB_CORE_LOCK(adapter);
if (onoff) {
e1000_setup_led(&adapter->hw);
e1000_led_on(&adapter->hw);
} else {
e1000_led_off(&adapter->hw);
e1000_cleanup_led(&adapter->hw);
}
IGB_CORE_UNLOCK(adapter);
}
/**********************************************************************
*

View File

@ -47,8 +47,8 @@
* desscriptors should meet the following condition.
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_TXD 80
#define IGB_DEFAULT_TXD 256
#define IGB_MIN_TXD 256
#define IGB_DEFAULT_TXD 1024
#define IGB_MAX_TXD 4096
/*
@ -62,8 +62,8 @@
* desscriptors should meet the following condition.
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_RXD 80
#define IGB_DEFAULT_RXD 256
#define IGB_MIN_RXD 256
#define IGB_DEFAULT_RXD 1024
#define IGB_MAX_RXD 4096
/*
@ -333,13 +333,11 @@ struct rx_ring {
bool discard;
struct mtx rx_mtx;
char mtx_name[16];
u32 last_cleaned;
u32 next_to_refresh;
u32 next_to_check;
struct igb_rx_buf *rx_buffers;
bus_dma_tag_t rx_htag; /* dma tag for rx head */
bus_dmamap_t rx_hspare_map;
bus_dma_tag_t rx_ptag; /* dma tag for rx packet */
bus_dmamap_t rx_pspare_map;
bus_dma_tag_t htag; /* dma tag for rx head */
bus_dma_tag_t ptag; /* dma tag for rx packet */
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
@ -363,6 +361,7 @@ struct adapter {
struct e1000_osdep osdep;
struct device *dev;
struct cdev *led_dev;
struct resource *pci_mem;
struct resource *msix_mem;
@ -468,8 +467,8 @@ struct igb_tx_buffer {
struct igb_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
bus_dmamap_t head_map; /* bus_dma map for packet */
bus_dmamap_t pack_map; /* bus_dma map for packet */
bus_dmamap_t hmap; /* bus_dma map for header */
bus_dmamap_t pmap; /* bus_dma map for packet */
};
#define IGB_CORE_LOCK_INIT(_sc, _name) \

4706
sys/dev/e1000/if_lem.c Normal file

File diff suppressed because it is too large Load Diff

481
sys/dev/e1000/if_lem.h Normal file
View File

@ -0,0 +1,481 @@
/******************************************************************************
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _LEM_H_DEFINED_
#define _LEM_H_DEFINED_
/* Tunables */
/*
* EM_TXD: Maximum number of Transmit Descriptors
* Valid Range: 80-256 for 82542 and 82543-based adapters
* 80-4096 for others
* Default Value: 256
* This value is the number of transmit descriptors allocated by the driver.
* Increasing this value allows the driver to queue more transmits. Each
* descriptor is 16 bytes.
* Since TDLEN should be multiple of 128bytes, the number of transmit
* desscriptors should meet the following condition.
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define EM_MIN_TXD 80
#define EM_MAX_TXD_82543 256
#define EM_MAX_TXD 4096
#define EM_DEFAULT_TXD EM_MAX_TXD_82543
/*
* EM_RXD - Maximum number of receive Descriptors
* Valid Range: 80-256 for 82542 and 82543-based adapters
* 80-4096 for others
* Default Value: 256
* This value is the number of receive descriptors allocated by the driver.
* Increasing this value allows the driver to buffer more incoming packets.
* Each descriptor is 16 bytes. A receive buffer is also allocated for each
* descriptor. The maximum MTU size is 16110.
* Since TDLEN should be multiple of 128bytes, the number of transmit
* desscriptors should meet the following condition.
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define EM_MIN_RXD 80
#define EM_MAX_RXD_82543 256
#define EM_MAX_RXD 4096
#define EM_DEFAULT_RXD EM_MAX_RXD_82543
/*
* EM_TIDV - Transmit Interrupt Delay Value
* Valid Range: 0-65535 (0=off)
* Default Value: 64
* This value delays the generation of transmit interrupts in units of
* 1.024 microseconds. Transmit interrupt reduction can improve CPU
* efficiency if properly tuned for specific network traffic. If the
* system is reporting dropped transmits, this value may be set too high
* causing the driver to run out of available transmit descriptors.
*/
#define EM_TIDV 64
/*
* EM_TADV - Transmit Absolute Interrupt Delay Value
* (Not valid for 82542/82543/82544)
* Valid Range: 0-65535 (0=off)
* Default Value: 64
* This value, in units of 1.024 microseconds, limits the delay in which a
* transmit interrupt is generated. Useful only if EM_TIDV is non-zero,
* this value ensures that an interrupt is generated after the initial
* packet is sent on the wire within the set amount of time. Proper tuning,
* along with EM_TIDV, may improve traffic throughput in specific
* network conditions.
*/
#define EM_TADV 64
/*
* EM_RDTR - Receive Interrupt Delay Timer (Packet Timer)
* Valid Range: 0-65535 (0=off)
* Default Value: 0
* This value delays the generation of receive interrupts in units of 1.024
* microseconds. Receive interrupt reduction can improve CPU efficiency if
* properly tuned for specific network traffic. Increasing this value adds
* extra latency to frame reception and can end up decreasing the throughput
* of TCP traffic. If the system is reporting dropped receives, this value
* may be set too high, causing the driver to run out of available receive
* descriptors.
*
* CAUTION: When setting EM_RDTR to a value other than 0, adapters
* may hang (stop transmitting) under certain network conditions.
* If this occurs a WATCHDOG message is logged in the system
* event log. In addition, the controller is automatically reset,
* restoring the network connection. To eliminate the potential
* for the hang ensure that EM_RDTR is set to 0.
*/
#define EM_RDTR 0
/*
* Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
* Valid Range: 0-65535 (0=off)
* Default Value: 64
* This value, in units of 1.024 microseconds, limits the delay in which a
* receive interrupt is generated. Useful only if EM_RDTR is non-zero,
* this value ensures that an interrupt is generated after the initial
* packet is received within the set amount of time. Proper tuning,
* along with EM_RDTR, may improve traffic throughput in specific network
* conditions.
*/
#define EM_RADV 64
/*
* This parameter controls the max duration of transmit watchdog.
*/
#define EM_WATCHDOG (10 * hz)
/*
* This parameter controls when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define EM_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
#define EM_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
/*
* This parameter controls whether or not autonegotation is enabled.
* 0 - Disable autonegotiation
* 1 - Enable autonegotiation
*/
#define DO_AUTO_NEG 1
/*
* This parameter control whether or not the driver will wait for
* autonegotiation to complete.
* 1 - Wait for autonegotiation to complete
* 0 - Don't wait for autonegotiation to complete
*/
#define WAIT_FOR_AUTO_NEG_DEFAULT 0
/* Tunables -- End */
#define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
ADVERTISE_1000_FULL)
#define AUTO_ALL_MODES 0
/* PHY master/slave setting */
#define EM_MASTER_SLAVE e1000_ms_hw_default
/*
* Micellaneous constants
*/
#define EM_VENDOR_ID 0x8086
#define EM_FLASH 0x0014
#define EM_JUMBO_PBA 0x00000028
#define EM_DEFAULT_PBA 0x00000030
#define EM_SMARTSPEED_DOWNSHIFT 3
#define EM_SMARTSPEED_MAX 15
#define EM_MAX_LOOP 10
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define PCI_ANY_ID (~0U)
#define ETHER_ALIGN 2
#define EM_FC_PAUSE_TIME 0x0680
#define EM_EEPROM_APME 0x400;
#define EM_82544_APME 0x0004;
/* Code compatilbility between 6 and 7 */
#ifndef ETHER_BPF_MTAP
#define ETHER_BPF_MTAP BPF_MTAP
#endif
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
* also optimize cache line size effect. H/W supports up to cache line size 128.
*/
#define EM_DBA_ALIGN 128
#define SPEED_MODE_BIT (1<<21) /* On PCI-E MACs only */
/* PCI Config defines: decode the BAR type bits of a PCI base address register */
#define EM_BAR_TYPE(v) ((v) & EM_BAR_TYPE_MASK)
#define EM_BAR_TYPE_MASK 0x00000001
#define EM_BAR_TYPE_MMEM 0x00000000
#define EM_BAR_TYPE_IO 0x00000001
#define EM_BAR_TYPE_FLASH 0x0014
#define EM_BAR_MEM_TYPE(v) ((v) & EM_BAR_MEM_TYPE_MASK)
#define EM_BAR_MEM_TYPE_MASK 0x00000006
#define EM_BAR_MEM_TYPE_32BIT 0x00000000
#define EM_BAR_MEM_TYPE_64BIT 0x00000004
#define EM_MSIX_BAR 3 /* On 82575 */
/* Defines for printing debug information; set to 1 to enable a category */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define EM_MAX_SCATTER 64
#define EM_VFTA_SIZE 128
#define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */
#define EM_MSIX_MASK 0x01F00000 /* For 82574 use */
#define ETH_ZLEN 60
#define ETH_ADDR_LEN 6
#define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */
/*
* 82574 has a nonstandard address for EIAC
* and since its only used in MSIX, and in
* the em driver only 82574 uses MSIX we can
* solve it just using this define.
*/
#define EM_EIAC 0x000DC
/* Used in the 82547 10Mb Half workaround */
#define EM_PBA_BYTES_SHIFT 0xA
#define EM_TX_HEAD_ADDR_SHIFT 7
#define EM_PBA_TX_MASK 0xFFFF0000
#define EM_FIFO_HDR 0x10
#define EM_82547_PKT_THRESH 0x3e0
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_PORT 319 /* UDP port for the protocol */
/*
* Bus dma allocation structure used by
* e1000_dma_malloc and e1000_dma_free.
* Bundles the tag/map/segment bookkeeping for one DMA-able region.
*/
struct em_dma_alloc {
bus_addr_t dma_paddr; /* Bus (physical) address of the region */
caddr_t dma_vaddr; /* Kernel virtual address of the region */
bus_dma_tag_t dma_tag; /* Tag describing allocation constraints */
bus_dmamap_t dma_map; /* Map the region is loaded into */
bus_dma_segment_t dma_seg; /* Segment descriptor from the load */
int dma_nseg; /* Segment count from the load */
};
struct adapter; /* Forward declaration for the back-pointer below */
/*
* Associates one interrupt-delay register with its current setting,
* presumably so a sysctl handler can read/update it -- TODO confirm.
*/
struct em_int_delay_info {
struct adapter *adapter; /* Back-pointer to the adapter struct */
int offset; /* Register offset to read/write */
int value; /* Current value in usecs */
};
/*
 * Our adapter structure: the per-device softc for the em(4) driver.
 * Holds the shared-code hardware state, bus resources, descriptor
 * rings, locks, tunables and driver-maintained statistics.
 */
struct adapter {
struct ifnet *ifp; /* Network interface handed to the stack */
#if __FreeBSD_version >= 800000
struct buf_ring *br; /* Transmit buf_ring (FreeBSD 8.x and later) */
#endif
struct e1000_hw hw; /* Shared-code hardware-layer state */
/* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
struct device *dev;
struct cdev *led_dev;
struct resource *memory;
struct resource *flash;
struct resource *msix;
struct resource *ioport;
int io_rid;
/* 82574 may use 3 int vectors */
struct resource *res[3];
void *tag[3];
int rid[3];
struct ifmedia media;
struct callout timer; /* periodic callout -- purpose not visible here, TODO confirm */
struct callout tx_fifo_timer;
bool watchdog_check;
int watchdog_time;
int msi;
int if_flags;
int max_frame_size;
int min_frame_size;
/* Driver locks; manipulated via the EM_CORE/TX/RX_LOCK macros below. */
struct mtx core_mtx;
struct mtx tx_mtx;
struct mtx rx_mtx;
int em_insert_vlan_header;
/* Task for FAST handling */
struct task link_task;
struct task rxtx_task;
struct task rx_task;
struct task tx_task;
struct taskqueue *tq; /* private task queue */
#if __FreeBSD_version >= 700029
eventhandler_tag vlan_attach; /* VLAN config event registrations (7.x+) */
eventhandler_tag vlan_detach;
u32 num_vlans;
#endif
/* Management and WOL features */
u32 wol;
bool has_manage;
bool has_amt;
/* Info about the board itself */
uint8_t link_active;
uint16_t link_speed;
uint16_t link_duplex;
uint32_t smartspeed;
/* Interrupt-delay tunables (values in usecs, see em_int_delay_info). */
struct em_int_delay_info tx_int_delay;
struct em_int_delay_info tx_abs_int_delay;
struct em_int_delay_info rx_int_delay;
struct em_int_delay_info rx_abs_int_delay;
/*
* Transmit definitions
*
* We have an array of num_tx_desc descriptors (handled
* by the controller) paired with an array of tx_buffers
* (at tx_buffer_area).
* The index of the next available descriptor is next_avail_tx_desc.
* The number of remaining tx_desc is num_tx_desc_avail.
*/
struct em_dma_alloc txdma; /* bus_dma glue for tx desc */
struct e1000_tx_desc *tx_desc_base;
uint32_t next_avail_tx_desc;
uint32_t next_tx_to_clean;
volatile uint16_t num_tx_desc_avail;
uint16_t num_tx_desc;
uint16_t last_hw_offload;
uint32_t txd_cmd;
struct em_buffer *tx_buffer_area;
bus_dma_tag_t txtag; /* dma tag for tx */
uint32_t tx_tso; /* last tx was tso */
/*
* Receive definitions
*
* we have an array of num_rx_desc rx_desc (handled by the
* controller), and paired with an array of rx_buffers
* (at rx_buffer_area).
* The next pair to check on receive is at offset next_rx_desc_to_check
*/
struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */
struct e1000_rx_desc *rx_desc_base;
uint32_t next_rx_desc_to_check;
uint32_t rx_buffer_len;
uint16_t num_rx_desc;
int rx_process_limit;
struct em_buffer *rx_buffer_area;
bus_dma_tag_t rxtag;
bus_dmamap_t rx_sparemap;
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
*/
struct mbuf *fmp;
struct mbuf *lmp;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_alloc_failed;
unsigned long mbuf_cluster_failed;
unsigned long no_tx_desc_avail1;
unsigned long no_tx_desc_avail2;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long rx_overruns;
unsigned long rx_irq;
unsigned long tx_irq;
unsigned long link_irq;
/* 82547 workaround */
uint32_t tx_fifo_size;
uint32_t tx_fifo_head;
uint32_t tx_fifo_head_addr;
uint64_t tx_fifo_reset_cnt;
uint64_t tx_fifo_wrk_cnt;
uint32_t tx_head_addr;
/* For 82544 PCIX Workaround */
boolean_t pcix_82544;
boolean_t in_detach;
struct e1000_hw_stats stats; /* Counters mirrored from hardware */
};
/* ******************************************************************************
 * vendor_info_array
 *
 * This array contains the list of Subvendor/Subdevice IDs on which the driver
 * should load.
 *
 * ******************************************************************************/
typedef struct _em_vendor_info_t {
unsigned int vendor_id; /* PCI vendor ID */
unsigned int device_id; /* PCI device ID */
unsigned int subvendor_id; /* PCI subsystem vendor ID */
unsigned int subdevice_id; /* PCI subsystem device ID */
unsigned int index;
} em_vendor_info_t;
/* Per-slot software state paired with one descriptor ring entry. */
struct em_buffer {
int next_eop; /* Index of the desc to watch */
struct mbuf *m_head; /* mbuf (chain head) owned by this slot */
bus_dmamap_t map; /* bus_dma map for packet */
};
/* For 82544 PCIX Workaround */
/* One (bus address, length) pair describing a single DMA segment. */
typedef struct _ADDRESS_LENGTH_PAIR
{
uint64_t address;
uint32_t length;
} ADDRESS_LENGTH_PAIR, *PADDRESS_LENGTH_PAIR;
/*
 * Up to four address/length segments plus the count actually used;
 * also part of the 82544 PCIX workaround.
 */
typedef struct _DESCRIPTOR_PAIR
{
ADDRESS_LENGTH_PAIR descriptor[4];
uint32_t elements; /* Number of valid entries in descriptor[] */
} DESC_ARRAY, *PDESC_ARRAY;
/*
 * Convenience wrappers around mtx(9) for the three driver locks in
 * struct adapter: core (device state), TX path, and RX path.
 */
#define EM_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF)
#define EM_TX_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->tx_mtx, _name, "EM TX Lock", MTX_DEF)
#define EM_RX_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->rx_mtx, _name, "EM RX Lock", MTX_DEF)
#define EM_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
#define EM_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define EM_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define EM_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define EM_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define EM_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define EM_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define EM_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define EM_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
#endif /* _LEM_H_DEFINED_ */

View File

@ -2698,7 +2698,7 @@ SBP_DEBUG(0)
#else
"segment length(%zd) is less than 16."
#endif
"(seg=%d/%d)\n", s->ds_len, i+1, seg);
"(seg=%d/%d)\n", (size_t)s->ds_len, i+1, seg);
END_DEBUG
if (s->ds_len > SBP_SEG_MAX)
panic("ds_len > SBP_SEG_MAX, fix busdma code");

View File

@ -3972,9 +3972,11 @@ pmc_post_callchain_callback(void)
td = curthread;
KASSERT((td->td_pflags & TDP_CALLCHAIN) == 0,
("[pmc,%d] thread %p already marked for callchain capture",
__LINE__, (void *) td));
/*
* If there is multiple PMCs for the same interrupt ignore new post
*/
if (td->td_pflags & TDP_CALLCHAIN)
return;
/*
* Mark this thread as needing callchain capture.

View File

@ -46,7 +46,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
char ixgbe_driver_version[] = "2.1.6";
char ixgbe_driver_version[] = "2.1.7";
/*********************************************************************
* PCI Device ID Table
@ -106,8 +106,8 @@ static int ixgbe_mq_start_locked(struct ifnet *,
static void ixgbe_qflush(struct ifnet *);
#endif
static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void ixgbe_init(void *);
static void ixgbe_init_locked(struct adapter *);
static void ixgbe_init(void *);
static int ixgbe_init_locked(struct adapter *);
static void ixgbe_stop(void *);
static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int ixgbe_media_change(struct ifnet *);
@ -142,7 +142,7 @@ static void ixgbe_disable_intr(struct adapter *);
static void ixgbe_update_stats_counters(struct adapter *);
static bool ixgbe_txeof(struct tx_ring *);
static bool ixgbe_rxeof(struct ix_queue *, int);
static void ixgbe_rx_checksum(u32, struct mbuf *);
static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void ixgbe_set_promisc(struct adapter *);
static void ixgbe_disable_promisc(struct adapter *);
static void ixgbe_set_multi(struct adapter *);
@ -827,9 +827,13 @@ ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
}
enqueued = 0;
if (m == NULL)
if (m == NULL) {
next = drbr_dequeue(ifp, txr->br);
else
} else if (drbr_needs_enqueue(ifp, txr->br)) {
if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
return (err);
next = drbr_dequeue(ifp, txr->br);
} else
next = m;
/* Process the queue */
@ -905,7 +909,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_mtu = ifr->ifr_mtu;
adapter->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
ixgbe_init_locked(adapter);
error = ixgbe_init_locked(adapter);
IXGBE_CORE_UNLOCK(adapter);
}
break;
@ -920,7 +924,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ixgbe_set_promisc(adapter);
}
} else
ixgbe_init_locked(adapter);
error = ixgbe_init_locked(adapter);
} else
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixgbe_stop(adapter);
@ -955,8 +959,11 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixgbe_init(adapter);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXGBE_CORE_LOCK(adapter);
error = ixgbe_init_locked(adapter);
IXGBE_CORE_UNLOCK(adapter);
}
VLAN_CAPABILITIES(ifp);
break;
}
@ -982,7 +989,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
**********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
static void
static int
ixgbe_init_locked(struct adapter *adapter)
{
struct ifnet *ifp = adapter->ifp;
@ -990,7 +997,6 @@ ixgbe_init_locked(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 k, txdctl, mhadd, gpie;
u32 rxdctl, rxctrl;
int err;
mtx_assert(&adapter->core_mtx, MA_OWNED);
INIT_DEBUGOUT("ixgbe_init: begin");
@ -1012,7 +1018,7 @@ ixgbe_init_locked(struct adapter *adapter)
if (ixgbe_setup_transmit_structures(adapter)) {
device_printf(dev,"Could not setup transmit structures\n");
ixgbe_stop(adapter);
return;
return (ENOMEM);
}
ixgbe_init_hw(hw);
@ -1034,7 +1040,7 @@ ixgbe_init_locked(struct adapter *adapter)
if (ixgbe_setup_receive_structures(adapter)) {
device_printf(dev,"Could not setup receive structures\n");
ixgbe_stop(adapter);
return;
return (ENOMEM);
}
/* Configure RX settings */
@ -1064,8 +1070,11 @@ ixgbe_init_locked(struct adapter *adapter)
if (ifp->if_capenable & IFCAP_TSO4)
ifp->if_hwassist |= CSUM_TSO;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
if (hw->mac.type == ixgbe_mac_82599EB)
ifp->if_hwassist |= CSUM_SCTP;
#endif
/* Set MTU size */
if (ifp->if_mtu > ETHERMTU) {
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
@ -1130,7 +1139,7 @@ ixgbe_init_locked(struct adapter *adapter)
#ifdef IXGBE_FDIR
/* Init Flow director */
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
if (hw->mac.type == ixgbe_mac_82599EB)
ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
#endif
@ -1138,13 +1147,14 @@ ixgbe_init_locked(struct adapter *adapter)
** Check on any SFP devices that
** need to be kick-started
*/
err = hw->phy.ops.identify(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev,
"Unsupported SFP+ module type was detected.\n");
ixgbe_detach(dev);
return;
}
if (hw->phy.type == ixgbe_phy_none) {
int err = hw->phy.ops.identify(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev,
"Unsupported SFP+ module type was detected.\n");
return (EIO);
}
}
/* Config/Enable Link */
ixgbe_config_link(adapter);
@ -1156,7 +1166,7 @@ ixgbe_init_locked(struct adapter *adapter)
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
return;
return (0);
}
static void
@ -3003,6 +3013,12 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
break;
#if __FreeBSD_version >= 800000
case IPPROTO_SCTP:
if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
#endif
default:
offload = FALSE;
break;
@ -3956,16 +3972,16 @@ ixgbe_rxeof(struct ix_queue *que, int count)
IXGBE_RX_LOCK(rxr);
/* Sync the ring. */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (i = rxr->next_to_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
u32 rsc, ptype;
u16 hlen, plen, hdr, vtag;
bool eop;
/* Sync the ring. */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
cur = &rxr->rx_base[i];
staterr = le32toh(cur->wb.upper.status_error);
@ -4138,7 +4154,7 @@ ixgbe_rxeof(struct ix_queue *que, int count)
rxr->bytes += sendmp->m_pkthdr.len;
rxr->rx_bytes += sendmp->m_pkthdr.len;
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
ixgbe_rx_checksum(staterr, sendmp);
ixgbe_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
sendmp->m_pkthdr.flowid = que->msix;
sendmp->m_flags |= M_FLOWID;
@ -4202,10 +4218,15 @@ next_desc:
*
*********************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
u16 status = (u16) staterr;
u8 errors = (u8) (staterr >> 24);
u16 status = (u16) staterr;
u8 errors = (u8) (staterr >> 24);
bool sctp = FALSE;
if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
(ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
sctp = TRUE;
if (status & IXGBE_RXD_STAT_IPCS) {
if (!(errors & IXGBE_RXD_ERR_IPE)) {
@ -4217,10 +4238,15 @@ ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
mp->m_pkthdr.csum_flags = 0;
}
if (status & IXGBE_RXD_STAT_L4CS) {
u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
if (sctp)
type = CSUM_SCTP_VALID;
#endif
if (!(errors & IXGBE_RXD_ERR_TCPE)) {
mp->m_pkthdr.csum_flags |=
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
mp->m_pkthdr.csum_data = htons(0xffff);
mp->m_pkthdr.csum_flags |= type;
if (!sctp)
mp->m_pkthdr.csum_data = htons(0xffff);
}
}
return;

View File

@ -179,7 +179,13 @@
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
#define CSUM_OFFLOAD 7 /* Bits in csum flags */
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
/* For 6.X code compatibility */
#if !defined(ETHER_BPF_MTAP)

View File

@ -168,8 +168,8 @@ malo_bar0_read4(struct malo_softc *sc, bus_size_t off)
static void
malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val)
{
DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%zx val 0x%x\n",
__func__, off, val);
DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n",
__func__, (intmax_t)off, val);
bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val);
}

View File

@ -1208,14 +1208,18 @@ mpt_cam_detach(struct mpt_softc *mpt)
if (mpt->sim != NULL) {
xpt_free_path(mpt->path);
MPT_LOCK(mpt);
xpt_bus_deregister(cam_sim_path(mpt->sim));
MPT_UNLOCK(mpt);
cam_sim_free(mpt->sim, TRUE);
mpt->sim = NULL;
}
if (mpt->phydisk_sim != NULL) {
xpt_free_path(mpt->phydisk_path);
MPT_LOCK(mpt);
xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
MPT_UNLOCK(mpt);
cam_sim_free(mpt->phydisk_sim, TRUE);
mpt->phydisk_sim = NULL;
}

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2001 Katsurajima Naoto <raven@katsurajima.seya.yokohama.jp>
* All rights reserved.
*

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2001 Katsurajima Naoto <raven@katsurajima.seya.yokohama.jp>
* All rights reserved.
*

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2006 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
* Copyright (c) 2001 Katsurajima Naoto <raven@katsurajima.seya.yokohama.jp>
* All rights reserved.

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2006 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
* Copyright (c) 2001 Katsurajima Naoto <raven@katsurajima.seya.yokohama.jp>
* All rights reserved.

View File

@ -355,7 +355,7 @@ es1370_mixset(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
if (mixtable[dev].left == 0xf)
rl = (l < 2) ? 0x80 : 7 - (l - 2) / 14;
else
rl = (l < 10) ? 0x80 : 15 - (l - 10) / 6;
rl = (l < 7) ? 0x80 : 31 - (l - 7) / 3;
es = mix_getdevinfo(m);
ES_LOCK(es);
if (dev == SOUND_MIXER_PCM && (ES_SINGLE_PCM_MIX(es->escfg)) &&
@ -364,7 +364,7 @@ es1370_mixset(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
else
set_dac1 = 0;
if (mixtable[dev].stereo) {
rr = (r < 10) ? 0x80 : 15 - (r - 10) / 6;
rr = (r < 7) ? 0x80 : 31 - (r - 7) / 3;
es1370_wrcodec(es, mixtable[dev].right, rr);
if (set_dac1 && mixtable[SOUND_MIXER_SYNTH].stereo)
es1370_wrcodec(es,

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2006 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
* Copyright (c) 2001 Katsurajima Naoto <raven@katsurajima.seya.yokohama.jp>
* All rights reserved.

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2006 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
* Copyright (c) 2001 Katsurajima Naoto <raven@katsurajima.seya.yokohama.jp>
* All rights reserved.

View File

@ -741,7 +741,7 @@ sc_vid_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
#ifndef SC_NO_PALETTE_LOADING
#ifdef SC_PIXEL_MODE
if ((adp->va_flags & V_ADP_DAC8) != 0)
if (adp->va_info.vi_mem_model == V_INFO_MM_DIRECT)
vidd_load_palette(adp, scp->sc->palette2);
else
#endif
@ -802,7 +802,7 @@ sc_vid_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
if (scp == scp->sc->cur_scp) {
set_mode(scp);
#ifndef SC_NO_PALETTE_LOADING
if ((adp->va_flags & V_ADP_DAC8) != 0)
if (adp->va_info.vi_mem_model == V_INFO_MM_DIRECT)
vidd_load_palette(adp, scp->sc->palette2);
else
vidd_load_palette(adp, scp->sc->palette);

View File

@ -2131,7 +2131,7 @@ restore_scrn_saver_mode(scr_stat *scp, int changemode)
if (set_mode(scp) == 0) {
#ifndef SC_NO_PALETTE_LOADING
#ifdef SC_PIXEL_MODE
if ((scp->sc->adp->va_flags & V_ADP_DAC8) != 0)
if (scp->sc->adp->va_info.vi_mem_model == V_INFO_MM_DIRECT)
vidd_load_palette(scp->sc->adp, scp->sc->palette2);
else
#endif
@ -2540,7 +2540,7 @@ exchange_scr(sc_softc_t *sc)
#ifndef SC_NO_PALETTE_LOADING
if (ISGRAPHSC(sc->old_scp)) {
#ifdef SC_PIXEL_MODE
if ((sc->adp->va_flags & V_ADP_DAC8) != 0)
if (sc->adp->va_info.vi_mem_model == V_INFO_MM_DIRECT)
vidd_load_palette(sc->adp, sc->palette2);
else
#endif

View File

@ -1886,8 +1886,8 @@ ubsec_dma_malloc(
BUS_DMA_NOWAIT, &dma->dma_map);
if (r != 0) {
device_printf(sc->sc_dev, "ubsec_dma_malloc: "
"bus_dmammem_alloc failed; size %zu, error %u\n",
size, r);
"bus_dmammem_alloc failed; size %ju, error %u\n",
(intmax_t)size, r);
goto fail_2;
}

View File

@ -82,7 +82,8 @@ SYSCTL_STRING(_vfs_newnfs, OID_AUTO, callback_addr, CTLFLAG_RW,
*/
MALLOC_DEFINE(M_NEWNFSRVCACHE, "NFSD srvcache", "NFSD Server Request Cache");
MALLOC_DEFINE(M_NEWNFSDCLIENT, "NFSD V4client", "NFSD V4 Client Id");
MALLOC_DEFINE(M_NEWNFSDSTATE, "NFSD V4state", "NFSD V4 State (Openowner, Open, Lockowner, Delegation");
MALLOC_DEFINE(M_NEWNFSDSTATE, "NFSD V4state",
"NFSD V4 State (Openowner, Open, Lockowner, Delegation");
MALLOC_DEFINE(M_NEWNFSDLOCK, "NFSD V4lock", "NFSD V4 byte range lock");
MALLOC_DEFINE(M_NEWNFSDLOCKFILE, "NFSD lckfile", "NFSD Open/Lock file");
MALLOC_DEFINE(M_NEWNFSSTRING, "NFSD string", "NFSD V4 long string");
@ -97,7 +98,10 @@ MALLOC_DEFINE(M_NEWNFSCLLOCKOWNER, "NFSCL lckown", "NFSCL Lock Owner");
MALLOC_DEFINE(M_NEWNFSCLLOCK, "NFSCL lck", "NFSCL Lock");
MALLOC_DEFINE(M_NEWNFSV4NODE, "NEWNFSnode", "New nfs vnode");
MALLOC_DEFINE(M_NEWNFSDIRECTIO, "NEWdirectio", "New nfs Direct IO buffer");
MALLOC_DEFINE(M_NEWNFSDIROFF, "Newnfscl_diroff", "New NFS directory offset data");
MALLOC_DEFINE(M_NEWNFSDIROFF, "NFSCL diroffdiroff",
"New NFS directory offset data");
MALLOC_DEFINE(M_NEWNFSDROLLBACK, "NFSD rollback",
"New NFS local lock rollback");
/*
* Definition of mutex locks.

View File

@ -1823,6 +1823,19 @@ nfsv4_getref(struct nfsv4lock *lp, int *isleptp, void *mutex)
lp->nfslock_usecnt++;
}
/*
* Test for a lock. Return 1 if locked, 0 otherwise.
*/
APPLESTATIC int
nfsv4_testlock(struct nfsv4lock *lp)
{
if ((lp->nfslock_lock & NFSV4LOCK_LOCK) == 0 &&
lp->nfslock_usecnt == 0)
return (0);
return (1);
}
/*
* Wake up anyone sleeping, waiting for this lock.
*/

View File

@ -251,6 +251,7 @@ int nfsv4_lock(struct nfsv4lock *, int, int *, void *);
void nfsv4_unlock(struct nfsv4lock *, int);
void nfsv4_relref(struct nfsv4lock *);
void nfsv4_getref(struct nfsv4lock *, int *, void *);
int nfsv4_testlock(struct nfsv4lock *);
int nfsrv_mtostr(struct nfsrv_descript *, char *, int);
int nfsrv_checkutf8(u_int8_t *, int);
int newnfs_sndlock(int *);

View File

@ -539,6 +539,7 @@ void nfsrvd_rcv(struct socket *, void *, int);
#define NFSSTATESPINLOCK extern struct mtx nfs_state_mutex
#define NFSLOCKSTATE() mtx_lock(&nfs_state_mutex)
#define NFSUNLOCKSTATE() mtx_unlock(&nfs_state_mutex)
#define NFSSTATEMUTEXPTR (&nfs_state_mutex)
#define NFSREQSPINLOCK extern struct mtx nfs_req_mutex
#define NFSLOCKREQ() mtx_lock(&nfs_req_mutex)
#define NFSUNLOCKREQ() mtx_unlock(&nfs_req_mutex)
@ -674,6 +675,7 @@ MALLOC_DECLARE(M_NEWNFSDIROFF);
MALLOC_DECLARE(M_NEWNFSV4NODE);
MALLOC_DECLARE(M_NEWNFSDIRECTIO);
MALLOC_DECLARE(M_NEWNFSMNT);
MALLOC_DECLARE(M_NEWNFSDROLLBACK);
#define M_NFSRVCACHE M_NEWNFSRVCACHE
#define M_NFSDCLIENT M_NEWNFSDCLIENT
#define M_NFSDSTATE M_NEWNFSDSTATE
@ -692,6 +694,7 @@ MALLOC_DECLARE(M_NEWNFSMNT);
#define M_NFSDIROFF M_NEWNFSDIROFF
#define M_NFSV4NODE M_NEWNFSV4NODE
#define M_NFSDIRECTIO M_NEWNFSDIRECTIO
#define M_NFSDROLLBACK M_NEWNFSDROLLBACK
#define NFSINT_SIGMASK(set) \
(SIGISMEMBER(set, SIGINT) || SIGISMEMBER(set, SIGTERM) || \

View File

@ -184,6 +184,17 @@ struct nfslockconflict {
u_char cl_owner[NFSV4_OPAQUELIMIT];
};
/*
* This structure is used to keep track of local locks that might need
* to be rolled back.
*/
struct nfsrollback {
LIST_ENTRY(nfsrollback) rlck_list;
uint64_t rlck_first;
uint64_t rlck_end;
int rlck_type;
};
/*
* This structure refers to a file for which lock(s) and/or open(s) exist.
* Searched via hash table on file handle or found via the back pointer from an
@ -193,8 +204,12 @@ struct nfslockfile {
LIST_HEAD(, nfsstate) lf_open; /* Open list */
LIST_HEAD(, nfsstate) lf_deleg; /* Delegation list */
LIST_HEAD(, nfslock) lf_lock; /* Lock list */
LIST_HEAD(, nfslock) lf_locallock; /* Local lock list */
LIST_HEAD(, nfsrollback) lf_rollback; /* Local lock rollback list */
LIST_ENTRY(nfslockfile) lf_hash; /* Hash list entry */
fhandle_t lf_fh; /* The file handle */
struct nfsv4lock lf_locallock_lck; /* serialize local locking */
int lf_usecount; /* Ref count for locking */
};
/*

View File

@ -869,6 +869,7 @@ nfsvno_mkdir(struct nameidata *ndp, struct nfsvattr *nvap, uid_t saved_uid,
else
vput(ndp->ni_dvp);
vrele(ndp->ni_vp);
nfsvno_relpathbuf(ndp);
return (EEXIST);
}
error = VOP_MKDIR(ndp->ni_dvp, &ndp->ni_vp, &ndp->ni_cnd,

View File

@ -1086,7 +1086,7 @@ nfsrvd_mknod(struct nfsrv_descript *nd, __unused int isdgram,
case NFFIFO:
break;
case NFDIR:
cnflags = LOCKPARENT;
cnflags = (LOCKPARENT | SAVENAME);
break;
default:
nd->nd_repstat = NFSERR_BADTYPE;
@ -1549,7 +1549,8 @@ nfsrvd_link(struct nfsrv_descript *nd, int isdgram,
NFSVOPUNLOCK(dp, 0, p);
}
}
NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, LOCKPARENT);
NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE,
LOCKPARENT | SAVENAME);
if (!nd->nd_repstat) {
nfsvno_setpathbuf(&named, &bufp, &hashp);
error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen);
@ -1743,7 +1744,8 @@ nfsrvd_mkdir(struct nfsrv_descript *nd, __unused int isdgram,
nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft);
return (0);
}
NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, LOCKPARENT);
NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE,
LOCKPARENT | SAVENAME);
nfsvno_setpathbuf(&named, &bufp, &hashp);
error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen);
if (error) {

File diff suppressed because it is too large Load Diff

View File

@ -179,7 +179,7 @@ g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr
bo = &vp->v_bufobj;
bo->bo_ops = g_vfs_bufops;
bo->bo_private = cp;
bo->bo_bsize = pp->sectorsize;
bo->bo_bsize = DEV_BSIZE;
gp->softc = bo;
return (error);

View File

@ -70,6 +70,7 @@ static int g_multipath_destroy(struct g_geom *);
static int
g_multipath_destroy_geom(struct gctl_req *, struct g_class *, struct g_geom *);
static struct g_geom *g_multipath_find_geom(struct g_class *, const char *);
static int g_multipath_rotate(struct g_geom *);
static g_taste_t g_multipath_taste;
@ -602,14 +603,13 @@ g_multipath_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
}
static void
g_multipath_ctl_create(struct gctl_req *req, struct g_class *mp)
g_multipath_ctl_add(struct gctl_req *req, struct g_class *mp)
{
struct g_geom *gp;
struct g_provider *pp0, *pp1;
struct g_multipath_metadata md;
const char *name, *mpname, *uuid;
struct g_consumer *cp;
struct g_provider *pp, *pp0;
const char *name, *mpname;
static const char devpf[6] = "/dev/";
int *nargs, error;
g_topology_assert();
@ -618,14 +618,9 @@ g_multipath_ctl_create(struct gctl_req *req, struct g_class *mp)
gctl_error(req, "No 'arg0' argument");
return;
}
nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
if (nargs == NULL) {
gctl_error(req, "No 'nargs' argument");
return;
}
if (*nargs != 4) {
gctl_error(req, "missing device or uuid arguments");
gp = g_multipath_find_geom(mp, mpname);
if (gp == NULL) {
gctl_error(req, "Device %s is invalid", mpname);
return;
}
@ -636,78 +631,45 @@ g_multipath_ctl_create(struct gctl_req *req, struct g_class *mp)
}
if (strncmp(name, devpf, 5) == 0)
name += 5;
pp0 = g_provider_by_name(name);
if (pp0 == NULL) {
pp = g_provider_by_name(name);
if (pp == NULL) {
gctl_error(req, "Provider %s is invalid", name);
return;
}
name = gctl_get_asciiparam(req, "arg2");
if (name == NULL) {
gctl_error(req, "No 'arg2' argument");
return;
/*
* Check to make sure parameters match, if we already have one.
*/
cp = LIST_FIRST(&gp->consumer);
if (cp) {
pp0 = cp->provider;
} else {
pp0 = NULL;
}
if (strncmp(name, devpf, 5) == 0)
name += 5;
pp1 = g_provider_by_name(name);
if (pp1 == NULL) {
gctl_error(req, "Provider %s is invalid", name);
return;
}
uuid = gctl_get_asciiparam(req, "arg3");
if (uuid == NULL) {
gctl_error(req, "No uuid argument");
return;
}
if (strlen(uuid) != 36) {
gctl_error(req, "Malformed uuid argument");
return;
if (pp0) {
if (pp0 == pp) {
gctl_error(req, "providers %s and %s are the same",
pp0->name, pp->name);
return;
}
if (pp0->mediasize != pp->mediasize) {
gctl_error(req, "Provider %s is %jd; Provider %s is %jd",
pp0->name, (intmax_t) pp0->mediasize,
pp->name, (intmax_t) pp->mediasize);
return;
}
if (pp0->sectorsize != pp->sectorsize) {
gctl_error(req, "Provider %s has sectorsize %u; Provider %s "
"has sectorsize %u", pp0->name, pp0->sectorsize,
pp->name, pp->sectorsize);
return;
}
}
/*
* Check to make sure parameters from the two providers are the same
* Now add....
*/
if (pp0 == pp1) {
gctl_error(req, "providers %s and %s are the same",
pp0->name, pp1->name);
return;
}
if (pp0->mediasize != pp1->mediasize) {
gctl_error(req, "Provider %s is %jd; Provider %s is %jd",
pp0->name, (intmax_t) pp0->mediasize,
pp1->name, (intmax_t) pp1->mediasize);
return;
}
if (pp0->sectorsize != pp1->sectorsize) {
gctl_error(req, "Provider %s has sectorsize %u; Provider %s "
"has sectorsize %u", pp0->name, pp0->sectorsize,
pp1->name, pp1->sectorsize);
return;
}
/*
* cons up enough of a metadata structure to use.
*/
memset(&md, 0, sizeof(md));
md.md_size = pp0->mediasize;
md.md_sectorsize = pp0->sectorsize;
strlcpy(md.md_name, mpname, sizeof(md.md_name));
strlcpy(md.md_uuid, uuid, sizeof(md.md_uuid));
gp = g_multipath_create(mp, &md);
if (gp == NULL)
return;
error = g_multipath_add_disk(gp, pp0);
if (error) {
g_multipath_destroy(gp);
return;
}
error = g_multipath_add_disk(gp, pp1);
if (error) {
g_multipath_destroy(gp);
return;
}
(void) g_multipath_add_disk(gp, pp);
}
static struct g_geom *
@ -815,8 +777,8 @@ g_multipath_config(struct gctl_req *req, struct g_class *mp, const char *verb)
gctl_error(req, "No 'version' argument");
} else if (*version != G_MULTIPATH_VERSION) {
gctl_error(req, "Userland and kernel parts are out of sync");
} else if (strcmp(verb, "create") == 0) {
g_multipath_ctl_create(req, mp);
} else if (strcmp(verb, "add") == 0) {
g_multipath_ctl_add(req, mp);
} else if (strcmp(verb, "destroy") == 0) {
g_multipath_ctl_destroy(req, mp);
} else if (strcmp(verb, "rotate") == 0) {

View File

@ -110,6 +110,19 @@ IDTVEC(timerint)
MEXITCOUNT
jmp doreti
/*
* Local APIC error interrupt handler.
*/
.text
SUPERALIGN_TEXT
IDTVEC(errorint)
PUSH_FRAME
SET_KERNEL_SREGS
FAKE_MCOUNT(TF_EIP(%esp))
call lapic_handle_error
MEXITCOUNT
jmp doreti
#ifdef SMP
/*
* Global address space TLB shootdown.

View File

@ -116,14 +116,12 @@ struct lapic {
int la_ioint_irqs[APIC_NUM_IOINTS + 1];
} static lapics[MAX_APIC_ID + 1];
/* XXX: should thermal be an NMI? */
/* Global defaults for local APIC LVT entries. */
static struct lvt lvts[LVT_MAX + 1] = {
{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 }, /* LINT0: masked ExtINT */
{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 }, /* LINT1: NMI */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT }, /* Timer */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT }, /* Error */
{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 }, /* PMC */
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT }, /* Thermal */
};
@ -228,7 +226,11 @@ lapic_init(vm_paddr_t addr)
setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
/* XXX: error/thermal interrupts */
/* Local APIC error interrupt. */
setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
/* XXX: Thermal interrupt */
}
/*
@ -281,7 +283,7 @@ lapic_dump(const char* str)
lapic->id, lapic->version, lapic->ldr, lapic->dfr);
printf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x pcm: 0x%08x\n",
printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x pmc: 0x%08x\n",
lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error,
lapic->lvt_pcint);
}
@ -329,7 +331,11 @@ lapic_setup(int boot)
lapic_timer_enable_intr();
}
/* XXX: Error and thermal LVTs */
/* Program error LVT and clear any existing errors. */
lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
lapic->esr = 0;
/* XXX: Thermal LVT */
intr_restore(eflags);
}
@ -725,18 +731,6 @@ lapic_eoi(void)
lapic->eoi = 0;
}
/*
* Read the contents of the error status register. We have to write
* to the register first before reading from it.
*/
u_int
lapic_error(void)
{
lapic->esr = 0;
return (lapic->esr);
}
void
lapic_handle_intr(int vector, struct trapframe *frame)
{
@ -863,6 +857,24 @@ lapic_timer_enable_intr(void)
lapic->lvt_timer = value;
}
void
lapic_handle_error(void)
{
u_int32_t esr;
/*
* Read the contents of the error status register. Write to
* the register first before reading from it to force the APIC
* to update its value to indicate any errors that have
* occurred since the previous write to the register.
*/
lapic->esr = 0;
esr = lapic->esr;
printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
lapic_eoi();
}
u_int
apic_cpuid(u_int apic_id)
{

View File

@ -208,7 +208,8 @@ struct apic_enumerator {
inthand_t
IDTVEC(apic_isr1), IDTVEC(apic_isr2), IDTVEC(apic_isr3),
IDTVEC(apic_isr4), IDTVEC(apic_isr5), IDTVEC(apic_isr6),
IDTVEC(apic_isr7), IDTVEC(spuriousint), IDTVEC(timerint);
IDTVEC(apic_isr7), IDTVEC(errorint), IDTVEC(spuriousint),
IDTVEC(timerint);
extern vm_paddr_t lapic_paddr;
extern int apic_cpuids[];
@ -240,13 +241,13 @@ void lapic_disable_pmc(void);
void lapic_dump(const char *str);
int lapic_enable_pmc(void);
void lapic_eoi(void);
u_int lapic_error(void);
int lapic_id(void);
void lapic_init(vm_paddr_t addr);
int lapic_intr_pending(u_int vector);
void lapic_ipi_raw(register_t icrlo, u_int dest);
void lapic_ipi_vectored(u_int vector, int dest);
int lapic_ipi_wait(int delay);
void lapic_handle_error(void);
void lapic_handle_intr(int vector, struct trapframe *frame);
void lapic_handle_timer(struct trapframe *frame);
void lapic_reenable_pmc(void);

View File

@ -1,7 +1,13 @@
/*-
* Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
* Copyright (c) 2008-2009, Lawrence Stewart <lstewart@freebsd.org>
* Copyright (c) 2009-2010, The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed at the Centre for Advanced
* Internet Architectures, Swinburne University of Technology, Melbourne,
* Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -27,6 +33,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_mac.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@ -95,6 +103,7 @@ static void ald_deactivate(struct alq *);
/* Internal queue functions */
static void alq_shutdown(struct alq *);
static void alq_destroy(struct alq *);
static int alq_doio(struct alq *);
@ -180,8 +189,15 @@ ald_daemon(void)
ALD_LOCK();
for (;;) {
while ((alq = LIST_FIRST(&ald_active)) == NULL)
msleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);
while ((alq = LIST_FIRST(&ald_active)) == NULL &&
!ald_shutingdown)
mtx_sleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);
/* Don't shutdown until all active ALQs are flushed. */
if (ald_shutingdown && alq == NULL) {
ALD_UNLOCK();
break;
}
ALQ_LOCK(alq);
ald_deactivate(alq);
@ -192,6 +208,8 @@ ald_daemon(void)
wakeup(alq);
ALD_LOCK();
}
kproc_exit(0);
}
static void
@ -200,14 +218,29 @@ ald_shutdown(void *arg, int howto)
struct alq *alq;
ALD_LOCK();
/* Ensure no new queues can be created. */
ald_shutingdown = 1;
/* Shutdown all ALQs prior to terminating the ald_daemon. */
while ((alq = LIST_FIRST(&ald_queues)) != NULL) {
LIST_REMOVE(alq, aq_link);
ALD_UNLOCK();
alq_shutdown(alq);
ALD_LOCK();
}
/* At this point, all ALQs are flushed and shutdown. */
/*
* Wake ald_daemon so that it exits. It won't be able to do
* anything until we mtx_sleep because we hold the ald_mtx.
*/
wakeup(&ald_active);
/* Wait for ald_daemon to exit. */
mtx_sleep(ald_proc, &ald_mtx, PWAIT, "aldslp", 0);
ALD_UNLOCK();
}
@ -220,7 +253,7 @@ alq_shutdown(struct alq *alq)
alq->aq_flags |= AQ_SHUTDOWN;
/* Drain IO */
while (alq->aq_flags & (AQ_FLUSHING|AQ_ACTIVE)) {
while (alq->aq_flags & AQ_ACTIVE) {
alq->aq_flags |= AQ_WANTED;
msleep_spin(alq, &alq->aq_mtx, "aldclose", 0);
}
@ -231,6 +264,18 @@ alq_shutdown(struct alq *alq)
crfree(alq->aq_cred);
}
/*
 * Release all resources held by an ALQ: flush any outstanding I/O,
 * then tear down the lock and free the associated memory.
 */
void
alq_destroy(struct alq *alq)
{
	/* Make sure every queued write has reached the vnode first. */
	alq_shutdown(alq);

	mtx_destroy(&alq->aq_mtx);
	free(alq->aq_entbuf, M_ALD);
	free(alq->aq_first, M_ALD);
	free(alq, M_ALD);
}
/*
* Flush all pending data to disk. This operation will block.
*/
@ -388,8 +433,11 @@ alq_open(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
alp->ae_next = alq->aq_first;
if ((error = ald_add(alq)) != 0)
if ((error = ald_add(alq)) != 0) {
alq_destroy(alq);
return (error);
}
*alqp = alq;
return (0);
@ -493,20 +541,57 @@ alq_flush(struct alq *alq)
void
alq_close(struct alq *alq)
{
/*
* If we're already shuting down someone else will flush and close
* the vnode.
*/
if (ald_rem(alq) != 0)
return;
/*
* Drain all pending IO.
*/
alq_shutdown(alq);
mtx_destroy(&alq->aq_mtx);
free(alq->aq_first, M_ALD);
free(alq->aq_entbuf, M_ALD);
free(alq, M_ALD);
/* Only flush and destroy alq if not already shutting down. */
if (ald_rem(alq) == 0)
alq_destroy(alq);
}
/*
 * Module event handler for the alq KLD.  Load and shutdown need no
 * work; unloading is only permitted once every queue has been closed.
 */
static int
alq_load_handler(module_t mod, int what, void *arg)
{
	int error = 0;

	switch (what) {
	case MOD_LOAD:
	case MOD_SHUTDOWN:
		/* Nothing to do. */
		break;

	case MOD_QUIESCE:
		ALD_LOCK();
		if (LIST_FIRST(&ald_queues) != NULL) {
			/* Refuse to quiesce while queues remain open. */
			ALD_UNLOCK();
			error = EBUSY;
			break;
		}
		ald_shutingdown = 1;
		ALD_UNLOCK();
		ald_shutdown(NULL, 0);
		mtx_destroy(&ald_mtx);
		break;

	case MOD_UNLOAD:
		/* MOD_QUIESCE must have succeeded for unload to proceed. */
		if (ald_shutingdown == 0)
			error = EBUSY;
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}
static moduledata_t alq_mod =
{
"alq",
alq_load_handler,
NULL
};
DECLARE_MODULE(alq, alq_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_VERSION(alq, 1);

View File

@ -1218,7 +1218,7 @@ static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
int waitok)
{
struct klist *list, *tmp_knhash;
struct klist *list, *tmp_knhash, *to_free;
u_long tmp_knhashmask;
int size;
int fd;
@ -1226,6 +1226,7 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
KQ_NOTOWNED(kq);
to_free = NULL;
if (fops->f_isfd) {
fd = ident;
if (kq->kq_knlistsize <= fd) {
@ -1237,13 +1238,13 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
return ENOMEM;
KQ_LOCK(kq);
if (kq->kq_knlistsize > fd) {
free(list, M_KQUEUE);
to_free = list;
list = NULL;
} else {
if (kq->kq_knlist != NULL) {
bcopy(kq->kq_knlist, list,
kq->kq_knlistsize * sizeof(*list));
free(kq->kq_knlist, M_KQUEUE);
to_free = kq->kq_knlist;
kq->kq_knlist = NULL;
}
bzero((caddr_t)list +
@ -1265,11 +1266,12 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
kq->kq_knhash = tmp_knhash;
kq->kq_knhashmask = tmp_knhashmask;
} else {
free(tmp_knhash, M_KQUEUE);
to_free = tmp_knhash;
}
KQ_UNLOCK(kq);
}
}
free(to_free, M_KQUEUE);
KQ_NOTOWNED(kq);
return 0;

View File

@ -15,7 +15,7 @@ files "../atheros/files.ar71xx"
hints "AR71XX.hints"
makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
makeoptions MODULES_OVERRIDE=""
#makeoptions MODULES_OVERRIDE=""
options DDB
options KDB

View File

@ -20,6 +20,7 @@ SUBDIR= ${_3dfx} \
aio \
alc \
ale \
alq \
${_amd} \
${_amdsbwd} \
${_amdtemp} \
@ -34,15 +35,15 @@ SUBDIR= ${_3dfx} \
ata \
ath \
${_auxio} \
bce \
${_bce} \
bfe \
bge \
${_bios} \
${_bktr} \
${_bm} \
bridgestp \
bwi \
bwn \
${_bwi} \
${_bwn} \
cam \
${_canbepm} \
${_canbus} \
@ -66,7 +67,7 @@ SUBDIR= ${_3dfx} \
${_cryptodev} \
${_cs} \
${_ctau} \
cxgb \
${_cxgb} \
${_cyclic} \
dc \
dcons \
@ -175,13 +176,13 @@ SUBDIR= ${_3dfx} \
mcd \
md \
mem \
mfi \
${_mfi} \
mii \
mlx \
${_mly} \
mmc \
mmcsd \
mpt \
${_mpt} \
mqueue \
msdosfs \
msdosfs_iconv \
@ -251,7 +252,7 @@ SUBDIR= ${_3dfx} \
sdhci \
sem \
sf \
siba_bwn \
${_siba_bwn} \
siis \
sis \
sk \
@ -268,7 +269,7 @@ SUBDIR= ${_3dfx} \
stge \
${_streams} \
${_svr4} \
sym \
${_sym} \
${_syscons} \
sysvipc \
ti \
@ -279,7 +280,7 @@ SUBDIR= ${_3dfx} \
twe \
tx \
txp \
uart \
${_uart} \
ubsec \
udf \
udf_iconv \
@ -312,11 +313,28 @@ SUBDIR= ${_3dfx} \
${_zfs} \
zlib \
.if ${MACHINE_ARCH} != "powerpc"
.if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "arm" && \
${MACHINE_ARCH} != "mips"
_syscons= syscons
_vpo= vpo
.endif
.if ${MACHINE_ARCH} != "arm" && ${MACHINE_ARCH} != "mips"
# no BUS_SPACE_UNSPECIFIED
_bce= bce
_bwi= bwi
_bwn= bwn
_mfi= mfi
_mpt= mpt
_siba_bwn= siba_bwn
# No barrier instruction support (specific to this driver)
_sym= sym
# no uart_cpu_$MACHINE_ARCH
_uart= uart
# intr_disable() is a macro, causes problems
_cxgb= cxgb
.endif
.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
.if exists(${.CURDIR}/../opencrypto)
_crypto= crypto

9
sys/modules/alq/Makefile Normal file
View File

@ -0,0 +1,9 @@
# $FreeBSD$
.include <bsd.own.mk>
.PATH: ${.CURDIR}/../../kern
KMOD= alq
SRCS= opt_mac.h vnode_if.h kern_alq.c
.include <bsd.kmod.mk>

View File

@ -2,15 +2,19 @@
.PATH: ${.CURDIR}/../../dev/e1000
KMOD = if_em
SRCS = device_if.h bus_if.h pci_if.h opt_inet.h
SRCS += if_em.c $(SHARED_SRCS)
SHARED_SRCS = e1000_api.c e1000_phy.c e1000_nvm.c e1000_mac.c e1000_manage.c
SHARED_SRCS += e1000_80003es2lan.c e1000_82542.c e1000_82541.c e1000_82543.c
SHARED_SRCS += e1000_82540.c e1000_ich8lan.c e1000_82571.c e1000_osdep.c
SHARED_SRCS += e1000_82575.c
SRCS += $(CORE_SRC) $(LEGACY_SRC)
SRCS += $(COMMON_SHARED) $(LEGACY_SHARED) $(PCIE_SHARED)
CORE_SRC = if_em.c e1000_osdep.c
# This is the Legacy, pre-PCIE source, it can be
# undefined when using modular driver if not needed
LEGACY_SRC += if_lem.c
COMMON_SHARED = e1000_api.c e1000_phy.c e1000_nvm.c e1000_mac.c e1000_manage.c
PCIE_SHARED = e1000_80003es2lan.c e1000_ich8lan.c e1000_82571.c e1000_82575.c
LEGACY_SHARED = e1000_82540.c e1000_82542.c e1000_82541.c e1000_82543.c
CFLAGS+= -I${.CURDIR}/../../dev/e1000
CFLAGS += -I${.CURDIR}/../../dev/e1000
# DEVICE_POLLING gives you Legacy interrupt handling
# DEVICE_POLLING for a non-interrupt-driven method
#CFLAGS += -DDEVICE_POLLING
clean:

View File

@ -1867,13 +1867,14 @@ bpf_freed(struct bpf_d *d)
* free.
*/
bpf_free(d);
if (d->bd_rfilter) {
if (d->bd_rfilter != NULL) {
free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
bpf_destroy_jit_filter(d->bd_bfilter);
if (d->bd_bfilter != NULL)
bpf_destroy_jit_filter(d->bd_bfilter);
#endif
}
if (d->bd_wfilter)
if (d->bd_wfilter != NULL)
free((caddr_t)d->bd_wfilter, M_BPF);
mtx_destroy(&d->bd_mtx);
}

View File

@ -734,7 +734,8 @@ ieee80211_ssid_mismatch(struct ieee80211vap *vap, const char *tag,
* Return the bssid of a frame.
*/
static const uint8_t *
ieee80211_getbssid(struct ieee80211vap *vap, const struct ieee80211_frame *wh)
ieee80211_getbssid(const struct ieee80211vap *vap,
const struct ieee80211_frame *wh)
{
if (vap->iv_opmode == IEEE80211_M_STA)
return wh->i_addr2;
@ -748,7 +749,7 @@ ieee80211_getbssid(struct ieee80211vap *vap, const struct ieee80211_frame *wh)
#include <machine/stdarg.h>
void
ieee80211_note(struct ieee80211vap *vap, const char *fmt, ...)
ieee80211_note(const struct ieee80211vap *vap, const char *fmt, ...)
{
char buf[128]; /* XXX */
va_list ap;
@ -761,7 +762,7 @@ ieee80211_note(struct ieee80211vap *vap, const char *fmt, ...)
}
void
ieee80211_note_frame(struct ieee80211vap *vap,
ieee80211_note_frame(const struct ieee80211vap *vap,
const struct ieee80211_frame *wh,
const char *fmt, ...)
{
@ -776,7 +777,7 @@ ieee80211_note_frame(struct ieee80211vap *vap,
}
void
ieee80211_note_mac(struct ieee80211vap *vap,
ieee80211_note_mac(const struct ieee80211vap *vap,
const uint8_t mac[IEEE80211_ADDR_LEN],
const char *fmt, ...)
{
@ -790,7 +791,7 @@ ieee80211_note_mac(struct ieee80211vap *vap,
}
void
ieee80211_discard_frame(struct ieee80211vap *vap,
ieee80211_discard_frame(const struct ieee80211vap *vap,
const struct ieee80211_frame *wh,
const char *type, const char *fmt, ...)
{
@ -811,7 +812,7 @@ ieee80211_discard_frame(struct ieee80211vap *vap,
}
void
ieee80211_discard_ie(struct ieee80211vap *vap,
ieee80211_discard_ie(const struct ieee80211vap *vap,
const struct ieee80211_frame *wh,
const char *type, const char *fmt, ...)
{
@ -830,7 +831,7 @@ ieee80211_discard_ie(struct ieee80211vap *vap,
}
void
ieee80211_discard_mac(struct ieee80211vap *vap,
ieee80211_discard_mac(const struct ieee80211vap *vap,
const uint8_t mac[IEEE80211_ADDR_LEN],
const char *type, const char *fmt, ...)
{

View File

@ -852,10 +852,10 @@ ieee80211_htchanflags(const struct ieee80211_channel *c)
if (ieee80211_msg(_vap, _m)) \
ieee80211_note_frame(_vap, _wh, _fmt, __VA_ARGS__); \
} while (0)
void ieee80211_note(struct ieee80211vap *, const char *, ...);
void ieee80211_note_mac(struct ieee80211vap *,
void ieee80211_note(const struct ieee80211vap *, const char *, ...);
void ieee80211_note_mac(const struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN], const char *, ...);
void ieee80211_note_frame(struct ieee80211vap *,
void ieee80211_note_frame(const struct ieee80211vap *,
const struct ieee80211_frame *, const char *, ...);
#define ieee80211_msg_debug(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_DEBUG)
@ -893,11 +893,11 @@ void ieee80211_note_frame(struct ieee80211vap *,
ieee80211_discard_mac(_vap, _mac, _type, _fmt, __VA_ARGS__);\
} while (0)
void ieee80211_discard_frame(struct ieee80211vap *,
void ieee80211_discard_frame(const struct ieee80211vap *,
const struct ieee80211_frame *, const char *type, const char *fmt, ...);
void ieee80211_discard_ie(struct ieee80211vap *,
void ieee80211_discard_ie(const struct ieee80211vap *,
const struct ieee80211_frame *, const char *type, const char *fmt, ...);
void ieee80211_discard_mac(struct ieee80211vap *,
void ieee80211_discard_mac(const struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN], const char *type,
const char *fmt, ...);
#else

View File

@ -36,6 +36,7 @@
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/syslog.h>
@ -505,8 +506,8 @@ ng_deflate_compress(node_p node, struct mbuf *m, struct mbuf **resultp)
priv->stats.OutOctets+=inlen;
} else {
/* Install header. */
((u_int16_t *)priv->outbuf)[0] = htons(PROT_COMPD);
((u_int16_t *)priv->outbuf)[1] = htons(priv->seqnum);
be16enc(priv->outbuf, PROT_COMPD);
be16enc(priv->outbuf + 2, priv->seqnum);
/* Return packet in an mbuf. */
m_copyback(m, 0, outlen, (caddr_t)priv->outbuf);
@ -568,7 +569,7 @@ ng_deflate_decompress(node_p node, struct mbuf *m, struct mbuf **resultp)
proto = priv->inbuf[0];
offset = 1;
} else {
proto = ntohs(((uint16_t *)priv->inbuf)[0]);
proto = be16dec(priv->inbuf);
offset = 2;
}
@ -579,7 +580,7 @@ ng_deflate_decompress(node_p node, struct mbuf *m, struct mbuf **resultp)
priv->stats.FramesComp++;
/* Check sequence number. */
rseqnum = ntohs(((uint16_t *)(priv->inbuf + offset))[0]);
rseqnum = be16dec(priv->inbuf + offset);
offset += 2;
if (rseqnum != priv->seqnum) {
priv->stats.Errors++;

View File

@ -902,12 +902,24 @@ ng_ksocket_rcvdata(hook_p hook, item_p item)
struct sockaddr *sa = NULL;
int error;
struct mbuf *m;
#ifdef ALIGNED_POINTER
struct mbuf *n;
#endif /* ALIGNED_POINTER */
struct sa_tag *stag;
/* Extract data */
NGI_GET_M(item, m);
NG_FREE_ITEM(item);
#ifdef ALIGNED_POINTER
if (!ALIGNED_POINTER(mtod(m, caddr_t), uint32_t)) {
n = m_defrag(m, M_NOWAIT);
if (n == NULL) {
m_freem(m);
return (ENOBUFS);
}
m = n;
}
#endif /* ALIGNED_POINTER */
/*
* Look if socket address is stored in packet tags.
* If sockaddr is ours, or provided by a third party (zero id),

View File

@ -790,7 +790,7 @@ ng_l2tp_rcvdata_lower(hook_p h, item_p item)
NG_FREE_ITEM(item);
ERROUT(EINVAL);
}
hdr = ntohs(*mtod(m, u_int16_t *));
hdr = (mtod(m, uint8_t *)[0] << 8) + mtod(m, uint8_t *)[1];
m_adj(m, 2);
/* Check required header bits and minimum length */
@ -819,7 +819,7 @@ ng_l2tp_rcvdata_lower(hook_p h, item_p item)
NG_FREE_ITEM(item);
ERROUT(EINVAL);
}
len = (u_int16_t)ntohs(*mtod(m, u_int16_t *)) - 4;
len = (mtod(m, uint8_t *)[0] << 8) + mtod(m, uint8_t *)[1] - 4;
m_adj(m, 2);
if (len < 0 || len > m->m_pkthdr.len) {
priv->stats.recvInvalid++;
@ -1095,9 +1095,10 @@ ng_l2tp_rcvdata(hook_p hook, item_p item)
const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
const hookpriv_p hpriv = NG_HOOK_PRIVATE(hook);
struct mbuf *m;
uint8_t *p;
u_int16_t hdr;
int error;
int i = 1;
int i = 2;
/* Sanity check */
L2TP_SEQ_CHECK(&priv->seq);
@ -1129,20 +1130,27 @@ ng_l2tp_rcvdata(hook_p hook, item_p item)
NG_FREE_ITEM(item);
ERROUT(ENOBUFS);
}
p = mtod(m, uint8_t *);
hdr = L2TP_DATA_HDR;
if (hpriv->conf.include_length) {
hdr |= L2TP_HDR_LEN;
mtod(m, u_int16_t *)[i++] = htons(m->m_pkthdr.len);
p[i++] = m->m_pkthdr.len >> 8;
p[i++] = m->m_pkthdr.len & 0xff;
}
mtod(m, u_int16_t *)[i++] = htons(priv->conf.peer_id);
mtod(m, u_int16_t *)[i++] = htons(hpriv->conf.peer_id);
p[i++] = priv->conf.peer_id >> 8;
p[i++] = priv->conf.peer_id & 0xff;
p[i++] = hpriv->conf.peer_id >> 8;
p[i++] = hpriv->conf.peer_id & 0xff;
if (hpriv->conf.enable_dseq) {
hdr |= L2TP_HDR_SEQ;
mtod(m, u_int16_t *)[i++] = htons(hpriv->ns);
mtod(m, u_int16_t *)[i++] = htons(hpriv->nr);
p[i++] = hpriv->ns >> 8;
p[i++] = hpriv->ns & 0xff;
p[i++] = hpriv->nr >> 8;
p[i++] = hpriv->nr & 0xff;
hpriv->ns++;
}
mtod(m, u_int16_t *)[0] = htons(hdr);
p[0] = hdr >> 8;
p[1] = hdr & 0xff;
/* Update per session stats. */
hpriv->stats.xmitPackets++;
@ -1496,6 +1504,7 @@ static int
ng_l2tp_xmit_ctrl(priv_p priv, struct mbuf *m, u_int16_t ns)
{
struct l2tp_seq *const seq = &priv->seq;
uint8_t *p;
u_int16_t session_id = 0;
int error;
@ -1540,12 +1549,19 @@ ng_l2tp_xmit_ctrl(priv_p priv, struct mbuf *m, u_int16_t ns)
}
/* Fill in L2TP header */
mtod(m, u_int16_t *)[0] = htons(L2TP_CTRL_HDR);
mtod(m, u_int16_t *)[1] = htons(m->m_pkthdr.len);
mtod(m, u_int16_t *)[2] = htons(priv->conf.peer_id);
mtod(m, u_int16_t *)[3] = htons(session_id);
mtod(m, u_int16_t *)[4] = htons(ns);
mtod(m, u_int16_t *)[5] = htons(seq->nr);
p = mtod(m, u_int8_t *);
p[0] = L2TP_CTRL_HDR >> 8;
p[1] = L2TP_CTRL_HDR & 0xff;
p[2] = m->m_pkthdr.len >> 8;
p[3] = m->m_pkthdr.len & 0xff;
p[4] = priv->conf.peer_id >> 8;
p[5] = priv->conf.peer_id & 0xff;
p[6] = session_id >> 8;
p[7] = session_id & 0xff;
p[8] = ns >> 8;
p[9] = ns & 0xff;
p[10] = seq->nr >> 8;
p[11] = seq->nr & 0xff;
/* Update sequence number info and stats */
priv->stats.xmitPackets++;

View File

@ -53,6 +53,7 @@
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/syslog.h>
@ -601,7 +602,7 @@ err1:
/* Install header */
M_PREPEND(m, MPPC_HDRLEN, M_DONTWAIT);
if (m != NULL)
*(mtod(m, uint16_t *)) = htons(header);
be16enc(mtod(m, void *), header);
*datap = m;
return (*datap == NULL ? ENOBUFS : 0);
@ -630,8 +631,7 @@ ng_mppc_decompress(node_p node, struct mbuf **datap)
m_freem(m);
return (EINVAL);
}
m_copydata(m, 0, MPPC_HDRLEN, (caddr_t)&header);
header = ntohs(header);
header = be16dec(mtod(m, void *));
cc = (header & MPPC_CCOUNT_MASK);
m_adj(m, MPPC_HDRLEN);

View File

@ -97,6 +97,7 @@
#include <sys/time.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/ctype.h>
@ -860,8 +861,8 @@ ng_ppp_rcvdata_bypass(hook_p hook, item_p item)
NG_FREE_ITEM(item);
return (ENOBUFS);
}
linkNum = ntohs(mtod(m, uint16_t *)[0]);
proto = ntohs(mtod(m, uint16_t *)[1]);
linkNum = be16dec(mtod(m, uint8_t *));
proto = be16dec(mtod(m, uint8_t *) + 2);
m_adj(m, 4);
NGI_M(item) = m;
@ -907,7 +908,21 @@ ng_ppp_proto_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum)
const priv_p priv = NG_NODE_PRIVATE(node);
hook_p outHook = NULL;
int error;
#ifdef ALIGNED_POINTER
struct mbuf *m, *n;
NGI_GET_M(item, m);
if (!ALIGNED_POINTER(mtod(m, caddr_t), uint32_t)) {
n = m_defrag(m, M_NOWAIT);
if (n == NULL) {
m_freem(m);
NG_FREE_ITEM(item);
return (ENOBUFS);
}
m = n;
}
NGI_M(item) = m;
#endif /* ALIGNED_POINTER */
switch (proto) {
case PROT_IP:
if (priv->conf.enableIP)
@ -1530,7 +1545,7 @@ ng_ppp_mp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum)
if (m->m_len < 2 && (m = m_pullup(m, 2)) == NULL)
ERROUT(ENOBUFS);
shdr = ntohs(*mtod(m, uint16_t *));
shdr = be16dec(mtod(m, void *));
frag->seq = MP_SHORT_EXTEND(shdr);
frag->first = (shdr & MP_SHORT_FIRST_FLAG) != 0;
frag->last = (shdr & MP_SHORT_LAST_FLAG) != 0;
@ -1547,7 +1562,7 @@ ng_ppp_mp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum)
if (m->m_len < 4 && (m = m_pullup(m, 4)) == NULL)
ERROUT(ENOBUFS);
lhdr = ntohl(*mtod(m, uint32_t *));
lhdr = be32dec(mtod(m, void *));
frag->seq = MP_LONG_EXTEND(lhdr);
frag->first = (lhdr & MP_LONG_FIRST_FLAG) != 0;
frag->last = (lhdr & MP_LONG_LAST_FLAG) != 0;

View File

@ -62,6 +62,7 @@
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <netinet/in.h>
@ -572,9 +573,9 @@ ng_pptpgre_xmit(hpriv_p hpriv, item_p item)
}
/* Build GRE header */
((u_int32_t *)gre)[0] = htonl(PPTP_INIT_VALUE);
gre->length = (m != NULL) ? htons((u_short)m->m_pkthdr.len) : 0;
gre->cid = htons(hpriv->conf.peerCid);
be32enc(gre, PPTP_INIT_VALUE);
be16enc(&gre->length, (m != NULL) ? m->m_pkthdr.len : 0);
be16enc(&gre->cid, hpriv->conf.peerCid);
/* Include sequence number if packet contains any data */
if (m != NULL) {
@ -584,13 +585,13 @@ ng_pptpgre_xmit(hpriv_p hpriv, item_p item)
= ng_pptpgre_time();
}
hpriv->xmitSeq++;
gre->data[0] = htonl(hpriv->xmitSeq);
be32enc(&gre->data[0], hpriv->xmitSeq);
}
/* Include acknowledgement (and stop send ack timer) if needed */
if (hpriv->conf.enableAlwaysAck || hpriv->xmitAck != hpriv->recvSeq) {
gre->hasAck = 1;
gre->data[gre->hasSeq] = htonl(hpriv->recvSeq);
be32enc(&gre->data[gre->hasSeq], hpriv->recvSeq);
hpriv->xmitAck = hpriv->recvSeq;
if (hpriv->conf.enableDelayedAck)
ng_uncallout(&hpriv->sackTimer, hpriv->node);
@ -705,18 +706,17 @@ ng_pptpgre_rcvdata_lower(hook_p hook, item_p item)
/* Sanity check packet length and GRE header bits */
extralen = m->m_pkthdr.len
- (iphlen + grelen + gre->hasSeq * (u_int16_t)ntohs(gre->length));
- (iphlen + grelen + gre->hasSeq * be16dec(&gre->length));
if (extralen < 0) {
priv->stats.recvBadGRE++;
ERROUT(EINVAL);
}
if ((ntohl(*((const u_int32_t *)gre)) & PPTP_INIT_MASK)
!= PPTP_INIT_VALUE) {
if ((be32dec(gre) & PPTP_INIT_MASK) != PPTP_INIT_VALUE) {
priv->stats.recvBadGRE++;
ERROUT(EINVAL);
}
hpriv = ng_pptpgre_find_session(priv, ntohs(gre->cid));
hpriv = ng_pptpgre_find_session(priv, be16dec(&gre->cid));
if (hpriv == NULL || hpriv->hook == NULL || !hpriv->conf.enabled) {
priv->stats.recvBadCID++;
ERROUT(EINVAL);
@ -725,7 +725,7 @@ ng_pptpgre_rcvdata_lower(hook_p hook, item_p item)
/* Look for peer ack */
if (gre->hasAck) {
const u_int32_t ack = ntohl(gre->data[gre->hasSeq]);
const u_int32_t ack = be32dec(&gre->data[gre->hasSeq]);
const int index = ack - hpriv->recvAck - 1;
long sample;
long diff;
@ -776,7 +776,7 @@ badAck:
/* See if frame contains any data */
if (gre->hasSeq) {
const u_int32_t seq = ntohl(gre->data[0]);
const u_int32_t seq = be32dec(&gre->data[0]);
/* Sanity check sequence number */
if (PPTP_SEQ_DIFF(seq, hpriv->recvSeq) <= 0) {

View File

@ -47,6 +47,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@ -410,9 +411,9 @@ correct_mss(struct tcphdr *tc, int hlen, uint16_t maxmss, int flags)
{
int olen, optlen;
u_char *opt;
uint16_t *mss;
int accumulate;
int res = 0;
uint16_t sum;
for (olen = hlen - sizeof(struct tcphdr), opt = (u_char *)(tc + 1);
olen > 0; olen -= optlen, opt += optlen) {
@ -427,13 +428,15 @@ correct_mss(struct tcphdr *tc, int hlen, uint16_t maxmss, int flags)
if (*opt == TCPOPT_MAXSEG) {
if (optlen != TCPOLEN_MAXSEG)
continue;
mss = (uint16_t *)(opt + 2);
if (ntohs(*mss) > maxmss) {
accumulate = *mss;
*mss = htons(maxmss);
accumulate -= *mss;
if ((flags & CSUM_TCP) == 0)
TCPMSS_ADJUST_CHECKSUM(accumulate, tc->th_sum);
accumulate = be16dec(opt + 2);
if (accumulate > maxmss) {
if ((flags & CSUM_TCP) == 0) {
accumulate -= maxmss;
sum = be16dec(&tc->th_sum);
TCPMSS_ADJUST_CHECKSUM(accumulate, sum);
be16enc(&tc->th_sum, sum);
}
be16enc(opt + 2, maxmss);
res = 1;
}
}

View File

@ -236,6 +236,7 @@ __END_DECLS
#define IPPROTO_GMTP 100 /* GMTP*/
#define IPPROTO_IPCOMP 108 /* payload compression (IPComp) */
#define IPPROTO_SCTP 132 /* SCTP */
#define IPPROTO_MH 135 /* IPv6 Mobility Header */
/* 101-254: Partly Unassigned */
#define IPPROTO_PIM 103 /* Protocol Independent Mcast */
#define IPPROTO_CARP 112 /* CARP */

View File

@ -731,7 +731,6 @@ dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
goto dropit;
}
if (fs->sched->fp->enqueue(si, q, m)) {
printf("%s dropped by enqueue\n", __FUNCTION__);
/* packet was dropped by enqueue() */
m = *m0 = NULL;
goto dropit;

View File

@ -239,12 +239,12 @@ ipfw_reap_rules(struct ip_fw *head)
* The argument is an u_int32_t. The low 16 bit are the rule or set number,
* the next 8 bits are the new set, the top 8 bits are the command:
*
* 0 delete rules with given number
* 1 delete rules with given set number
* 2 move rules with given number to new set
* 3 move rules with given set number to new set
* 4 swap sets with given numbers
* 5 delete rules with given number and with given set number
* 0 delete rules numbered "rulenum"
* 1 delete rules in set "rulenum"
* 2 move rules "rulenum" to set "new_set"
* 3 move rules from set "rulenum" to set "new_set"
* 4 swap sets "rulenum" and "new_set"
* 5 delete rules "rulenum" and set "new_set"
*/
static int
del_entry(struct ip_fw_chain *chain, u_int32_t arg)
@ -270,23 +270,24 @@ del_entry(struct ip_fw_chain *chain, u_int32_t arg)
return EINVAL;
}
IPFW_UH_WLOCK(chain); /* prevent conflicts among the writers */
IPFW_UH_WLOCK(chain); /* arbitrate writers */
chain->reap = NULL; /* prepare for deletions */
switch (cmd) {
case 0: /* delete rules with given number (0 is special means all) */
case 1: /* delete all rules with given set number, rule->set == rulenum */
case 5: /* delete rules with given number and with given set number.
* rulenum - given rule number;
* new_set - given set number.
*/
/* locate first rule to delete (start), the one after the
* last one (end), and count how many rules to delete (n)
case 0: /* delete rules "rulenum" (rulenum == 0 matches all) */
case 1: /* delete all rules in set N */
case 5: /* delete rules with number N and set "new_set". */
/*
* Locate first rule to delete (start), the rule after
* the last one to delete (end), and count how many
* rules to delete (n)
*/
n = 0;
if (cmd == 1) { /* look for a specific set, must scan all */
new_set = rulenum;
for (start = -1, i = 0; i < chain->n_rules; i++) {
if (chain->map[start]->set != rulenum)
if (chain->map[i]->set != new_set)
continue;
if (start < 0)
start = i;
@ -314,32 +315,47 @@ del_entry(struct ip_fw_chain *chain, u_int32_t arg)
error = EINVAL;
break;
}
/* copy the initial part of the map */
/*
* bcopy the initial part of the map, then individually
* copy all matching entries between start and end,
* and then bcopy the final part.
* Once we are done we can swap maps and clean up the
* deleted rules (unfortunately we need to repeat a
* convoluted test). Rules to keep are
* (set == RESVD_SET || !match_set || !match_rule)
* where
* match_set ::= (cmd == 0 || rule->set == new_set)
* match_rule ::= (cmd == 1 || rule->rulenum == rulenum)
*/
if (start > 0)
bcopy(chain->map, map, start * sizeof(struct ip_fw *));
/* copy active rules between start and end */
for (i = ofs = start; i < end; i++) {
rule = chain->map[i];
if (!(rule->set != RESVD_SET &&
(cmd == 0 || rule->set == new_set) ))
if (rule->set == RESVD_SET ||
!(cmd == 0 || rule->set == new_set) ||
!(cmd == 1 || rule->rulenum == rulenum) ) {
map[ofs++] = chain->map[i];
}
}
/* finally the tail */
bcopy(chain->map + end, map + ofs,
(chain->n_rules - end) * sizeof(struct ip_fw *));
map = swap_map(chain, map, chain->n_rules - n);
/* now remove the rules deleted */
for (i = start; i < end; i++) {
int l;
rule = map[i];
if (rule->set != RESVD_SET &&
(cmd == 0 || rule->set == new_set) ) {
int l = RULESIZE(rule);
/* same test as above */
if (rule->set == RESVD_SET ||
!(cmd == 0 || rule->set == new_set) ||
!(cmd == 1 || rule->rulenum == rulenum) )
continue;
chain->static_len -= l;
ipfw_remove_dyn_children(rule);
rule->x_next = chain->reap;
chain->reap = rule;
}
l = RULESIZE(rule);
chain->static_len -= l;
ipfw_remove_dyn_children(rule);
rule->x_next = chain->reap;
chain->reap = rule;
}
break;
@ -446,7 +462,7 @@ zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
break;
}
if (!cleared) { /* we did not find any matching rules */
IPFW_WUNLOCK(chain);
IPFW_UH_RUNLOCK(chain);
return (EINVAL);
}
msg = log_only ? "logging count reset" : "cleared";

View File

@ -208,7 +208,7 @@ fhc_attach(device_t dev)
printf("model unknown\n");
for (i = FHC_FANFAIL; i <= FHC_TOD; i++) {
bus_write_4(sc->sc_memres[i], FHC_ICLR, 0x0);
bus_write_4(sc->sc_memres[i], FHC_ICLR, INTCLR_IDLE);
(void)bus_read_4(sc->sc_memres[i], FHC_ICLR);
}
@ -391,7 +391,7 @@ fhc_intr_clear(void *arg)
struct intr_vector *iv = arg;
struct fhc_icarg *fica = iv->iv_icarg;
bus_write_4(fica->fica_memres, FHC_ICLR, 0x0);
bus_write_4(fica->fica_memres, FHC_ICLR, INTCLR_IDLE);
(void)bus_read_4(fica->fica_memres, FHC_ICLR);
}

View File

@ -223,8 +223,7 @@ apb_alloc_resource(device_t dev, device_t child, int type, int *rid,
*/
if (start == 0 && end == ~0) {
device_printf(dev, "can't decode default resource id %d for "
"%s%d, bypassing\n", *rid, device_get_name(child),
device_get_unit(child));
"%s, bypassing\n", *rid, device_get_nameunit(child));
goto passup;
}
@ -236,31 +235,28 @@ apb_alloc_resource(device_t dev, device_t child, int type, int *rid,
switch (type) {
case SYS_RES_IOPORT:
if (!apb_checkrange(sc->sc_iomap, APB_IO_SCALE, start, end)) {
device_printf(dev, "device %s%d requested unsupported "
"I/O range 0x%lx-0x%lx\n", device_get_name(child),
device_get_unit(child), start, end);
device_printf(dev, "device %s requested unsupported "
"I/O range 0x%lx-0x%lx\n",
device_get_nameunit(child), start, end);
return (NULL);
}
if (bootverbose)
device_printf(sc->sc_bsc.ops_pcib_sc.dev, "device "
"%s%d requested decoded I/O range 0x%lx-0x%lx\n",
device_get_name(child), device_get_unit(child),
start, end);
"%s requested decoded I/O range 0x%lx-0x%lx\n",
device_get_nameunit(child), start, end);
break;
case SYS_RES_MEMORY:
if (!apb_checkrange(sc->sc_memmap, APB_MEM_SCALE, start, end)) {
device_printf(dev, "device %s%d requested unsupported "
device_printf(dev, "device %s requested unsupported "
"memory range 0x%lx-0x%lx\n",
device_get_name(child), device_get_unit(child),
start, end);
device_get_nameunit(child), start, end);
return (NULL);
}
if (bootverbose)
device_printf(sc->sc_bsc.ops_pcib_sc.dev, "device "
"%s%d requested decoded memory range 0x%lx-0x%lx\n",
device_get_name(child), device_get_unit(child),
start, end);
"%s requested decoded memory range 0x%lx-0x%lx\n",
device_get_nameunit(child), start, end);
break;
default:

View File

@ -188,13 +188,13 @@ struct psycho_dma_sync {
uint8_t pds_func; /* func. of farest PCI dev. */
};
#define PSYCHO_READ8(sc, off) \
#define PSYCHO_READ8(sc, off) \
bus_read_8((sc)->sc_mem_res, (off))
#define PSYCHO_WRITE8(sc, off, v) \
#define PSYCHO_WRITE8(sc, off, v) \
bus_write_8((sc)->sc_mem_res, (off), (v))
#define PCICTL_READ8(sc, off) \
#define PCICTL_READ8(sc, off) \
PSYCHO_READ8((sc), (sc)->sc_pcictl + (off))
#define PCICTL_WRITE8(sc, off, v) \
#define PCICTL_WRITE8(sc, off, v) \
PSYCHO_WRITE8((sc), (sc)->sc_pcictl + (off), (v))
/*
@ -523,7 +523,7 @@ psycho_attach(device_t dev)
(u_long)intrmap, (u_long)PSYCHO_READ8(sc,
intrmap), (u_long)intrclr);
PSYCHO_WRITE8(sc, intrmap, INTMAP_VEC(sc->sc_ign, i));
PSYCHO_WRITE8(sc, intrclr, 0);
PSYCHO_WRITE8(sc, intrclr, INTCLR_IDLE);
PSYCHO_WRITE8(sc, intrmap,
INTMAP_ENABLE(INTMAP_VEC(sc->sc_ign, i),
PCPU_GET(mid)));
@ -808,7 +808,7 @@ psycho_ue(void *arg)
if ((afsr & UEAFSR_P_DTE) != 0)
iommu_decode_fault(sc->sc_is, afar);
panic("%s: uncorrectable DMA error AFAR %#lx AFSR %#lx",
device_get_name(sc->sc_dev), (u_long)afar, (u_long)afsr);
device_get_nameunit(sc->sc_dev), (u_long)afar, (u_long)afsr);
return (FILTER_HANDLED);
}
@ -838,7 +838,7 @@ psycho_pci_bus(void *arg)
afar = PCICTL_READ8(sc, PCR_AFA);
afsr = PCICTL_READ8(sc, PCR_AFS);
panic("%s: PCI bus %c error AFAR %#lx AFSR %#lx",
device_get_name(sc->sc_dev), 'A' + sc->sc_half, (u_long)afar,
device_get_nameunit(sc->sc_dev), 'A' + sc->sc_half, (u_long)afar,
(u_long)afsr);
return (FILTER_HANDLED);
}
@ -1137,7 +1137,7 @@ psycho_intr_clear(void *arg)
struct intr_vector *iv = arg;
struct psycho_icarg *pica = iv->iv_icarg;
PSYCHO_WRITE8(pica->pica_sc, pica->pica_clr, 0);
PSYCHO_WRITE8(pica->pica_sc, pica->pica_clr, INTCLR_IDLE);
}
static int

View File

@ -189,26 +189,26 @@ struct schizo_dma_sync {
#define SCHIZO_PERF_CNT_QLTY 100
#define SCHIZO_SPC_READ_8(spc, sc, offs) \
#define SCHIZO_SPC_READ_8(spc, sc, offs) \
bus_read_8((sc)->sc_mem_res[(spc)], (offs))
#define SCHIZO_SPC_WRITE_8(spc, sc, offs, v) \
#define SCHIZO_SPC_WRITE_8(spc, sc, offs, v) \
bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))
#define SCHIZO_PCI_READ_8(sc, offs) \
#define SCHIZO_PCI_READ_8(sc, offs) \
SCHIZO_SPC_READ_8(STX_PCI, (sc), (offs))
#define SCHIZO_PCI_WRITE_8(sc, offs, v) \
#define SCHIZO_PCI_WRITE_8(sc, offs, v) \
SCHIZO_SPC_WRITE_8(STX_PCI, (sc), (offs), (v))
#define SCHIZO_CTRL_READ_8(sc, offs) \
#define SCHIZO_CTRL_READ_8(sc, offs) \
SCHIZO_SPC_READ_8(STX_CTRL, (sc), (offs))
#define SCHIZO_CTRL_WRITE_8(sc, offs, v) \
#define SCHIZO_CTRL_WRITE_8(sc, offs, v) \
SCHIZO_SPC_WRITE_8(STX_CTRL, (sc), (offs), (v))
#define SCHIZO_PCICFG_READ_8(sc, offs) \
#define SCHIZO_PCICFG_READ_8(sc, offs) \
SCHIZO_SPC_READ_8(STX_PCICFG, (sc), (offs))
#define SCHIZO_PCICFG_WRITE_8(sc, offs, v) \
#define SCHIZO_PCICFG_WRITE_8(sc, offs, v) \
SCHIZO_SPC_WRITE_8(STX_PCICFG, (sc), (offs), (v))
#define SCHIZO_ICON_READ_8(sc, offs) \
#define SCHIZO_ICON_READ_8(sc, offs) \
SCHIZO_SPC_READ_8(STX_ICON, (sc), (offs))
#define SCHIZO_ICON_WRITE_8(sc, offs, v) \
#define SCHIZO_ICON_WRITE_8(sc, offs, v) \
SCHIZO_SPC_WRITE_8(STX_ICON, (sc), (offs), (v))
struct schizo_desc {
@ -826,7 +826,7 @@ schizo_pci_bus(void *arg)
}
panic("%s: PCI bus %c error AFAR %#llx AFSR %#llx PCI CSR %#llx "
"IOMMU %#llx STATUS %#llx", device_get_name(sc->sc_dev),
"IOMMU %#llx STATUS %#llx", device_get_nameunit(sc->sc_dev),
'A' + sc->sc_half, (unsigned long long)afar,
(unsigned long long)afsr, (unsigned long long)csr,
(unsigned long long)iommu, (unsigned long long)status);
@ -861,7 +861,7 @@ schizo_ue(void *arg)
break;
mtx_unlock_spin(sc->sc_mtx);
panic("%s: uncorrectable DMA error AFAR %#llx AFSR %#llx",
device_get_name(sc->sc_dev), (unsigned long long)afar,
device_get_nameunit(sc->sc_dev), (unsigned long long)afar,
(unsigned long long)afsr);
return (FILTER_HANDLED);
}
@ -895,7 +895,7 @@ schizo_host_bus(void *arg)
uint64_t errlog;
errlog = SCHIZO_CTRL_READ_8(sc, STX_CTRL_BUS_ERRLOG);
panic("%s: %s error %#llx", device_get_name(sc->sc_dev),
panic("%s: %s error %#llx", device_get_nameunit(sc->sc_dev),
sc->sc_mode == SCHIZO_MODE_TOM ? "JBus" : "Safari",
(unsigned long long)errlog);
return (FILTER_HANDLED);
@ -1077,7 +1077,7 @@ schizo_dma_sync_stub(void *arg)
for (; atomic_cmpset_acq_32(&sc->sc_cdma_state,
SCHIZO_CDMA_STATE_DONE, SCHIZO_CDMA_STATE_PENDING) == 0;)
;
SCHIZO_PCI_WRITE_8(sc, sc->sc_cdma_clr, 1);
SCHIZO_PCI_WRITE_8(sc, sc->sc_cdma_clr, INTCLR_RECEIVED);
microuptime(&cur);
end.tv_sec = 1;
end.tv_usec = 0;
@ -1162,7 +1162,7 @@ schizo_intr_clear(void *arg)
struct intr_vector *iv = arg;
struct schizo_icarg *sica = iv->iv_icarg;
SCHIZO_PCI_WRITE_8(sica->sica_sc, sica->sica_clr, 0);
SCHIZO_PCI_WRITE_8(sica->sica_sc, sica->sica_clr, INTCLR_IDLE);
}
static int

View File

@ -171,9 +171,9 @@ struct sbus_softc {
void *sc_pf_ihand;
};
#define SYSIO_READ8(sc, off) \
#define SYSIO_READ8(sc, off) \
bus_read_8((sc)->sc_sysio_res, (off))
#define SYSIO_WRITE8(sc, off, v) \
#define SYSIO_WRITE8(sc, off, v) \
bus_write_8((sc)->sc_sysio_res, (off), (v))
static device_probe_t sbus_probe;
@ -697,7 +697,7 @@ sbus_intr_clear(void *arg)
struct intr_vector *iv = arg;
struct sbus_icarg *sica = iv->iv_icarg;
SYSIO_WRITE8(sica->sica_sc, sica->sica_clr, 0);
SYSIO_WRITE8(sica->sica_sc, sica->sica_clr, INTCLR_IDLE);
}
static int

View File

@ -38,12 +38,13 @@
* any in or out parameters in the upper word. The high 3 bits of the
* upper word are used to encode the in/out status of the parameter.
*/
#define IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */
#define IOCPARM_SHIFT 13 /* number of bits for ioctl size */
#define IOCPARM_MASK ((1 << IOCPARM_SHIFT) - 1) /* parameter length mask */
#define IOCPARM_LEN(x) (((x) >> 16) & IOCPARM_MASK)
#define IOCBASECMD(x) ((x) & ~(IOCPARM_MASK << 16))
#define IOCGROUP(x) (((x) >> 8) & 0xff)
#define IOCPARM_MAX PAGE_SIZE /* max size of ioctl, mult. of PAGE_SIZE */
#define IOCPARM_MAX (1 << IOCPARM_SHIFT) /* max size of ioctl */
#define IOC_VOID 0x20000000 /* no parameters */
#define IOC_OUT 0x40000000 /* copy out parameters */
#define IOC_IN 0x80000000 /* copy in parameters */

View File

@ -42,22 +42,19 @@
#include <sys/resource.h>
#include <machine/pcpu.h>
struct pcb;
struct thread;
/*
* Define a set for pcpu data.
*
* We don't use SET_DECLARE because it defines the set as 'a' when we
* want 'aw'. GCC considers uninitialized data in a seperate section
* writable and there is no generic zero initializer that works for
* want 'aw'. gcc considers uninitialized data in a separate section
* writable, and there is no generic zero initializer that works for
* structs and scalars.
*/
extern uintptr_t *__start_set_pcpu;
extern uintptr_t *__stop_set_pcpu;
__asm__(
#if defined(__arm__)
#ifdef __arm__
".section set_pcpu, \"aw\", %progbits\n"
#else
".section set_pcpu, \"aw\", @progbits\n"
@ -73,8 +70,8 @@ extern uintptr_t dpcpu_off[];
/*
* Convenience defines.
*/
#define DPCPU_START (uintptr_t)&__start_set_pcpu
#define DPCPU_STOP (uintptr_t)&__stop_set_pcpu
#define DPCPU_START ((uintptr_t)&__start_set_pcpu)
#define DPCPU_STOP ((uintptr_t)&__stop_set_pcpu)
#define DPCPU_BYTES (DPCPU_STOP - DPCPU_START)
#define DPCPU_MODMIN 2048
#define DPCPU_SIZE roundup2(DPCPU_BYTES, PAGE_SIZE)
@ -111,8 +108,8 @@ extern uintptr_t dpcpu_off[];
/*
* XXXUPS remove as soon as we have per cpu variable
* linker sets and can define rm_queue in _rm_lock.h
*/
* linker sets and can define rm_queue in _rm_lock.h
*/
struct rm_queue {
struct rm_queue* volatile rmq_next;
struct rm_queue* volatile rmq_prev;
@ -120,7 +117,6 @@ struct rm_queue {
#define PCPU_NAME_LEN (sizeof("CPU ") + sizeof(__XSTRING(MAXCPU) + 1))
/*
* This structure maps out the global data that needs to be kept on a
* per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
@ -133,52 +129,50 @@ struct pcpu {
struct thread *pc_fpcurthread; /* Fp state owner */
struct thread *pc_deadthread; /* Zombie thread or NULL */
struct pcb *pc_curpcb; /* Current pcb */
uint64_t pc_switchtime;
int pc_switchticks;
uint64_t pc_switchtime; /* cpu_ticks() at last csw */
int pc_switchticks; /* `ticks' at last csw */
u_int pc_cpuid; /* This cpu number */
cpumask_t pc_cpumask; /* This cpu mask */
cpumask_t pc_other_cpus; /* Mask of all other cpus */
SLIST_ENTRY(pcpu) pc_allcpu;
struct lock_list_entry *pc_spinlocks;
#ifdef KTR
char pc_name[PCPU_NAME_LEN]; /* String name for KTR. */
char pc_name[PCPU_NAME_LEN]; /* String name for KTR */
#endif
struct vmmeter pc_cnt; /* VM stats counters */
long pc_cp_time[CPUSTATES]; /* statclock ticks */
struct device *pc_device;
void *pc_netisr; /* netisr SWI cookie. */
void *pc_netisr; /* netisr SWI cookie */
/*
/*
* Stuff for read mostly lock
*
*
* XXXUPS remove as soon as we have per cpu variable
* linker sets.
*/
struct rm_queue pc_rm_queue;
struct rm_queue pc_rm_queue;
/*
* Dynamic per-cpu data area.
*/
uintptr_t pc_dynamic;
uintptr_t pc_dynamic; /* Dynamic per-cpu data area */
/*
* Keep MD fields last, so that CPU-specific variations on a
* single architecture don't result in offset variations of
* the machine-independent fields of the pcpu. Even though
* the machine-independent fields of the pcpu. Even though
* the pcpu structure is private to the kernel, some ports
* (e.g. lsof, part of gtop) define _KERNEL and include this
* header. While strictly speaking this is wrong, there's no
* reason not to keep the offsets of the MI fields constant.
* If only to make kernel debugging easier...
* (e.g., lsof, part of gtop) define _KERNEL and include this
* header. While strictly speaking this is wrong, there's no
* reason not to keep the offsets of the MI fields constant
* if only to make kernel debugging easier.
*/
PCPU_MD_FIELDS;
} __aligned(128);
} __aligned(CACHE_LINE_SIZE);
#ifdef _KERNEL
SLIST_HEAD(cpuhead, pcpu);
extern struct cpuhead cpuhead;
extern struct pcpu *cpuid_to_pcpu[MAXCPU];
#define curcpu PCPU_GET(cpuid)
#define curproc (curthread->td_proc)
@ -193,21 +187,17 @@ extern struct cpuhead cpuhead;
* db_show_mdpcpu() is responsible for handling machine dependent
* fields for the DDB 'show pcpu' command.
*/
extern struct pcpu *cpuid_to_pcpu[MAXCPU];
void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
void db_show_mdpcpu(struct pcpu *pcpu);
void pcpu_destroy(struct pcpu *pcpu);
struct pcpu *pcpu_find(u_int cpuid);
void pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
void *dpcpu_alloc(int size);
void dpcpu_copy(void *s, int size);
void dpcpu_free(void *s, int size);
void dpcpu_init(void *dpcpu, int cpuid);
void pcpu_destroy(struct pcpu *pcpu);
struct pcpu *pcpu_find(u_int cpuid);
void pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
#endif /* _KERNEL */
#endif /* _KERNEL */
#endif /* !_SYS_PCPU_H_ */

View File

@ -97,7 +97,7 @@ struct vtoc8 {
} map[VTOC8_NPARTS];
uint16_t magic;
uint16_t cksum;
};
} __packed;
#ifdef CTASSERT
CTASSERT(sizeof(struct vtoc8) == 512);