Merge from vmcontention

attilio 2013-03-07 23:52:15 +00:00
commit 993799493c
154 changed files with 13408 additions and 1955 deletions

View File

@ -26,6 +26,19 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 10.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20130304:
Recent commits to callout(9) changed the size of struct callout,
so the KBI is probably heavily disturbed. Also, some functions
in the callout(9)/sleep(9)/sleepqueue(9)/condvar(9) KPIs were replaced
by macros. Any kernel module using these interfaces will fail to load,
so a rebuild is required.
The ctl device has been re-enabled in GENERIC for i386 and amd64,
but does not initialize by default (because of the new CTL_DISABLE
option) to save memory. To re-enable it, remove the CTL_DISABLE
option from the kernel config file or set kern.cam.ctl.disable=0
in /boot/loader.conf.
20130301:
The ctl device has been disabled in GENERIC for i386 and amd64.
This was done due to the extra memory being allocated at system

View File

@ -1189,7 +1189,7 @@ dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
}
static void
dump_bpobj(bpobj_t *bpo, char *name)
dump_bpobj(bpobj_t *bpo, char *name, int indent)
{
char bytes[32];
char comp[32];
@ -1199,31 +1199,56 @@ dump_bpobj(bpobj_t *bpo, char *name)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes);
if (bpo->bpo_havesubobj) {
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp);
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp);
(void) printf("\n %s: %llu local blkptrs, %llu subobjs, "
"%s (%s/%s comp)\n",
name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(void) printf(" %*s: object %llu, %llu local blkptrs, "
"%llu subobjs, %s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
bytes, comp, uncomp);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_bpobj(&subbpo, "subobj", indent + 1);
}
} else {
(void) printf("\n %s: %llu blkptrs, %s\n",
name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, bytes);
(void) printf(" %*s: object %llu, %llu blkptrs, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static void
dump_deadlist(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
uint64_t unused;
char bytes[32];
char comp[32];
char uncomp[32];
@ -1242,14 +1267,24 @@ dump_deadlist(dsl_deadlist_t *dl)
(void) printf("\n");
/* force the tree to be loaded */
dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);
for (dle = avl_first(&dl->dl_tree); dle;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
(void) printf(" mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf), "mintxg %llu -> ",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
if (dump_opt['d'] >= 5)
dump_bpobj(&dle->dle_bpobj, "");
dump_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
}
}
@ -1272,7 +1307,7 @@ fuid_table_destroy()
* print uid or gid information.
* For normal POSIX id just the id is printed in decimal format.
* For CIFS files with FUID the fuid is printed in hex followed by
* the doman-rid string.
* the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
@ -2529,10 +2564,11 @@ dump_zpool(spa_t *spa)
if (dump_opt['d'] || dump_opt['i']) {
dump_dir(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dump_bpobj(&spa->spa_deferred_bpobj, "Deferred frees");
dump_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_bpobj(&spa->spa_dsl_pool->dp_free_bpobj,
"Pool snapshot frees");
"Pool snapshot frees", 0);
}
if (spa_feature_is_active(spa,

View File

@ -363,6 +363,7 @@ sort_iidescs(Elf *elf, const char *file, tdata_t *td, int fuzzymatch,
for (i = 0; i < nent; i++) {
GElf_Sym sym;
char *bname;
iidesc_t **tolist;
GElf_Sym ssym;
iidesc_match_t smatch;
@ -377,7 +378,8 @@ sort_iidescs(Elf *elf, const char *file, tdata_t *td, int fuzzymatch,
switch (GELF_ST_TYPE(sym.st_info)) {
case STT_FILE:
match.iim_file = match.iim_name;
bname = strrchr(match.iim_name, '/');
match.iim_file = bname == NULL ? match.iim_name : bname + 1;
continue;
case STT_OBJECT:
tolist = iiburst->iib_objts;

View File

@ -34,7 +34,7 @@
.\"
.\" $Id$
.\"
.Dd May 26, 2012
.Dd March 3, 2013
.Dt OPENPAM_STRADDCH 3
.Os
.Sh NAME
@ -73,6 +73,21 @@ and
argument point to variables used to hold the size
of the buffer and the length of the string it contains, respectively.
.Pp
The final argument,
.Fa ch ,
is the character that should be appended to
the string. If
.Fa ch
is 0, nothing is appended, but a new buffer is
still allocated if
.Fa str
is NULL. This can be used to
.Do
bootstrap
.Dc
the
string.
.Pp
If a new buffer is allocated or an existing buffer is reallocated to
make room for the additional character,
.Fa str
@ -91,7 +106,9 @@ If the
function is successful, it increments the
integer variable pointed to by
.Fa len
and returns 0.
(unless
.Fa ch
was 0) and returns 0.
Otherwise, it leaves the variables pointed to by
.Fa str ,
.Fa size

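As an illustration of the interface documented above, here is a minimal userland sketch (not part of this commit; build_word() is a hypothetical helper, and error handling is trimmed) that bootstraps an empty buffer by passing a 0 character and then appends to it, mirroring how the openpam_readline()/openpam_readword() changes below use it:

#include <security/pam_appl.h>
#include <security/openpam.h>	/* openpam_straddch() */
#include <stdlib.h>

static char *
build_word(const char *src)
{
	char *str = NULL;
	size_t size = 0, len = 0;

	/* ch == 0: allocate a buffer but append nothing ("bootstrap") */
	if (openpam_straddch(&str, &size, &len, 0) != 0)
		return (NULL);
	while (*src != '\0') {
		if (openpam_straddch(&str, &size, &len, *src++) != 0) {
			free(str);
			return (NULL);
		}
	}
	return (str);	/* len holds the number of characters appended */
}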
View File

@ -62,11 +62,9 @@ openpam_readline(FILE *f, int *lineno, size_t *lenp)
size_t len, size;
int ch;
if ((line = malloc(size = MIN_LINE_LENGTH)) == NULL) {
openpam_log(PAM_LOG_ERROR, "malloc(): %m");
line = NULL;
if (openpam_straddch(&line, &size, &len, 0) != 0)
return (NULL);
}
len = 0;
for (;;) {
ch = fgetc(f);
/* strip comment */

View File

@ -86,13 +86,8 @@ openpam_readword(FILE *f, int *lineno, size_t *lenp)
/* begin quote */
quote = ch;
/* edge case: empty quoted string */
if (word == NULL && (word = malloc(1)) == NULL) {
openpam_log(PAM_LOG_ERROR, "malloc(): %m");
errno = ENOMEM;
if (openpam_straddch(&word, &size, &len, 0) != 0)
return (NULL);
}
*word = '\0';
size = 1;
} else if (ch == quote && !escape) {
/* end quote */
quote = 0;

View File

@ -43,6 +43,7 @@
7) Run configure with the appropriate arguments:
$ ./configure --prefix=/usr --sysconfdir=/etc/ssh \
--disable-lastlog --disable-utmp --disable-wtmp \
--with-pam --with-tcp-wrappers --with-libedit \
--with-ssl-engine

View File

@ -17,6 +17,9 @@
/* Define if your resolver libs need this for getrrsetbyname */
/* #undef BIND_8_COMPAT */
/* The system has incomplete BSM API */
/* #undef BROKEN_BSM_API */
/* Define if cmsg_type is not passed correctly */
/* #undef BROKEN_CMSG_TYPE */
@ -97,7 +100,7 @@
/* #undef DISABLE_FD_PASSING */
/* Define if you don't want to use lastlog */
/* #undef DISABLE_LASTLOG */
#define DISABLE_LASTLOG 1
/* Define if you don't want to use your system's login() call */
/* #undef DISABLE_LOGIN */
@ -307,7 +310,7 @@
#define HAVE_DECL__GETSHORT 0
/* Define if you have /dev/ptmx */
#define HAVE_DEV_PTMX 1
/* #undef HAVE_DEV_PTMX */
/* Define if you have /dev/ptc */
/* #undef HAVE_DEV_PTS_AND_PTC */
@ -316,7 +319,7 @@
#define HAVE_DIRENT_H 1
/* Define to 1 if you have the `dirfd' function. */
/* #undef HAVE_DIRFD */
#define HAVE_DIRFD 1
/* Define to 1 if you have the `dirname' function. */
#define HAVE_DIRNAME 1
@ -501,6 +504,9 @@
/* Define if HEADER.ad exists in arpa/nameser.h */
#define HAVE_HEADER_AD 1
/* Define to 1 if you have the `HMAC_CTX_init' function. */
#define HAVE_HMAC_CTX_INIT 1
/* Define if you have ut_host in utmp.h */
/* #undef HAVE_HOST_IN_UTMP */
@ -552,6 +558,9 @@
/* Define to 1 if you have the <lastlog.h> header file. */
/* #undef HAVE_LASTLOG_H */
/* Define if you want ldns support */
/* #undef HAVE_LDNS */
/* Define to 1 if you have the <libaudit.h> header file. */
/* #undef HAVE_LIBAUDIT_H */
@ -594,10 +603,19 @@
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
/* Define to 1 if you have the <linux/audit.h> header file. */
/* #undef HAVE_LINUX_AUDIT_H */
/* Define to 1 if you have the <linux/filter.h> header file. */
/* #undef HAVE_LINUX_FILTER_H */
/* Define to 1 if you have the <linux/if_tun.h> header file. */
/* #undef HAVE_LINUX_IF_TUN_H */
/* Define if your libraries define login() */
/* Define to 1 if you have the <linux/seccomp.h> header file. */
/* #undef HAVE_LINUX_SECCOMP_H */
/* Define to 1 if you have the `login' function. */
/* #undef HAVE_LOGIN */
/* Define to 1 if you have the <login_cap.h> header file. */
@ -805,6 +823,9 @@
/* Define to 1 if you have the `setgroups' function. */
#define HAVE_SETGROUPS 1
/* Define to 1 if you have the `setlinebuf' function. */
#define HAVE_SETLINEBUF 1
/* Define to 1 if you have the `setlogin' function. */
#define HAVE_SETLOGIN 1
@ -931,6 +952,9 @@
/* Define to 1 if you have the `strmode' function. */
#define HAVE_STRMODE 1
/* Define to 1 if you have the `strnlen' function. */
#define HAVE_STRNLEN 1
/* Define to 1 if you have the `strnvis' function. */
/* #undef HAVE_STRNVIS */
@ -1172,7 +1196,7 @@
/* #undef HAVE_VHANGUP */
/* Define to 1 if you have the <vis.h> header file. */
#define HAVE_VIS_H 1
/* #undef HAVE_VIS_H */
/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1
@ -1351,15 +1375,21 @@
/* Sandbox using setrlimit(2) */
#define SANDBOX_RLIMIT 1
/* Sandbox using seccomp filter */
/* #undef SANDBOX_SECCOMP_FILTER */
/* setrlimit RLIMIT_FSIZE works */
/* #undef SANDBOX_SKIP_RLIMIT_FSIZE */
/* Sandbox using systrace(4) */
/* #undef SANDBOX_SYSTRACE */
/* Specify the system call convention in use */
/* #undef SECCOMP_AUDIT_ARCH */
/* Define if your platform breaks doing a seteuid before a setuid */
/* #undef SETEUID_BREAKS_SETUID */
/* The size of `char', as computed by sizeof. */
#define SIZEOF_CHAR 1
/* The size of `int', as computed by sizeof. */
#define SIZEOF_INT 4
@ -1500,6 +1530,11 @@
/* Define if xauth is found in your path */
/* #undef XAUTH_PATH */
/* Enable large inode numbers on Mac OS X 10.5. */
#ifndef _DARWIN_USE_64_BIT_INODE
# define _DARWIN_USE_64_BIT_INODE 1
#endif
/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */

View File

@ -30,6 +30,5 @@ KEYPRINT=9b5feee6d69f170e3dd0a2c8e469ddbd64f13f978f2f3aede40c98633216c330
# REFUSE korean polish portuguese russian ukrainian vietnamese
# List of INDEX files to build and the DESCRIBE file to use for each
INDEX INDEX-7 DESCRIBE.7
INDEX INDEX-8 DESCRIBE.8
INDEX INDEX-9 DESCRIBE.9

View File

@ -46,7 +46,8 @@ MLINKS+=strcasecmp.3 strncasecmp.3 \
strcasecmp.3 strcasecmp_l.3 \
strcasecmp.3 strncasecmp_l.3
MLINKS+=strcat.3 strncat.3
MLINKS+=strchr.3 strrchr.3
MLINKS+=strchr.3 strrchr.3 \
strchr.3 strchrnul.3
MLINKS+=strcmp.3 strncmp.3
MLINKS+=strcoll.3 strcoll_l.3
MLINKS+=strcpy.3 stpcpy.3 \

View File

@ -67,6 +67,9 @@ SRCS+= divsi3.S
.else
# Compiler support functions
.PATH: ${.CURDIR}/../../contrib/compiler-rt/lib/
# __clzsi2 and ctzsi2 for various builtin functions
SRCS+= clzsi2.c ctzsi2.c
# Divide and modulus functions called by the compiler
SRCS+= divmoddi4.c divmodsi4.c divdi3.c divsi3.c moddi3.c modsi3.c
SRCS+= udivmoddi4.c udivmodsi4.c udivdi3.c udivsi3.c umoddi3.c umodsi3.c

View File

@ -49,6 +49,8 @@ static char group_dir[PATH_MAX];
static char group_file[PATH_MAX];
static char tempname[PATH_MAX];
static int initialized;
static size_t grmemlen(const struct group *, const char *, int *);
static struct group *grcopy(const struct group *gr, struct group *newgr, const char *, int ndx);
/*
* Initialize statics
@ -428,91 +430,122 @@ gr_make(const struct group *gr)
*/
struct group *
gr_dup(const struct group *gr)
{
return (gr_add(gr, NULL));
}
/*
* Add a new member name to a struct group.
*/
struct group *
gr_add(const struct group *gr, const char *newmember)
{
struct group *newgr;
char *dst;
size_t len;
int ndx;
int num_mem;
/* Calculate size of the group. */
len = sizeof(*newgr);
if (gr->gr_name != NULL)
len += strlen(gr->gr_name) + 1;
if (gr->gr_passwd != NULL)
len += strlen(gr->gr_passwd) + 1;
if (gr->gr_mem != NULL) {
for (num_mem = 0; gr->gr_mem[num_mem] != NULL; num_mem++)
len += strlen(gr->gr_mem[num_mem]) + 1;
len += (num_mem + 1) * sizeof(*gr->gr_mem);
} else
num_mem = -1;
num_mem = 0;
len = grmemlen(gr, newmember, &num_mem);
/* Create new group and copy old group into it. */
if ((newgr = malloc(len)) == NULL)
return (NULL);
/* point new gr_mem to end of struct + 1 */
if (gr->gr_mem != NULL)
return (grcopy(gr, newgr, newmember, num_mem));
}
/* It is safer to walk the pointers given at gr_mem since there is no
* guarantee the gr_mem + strings are contiguous in the given struct group
* but compact the new group into the following form.
*
* The new struct is laid out like this in memory. The example given is
* for a group with two members only.
*
* {
* (char *name)
* (char *passwd)
* (int gid)
* (gr_mem * newgrp + sizeof(struct group) + sizeof(**)) points to gr_mem area
* gr_mem area
* (member1 *)
* (member2 *)
* (NULL)
* (name string)
* (passwd string)
* (member1 string)
* (member2 string)
* }
*/
/*
* Copy the guts of a group plus given name to a preallocated group struct
*/
static struct group *
grcopy(const struct group *gr, struct group *newgr, const char *name, int ndx)
{
char *dst;
int i;
if (name != NULL)
ndx++;
/* point new gr_mem to end of struct + 1 if there are names */
if (ndx != 0)
newgr->gr_mem = (char **)(newgr + 1);
else
newgr->gr_mem = NULL;
/* point dst after the end of all the gr_mem pointers in newgr */
dst = (char *)&newgr->gr_mem[num_mem + 1];
dst = (char *)&newgr->gr_mem[ndx + 1];
if (gr->gr_name != NULL) {
newgr->gr_name = dst;
dst = stpcpy(dst, gr->gr_name) + 1;
} else {
} else
newgr->gr_name = NULL;
}
if (gr->gr_passwd != NULL) {
newgr->gr_passwd = dst;
dst = stpcpy(dst, gr->gr_passwd) + 1;
} else {
} else
newgr->gr_passwd = NULL;
}
newgr->gr_gid = gr->gr_gid;
if (gr->gr_mem != NULL) {
for (ndx = 0; ndx < num_mem; ndx++) {
newgr->gr_mem[ndx] = dst;
dst = stpcpy(dst, gr->gr_mem[ndx]) + 1;
if (ndx != 0) {
for (i = 0; gr->gr_mem[i] != NULL; i++) {
newgr->gr_mem[i] = dst;
dst = stpcpy(dst, gr->gr_mem[i]) + 1;
}
newgr->gr_mem[ndx] = NULL;
if (name != NULL) {
newgr->gr_mem[i++] = dst;
dst = stpcpy(dst, name) + 1;
}
newgr->gr_mem[i] = NULL;
}
return (newgr);
}
/*
* Add a new member name to a struct group.
* Calculate length of a struct group + given name
*/
struct group *
gr_add(struct group *gr, char *newmember)
static size_t
grmemlen(const struct group *gr, const char *name, int *num_mem)
{
size_t mlen;
int num_mem=0;
char **members;
struct group *newgr;
if (newmember == NULL)
return(gr_dup(gr));
size_t len;
int i;
if (gr == NULL)
return (0);
/* Calculate size of the group. */
len = sizeof(*gr);
if (gr->gr_name != NULL)
len += strlen(gr->gr_name) + 1;
if (gr->gr_passwd != NULL)
len += strlen(gr->gr_passwd) + 1;
if (gr->gr_mem != NULL) {
for (num_mem = 0; gr->gr_mem[num_mem] != NULL; num_mem++) {
if (strcmp(gr->gr_mem[num_mem], newmember) == 0) {
errno = EEXIST;
return (NULL);
}
for (len = i = 0; gr->gr_mem[i] != NULL; i++) {
len += strlen(gr->gr_mem[i]) + 1;
len += sizeof(*gr->gr_mem);
}
*num_mem = i;
}
/* Allocate enough for current pointers + 1 more and NULL marker */
mlen = (num_mem + 2) * sizeof(*gr->gr_mem);
if ((members = malloc(mlen)) == NULL)
return (NULL);
memcpy(members, gr->gr_mem, num_mem * sizeof(*gr->gr_mem));
members[num_mem++] = newmember;
members[num_mem] = NULL;
gr->gr_mem = members;
newgr = gr_dup(gr);
free(members);
return (newgr);
if (name != NULL) {
len += strlen(name) + 1;
if (gr->gr_mem == NULL)
len += sizeof(*gr->gr_mem);
}
return(len);
}
/*

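A minimal sketch (not from this commit) of how a consumer might use the reworked gr_dup()/gr_add() interface from libutil; the group and member names are hypothetical, and the single free() relies on the compact one-allocation layout described in the comment above:

#include <grp.h>
#include <libutil.h>	/* gr_dup(), gr_add(); link with -lutil */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct group *gr, *newgr;

	if ((gr = getgrnam("wheel")) == NULL)
		return (1);
	/* Returns a single malloc()ed block holding the struct, the
	 * gr_mem pointer array and the strings, with "operator" appended. */
	if ((newgr = gr_add(gr, "operator")) == NULL)
		return (1);
	for (int i = 0; newgr->gr_mem[i] != NULL; i++)
		printf("%s\n", newgr->gr_mem[i]);
	free(newgr);
	return (0);
}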
View File

@ -167,7 +167,7 @@ int gr_copy(int __ffd, int _tfd, const struct group *_gr,
struct group *
gr_dup(const struct group *_gr);
struct group *
gr_add(struct group *_gr, char *_newmember);
gr_add(const struct group *_gr, const char *_newmember);
int gr_equal(const struct group *_gr1, const struct group *_gr2);
void gr_fini(void);
int gr_init(const char *_dir, const char *_master);

View File

@ -7,6 +7,7 @@ SHLIB_MAJOR= 0
INCS= bsdyml.h
SRCS= api.c dumper.c emitter.c loader.c \
parser.c reader.c scanner.c writer.c
MAN= libbsdyml.3
.PATH: ${LIBYAML}/src ${LIBYAML}/include
CLEANFILES= bsdyml.h

View File

@ -1,5 +1,5 @@
.\" Copyright (c) 1993, 1994
.\" The Regents of the University of California. All rights reserved.
.\" Copyright (c) 2013 Baptiste Daroussin <bapt@FreeBSD.org>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
@ -9,14 +9,11 @@
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 4. Neither the name of the University nor the names of its contributors
.\" may be used to endorse or promote products derived from this software
.\" without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -27,46 +24,38 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 31, 1996
.Dt MOUNT_EXT2FS 8
.Dd March 05, 2013
.Dt LIBBSDYML 3
.Os
.Sh NAME
.Nm mount_ext2fs
.Nd mount an ext2fs file system
.Nm libbsdyml
.Nd LibYAML library for parsing and emitting YAML
.Sh SYNOPSIS
.Nm
.Op Fl o Ar options
.Ar special
.Ar node
.In bsdyml.h
.Sh DESCRIPTION
The
.Nm
utility attaches an ext2fs file system
.Ar special
device on to the file system tree at the point
.Ar node .
library is a verbatim copy of LibYAML version 0.1.4.
.Pp
This command is normally executed by
.Xr mount 8
at boot time.
.Pp
The options are as follows:
.Bl -tag -width indent
.It Fl o
Options are specified with a
.Fl o
flag followed by a comma separated string of options.
See the
.Xr mount 8
man page for possible options and their meanings.
.El
.Sh SEE ALSO
.Xr mount 2 ,
.Xr unmount 2 ,
.Xr fstab 5 ,
.Xr mount 8
.Sh HISTORY
The
.Nm
utility first appeared in
.Fx 2.2 .
library is intended to be used within the
.Fx
base system only.
Use of the
.Nm
library for other purposes is unsupported and discouraged.
.Pp
To prevent confusion and autoconfiguration issues for third-party
software, the library has been
renamed to
.Nm
rather than retaining the original LibYAML library and include file
names.
.Sh SEE ALSO
For full documentation, please see the LibYAML webpage at
.Pa http://pyyaml.org/wiki/LibYAML .
.Sh AUTHORS
.An -nosplit
The original LibYAML was written by
.An Kirill Simonov Aq xi@resolvent.net .

View File

@ -3083,9 +3083,14 @@ ipfw_add(char *av[])
} else {
len = sizeof(c->max_log);
if (sysctlbyname("net.inet.ip.fw.verbose_limit",
&c->max_log, &len, NULL, 0) == -1)
&c->max_log, &len, NULL, 0) == -1) {
if (co.test_only) {
c->max_log = 0;
break;
}
errx(1, "sysctlbyname(\"%s\")",
"net.inet.ip.fw.verbose_limit");
}
}
}
break;
@ -3986,9 +3991,13 @@ ipfw_table_handler(int ac, char *av[])
mask = 0; // XXX uninitialized ?
len = sizeof(tables_max);
if (sysctlbyname("net.inet.ip.fw.tables_max", &tables_max, &len,
NULL, 0) == -1)
errx(1, "Can't determine maximum number of ipfw tables. "
"Perhaps you forgot to load ipfw module?");
NULL, 0) == -1) {
if (co.test_only)
tables_max = 128; /* Old conservative default */
else
errx(1, "Can't determine maximum number of ipfw tables."
" Perhaps you forgot to load ipfw module?");
}
memset(&xent, 0, sizeof(xent));

View File

@ -32,7 +32,7 @@
.\" @(#)mount_cd9660.8 8.3 (Berkeley) 3/27/94
.\" $FreeBSD$
.\"
.Dd October 3, 2005
.Dd March 5, 2013
.Dt MOUNT_CD9660 8
.Os
.Sh NAME
@ -80,7 +80,7 @@ See the
man page for possible options and their meanings.
The following cd9660 specific options are available:
.Pp
.Bl -tag -width "nostrictjoliet" -compact
.Bl -tag -width "brokenjoliet" -compact
.It Cm extatt
Same as
.Fl e .
@ -93,7 +93,7 @@ Same as
.It Cm norrip
Same as
.Fl r .
.It Cm nostrictjoliet
.It Cm brokenjoliet
Same as
.Fl b .
.El

View File

@ -83,7 +83,7 @@ main(int argc, char **argv)
{
struct iovec *iov;
int iovlen;
int ch, mntflags, opts;
int ch, mntflags;
char *dev, *dir, *p, *val, mntpath[MAXPATHLEN];
int verbose;
int ssector; /* starting sector, 0 for 1st session */
@ -91,7 +91,7 @@ main(int argc, char **argv)
iov = NULL;
iovlen = 0;
mntflags = opts = verbose = 0;
mntflags = verbose = 0;
ssector = -1;
while ((ch = getopt(argc, argv, "begjo:rs:vC:")) != -1)
@ -109,7 +109,7 @@ main(int argc, char **argv)
build_iovec(&iov, &iovlen, "nojoliet", NULL, (size_t)-1);
break;
case 'o':
getmntopts(optarg, mopts, &mntflags, &opts);
getmntopts(optarg, mopts, &mntflags, NULL);
p = strchr(optarg, '=');
val = NULL;
if (p != NULL) {

View File

@ -1,14 +0,0 @@
# @(#)Makefile 8.3 (Berkeley) 3/27/94
# $FreeBSD$
PROG= mount_ext2fs
SRCS= mount_ext2fs.c getmntopts.c
MAN= mount_ext2fs.8
WARNS?= 2
MOUNT= ${.CURDIR}/../mount
CFLAGS+= -I${MOUNT}
.PATH: ${MOUNT}
.include <bsd.prog.mk>

View File

@ -1,125 +0,0 @@
/*-
* Copyright (c) 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef lint
static const char copyright[] =
"@(#) Copyright (c) 1993, 1994\n\
The Regents of the University of California. All rights reserved.\n";
#endif /* not lint */
#ifndef lint
/*
static char sccsid[] = "@(#)mount_lfs.c 8.3 (Berkeley) 3/27/94";
*/
static const char rcsid[] =
"$FreeBSD$";
#endif /* not lint */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include "mntopts.h"
static void usage(void);
int
main(int argc, char *argv[])
{
struct iovec *iov;
int ch, iovlen;
char *fs_name, *fspec, mntpath[MAXPATHLEN];
char *fstype;
fstype = strrchr(argv[0], '_');
if (fstype == NULL)
errx(EX_USAGE, "argv[0] must end in _fstype");
else
++fstype;
iov = NULL;
iovlen = 0;
while ((ch = getopt(argc, argv, "o:")) != -1)
switch (ch) {
case 'o': {
char *p = NULL;
char *val = strdup("");
p = strchr(optarg, '=');
if (p != NULL) {
free(val);
*p = '\0';
val = p + 1;
}
build_iovec(&iov, &iovlen, optarg, val, strlen(val)+1);
}
break;
case '?':
default:
usage();
}
argc -= optind;
argv += optind;
if (argc != 2)
usage();
fspec = argv[0]; /* the name of the device file */
fs_name = argv[1]; /* the mount point */
/*
* Resolve the mountpoint with realpath(3) and remove unnecessary
* slashes from the devicename if there are any.
*/
if (checkpath(fs_name, mntpath) != 0)
err(EX_USAGE, "%s", mntpath);
(void)rmslashes(fspec, fspec);
build_iovec(&iov, &iovlen, "fstype", fstype, strlen(fstype) + 1);
build_iovec(&iov, &iovlen, "fspath", mntpath, strlen(mntpath) + 1);
build_iovec(&iov, &iovlen, "from", fspec, strlen(fspec) + 1);
if (nmount(iov, iovlen, 0) < 0)
err(EX_OSERR, "%s", fspec);
return (0);
}
static void
usage()
{
(void)fprintf(stderr,
"usage: mount_ext2fs [-o options] special node\n");
exit(EX_USAGE);
}

View File

@ -69,7 +69,7 @@ main(int argc, char **argv)
struct iovec *iov = NULL;
int iovlen = 0;
struct stat sb;
int c, mntflags, set_gid, set_uid, set_mask, set_dirmask;
int c, set_gid, set_uid, set_mask, set_dirmask;
char *dev, *dir, mntpath[MAXPATHLEN], *csp;
char fstype[] = "msdosfs";
char errmsg[255] = {0};
@ -78,9 +78,8 @@ main(int argc, char **argv)
mode_t mask = 0, dirmask = 0;
uid_t uid = 0;
gid_t gid = 0;
getmnt_silent = 1;
mntflags = set_gid = set_uid = set_mask = set_dirmask = 0;
set_gid = set_uid = set_mask = set_dirmask = 0;
while ((c = getopt(argc, argv, "sl9u:g:m:M:o:L:D:W:")) != -1) {
switch (c) {
@ -219,7 +218,7 @@ main(int argc, char **argv)
build_iovec_argf(&iov, &iovlen, "mask", "%u", mask);
build_iovec_argf(&iov, &iovlen, "dirmask", "%u", dirmask);
if (nmount(iov, iovlen, mntflags) < 0) {
if (nmount(iov, iovlen, 0) < 0) {
if (errmsg[0])
err(1, "%s: %s", dev, errmsg);
else

View File

@ -130,7 +130,7 @@ enum tryret {
TRYRET_LOCALERR /* Local failure. */
};
static int fallback_mount(struct iovec *iov, int iovlen, int mntflags);
static int fallback_mount(struct iovec *iov, int iovlen);
static int sec_name_to_num(char *sec);
static char *sec_num_to_name(int num);
static int getnfsargs(char *, struct iovec **iov, int *iovlen);
@ -149,13 +149,12 @@ main(int argc, char *argv[])
{
int c;
struct iovec *iov;
int mntflags, num, iovlen;
int num, iovlen;
int osversion;
char *name, *p, *spec, *fstype;
char mntpath[MAXPATHLEN], errmsg[255];
char hostname[MAXHOSTNAMELEN + 1], *gssname, gssn[MAXHOSTNAMELEN + 50];
mntflags = 0;
iov = NULL;
iovlen = 0;
memset(errmsg, 0, sizeof(errmsg));
@ -427,10 +426,10 @@ main(int argc, char *argv[])
*/
osversion = getosreldate();
if (osversion >= 702100) {
if (nmount(iov, iovlen, mntflags))
if (nmount(iov, iovlen, 0))
err(1, "%s, %s", mntpath, errmsg);
} else {
if (fallback_mount(iov, iovlen, mntflags))
if (fallback_mount(iov, iovlen))
err(1, "%s, %s", mntpath, errmsg);
}
@ -473,7 +472,7 @@ copyopt(struct iovec **newiov, int *newiovlen,
* parameters. It should be eventually be removed.
*/
static int
fallback_mount(struct iovec *iov, int iovlen, int mntflags)
fallback_mount(struct iovec *iov, int iovlen)
{
struct nfs_args args = {
.version = NFS_ARGSVERSION,
@ -663,7 +662,7 @@ fallback_mount(struct iovec *iov, int iovlen, int mntflags)
copyopt(&newiov, &newiovlen, iov, iovlen, "fspath");
copyopt(&newiov, &newiovlen, iov, iovlen, "errmsg");
return nmount(newiov, newiovlen, mntflags);
return nmount(newiov, newiovlen, 0);
}
static int

View File

@ -68,12 +68,11 @@ main(int argc, char *argv[])
char source[MAXPATHLEN];
char target[MAXPATHLEN];
char errmsg[255];
int ch, mntflags, iovlen;
int ch, iovlen;
char nullfs[] = "nullfs";
iov = NULL;
iovlen = 0;
mntflags = 0;
errmsg[0] = '\0';
while ((ch = getopt(argc, argv, "o:")) != -1)
switch(ch) {
@ -111,7 +110,7 @@ main(int argc, char *argv[])
build_iovec(&iov, &iovlen, "fspath", source, (size_t)-1);
build_iovec(&iov, &iovlen, "target", target, (size_t)-1);
build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg));
if (nmount(iov, iovlen, mntflags) < 0) {
if (nmount(iov, iovlen, 0) < 0) {
if (errmsg[0] != 0)
err(1, "%s: %s", source, errmsg);
else

View File

@ -1,13 +0,0 @@
# $FreeBSD$
PROG = mount_reiserfs
SRCS = mount_reiserfs.c getmntopts.c
MAN = mount_reiserfs.8
# mount_reiserfs needs mntopts.h and getmntopts.c from src/sbin/mount/
MOUNT ?= ${.CURDIR}/../mount
CFLAGS += -I${MOUNT}
.PATH: ${MOUNT}
.include <bsd.prog.mk>

View File

@ -1,90 +0,0 @@
.\"
.\" Copyright (c) 1993,1994 Christopher G. Demetriou
.\" Copyright (c) 1999 Semen Ustimenko
.\" Copyright (c) 2005 Jean-Sébastien Pédron
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 3. All advertising materials mentioning features or use of this software
.\" must display the following acknowledgment:
.\" This product includes software developed by Christopher G. Demetriou.
.\" 3. The name of the author may not be used to endorse or promote products
.\" derived from this software without specific prior written permission
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
.\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd February 3, 2005
.Dt MOUNT_REISERFS 8
.Os
.Sh NAME
.Nm mount_reiserfs
.Nd "mount a ReiserFS file system"
.Sh SYNOPSIS
.Nm
.Ar special
.Ar node
.Sh DESCRIPTION
The
.Nm
utility attaches the ReiserFS file system residing on the device
.Ar special
to the global file system namespace at the location
indicated by
.Ar node .
.Pp
This command is normally executed by
.Xr mount 8
at boot time, but can be used by any user to mount a
ReiserFS file system on any directory that they own (provided,
of course, that they have appropriate access to the device that
contains the file system).
.Sh EXAMPLES
To mount a ReiserFS volume located in
.Pa /dev/ad1s1 :
.Pp
.Dl "mount_reiserfs /dev/ad1s1 /mnt"
.Sh SEE ALSO
.Xr mount 2 ,
.Xr unmount 2 ,
.Xr fstab 5 ,
.Xr mount 8
.Sh HISTORY
The
.Nm
utility first appeared in
.Fx 6.0 .
.Sh AUTHORS
.An -nosplit
The ReiserFS kernel implementation was written by
.An Hans Reiser
.Pq Pa http://www.namesys.com/ ,
and ported to
.Fx
by
.An Jean-S\['e]bastien P\['e]dron Aq dumbbell@FreeBSD.org .
.Pp
The
.Nm
utility and manual were written by
.An Jean-S\['e]bastien P\['e]dron Aq dumbbell@FreeBSD.org .
.Sh CAVEATS
This utility is primarily used for read access to a ReiserFS volume.
Writing to a volume is currently unsupported.

View File

@ -1,108 +0,0 @@
/*-
* Copyright (c) 2005 Jean-Sébastien Pédron
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include "mntopts.h"
struct mntopt mopts[] = {
MOPT_STDOPTS,
MOPT_END
};
void usage(void);
int
main(int argc, char *argv[])
{
struct iovec *iov;
int ch, mntflags, iovlen;
char *dev, *dir, mntpath[MAXPATHLEN];
char fstype[] = "reiserfs";
mntflags = 0;
while ((ch = getopt(argc, argv, "o:")) != -1) {
switch(ch) {
case 'o':
getmntopts(optarg, mopts, &mntflags, 0);
break;
case '?':
default:
usage();
}
}
argc -= optind;
argv += optind;
if (argc != 2)
usage();
dev = argv[0];
dir = argv[1];
/*
* Resolve the mountpoint with realpath(3) and remove unnecessary
* slashes from the devicename if there are any.
*/
if (checkpath(dir, mntpath) != 0)
err(EX_USAGE, "%s", mntpath);
(void)rmslashes(dev, dev);
/* Read-only support for now */
mntflags |= MNT_RDONLY;
/* Prepare the options vector for nmount(). build_iovec() is declared
* in mntopts.h. */
iov = NULL;
iovlen = 0;
build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
build_iovec(&iov, &iovlen, "fspath", mntpath, (size_t)-1);
build_iovec(&iov, &iovlen, "from", dev, (size_t)-1);
if (nmount(iov, iovlen, mntflags) < 0)
err(EX_OSERR, "%s", dev);
exit(0);
}
void
usage(void)
{
fprintf(stderr,
"usage: mount_reiserfs [-o options] special node\n");
exit(EX_USAGE);
}

View File

@ -1,23 +0,0 @@
# @(#)Makefile 8.2 (Berkeley) 3/27/94
# $FreeBSD$
PROG= mount_std
SRCS= mount_std.c getmntopts.c
MAN= mount_std.8
MLINKS= mount_std.8 mount_devfs.8 \
mount_std.8 mount_fdescfs.8 \
mount_std.8 mount_linprocfs.8 \
mount_std.8 mount_procfs.8
MOUNT= ${.CURDIR}/../mount
CFLAGS+= -I${MOUNT}
WARNS?= 3
.PATH: ${MOUNT}
LINKS= ${BINDIR}/mount_std ${BINDIR}/mount_devfs \
${BINDIR}/mount_std ${BINDIR}/mount_fdescfs \
${BINDIR}/mount_std ${BINDIR}/mount_linprocfs \
${BINDIR}/mount_std ${BINDIR}/mount_procfs
.include <bsd.prog.mk>

View File

@ -1,167 +0,0 @@
.\"
.\" Copyright (c) 1992, 1993, 1994
.\" The Regents of the University of California. All rights reserved.
.\" All rights reserved.
.\"
.\" This code is derived from software donated to Berkeley by
.\" Jan-Simon Pendry.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 4. Neither the name of the University nor the names of its contributors
.\" may be used to endorse or promote products derived from this software
.\" without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd November 26, 2004
.Dt MOUNT_STD 8
.Os
.Sh NAME
.Nm mount_std ,
.Nm mount_devfs ,
.Nm mount_fdescfs ,
.Nm mount_linprocfs ,
.Nm mount_procfs
.Nd mount
.Dq standard
file systems
.Sh SYNOPSIS
.Nm mount_ Ns Ar fsname
.Op Fl o Ar options
.Ar "fs"
.Ar mount_point
.Sh DESCRIPTION
The
.Nm
utility is a generic mechanism for attaching ``standard'' file systems to
the file system.
The
.Nm
utility currently supports the following file systems:
.Nm devfs ,
.Nm fdescfs ,
.Nm linprocfs
and
.Nm procfs .
A ``standard'' file system is one which:
.Bl -enum -offset indent
.It
accepts only the standard
.Fl o
options
.Dq ro ,
.Dq rw ,
.Dq noexec ,
.Dq nosuid ,
and
.Dq union .
.It
has a kernel file system module name the same as its user-visible name.
.It
requires no other special processing on the part of the
.Nm
utility.
.El
.Pp
The options are as follows:
.Bl -tag -width indent
.It Fl o
Options are specified with a
.Fl o
flag followed by a comma separated string of options.
See the
.Xr mount 8
man page for possible options and their meanings.
.El
.Pp
The
.Nm
utility examines its zeroth command-line argument (the name by which
it was called) to determine the type of file system to be mounted.
If
it is called by a name which does not end in
.Dq Li _ Ns Ar fsname ,
.Nm
will assume (for compatibility
with
.Xr mount 8 )
that the zeroth argument contains only the name of the file system type.
The
.Nm
utility is normally installed with appropriate links to commands for
the distributed file systems which can be mounted in this way;
for information on the function of each file system, see the manual page
for that specific
.Nm mount_ Ns Ar fsname
utility.
.Pp
Refer to the following manual pages for detailed information
on these file systems:
.Xr devfs 5 ,
.Xr fdescfs 5 ,
.Xr linprocfs 5
and
.Xr procfs 5 .
.Sh DIAGNOSTICS
.Bl -diag
.It argv[0] must end in _fsname
The
.Nm
utility was called with a zeroth argument of
.Dq Li mount_std .
.It %s file system not available
The specified file system type was not present in the kernel and no
loadable module for it was found.
.El
.Sh SEE ALSO
.Xr mount 2 ,
.Xr unmount 2 ,
.Xr getvfsbyname 3 ,
.Xr devfs 5 ,
.Xr fdescfs 5 ,
.Xr fstab 5 ,
.Xr linprocfs 5 ,
.Xr procfs 5 ,
.Xr mount 8
.Sh HISTORY
The
.Nm
utility first appeared in
.Fx 2.2 .
Loadable file system modules first appeared in
.Fx 2.0 .
The
.Dq fdescfs
and
.Dq procfs
file system types first appeared in
.Fx 2.0 ;
the
.Dq devfs
file system type first appeared in
.Fx 2.2 ;
the
.Dq linprocfs
file system type first appeared in
.Fx 4.0 .
.Sh CAVEATS
None of the ``standard'' file systems may be NFS-exported.

View File

@ -1,160 +0,0 @@
/*
* Copyright (c) 1990, 1992 Jan-Simon Pendry
* Copyright (c) 1992, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Jan-Simon Pendry.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef lint
static const char copyright[] =
"@(#) Copyright (c) 1992, 1993, 1994\n\
The Regents of the University of California. All rights reserved.\n";
#endif /* not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include "mntopts.h"
static struct mntopt mopts[] = {
MOPT_STDOPTS,
MOPT_END
};
static char *fsname;
static volatile sig_atomic_t caughtsig;
static void usage(void) __dead2;
static void
catchsig(int s __unused)
{
caughtsig = 1;
}
int
main(int argc, char *argv[])
{
int ch, mntflags;
char mntpath[MAXPATHLEN];
struct iovec iov[4];
int error;
/*
* XXX
* mount(8) calls the mount programs with an argv[0] which is
* /just/ the file system name. So, if there is no underscore
* in argv[0], we assume that we are being called from mount(8)
* and that argv[0] is thus the name of the file system type.
*/
fsname = strrchr(argv[0], '_');
if (fsname) {
if (strcmp(fsname, "_std") == 0)
errx(EX_USAGE, "argv[0] must end in _fsname");
fsname++;
} else {
fsname = argv[0];
}
mntflags = 0;
while ((ch = getopt(argc, argv, "o:")) != -1)
switch (ch) {
case 'o':
getmntopts(optarg, mopts, &mntflags, 0);
break;
case '?':
default:
usage();
}
argc -= optind;
argv += optind;
if (argc != 2)
usage();
/* resolve the mountpoint with realpath(3) */
if (checkpath(argv[1], mntpath) != 0)
err(EX_USAGE, "%s", mntpath);
iov[0].iov_base = "fstype";
iov[0].iov_len = sizeof("fstype");
iov[1].iov_base = fsname;
iov[1].iov_len = strlen(iov[1].iov_base) + 1;
iov[2].iov_base = "fspath";
iov[2].iov_len = sizeof("fspath");
iov[3].iov_base = mntpath;
iov[3].iov_len = strlen(mntpath) + 1;
/*
* nmount(2) would kill us with SIGSYS if the kernel doesn't have it.
* This design bug is inconvenient. We must catch the signal and not
* just ignore it because of a plain bug: nmount(2) would return
* EINVAL instead of the correct ENOSYS if the kernel doesn't have it
* and we don't let the signal kill us. EINVAL is too ambiguous.
* This bug in 4.4BSD-Lite1 was fixed in 4.4BSD-Lite2 but is still in
* FreeBSD-5.0.
*/
signal(SIGSYS, catchsig);
error = nmount(iov, 4, mntflags);
signal(SIGSYS, SIG_DFL);
/*
* Try with the old mount syscall in the case
* this file system has not been converted yet,
* or the user didn't recompile his kernel.
*/
if (error && (errno == EOPNOTSUPP || errno == ENOSYS || caughtsig))
error = mount(fsname, mntpath, mntflags, NULL);
if (error)
err(EX_OSERR, NULL);
exit(0);
}
void
usage(void)
{
(void)fprintf(stderr,
"usage: mount_%s [-o options] what_to_mount mount_point\n",
fsname);
exit(EX_USAGE);
}

View File

@ -9,7 +9,6 @@ LDADD= -lkiconv
MOUNT= ${.CURDIR}/../mount
CFLAGS+= -I${MOUNT} -I${.CURDIR}/../../sys
.PATH: ${MOUNT}
WARNS?= 1
# Needs to be dynamically linked for optional dlopen() access to
# userland libiconv

View File

@ -73,18 +73,19 @@ void usage(void);
int
main(int argc, char **argv)
{
struct iovec iov[12];
int ch, i, mntflags, opts, udf_flags;
char *dev, *dir, mntpath[MAXPATHLEN];
char *cs_disk, *cs_local;
int verbose;
char mntpath[MAXPATHLEN];
char fstype[] = "udf";
struct iovec *iov;
char *cs_disk, *cs_local, *dev, *dir;
int ch, i, iovlen, mntflags, udf_flags, verbose;
i = mntflags = opts = udf_flags = verbose = 0;
i = iovlen = mntflags = udf_flags = verbose = 0;
cs_disk = cs_local = NULL;
iov = NULL;
while ((ch = getopt(argc, argv, "o:vC:")) != -1)
switch (ch) {
case 'o':
getmntopts(optarg, mopts, &mntflags, &opts);
getmntopts(optarg, mopts, &mntflags, NULL);
break;
case 'v':
verbose++;
@ -120,32 +121,13 @@ main(int argc, char **argv)
*/
mntflags |= MNT_RDONLY;
iov[i].iov_base = "fstype";
iov[i++].iov_len = sizeof("fstype");
iov[i].iov_base = "udf";
iov[i].iov_len = strlen(iov[i].iov_base) + 1;
i++;
iov[i].iov_base = "fspath";
iov[i++].iov_len = sizeof("fspath");
iov[i].iov_base = mntpath;
iov[i++].iov_len = strlen(mntpath) + 1;
iov[i].iov_base = "from";
iov[i++].iov_len = sizeof("from");
iov[i].iov_base = dev;
iov[i++].iov_len = strlen(dev) + 1;
iov[i].iov_base = "flags";
iov[i++].iov_len = sizeof("flags");
iov[i].iov_base = &udf_flags;
iov[i++].iov_len = sizeof(udf_flags);
build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
build_iovec(&iov, &iovlen, "fspath", mntpath, (size_t)-1);
build_iovec(&iov, &iovlen, "from", dev, (size_t)-1);
build_iovec(&iov, &iovlen, "flags", &udf_flags, sizeof(udf_flags));
if (udf_flags & UDFMNT_KICONV) {
iov[i].iov_base = "cs_disk";
iov[i++].iov_len = sizeof("cs_disk");
iov[i].iov_base = cs_disk;
iov[i++].iov_len = strlen(cs_disk) + 1;
iov[i].iov_base = "cs_local";
iov[i++].iov_len = sizeof("cs_local");
iov[i].iov_base = cs_local;
iov[i++].iov_len = strlen(cs_local) + 1;
build_iovec(&iov, &iovlen, "cs_disk", cs_disk, (size_t)-1);
build_iovec(&iov, &iovlen, "cs_local", cs_local, (size_t)-1);
}
if (nmount(iov, i, mntflags) < 0)
err(1, "%s", dev);

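The conversions above all follow the same pattern; here is a minimal standalone sketch of the build_iovec()/nmount() idiom (not from this commit; the device, mount point and file system type are hypothetical, and mntopts.h comes from src/sbin/mount):

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include "mntopts.h"	/* build_iovec() */

static void
mount_example(void)
{
	struct iovec *iov = NULL;
	int iovlen = 0;
	char fstype[] = "cd9660";
	char fspath[] = "/mnt";
	char from[] = "/dev/cd0";

	/* (size_t)-1 lets build_iovec() compute strlen(value) + 1 */
	build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
	build_iovec(&iov, &iovlen, "fspath", fspath, (size_t)-1);
	build_iovec(&iov, &iovlen, "from", from, (size_t)-1);
	if (nmount(iov, iovlen, MNT_RDONLY) < 0)
		err(1, "nmount");
}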
View File

@ -129,7 +129,7 @@ int
main(int argc, char *argv[])
{
struct iovec *iov;
int ch, mntflags, iovlen;
int ch, iovlen;
char source [MAXPATHLEN], target[MAXPATHLEN], errmsg[255];
char uid_str[20], gid_str[20];
char fstype[] = "unionfs";
@ -137,7 +137,6 @@ main(int argc, char *argv[])
iov = NULL;
iovlen = 0;
mntflags = 0;
memset(errmsg, 0, sizeof(errmsg));
while ((ch = getopt(argc, argv, "bo:")) != -1) {
@ -190,7 +189,7 @@ main(int argc, char *argv[])
build_iovec(&iov, &iovlen, "from", target, (size_t)-1);
build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg));
if (nmount(iov, iovlen, mntflags))
if (nmount(iov, iovlen, 0))
err(EX_OSERR, "%s: %s", source, errmsg);
exit(0);
}

View File

@ -143,14 +143,6 @@ By default this options is disabled.
If chosen timer is per-CPU
and runs in periodic mode, this option has no effect - all interrupts are
always generating.
.It Va kern.eventtimer.activetick
makes each CPU to receive all kinds of timer interrupts when they are busy.
Disabling it allows to skip some
.Fn hardclock
calls in some cases.
By default this options is enabled.
If chosen timer is per-CPU, this option has no effect - all interrupts are
always generating, as timer reprogramming is too expensive for that case.
.El
.Sh SEE ALSO
.Xr apic 4 ,

View File

@ -32,7 +32,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
.\" THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd October 2, 2009
.Dd March 7, 2013
.Dt ZYD 4
.Os
.Sh NAME
@ -121,6 +121,7 @@ driver:
.It X-Micro XWL-11GUZX
.It Yakumo QuickWLAN USB
.It Zonet ZEW2501
.It ZyXEL ZyAIR G-202
.It ZyXEL ZyAIR G-220
.El
.Sh EXAMPLES

View File

@ -23,7 +23,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd September 19, 2012
.Dd March 7, 2013
.Dt DEVELOPMENT 7
.Os
.Sh NAME
@ -109,9 +109,9 @@ your clients automatically pick up the changes.
.Bd -literal -offset 4n
mkdir /FreeBSD
cd /FreeBSD
svn co svn://svn.freebsd.org/ports/head ports
svn co svn://svn.freebsd.org/doc/head doc
svn co svn://svn.freebsd.org/base/head src
svn co https://svn.freebsd.org/ports/head ports
svn co https://svn.freebsd.org/doc/head doc
svn co https://svn.freebsd.org/base/head src
cd /usr
rm -rf src
ln -s /FreeBSD/src src

View File

@ -490,27 +490,17 @@ single file
.Bl -tag -width ".Pa /usr/ports/Mk/bsd.port.mk" -compact
.It Pa /usr/ports
The default ports directory
.No ( Fx
and
.Ox ) .
.It Pa /usr/pkgsrc
The default ports directory
.Pq Nx .
.It Pa /usr/ports/Mk/bsd.port.mk
The big Kahuna.
.El
.Sh SEE ALSO
.Xr make 1 ,
.Xr pkg_add 1 ,
.Xr pkg_create 1 ,
.Xr pkg_delete 1 ,
.Xr pkg_info 1 ,
.Xr pkg_version 1
.Xr pkg 8 ,
.Xr portsnap 8
.Pp
The following are part of the ports collection:
.Pp
.Xr portaudit 1 ,
.Xr portcheckout 1 ,
.Xr portlint 1
.Rs
.%B "The FreeBSD Handbook"

View File

@ -1196,9 +1196,13 @@ MLINKS+=signal.9 cursig.9 \
signal.9 SIG_STOPSIGMASK.9 \
signal.9 trapsignal.9
MLINKS+=sleep.9 msleep.9 \
sleep.9 msleep_sbt.9 \
sleep.9 msleep_spin.9 \
sleep.9 msleep_spin_sbt.9 \
sleep.9 pause.9 \
sleep.9 pause_sbt.9 \
sleep.9 tsleep.9 \
sleep.9 tsleep_sbt.9 \
sleep.9 wakeup.9 \
sleep.9 wakeup_one.9
MLINKS+=sleepqueue.9 init_sleepqueues.9 \
@ -1213,6 +1217,7 @@ MLINKS+=sleepqueue.9 init_sleepqueues.9 \
sleepqueue.9 sleepq_release.9 \
sleepqueue.9 sleepq_remove.9 \
sleepqueue.9 sleepq_set_timeout.9 \
sleepqueue.9 sleepq_set_timeout_sbt.9 \
sleepqueue.9 sleepq_signal.9 \
sleepqueue.9 sleepq_timedwait.9 \
sleepqueue.9 sleepq_timedwait_sig.9 \
@ -1335,6 +1340,9 @@ MLINKS+=timeout.9 callout.9 \
timeout.9 callout_init_rw.9 \
timeout.9 callout_pending.9 \
timeout.9 callout_reset.9 \
timeout.9 callout_reset_sbt.9 \
timeout.9 callout_reset_on.9 \
timeout.9 callout_reset_sbt_on.9 \
timeout.9 callout_schedule.9 \
timeout.9 callout_stop.9 \
timeout.9 untimeout.9

View File

@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 5, 2007
.Dd February 19, 2013
.Dt CONDVAR 9
.Os
.Sh NAME
@ -37,7 +37,9 @@
.Nm cv_wait_sig ,
.Nm cv_wait_unlock ,
.Nm cv_timedwait ,
.Nm cv_timedwait_sbt ,
.Nm cv_timedwait_sig ,
.Nm cv_timedwait_sig_sbt ,
.Nm cv_signal ,
.Nm cv_broadcast ,
.Nm cv_broadcastpri ,
@ -60,7 +62,13 @@
.Ft int
.Fn cv_timedwait "struct cv *cvp" "lock" "int timo"
.Ft int
.Fn cv_timedwait_sbt "struct cv *cvp" "lock" "sbintime_t sbt" \
"sbintime_t pr" "int flags"
.Ft int
.Fn cv_timedwait_sig "struct cv *cvp" "lock" "int timo"
.Ft int
.Fn cv_timedwait_sig_sbt "struct cv *cvp" "lock" "sbintime_t sbt" \
"sbintime_t pr" "int flags"
.Ft void
.Fn cv_signal "struct cv *cvp"
.Ft void
@ -191,6 +199,25 @@ if a signal is caught, or 0 if signaled via
.Fn cv_signal
or
.Fn cv_broadcast .
.Pp
The
.Fn cv_timedwait_sbt
and
.Fn cv_timedwait_sig_sbt
functions take an
.Fa sbt
argument instead of
.Fa timo .
It allows a relative or absolute unblock time to be specified with
higher resolution, in the form of
.Vt sbintime_t .
The
.Fa pr
parameter specifies the desired absolute event precision.
The
.Fa flags
parameter passes additional
.Fn callout_reset_sbt
flags.
.Sh RETURN VALUES
If successful,
.Fn cv_wait_sig ,
@ -230,4 +257,5 @@ Timeout expired.
.Xr rwlock 9 ,
.Xr sema 9 ,
.Xr sleep 9 ,
.Xr sx 9
.Xr sx 9 ,
.Xr timeout 9

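A kernel-side sketch of the new interface described above (not part of this commit; the condvar, mutex, names and timings are hypothetical, the objects are assumed to have been set up with mtx_init()/cv_init(), and SBT_1MS is assumed to be available from sys/time.h):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>

static struct mtx xx_mtx;	/* assumed initialized with mtx_init() */
static struct cv xx_cv;		/* assumed initialized with cv_init() */

static int
xx_wait(void)
{
	int error;

	mtx_lock(&xx_mtx);
	/* Wait up to 50 ms, allowing ~1 ms of precision slack. */
	error = cv_timedwait_sbt(&xx_cv, &xx_mtx, 50 * SBT_1MS,
	    SBT_1MS, 0);
	mtx_unlock(&xx_mtx);
	return (error);	/* 0 if signalled, EWOULDBLOCK on timeout */
}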
View File

@ -25,14 +25,18 @@
.\"
.\" $FreeBSD$
.\"
.Dd December 12, 2009
.Dd February 19, 2013
.Dt SLEEP 9
.Os
.Sh NAME
.Nm msleep ,
.Nm msleep_sbt ,
.Nm msleep_spin ,
.Nm msleep_spin_sbt ,
.Nm pause ,
.Nm pause_sbt ,
.Nm tsleep ,
.Nm tsleep_sbt ,
.Nm wakeup
.Nd wait for events
.Sh SYNOPSIS
@ -42,11 +46,23 @@
.Ft int
.Fn msleep "void *chan" "struct mtx *mtx" "int priority" "const char *wmesg" "int timo"
.Ft int
.Fn msleep_sbt "void *chan" "struct mtx *mtx" "int priority" \
"const char *wmesg" "sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft int
.Fn msleep_spin "void *chan" "struct mtx *mtx" "const char *wmesg" "int timo"
.Ft int
.Fn msleep_spin_sbt "void *chan" "struct mtx *mtx" "const char *wmesg" \
"sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft void
.Fn pause "const char *wmesg" "int timo"
.Ft void
.Fn pause_sbt "const char *wmesg" "sbintime_t sbt" "sbintime_t pr" \
"int flags"
.Ft int
.Fn tsleep "void *chan" "int priority" "const char *wmesg" "int timo"
.Ft int
.Fn tsleep_sbt "void *chan" "int priority" "const char *wmesg" \
"sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft void
.Fn wakeup "void *chan"
.Ft void
@ -148,6 +164,27 @@ If the timeout expires,
then the sleep function will return
.Er EWOULDBLOCK .
.Pp
The
.Fn msleep_sbt ,
.Fn msleep_spin_sbt ,
.Fn pause_sbt
and
.Fn tsleep_sbt
functions take an
.Fa sbt
parameter instead of
.Fa timo .
It allows a relative or absolute wakeup time to be specified with
higher resolution, in the form of
.Vt sbintime_t .
The
.Fa pr
parameter specifies the desired absolute event precision.
The
.Fa flags
parameter passes additional
.Fn callout_reset_sbt
flags.
.Pp
Several of the sleep functions including
.Fn msleep ,
.Fn msleep_spin ,
@ -301,7 +338,8 @@ A non-zero timeout was specified and the timeout expired.
.Xr mi_switch 9 ,
.Xr mtx_sleep 9 ,
.Xr rw_sleep 9 ,
.Xr sx_sleep 9
.Xr sx_sleep 9 ,
.Xr timeout 9
.Sh HISTORY
The functions
.Fn sleep

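A matching kernel-side sketch for the sleep functions documented above (not part of this commit; the wait-message string and timings are hypothetical, and SBT_1MS is assumed to come from sys/time.h):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>

static void
xx_settle(void)
{
	/*
	 * Sleep for roughly 100 ms with no wait channel, allowing up to
	 * 10 ms of slack so the wakeup can be aggregated with other
	 * timer events.
	 */
	pause_sbt("xxsettle", 100 * SBT_1MS, 10 * SBT_1MS, 0);
}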
View File

@ -23,7 +23,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 8, 2010
.Dd February 19, 2013
.Dt SLEEPQUEUE 9
.Os
.Sh NAME
@ -41,6 +41,7 @@
.Nm sleepq_remove ,
.Nm sleepq_signal ,
.Nm sleepq_set_timeout ,
.Nm sleepq_set_timeout_sbt ,
.Nm sleepq_sleepcnt ,
.Nm sleepq_timedwait ,
.Nm sleepq_timedwait_sig ,
@ -79,6 +80,9 @@
.Fn sleepq_signal "void *wchan" "int flags" "int pri" "int queue"
.Ft void
.Fn sleepq_set_timeout "void *wchan" "int timo"
.Ft void
.Fn sleepq_set_timeout_sbt "void *wchan" "sbintime_t sbt" \
"sbintime_t pr" "int flags"
.Ft u_int
.Fn sleepq_sleepcnt "void *wchan" "int queue"
.Ft int
@ -231,6 +235,23 @@ The
.Fa timo
parameter should specify the timeout value in ticks.
.Pp
The
.Fn sleepq_set_timeout_sbt
function takes an
.Fa sbt
argument instead of
.Fa timo .
It allows a relative or absolute wakeup time to be specified with
higher resolution, in the form of
.Vt sbintime_t .
The
.Fa pr
parameter specifies the desired absolute event precision.
The
.Fa flags
parameter passes additional
.Fn callout_reset_sbt
flags.
.Pp
The current thread may be marked interruptible by calling
.Fn sleepq_catch_signals
with
@ -400,4 +421,5 @@ than manipulating sleep queues directly.
.Xr condvar 9 ,
.Xr runqueue 9 ,
.Xr scheduler 9 ,
.Xr sleep 9
.Xr sleep 9 ,
.Xr timeout 9

View File

@ -29,7 +29,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 1, 2012
.Dd February 19, 2013
.Dt TIMEOUT 9
.Os
.Sh NAME
@ -44,6 +44,9 @@
.Nm callout_reset ,
.Nm callout_reset_on ,
.Nm callout_reset_curcpu ,
.Nm callout_reset_sbt ,
.Nm callout_reset_sbt_on ,
.Nm callout_reset_sbt_curcpu ,
.Nm callout_schedule ,
.Nm callout_schedule_on ,
.Nm callout_schedule_curcpu ,
@ -82,6 +85,9 @@ struct callout_handle handle = CALLOUT_HANDLE_INITIALIZER(&handle);
.Fn callout_reset_on "struct callout *c" "int ticks" "timeout_t *func" \
"void *arg" "int cpu"
.Ft int
.Fn callout_reset_sbt_on "struct callout *c" "sbintime_t sbt" \
"sbintime_t pr" "timeout_t *func" "void *arg" "int cpu" "int flags"
.Ft int
.Fn callout_reset_curcpu "struct callout *c" "int ticks" "timeout_t *func" \
"void *arg"
.Ft int
@ -326,6 +332,33 @@ and
.Fn callout_schedule
but take an extra parameter specifying the target CPU for the callout.
.Pp
The
.Fn callout_reset_sbt_on
function provides higher time resolution, taking a relative or absolute
time and a precision instead of a relative tick count.
If the specified time is in the past, it is silently converted to the
present so that the handler runs as soon as possible.
.Pp
The following
.Fa flags
may be specified:
.Bl -tag -width ".Dv C_DIRECT_EXEC"
.It Dv C_ABSOLUTE
Handle the
.Fa sbt
argument as the absolute time of the event since boot, rather than a
relative time.
.It Dv C_DIRECT_EXEC
Run the handler directly from hardware interrupt context instead of the
softclock swi.
It is faster, but puts more constraints on handlers.
Handlers may use only spin mutexes for locking, and they must be fast because
they run with absolute priority.
.It Fn C_PREL
Specifies the relative event time precision as the binary logarithm of the
time interval divided by the acceptable time deviation: 1 -- 1/2, 2 -- 1/4,
etc.
A smaller value allows more events to be aggregated into one timer
interrupt, reducing processing overhead and power consumption.
.El
.Pp
The functions
.Fn callout_reset_curcpu
and

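A kernel-side sketch of arming a high-resolution callout with the flags documented above (not part of this commit; the callout, handler names and timings are hypothetical, and SBT_1MS is assumed to come from sys/time.h):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/time.h>

static struct callout xx_co;

static void
xx_fire(void *arg)
{
	/* Runs from softclock context unless C_DIRECT_EXEC was requested. */
}

static void
xx_arm(void)
{
	callout_init(&xx_co, 1);	/* 1 = MPSAFE */
	/*
	 * Fire ~200 ms from now; C_PREL(2) permits a deviation of up to
	 * 1/4 of the interval so events can be coalesced.
	 */
	callout_reset_sbt(&xx_co, 200 * SBT_1MS, 0, xx_fire, NULL,
	    C_PREL(2));
}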
View File

@ -138,7 +138,10 @@ device sa # Sequential Access (tape etc)
device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
device ses # Enclosure Services (SES and SAF-TE)
#device ctl # CAM Target Layer
device ctl # CAM Target Layer
options CTL_DISABLE # Disable CTL by default to save memory.
# Re-enable with kern.cam.ctl.disable=0 in
# /boot/loader.conf
# RAID controllers interfaced to the SCSI subsystem
device amr # AMI MegaRAID

View File

@ -442,6 +442,13 @@ options SAFE_RNDTEST # enable rndtest support
#
# VirtIO support
#
# The virtio entry provides a generic bus for use by the device drivers.
# It must be combined with an interface that communicates with the host.
# Multiple such interfaces are defined by the VirtIO specification. FreeBSD
# only has support for PCI. Therefore, virtio_pci must be statically
# compiled in or loaded as a module for the device drivers to function.
#
device virtio # Generic VirtIO bus (required)
device virtio_pci # VirtIO PCI Interface
device vtnet # VirtIO Ethernet device
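If the drivers are built as modules instead, the equivalent setup is a few /boot/loader.conf lines (an illustration, assuming the standard module names virtio.ko, virtio_pci.ko and if_vtnet.ko):

virtio_load="YES"
virtio_pci_load="YES"
if_vtnet_load="YES"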

View File

@ -211,10 +211,12 @@ ENTRY(cpu_throw)
GET_PCPU(r6)
str r7, [r6, #PC_CURPCB]
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
ENTRY(cpu_switch)
stmfd sp!, {r4-r7, lr}
sub sp, sp, #4;
mov r6, r2 /* Save the mutex */
.Lswitch_resume:
@ -488,6 +490,7 @@ ENTRY(cpu_switch)
* Pull the registers that got pushed when either savectx() or
* cpu_switch() was called and return.
*/
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
#ifdef DIAGNOSTIC
.Lswitch_bogons:
@ -501,6 +504,7 @@ ENTRY(cpu_switch)
#endif
ENTRY(savectx)
stmfd sp!, {r4-r7, lr}
sub sp, sp, #4
/*
* r0 = pcb
*/
@ -528,6 +532,7 @@ ENTRY(savectx)
bl _C_LABEL(vfp_store)
1:
#endif /* ARM_VFP_SUPPORT */
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
ENTRY(fork_trampoline)

View File

@ -73,6 +73,12 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
/*
* struct switchframe must be a multiple of 8 for correct stack alignment
*/
CTASSERT(sizeof(struct switchframe) == 24);
CTASSERT(sizeof(struct trapframe) == 76);
#ifndef NSFBUFS
#define NSFBUFS (512 + maxusers * 16)
#endif
@ -131,8 +137,8 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
pcb2->un_32.pcb32_sp = td2->td_kstack +
USPACE_SVC_STACK_TOP - sizeof(*pcb2);
pmap_activate(td2);
td2->td_frame = tf =
(struct trapframe *)pcb2->un_32.pcb32_sp - 1;
td2->td_frame = tf = (struct trapframe *)STACKALIGN(
pcb2->un_32.pcb32_sp - sizeof(struct trapframe));
*tf = *td1->td_frame;
sf = (struct switchframe *)tf - 1;
sf->sf_r4 = (u_int)fork_return;
@ -142,6 +148,8 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
tf->tf_r0 = 0;
tf->tf_r1 = 0;
pcb2->un_32.pcb32_sp = (u_int)sf;
KASSERT((pcb2->un_32.pcb32_sp & 7) == 0,
("cpu_fork: Incorrect stack alignment"));
/* Setup to release spin count in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
@ -345,6 +353,8 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
tf->tf_r0 = 0;
td->td_pcb->un_32.pcb32_sp = (u_int)sf;
td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + USPACE_UNDEF_STACK_TOP;
KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
("cpu_set_upcall: Incorrect stack alignment"));
/* Setup to release spin count in fork_exit(). */
td->td_md.md_spinlock_count = 1;
@ -438,6 +448,8 @@ cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
sf->sf_r4 = (u_int)func;
sf->sf_r5 = (u_int)arg;
td->td_pcb->un_32.pcb32_sp = (u_int)sf;
KASSERT((td->td_pcb->un_32.pcb32_sp & 7) == 0,
("cpu_set_fork_handler: Incorrect stack alignment"));
}
/*

View File

@ -199,6 +199,7 @@ bcm_dma_reset(device_t dev, int ch)
/* Reset control block */
cb = sc->sc_dma_ch[ch].cb;
bzero(cb, sizeof(cb));
cb->info = INFO_WAIT_RESP;
}
static int
@ -615,6 +616,7 @@ bcm_dma_intr(void *arg)
debug & DEBUG_ERROR_MASK, ch->ch);
bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
debug & DEBUG_ERROR_MASK);
bcm_dma_reset(sc->sc_dev, ch->ch);
}
if (cs & CS_INT) {

View File

@ -138,10 +138,14 @@ typedef struct irqframe {
} irqframe_t;
/*
* Switch frame
* Switch frame.
*
 * It is important that this structure is a multiple of 8 bytes so that the
 * stack is correctly aligned when we create new threads.
*/
struct switchframe {
u_int pad; /* Used to pad the struct to a multiple of 8-bytes */
u_int sf_r4;
u_int sf_r5;
u_int sf_r6;

View File

@ -78,6 +78,8 @@ __FBSDID("$FreeBSD$");
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>
#include "opt_ctl.h"
struct ctl_softc *control_softc = NULL;
/*
@ -317,7 +319,11 @@ static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single;
static int index_to_aps_page;
#ifdef CTL_DISABLE
int ctl_disable = 1;
#else
int ctl_disable = 0;
#endif
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, disable, CTLFLAG_RDTUN, &ctl_disable, 0,

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/bpobj.h>
@ -414,6 +414,12 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
0, FTAG, &subdb, 0));
/*
* Make sure that we are not asking dmu_write()
* to write more data than we have in our buffer.
*/
VERIFY3U(subdb->db_size, >=,
numsubsub * sizeof (subobj));
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
numsubsub * sizeof (subobj), subdb->db_data, tx);

View File

@ -1711,7 +1711,7 @@ dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dnp->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
for (int i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill;

View File

@ -1034,6 +1034,7 @@ fasttrap_pid_probe(struct reg *rp)
#endif
PROC_LOCK(p);
_PHOLD(p);
pid = p->p_pid;
#if defined(sun)
pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
@ -1059,6 +1060,7 @@ fasttrap_pid_probe(struct reg *rp)
#if defined(sun)
mutex_exit(pid_mtx);
#endif
_PRELE(p);
PROC_UNLOCK(p);
return (-1);
}
@ -1732,7 +1734,6 @@ fasttrap_pid_probe(struct reg *rp)
ASSERT(i <= sizeof (scratch));
#if defined(sun)
if (fasttrap_copyout(scratch, (char *)addr, i)) {
#else
@ -1794,7 +1795,11 @@ fasttrap_pid_probe(struct reg *rp)
}
rp->r_rip = new_pc;
set_regs(curthread, rp);
PROC_LOCK(p);
proc_write_regs(curthread, rp);
_PRELE(p);
PROC_UNLOCK(p);
return (0);
}

View File

@ -393,11 +393,11 @@ dev/virtio/virtio.c optional virtio
dev/virtio/virtqueue.c optional virtio
dev/virtio/virtio_bus_if.m optional virtio
dev/virtio/virtio_if.m optional virtio
dev/virtio/pci/virtio_pci.c optional virtio_pci virtio pci
dev/virtio/network/if_vtnet.c optional vtnet virtio
dev/virtio/block/virtio_blk.c optional virtio_blk virtio
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon virtio
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi virtio scbus
dev/virtio/pci/virtio_pci.c optional virtio_pci
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/block/virtio_blk.c optional virtio_blk
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
isa/syscons_isa.c optional sc
isa/vga_isa.c optional vga
kern/kern_clocksource.c standard

View File

@ -374,11 +374,11 @@ dev/virtio/virtio.c optional virtio
dev/virtio/virtqueue.c optional virtio
dev/virtio/virtio_bus_if.m optional virtio
dev/virtio/virtio_if.m optional virtio
dev/virtio/pci/virtio_pci.c optional virtio_pci virtio pci
dev/virtio/network/if_vtnet.c optional vtnet virtio
dev/virtio/block/virtio_blk.c optional virtio_blk virtio
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon virtio
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi virtio scbus
dev/virtio/pci/virtio_pci.c optional virtio_pci
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/block/virtio_blk.c optional virtio_blk
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
i386/acpica/acpi_machdep.c optional acpi
acpi_wakecode.o optional acpi \
dependency "$S/i386/acpica/acpi_wakecode.S assym.s" \

View File

@ -329,6 +329,9 @@ SCSI_PT_DEFAULT_TIMEOUT opt_pt.h
# Options used only in cam/scsi/scsi_ses.c
SES_ENABLE_PASSTHROUGH opt_ses.h
# Options used only in cam/ctl
CTL_DISABLE opt_ctl.h
# Options used in dev/sym/ (Symbios SCSI driver).
SYM_SETUP_LP_PROBE_MAP opt_sym.h #-Low Priority Probe Map (bits)
# Allows the ncr to take precedence

View File

@ -104,14 +104,10 @@ static void update_ed(struct hfsc_class *, int);
static void update_d(struct hfsc_class *, int);
static void init_vf(struct hfsc_class *, int);
static void update_vf(struct hfsc_class *, int, u_int64_t);
static ellist_t *ellist_alloc(void);
static void ellist_destroy(ellist_t *);
static void ellist_insert(struct hfsc_class *);
static void ellist_remove(struct hfsc_class *);
static void ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
static actlist_t *actlist_alloc(void);
static void actlist_destroy(actlist_t *);
struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void actlist_insert(struct hfsc_class *);
static void actlist_remove(struct hfsc_class *);
static void actlist_update(struct hfsc_class *);
@ -204,12 +200,7 @@ hfsc_add_altq(struct pf_altq *a)
if (hif == NULL)
return (ENOMEM);
hif->hif_eligible = ellist_alloc();
if (hif->hif_eligible == NULL) {
free(hif, M_DEVBUF);
return (ENOMEM);
}
TAILQ_INIT(&hif->hif_eligible);
hif->hif_ifq = &ifp->if_snd;
/* keep the state in pf_altq */
@ -230,8 +221,6 @@ hfsc_remove_altq(struct pf_altq *a)
(void)hfsc_clear_interface(hif);
(void)hfsc_class_destroy(hif->hif_rootclass);
ellist_destroy(hif->hif_eligible);
free(hif, M_DEVBUF);
return (0);
@ -408,9 +397,7 @@ hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
if (cl->cl_q == NULL)
goto err_ret;
cl->cl_actc = actlist_alloc();
if (cl->cl_actc == NULL)
goto err_ret;
TAILQ_INIT(&cl->cl_actc);
if (qlimit == 0)
qlimit = 50; /* use default */
@ -544,8 +531,6 @@ hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
return (cl);
err_ret:
if (cl->cl_actc != NULL)
actlist_destroy(cl->cl_actc);
if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
if (q_is_rio(cl->cl_q))
@ -620,8 +605,6 @@ hfsc_class_destroy(struct hfsc_class *cl)
IFQ_UNLOCK(cl->cl_hif->hif_ifq);
splx(s);
actlist_destroy(cl->cl_actc);
if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
if (q_is_rio(cl->cl_q))
@ -774,7 +757,7 @@ hfsc_dequeue(struct ifaltq *ifq, int op)
* find the class with the minimum deadline among
* the eligible classes.
*/
if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
if ((cl = hfsc_get_mindl(hif, cur_time))
!= NULL) {
realtime = 1;
} else {
@ -994,7 +977,7 @@ init_vf(struct hfsc_class *cl, int len)
go_active = 0;
if (go_active) {
max_cl = actlist_last(cl->cl_parent->cl_actc);
max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
if (max_cl != NULL) {
/*
* set vt to the average of the min and max
@ -1159,12 +1142,12 @@ update_cfmin(struct hfsc_class *cl)
struct hfsc_class *p;
u_int64_t cfmin;
if (TAILQ_EMPTY(cl->cl_actc)) {
if (TAILQ_EMPTY(&cl->cl_actc)) {
cl->cl_cfmin = 0;
return;
}
cfmin = HT_INFINITY;
TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
if (p->cl_f == 0) {
cl->cl_cfmin = 0;
return;
@ -1184,22 +1167,6 @@ update_cfmin(struct hfsc_class *cl)
* there is one eligible list per interface.
*/
static ellist_t *
ellist_alloc(void)
{
ellist_t *head;
head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
TAILQ_INIT(head);
return (head);
}
static void
ellist_destroy(ellist_t *head)
{
free(head, M_DEVBUF);
}
static void
ellist_insert(struct hfsc_class *cl)
{
@ -1207,13 +1174,13 @@ ellist_insert(struct hfsc_class *cl)
struct hfsc_class *p;
/* check the last entry first */
if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
p->cl_e <= cl->cl_e) {
TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
return;
}
TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
if (cl->cl_e < p->cl_e) {
TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
return;
@ -1227,7 +1194,7 @@ ellist_remove(struct hfsc_class *cl)
{
struct hfsc_if *hif = cl->cl_hif;
TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}
static void
@ -1245,11 +1212,11 @@ ellist_update(struct hfsc_class *cl)
return;
/* check the last entry */
last = TAILQ_LAST(hif->hif_eligible, _eligible);
last = TAILQ_LAST(&hif->hif_eligible, elighead);
ASSERT(last != NULL);
if (last->cl_e <= cl->cl_e) {
TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
return;
}
@ -1259,7 +1226,7 @@ ellist_update(struct hfsc_class *cl)
*/
while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
if (cl->cl_e < p->cl_e) {
TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
return;
}
@ -1269,11 +1236,11 @@ ellist_update(struct hfsc_class *cl)
/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
struct hfsc_class *p, *cl = NULL;
TAILQ_FOREACH(p, head, cl_ellist) {
TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
if (p->cl_e > cur_time)
break;
if (cl == NULL || p->cl_d < cl->cl_d)
@ -1287,34 +1254,20 @@ ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
* by their virtual time.
* each intermediate class has one active children list.
*/
static actlist_t *
actlist_alloc(void)
{
actlist_t *head;
head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
TAILQ_INIT(head);
return (head);
}
static void
actlist_destroy(actlist_t *head)
{
free(head, M_DEVBUF);
}
static void
actlist_insert(struct hfsc_class *cl)
{
struct hfsc_class *p;
/* check the last entry first */
if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
|| p->cl_vt <= cl->cl_vt) {
TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
return;
}
TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
if (cl->cl_vt < p->cl_vt) {
TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
return;
@ -1326,7 +1279,7 @@ actlist_insert(struct hfsc_class *cl)
static void
actlist_remove(struct hfsc_class *cl)
{
TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}
static void
@ -1344,11 +1297,11 @@ actlist_update(struct hfsc_class *cl)
return;
/* check the last entry */
last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
ASSERT(last != NULL);
if (last->cl_vt <= cl->cl_vt) {
TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
return;
}
@ -1358,7 +1311,7 @@ actlist_update(struct hfsc_class *cl)
*/
while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
if (cl->cl_vt < p->cl_vt) {
TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
return;
}
@ -1371,7 +1324,7 @@ actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
struct hfsc_class *p;
TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
if (p->cl_f <= cur_time)
return (p);
}

View File

@ -218,16 +218,6 @@ struct runtime_sc {
u_int64_t ism2; /* scaled inverse-slope of the 2nd segment */
};
/* for TAILQ based ellist and actlist implementation */
struct hfsc_class;
typedef TAILQ_HEAD(_eligible, hfsc_class) ellist_t;
typedef TAILQ_ENTRY(hfsc_class) elentry_t;
typedef TAILQ_HEAD(_active, hfsc_class) actlist_t;
typedef TAILQ_ENTRY(hfsc_class) actentry_t;
#define ellist_first(s) TAILQ_FIRST(s)
#define actlist_first(s) TAILQ_FIRST(s)
#define actlist_last(s) TAILQ_LAST(s, _active)
struct hfsc_class {
u_int cl_id; /* class id (just for debug) */
u_int32_t cl_handle; /* class handle */
@ -277,10 +267,10 @@ struct hfsc_class {
u_int cl_vtperiod; /* vt period sequence no */
u_int cl_parentperiod; /* parent's vt period seqno */
int cl_nactive; /* number of active children */
actlist_t *cl_actc; /* active children list */
actentry_t cl_actlist; /* active children list entry */
elentry_t cl_ellist; /* eligible list entry */
TAILQ_HEAD(acthead, hfsc_class) cl_actc; /* active children list */
TAILQ_ENTRY(hfsc_class) cl_actlist; /* active children list entry */
TAILQ_ENTRY(hfsc_class) cl_ellist; /* eligible list entry */
struct {
struct pktcntr xmit_cnt;
@ -304,7 +294,7 @@ struct hfsc_if {
u_int hif_packets; /* # of packets in the tree */
u_int hif_classid; /* class id sequence number */
ellist_t *hif_eligible; /* eligible list */
TAILQ_HEAD(elighead, hfsc_class) hif_eligible; /* eligible list */
#ifdef ALTQ3_CLFIER_COMPAT
struct acc_classifier hif_classifier;

View File

@ -228,6 +228,7 @@ typedef void irqreturn_t;
#define IRQ_NONE /* nothing */
#define unlikely(x) __builtin_expect(!!(x), 0)
#define likely(x) __builtin_expect(!!(x), 1)
#define container_of(ptr, type, member) ({ \
__typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
@ -905,6 +906,7 @@ struct drm_device {
struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
void *drm_ttm_bo;
struct unrhdr *drw_unrhdr;
/* RB tree of drawable infos */
RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
@ -1301,10 +1303,14 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot);
int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
vm_size_t size, struct vm_object **obj_res, int nprot);
void drm_gem_pager_dtr(void *obj);
struct ttm_bo_device;
int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
vm_size_t size, struct vm_object **obj_res, int nprot);
void drm_device_lock_mtx(struct drm_device *dev);
void drm_device_unlock_mtx(struct drm_device *dev);
int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,

View File

@ -58,6 +58,8 @@ static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist);
static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
vm_size_t size, struct vm_object **obj_res, int nprot);
static int
drm_modevent(module_t mod, int type, void *data)
@ -187,7 +189,7 @@ static struct cdevsw drm_cdevsw = {
.d_ioctl = drm_ioctl,
.d_poll = drm_poll,
.d_mmap = drm_mmap,
.d_mmap_single = drm_gem_mmap_single,
.d_mmap_single = drm_mmap_single,
.d_name = "drm",
.d_flags = D_TRACKCLOSE
};
@ -955,6 +957,23 @@ drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
return (0);
}
static int
drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
{
struct drm_device *dev;
dev = drm_get_device_from_kdev(kdev);
if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
} else if (dev->drm_ttm_bo != NULL) {
return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size,
obj_res, nprot));
} else {
return (ENODEV);
}
}
#if DRM_LINUX
#include <sys/sysproto.h>

View File

@ -441,16 +441,12 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
}
int
drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
{
struct drm_device *dev;
struct drm_gem_object *gem_obj;
struct vm_object *vm_obj;
dev = drm_get_device_from_kdev(kdev);
if ((dev->driver->driver_features & DRIVER_GEM) == 0)
return (ENODEV);
DRM_LOCK(dev);
gem_obj = drm_gem_object_from_offset(dev, *offset);
if (gem_obj == NULL) {

110
sys/dev/drm2/drm_global.c Normal file
View File

@ -0,0 +1,110 @@
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_global.h>
MALLOC_DEFINE(M_DRM_GLOBAL, "drm_global", "DRM Global Items");
struct drm_global_item {
struct sx mutex;
void *object;
int refcount;
};
static struct drm_global_item glob[DRM_GLOBAL_NUM];
void drm_global_init(void)
{
int i;
for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
struct drm_global_item *item = &glob[i];
sx_init(&item->mutex, "drmgi");
item->object = NULL;
item->refcount = 0;
}
}
void drm_global_release(void)
{
int i;
for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
struct drm_global_item *item = &glob[i];
MPASS(item->object == NULL);
MPASS(item->refcount == 0);
sx_destroy(&item->mutex);
}
}
int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
struct drm_global_item *item = &glob[ref->global_type];
void *object;
sx_xlock(&item->mutex);
if (item->refcount == 0) {
item->object = malloc(ref->size, M_DRM_GLOBAL,
M_WAITOK | M_ZERO);
ref->object = item->object;
ret = ref->init(ref);
if (unlikely(ret != 0))
goto out_err;
}
++item->refcount;
ref->object = item->object;
object = item->object;
sx_xunlock(&item->mutex);
return 0;
out_err:
sx_xunlock(&item->mutex);
item->object = NULL;
return ret;
}
void drm_global_item_unref(struct drm_global_reference *ref)
{
struct drm_global_item *item = &glob[ref->global_type];
sx_xlock(&item->mutex);
MPASS(item->refcount != 0);
MPASS(ref->object == item->object);
if (--item->refcount == 0) {
ref->release(ref);
item->object = NULL;
}
sx_xunlock(&item->mutex);
}

56
sys/dev/drm2/drm_global.h Normal file
View File

@ -0,0 +1,56 @@
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
#ifndef _DRM_GLOBAL_H_
#define _DRM_GLOBAL_H_
enum drm_global_types {
DRM_GLOBAL_TTM_MEM = 0,
DRM_GLOBAL_TTM_BO,
DRM_GLOBAL_TTM_OBJECT,
DRM_GLOBAL_NUM
};
struct drm_global_reference {
enum drm_global_types global_type;
size_t size;
void *object;
int (*init) (struct drm_global_reference *);
void (*release) (struct drm_global_reference *);
};
extern void drm_global_init(void);
extern void drm_global_release(void);
extern int drm_global_item_ref(struct drm_global_reference *ref);
extern void drm_global_item_unref(struct drm_global_reference *ref);
MALLOC_DECLARE(M_DRM_GLOBAL);
#endif
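A hedged usage sketch (not part of the commit) of the interface declared above: a driver takes a reference on one of the shared global items at attach time and drops it on detach. The callbacks, the 128-byte size and the wrapper names are invented for the example.

static int
example_global_init(struct drm_global_reference *ref)
{
	/* ref->object points at the freshly allocated, zeroed item. */
	return (0);
}

static void
example_global_release(struct drm_global_reference *ref)
{
}

static struct drm_global_reference example_ref = {
	.global_type = DRM_GLOBAL_TTM_MEM,
	.size = 128,			/* size of the shared object */
	.init = example_global_init,
	.release = example_global_release,
};

static int
example_attach(void)
{
	/* The first caller allocates and initializes the item; later
	 * callers only bump the reference count. */
	return (drm_global_item_ref(&example_ref));
}

static void
example_detach(void)
{
	/* The last unref runs the release callback and frees the item. */
	drm_global_item_unref(&example_ref);
}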

View File

@ -561,3 +561,40 @@ void drm_mm_takedown(struct drm_mm * mm)
KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
unsigned long total_used = 0, total_free = 0, total = 0;
unsigned long hole_start, hole_end, hole_size;
hole_start = drm_mm_hole_node_start(&mm->head_node);
hole_end = drm_mm_hole_node_end(&mm->head_node);
hole_size = hole_end - hole_start;
if (hole_size)
printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
drm_mm_for_each_node(entry, mm) {
printf("%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
entry->size);
total_used += entry->size;
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
}
}
total = total_free + total_used;
printf("%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}

View File

@ -182,4 +182,6 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#endif

View File

@ -0,0 +1,145 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
* Keith Packard.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <dev/drm2/ttm/ttm_placement.h>
struct ttm_agp_backend {
struct ttm_tt ttm;
struct agp_memory *mem;
device_t bridge;
};
MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
unsigned i;
mem = agp_alloc_memory(agp_be->bridge, AGP_USER_MEMORY, ttm->num_pages);
if (unlikely(mem == NULL))
return -ENOMEM;
mem->page_count = 0;
for (i = 0; i < ttm->num_pages; i++) {
vm_page_t page = ttm->pages[i];
if (!page)
page = ttm->dummy_read_page;
mem->pages[mem->page_count++] = page;
}
agp_be->mem = mem;
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
ret = agp_bind_memory(mem, node->start);
if (ret)
pr_err("AGP Bind memory failed\n");
return ret;
}
static int ttm_agp_unbind(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem) {
if (agp_be->mem->is_bound)
return agp_unbind_memory(agp_be->mem);
agp_free_memory(agp_be->mem);
agp_be->mem = NULL;
}
return 0;
}
static void ttm_agp_destroy(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem)
ttm_agp_unbind(ttm);
ttm_tt_fini(ttm);
free(agp_be, M_TTM_AGP);
}
static struct ttm_backend_func ttm_agp_func = {
.bind = ttm_agp_bind,
.unbind = ttm_agp_unbind,
.destroy = ttm_agp_destroy,
};
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
device_t bridge,
unsigned long size, uint32_t page_flags,
vm_page_t dummy_read_page)
{
struct ttm_agp_backend *agp_be;
agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);
agp_be->mem = NULL;
agp_be->bridge = bridge;
agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
return NULL;
}
return &agp_be->ttm;
}
int ttm_agp_tt_populate(struct ttm_tt *ttm)
{
if (ttm->state != tt_unpopulated)
return 0;
return ttm_pool_populate(ttm);
}
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
{
ttm_pool_unpopulate(ttm);
}
#endif

1820
sys/dev/drm2/ttm/ttm_bo.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,740 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_
#include <dev/drm2/drmP.h>
struct ttm_bo_device;
struct drm_mm_node;
/**
* struct ttm_placement
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
* @num_placement: number of preferred placements
* @placement: preferred placements
* @num_busy_placement: number of preferred placements when need to evict buffer
* @busy_placement: preferred placements when need to evict buffer
*
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
unsigned fpfn;
unsigned lpfn;
unsigned num_placement;
const uint32_t *placement;
unsigned num_busy_placement;
const uint32_t *busy_placement;
};
/**
* struct ttm_bus_placement
*
* @addr: mapped virtual address
* @base: bus base address
* @is_iomem: is this io memory ?
* @size: size in byte
* @offset: offset from the base address
* @io_reserved_vm: The VM system has a refcount in @io_reserved_count
* @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
*
* Structure indicating the bus placement of an object.
*/
struct ttm_bus_placement {
void *addr;
unsigned long base;
unsigned long size;
unsigned long offset;
bool is_iomem;
bool io_reserved_vm;
uint64_t io_reserved_count;
};
/**
* struct ttm_mem_reg
*
* @mm_node: Memory manager node.
* @size: Requested size of memory region.
* @num_pages: Actual size of memory region in pages.
* @page_alignment: Page alignment.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
* Structure indicating the placement and space resources used by a
* buffer object.
*/
struct ttm_mem_reg {
void *mm_node;
unsigned long start;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
};
/**
* enum ttm_bo_type
*
* @ttm_bo_type_device: These are 'normal' buffers that can
* be mmapped by user space. Each of these bos occupy a slot in the
* device address space, that can be used for normal vm operations.
*
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
* but they cannot be accessed from user-space. For kernel-only use.
*
* @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
* driver.
*/
enum ttm_bo_type {
ttm_bo_type_device,
ttm_bo_type_kernel,
ttm_bo_type_sg
};
struct ttm_tt;
/**
* struct ttm_buffer_object
*
* @bdev: Pointer to the buffer object device structure.
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
* @addr_space_offset: Address space offset.
* @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is put on the delayed delete list.
* @list_kref: List reference count of this buffer object. This member is
* used to avoid destruction while the buffer object is still on a list.
* Lru lists may keep one refcount, the delayed delete list, and kref != 0
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
* @mem: structure describing current placement.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
* @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
* @val_seq: Sequence of the validation holding the @reserved lock.
* Used to avoid starvation when many processes compete to validate the
* buffer. This member is protected by the bo_device::lru_lock.
* @seq_valid: The value of @val_seq is valid. This value is protected by
* the bo_device::lru_lock.
* @reserved: Deadlock-free lock used for synchronization state transitions.
* @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
* @vm_rb: Rb node for the vm rb tree.
* @vm_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
*
 * Base class for TTM buffer object that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
* the driver can usually use the placement offset @offset directly as the
* GPU virtual address. For drivers implementing multiple
* GPU memory manager contexts, the driver should manage the address space
* in these contexts separately and use these objects to get the correct
* placement and caching for these GPU maps. This makes it possible to use
* these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver specific types.
*/
struct ttm_buffer_object {
/**
* Members constant at init.
*/
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
uint64_t addr_space_offset;
size_t acc_size;
/**
* Members not needing protection.
*/
u_int kref;
u_int list_kref;
/* wait_queue_head_t event_queue; */
/**
* Members protected by the bo::reserved lock.
*/
struct ttm_mem_reg mem;
struct vm_object *persistent_swap_storage;
struct ttm_tt *ttm;
bool evicted;
/**
* Members protected by the bo::reserved lock only when written to.
*/
atomic_t cpu_writers;
/**
* Members protected by the bdev::lru_lock.
*/
struct list_head lru;
struct list_head ddestroy;
struct list_head swap;
struct list_head io_reserve_lru;
uint32_t val_seq;
bool seq_valid;
/**
* Members protected by the bdev::lru_lock
* only when written to.
*/
atomic_t reserved;
/**
* Members protected by struct buffer_object_device::fence_lock
* In addition, setting sync_obj to anything else
* than NULL requires bo::reserved to be held. This allows for
* checking NULL while reserved but not holding the mentioned lock.
*/
void *sync_obj;
unsigned long priv_flags;
/**
* Members protected by the bdev::vm_lock
*/
RB_ENTRY(ttm_buffer_object) vm_rb;
struct drm_mm_node *vm_node;
/**
* Special members that are protected by the reserve lock
* and the bo::lock when written to. Can be read with
* either of these locks held.
*/
unsigned long offset;
uint32_t cur_placement;
struct sg_table *sg;
};
/**
* struct ttm_bo_kmap_obj
*
* @virtual: The current kernel virtual address.
* @page: The page when kmap'ing a single page.
* @bo_kmap_type: Type of bo_kmap.
*
* Object describing a kernel mapping. Since a TTM bo may be located
* in various memory types with various caching policies, the
* mapping can either be an ioremap, a vmap, a kmap or part of a
* premapped region.
*/
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
void *virtual;
struct vm_page *page;
struct sf_buf *sf;
int num_pages;
unsigned long size;
enum {
ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
ttm_bo_map_vmap = 2,
ttm_bo_map_kmap = 3,
ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
} bo_kmap_type;
struct ttm_buffer_object *bo;
};
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
*
* Returns a refcounted pointer to a buffer object.
*/
static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
refcount_acquire(&bo->kref);
return bo;
}
/**
* ttm_bo_wait - wait for buffer idle.
*
* @bo: The buffer object.
* @interruptible: Use interruptible wait.
* @no_wait: Return immediately if buffer is busy.
*
* This function must be called with the bo::mutex held, and makes
* sure any previous rendering to the buffer is completed.
* Note: It might be necessary to block validations before the
* wait by reserving the buffer.
* Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
/**
* ttm_bo_validate
*
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
* @no_wait_gpu: Return immediately if the GPU is busy.
*
* Changes placement and caching policy of the buffer object
* according proposed placement.
* Returns
* -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy.
* -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu);
/**
* ttm_bo_unref
*
* @bo: The buffer object.
*
* Unreference and clear a pointer to a buffer object.
*/
extern void ttm_bo_unref(struct ttm_buffer_object **bo);
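A small illustrative sketch (not from the commit) of the reference pair above; "bo" stands for any buffer object the caller already holds a reference to.

struct ttm_buffer_object *ref;

ref = ttm_bo_reference(bo);	/* take an extra reference */
/* ... use the object ... */
ttm_bo_unref(&ref);		/* drop it; ref is cleared to NULL */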
/**
* ttm_bo_list_ref_sub
*
* @bo: The buffer object.
* @count: The number of references with which to decrease @bo::list_kref;
* @never_free: The refcount should not reach zero with this operation.
*
* Release @count lru list references to this buffer object.
*/
extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
bool never_free);
/**
* ttm_bo_add_to_lru
*
* @bo: The buffer object.
*
* Add this bo to the relevant mem type lru and, if it's backed by
* system pages (ttms) to the swap list.
* This function must be called with struct ttm_bo_global::lru_lock held, and
* is typically called immediately prior to unreserving a bo.
*/
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_del_from_lru
*
* @bo: The buffer object.
*
* Remove this bo from all lru lists used to lookup and reserve an object.
* This function must be called with struct ttm_bo_global::lru_lock held,
* and is usually called just immediately after the bo has been reserved to
* avoid recursive reservation from lru lists.
*/
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_lock_delayed_workqueue
*
* Prevent the delayed workqueue from running.
* Returns
* True if the workqueue was queued at the time
*/
extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run.
*/
extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
int resched);
/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
* command submission that affects the buffer will return -EBUSY
* until ttm_bo_synccpu_write_release is called.
*
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
*/
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
* ttm_bo_synccpu_write_release:
*
* @bo : The buffer object.
*
* Releases a synccpu lock.
*/
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
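Illustrative sketch (not from the commit), assuming "bo" is a buffer object the CPU wants to write to:

int ret;

ret = ttm_bo_synccpu_write_grab(bo, true);	/* no_wait */
if (ret == 0) {
	/* CPU writes are now safe; command submission affecting the
	 * buffer returns -EBUSY until the release below. */
	ttm_bo_synccpu_write_release(bo);
}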
/**
* ttm_bo_acc_size
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo_size: size of the buffer object in byte.
* @struct_size: size of the structure holding buffer object datas
*
* Returns size to account for a buffer object
*/
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size);
/**
* ttm_bo_init
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
* As this object may be part of a larger structure, this function,
* together with the @destroy function,
* enables driver-specific objects derived from a ttm_buffer_object.
* On successful return, the object kref and list_kref are set to 1.
* If a failure occurs, the function will call the @destroy function, or
* kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
* illegal and will likely cause memory corruption.
*
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
extern int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
struct vm_object *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_synccpu_object_init
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @p_bo: On successful completion *p_bo points to the created object.
*
* This function allocates a ttm_buffer_object, and then calls ttm_bo_init
* on that object. The destroy function is set to kfree().
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/
extern int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
struct vm_object *persistent_swap_storage,
struct ttm_buffer_object **p_bo);
/**
* ttm_bo_check_placement
*
* @bo: the buffer object.
* @placement: placements
*
* Performs minimal validity checking on an intended change of
* placement flags.
* Returns
* -EINVAL: Intended change is invalid or not allowed.
*/
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
struct ttm_placement *placement);
/**
* ttm_bo_init_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
* @p_size: size managed area in pages.
*
* Initialize a manager for a given memory type.
* Note: if part of driver firstopen, it must be protected from a
* potentially racing lastclose.
* Returns:
* -EINVAL: invalid size or memory type.
* -ENOMEM: Not enough memory.
* May also return driver-specified errors.
*/
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size);
/**
* ttm_bo_clean_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
*
* Take down a manager for a given memory type after first walking
* the LRU list to evict any buffers left alive.
*
* Normally, this function is part of lastclose() or unload(), and at that
* point there shouldn't be any buffers left created by user-space, since
 * they should've been removed by the file descriptor release() method.
* However, before this function is run, make sure to signal all sync objects,
* and verify that the delayed delete queue is empty. The driver must also
* make sure that there are no NO_EVICT buffers present in this memory type
* when the call is made.
*
* If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
* function is called. The caller can do that by first taking the
* struct ttm_bo_device::ttm_lock in write mode.
*
* Returns:
* -EINVAL: invalid or uninitialized memory type.
* -EBUSY: There are still buffers left in this memory type.
*/
extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_bo_evict_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
*
* Evicts all buffers on the lru list of the memory type.
* This is normally part of a VT switch or an
* out-of-memory-space-due-to-fragmentation handler.
* The caller must make sure that there are no other processes
* currently validating buffers, and can do that by taking the
* struct ttm_bo_device::ttm_lock in write mode.
*
* Returns:
* -EINVAL: Invalid or uninitialized memory type.
* -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer.
*/
extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_kmap_obj_virtual
*
* @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
* @is_iomem: Pointer to an integer that on return indicates 1 if the
* virtual map is io memory, 0 if normal memory.
*
* Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is 1 on return, the virtual address points to an io memory area
 * that should strictly be accessed by the iowriteXX() and similar functions.
*/
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
bool *is_iomem)
{
*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
return map->virtual;
}
/**
* ttm_bo_kmap
*
* @bo: The buffer object.
* @start_page: The first page to map.
* @num_pages: Number of pages to map.
* @map: pointer to a struct ttm_bo_kmap_obj representing the map.
*
* Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
* data in the buffer object. The ttm_kmap_obj_virtual function can then be
* used to obtain a virtual address to the data.
*
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid range.
*/
extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
/**
* ttm_bo_kunmap
*
* @map: Object describing the map to unmap.
*
* Unmaps a kernel map set up by ttm_bo_kmap.
*/
extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
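Illustrative sketch (not from the commit): map the first page of a buffer object, access it through the address returned by ttm_kmap_obj_virtual(), then tear the mapping down.

struct ttm_bo_kmap_obj map;
bool is_iomem;
void *va;
int ret;

ret = ttm_bo_kmap(bo, 0, 1, &map);	/* map one page at offset 0 */
if (ret == 0) {
	va = ttm_kmap_obj_virtual(&map, &is_iomem);
	/* ... read or write the page through va; use io accessors
	 * if is_iomem is true ... */
	ttm_bo_kunmap(&map);
}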
/**
* ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
*
* @vma: vma as input from the fbdev mmap method.
* @bo: The bo backing the address space. The address space will
* have the same size as the bo, and start at offset 0.
*
* This function is intended to be called by the fbdev mmap method
* if the fbdev address space is to be backed by a bo.
*/
/* XXXKIB
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
*/
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*
* @filp: filp as input from the mmap method.
* @vma: vma as input from the mmap method.
* @bdev: Pointer to the ttm_bo_device with the address space manager.
*
 * This function is intended to be called by the device mmap method
* if the device address space is to be backed by the bo manager.
*/
/* XXXKIB
extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
*/
/**
* ttm_bo_io
*
* @bdev: Pointer to the struct ttm_bo_device.
* @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into.
* Null on write.
* @count: Number of bytes to read / write.
* @f_pos: Pointer to current file position.
* @write: 1 for read, 0 for write.
*
* This function implements read / write into ttm buffer objects, and is
* intended to
* be called from the fops::read and fops::write method.
* Returns:
* See man (2) write, man(2) read. In particular,
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
const char *wbuf, char *rbuf,
size_t count, off_t *f_pos, bool write);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
/**
* ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
*
* @bo: The buffer object to check.
*
* This function returns an indication if a bo is reserved or not, and should
* only be used to print an error when it is not from incorrect api usage, since
* there's no guarantee that it is the caller that is holding the reservation.
*/
static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
{
return atomic_read(&bo->reserved);
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,157 @@
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/drm_mm.h>
/**
* Currently we use a spinlock for the lock, but a mutex *may* be
* more appropriate to reduce scheduling latency if the range manager
* ends up with very fragmented allocation patterns.
*/
struct ttm_range_manager {
struct drm_mm mm;
struct mtx lock;
};
MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager");
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
unsigned long lpfn;
int ret;
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
do {
ret = drm_mm_pre_get(mm);
if (unlikely(ret))
return ret;
mtx_lock(&rman->lock);
node = drm_mm_search_free_in_range(mm,
mem->num_pages, mem->page_alignment,
placement->fpfn, lpfn, 1);
if (unlikely(node == NULL)) {
mtx_unlock(&rman->lock);
return 0;
}
node = drm_mm_get_block_atomic_range(node, mem->num_pages,
mem->page_alignment,
placement->fpfn,
lpfn);
mtx_unlock(&rman->lock);
} while (node == NULL);
mem->mm_node = node;
mem->start = node->start;
return 0;
}
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
if (mem->mm_node) {
mtx_lock(&rman->lock);
drm_mm_put_block(mem->mm_node);
mtx_unlock(&rman->lock);
mem->mm_node = NULL;
}
}
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct ttm_range_manager *rman;
int ret;
rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
ret = drm_mm_init(&rman->mm, 0, p_size);
if (ret) {
free(rman, M_TTM_RMAN);
return ret;
}
mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
man->priv = rman;
return 0;
}
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
mtx_lock(&rman->lock);
if (drm_mm_clean(mm)) {
drm_mm_takedown(mm);
mtx_unlock(&rman->lock);
mtx_destroy(&rman->lock);
free(rman, M_TTM_RMAN);
man->priv = NULL;
return 0;
}
mtx_unlock(&rman->lock);
return -EBUSY;
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
mtx_lock(&rman->lock);
drm_mm_debug_table(&rman->mm, prefix);
mtx_unlock(&rman->lock);
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
ttm_bo_man_init,
ttm_bo_man_takedown,
ttm_bo_man_get_node,
ttm_bo_man_put_node,
ttm_bo_man_debug
};
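/*
 * Illustrative sketch (not part of the original file): a driver would
 * normally hook this drm_mm-backed range manager up from its
 * init_mem_type callback by pointing man->func at ttm_bo_manager_func.
 * The callback name and the VRAM placement/caching flags below are
 * assumptions chosen for illustration only.
 */
#if 0
static int
mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    struct ttm_mem_type_manager *man)
{

    switch (type) {
    case TTM_PL_VRAM:
        man->func = &ttm_bo_manager_func;   /* range-managed memory */
        man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
        break;
    default:
        return (-EINVAL);
    }
    return (0);
}
#endif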

View File

@@ -0,0 +1,658 @@
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <sys/sf_buf.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
bool evict,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
}
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
if (unlikely(ret != 0))
return ret;
if (new_mem->mem_type != TTM_PL_SYSTEM) {
ret = ttm_tt_bind(ttm, new_mem);
if (unlikely(ret != 0))
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
if (likely(man->io_reserve_fastpath))
return 0;
if (interruptible) {
if (sx_xlock_sig(&man->io_reserve_mutex))
return (-EINTR);
else
return (0);
}
sx_xlock(&man->io_reserve_mutex);
return 0;
}
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
if (likely(man->io_reserve_fastpath))
return;
sx_xunlock(&man->io_reserve_mutex);
}
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
struct ttm_buffer_object *bo;
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
return -EAGAIN;
bo = list_first_entry(&man->io_reserve_lru,
struct ttm_buffer_object,
io_reserve_lru);
list_del_init(&bo->io_reserve_lru);
ttm_bo_unmap_virtual_locked(bo);
return 0;
}
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
if (!bdev->driver->io_mem_reserve)
return 0;
if (likely(man->io_reserve_fastpath))
return bdev->driver->io_mem_reserve(bdev, mem);
if (bdev->driver->io_mem_reserve &&
mem->bus.io_reserved_count++ == 0) {
retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
if (ret == -EAGAIN) {
ret = ttm_mem_io_evict(man);
if (ret == 0)
goto retry;
}
}
return ret;
}
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
if (likely(man->io_reserve_fastpath))
return;
if (bdev->driver->io_mem_reserve &&
--mem->bus.io_reserved_count == 0 &&
bdev->driver->io_mem_free)
bdev->driver->io_mem_free(bdev, mem);
}
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
int ret;
if (!mem->bus.io_reserved_vm) {
struct ttm_mem_type_manager *man =
&bo->bdev->man[mem->mem_type];
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
mem->bus.io_reserved_vm = true;
if (man->use_io_reserve_lru)
list_add_tail(&bo->io_reserve_lru,
&man->io_reserve_lru);
}
return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
if (mem->bus.io_reserved_vm) {
mem->bus.io_reserved_vm = false;
list_del_init(&bo->io_reserve_lru);
ttm_mem_io_free(bo->bdev, mem);
}
}
static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
if (mem->bus.addr) {
addr = mem->bus.addr;
} else {
addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
*virtual = addr;
return 0;
}
static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
man = &bdev->man[mem->mem_type];
if (virtual && mem->bus.addr == NULL)
pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
/* iowrite32(ioread32(srcP++), dstP++); */
*dstP++ = *srcP++;
return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
vm_memattr_t prot)
{
vm_page_t d = ttm->pages[page];
void *dst;
if (!d)
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
/* XXXKIB can't sleep ? */
dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
if (!dst)
return -ENOMEM;
memcpy(dst, src, PAGE_SIZE);
pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);
return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
vm_memattr_t prot)
{
vm_page_t s = ttm->pages[page];
void *src;
if (!s)
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
if (!src)
return -ENOMEM;
memcpy(dst, src, PAGE_SIZE);
pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);
return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
if (ret)
goto out;
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
if (old_iomap == NULL && ttm == NULL)
goto out2;
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
goto out1;
}
add = 0;
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->start < old_mem->start + old_mem->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL) {
vm_memattr_t prot = ttm_io_prot(old_mem->placement);
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
prot);
} else if (new_iomap == NULL) {
vm_memattr_t prot = ttm_io_prot(new_mem->placement);
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
if (ret)
goto out1;
}
mb();
out2:
old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
ttm_tt_unbind(ttm);
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
out1:
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
ttm_bo_mem_put(bo, &old_copy);
return ret;
}
MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
free(bo, M_TTM_TRANSF_OBJ);
}
/**
* ttm_buffer_object_transfer
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
* holding the data of @bo with the old placement.
*
* This is a utility function that may be called after an accelerated move
* has been scheduled. A new buffer object is created as a placeholder for
* the old data while it's being copied. When that buffer object is idle,
* it can be destroyed, releasing the space of the old placement.
* Returns:
* !0: Failure.
*/
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_ZERO | M_WAITOK);
*fbo = *bo;
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
*/
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
refcount_init(&fbo->list_kref, 1);
refcount_init(&fbo->kref, 1);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
*new_obj = fbo;
return 0;
}
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__amd64__)
if (caching_flags & TTM_PL_FLAG_WC)
return (VM_MEMATTR_WRITE_COMBINING);
else
/*
* We do not support i386; see the Linux source for the reasoning
* behind this comment.
*/
return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long offset,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
bo->mem.bus.offset + offset, size,
(mem->placement & TTM_PL_FLAG_WC) ?
VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
map->size = size;
}
return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long start_page,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
vm_memattr_t prot;
struct ttm_tt *ttm = bo->ttm;
int i, ret;
MPASS(ttm != NULL);
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
}
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
*/
map->bo_kmap_type = ttm_bo_map_kmap;
map->page = ttm->pages[start_page];
map->sf = sf_buf_alloc(map->page, 0);
map->virtual = (void *)sf_buf_kva(map->sf);
} else {
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
VM_MEMATTR_WRITE_COMBINING :
ttm_io_prot(mem->placement);
map->bo_kmap_type = ttm_bo_map_vmap;
map->num_pages = num_pages;
map->virtual = (void *)kmem_alloc_nofault(kernel_map,
num_pages * PAGE_SIZE);
if (map->virtual != NULL) {
for (i = 0; i < num_pages; i++) {
/* XXXKIB hack */
pmap_page_set_memattr(ttm->pages[start_page +
i], prot);
}
pmap_qenter((vm_offset_t)map->virtual,
&ttm->pages[start_page], num_pages);
}
}
return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
MPASS(list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
size = num_pages << PAGE_SHIFT;
return ttm_bo_ioremap(bo, offset, size, map);
}
}
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
struct ttm_buffer_object *bo = map->bo;
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
pmap_unmapdev((vm_offset_t)map->virtual, map->size);
break;
case ttm_bo_map_vmap:
pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
kmem_free(kernel_map, (vm_offset_t)map->virtual,
map->num_pages * PAGE_SIZE);
break;
case ttm_bo_map_kmap:
sf_buf_free(map->sf);
break;
case ttm_bo_map_premapped:
break;
default:
MPASS(0);
}
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
map->sf = NULL;
}
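/*
 * Illustrative sketch (not in the original file): a typical CPU access
 * path maps a reserved bo, touches the pages through the returned
 * kernel virtual address and unmaps it again. The data pointer and
 * length used in the copy are assumptions.
 */
#if 0
    struct ttm_bo_kmap_obj map;
    bool is_iomem;
    void *kva;
    int ret;

    ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
    if (ret != 0)
        return (ret);
    kva = ttm_kmap_obj_virtual(&map, &is_iomem);
    memcpy(kva, data, len);     /* CPU copy into the buffer */
    ttm_bo_kunmap(&map);
#endif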
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
mtx_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
mtx_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
(bo->ttm != NULL)) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
/* ttm_buffer_object_transfer accesses bo->sync_obj */
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
mtx_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise hang it on the ghost
* bo to be unbound and destroyed.
*/
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
ttm_bo_unref(&ghost_obj);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
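/*
 * Illustrative sketch (not in the original source): a driver move hook
 * would typically schedule the blit on the GPU, create a sync object
 * (fence) for it and then hand the rest to ttm_bo_move_accel_cleanup(),
 * which either waits (eviction) or hangs the old backing store on a
 * ghost object until the fence signals. mydrv_copy_buffer() and its
 * fence output are assumptions for illustration only.
 */
#if 0
static int
mydrv_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible,
    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
    void *fence;
    int ret;

    ret = mydrv_copy_buffer(bo, &bo->mem, new_mem, &fence);
    if (ret != 0)
        return (ret);
    /* Old memory goes to a ghost bo unless this is an eviction. */
    return (ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
        new_mem));
}
#endif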

View File

@@ -0,0 +1,492 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/*
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_vm.h"
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#define TTM_BO_VM_NUM_PREFAULT 16
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
ttm_bo_cmp_rb_tree_items);
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
struct ttm_buffer_object *b)
{
if (a->vm_node->start < b->vm_node->start) {
return (-1);
} else if (a->vm_node->start > b->vm_node->start) {
return (1);
} else {
return (0);
}
}
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
unsigned long page_start,
unsigned long num_pages)
{
unsigned long cur_offset;
struct ttm_buffer_object *bo;
struct ttm_buffer_object *best_bo = NULL;
RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
cur_offset = bo->vm_node->start;
if (page_start >= cur_offset) {
best_bo = bo;
if (page_start == cur_offset)
break;
}
}
if (unlikely(best_bo == NULL))
return NULL;
if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
(page_start + num_pages)))
return NULL;
return best_bo;
}
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
int prot, vm_page_t *mres)
{
struct ttm_buffer_object *bo = vm_obj->handle;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_tt *ttm = NULL;
vm_page_t m, oldm;
int ret;
int retval = VM_PAGER_OK;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
vm_object_pip_add(vm_obj, 1);
oldm = *mres;
if (oldm != NULL) {
vm_page_lock(oldm);
vm_page_remove(oldm);
vm_page_unlock(oldm);
*mres = NULL;
} else
oldm = NULL;
retry:
VM_OBJECT_UNLOCK(vm_obj);
m = NULL;
reserve:
mtx_lock(&bo->glob->lru_lock);
ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
mtx_unlock(&bo->glob->lru_lock);
if (unlikely(ret != 0)) {
if (ret == -EBUSY) {
kern_yield(0);
goto reserve;
}
}
if (bdev->driver->fault_reserve_notify) {
ret = bdev->driver->fault_reserve_notify(bo);
switch (ret) {
case 0:
break;
case -EBUSY:
case -ERESTART:
case -EINTR:
kern_yield(0);
goto reserve;
default:
retval = VM_PAGER_ERROR;
goto out_unlock;
}
}
/*
* Wait for buffer data in transit, due to a pipelined
* move.
*/
mtx_lock(&bdev->fence_lock);
if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
ret = ttm_bo_wait(bo, false, true, false);
mtx_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
retval = VM_PAGER_ERROR;
goto out_unlock;
}
} else
mtx_unlock(&bdev->fence_lock);
ret = ttm_mem_io_lock(man, true);
if (unlikely(ret != 0)) {
retval = VM_PAGER_ERROR;
goto out_unlock;
}
ret = ttm_mem_io_reserve_vm(bo);
if (unlikely(ret != 0)) {
retval = VM_PAGER_ERROR;
goto out_io_unlock;
}
/*
* Strictly, we're not allowed to modify vma->vm_page_prot here,
* since the mmap_sem is only held in read mode. However, we
* modify only the caching bits of vma->vm_page_prot and
* consider those bits protected by
* the bo->mutex, as we should be the only writers.
* There shouldn't really be any readers of these bits except
* within vm_insert_mixed()? fork?
*
* TODO: Add a list of vmas to the bo, and change the
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
if (!bo->mem.bus.is_iomem) {
/* Allocate all pages at once, most common usage */
ttm = bo->ttm;
if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
retval = VM_PAGER_ERROR;
goto out_io_unlock;
}
}
if (bo->mem.bus.is_iomem) {
m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
bo->mem.bus.offset + offset);
pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
} else {
ttm = bo->ttm;
m = ttm->pages[OFF_TO_IDX(offset)];
if (unlikely(!m)) {
retval = VM_PAGER_ERROR;
goto out_io_unlock;
}
pmap_page_set_memattr(m,
(bo->mem.placement & TTM_PL_FLAG_CACHED) ?
VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
}
VM_OBJECT_LOCK(vm_obj);
if ((m->flags & VPO_BUSY) != 0) {
vm_page_sleep(m, "ttmpbs");
ttm_mem_io_unlock(man);
ttm_bo_unreserve(bo);
goto retry;
}
m->valid = VM_PAGE_BITS_ALL;
*mres = m;
vm_page_lock(m);
vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
vm_page_unlock(m);
vm_page_busy(m);
if (oldm != NULL) {
vm_page_lock(oldm);
vm_page_free(oldm);
vm_page_unlock(oldm);
}
out_io_unlock1:
ttm_mem_io_unlock(man);
out_unlock1:
ttm_bo_unreserve(bo);
vm_object_pip_wakeup(vm_obj);
return (retval);
out_io_unlock:
VM_OBJECT_LOCK(vm_obj);
goto out_io_unlock1;
out_unlock:
VM_OBJECT_LOCK(vm_obj);
goto out_unlock1;
}
static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
struct ttm_buffer_object *bo = handle;
*color = 0;
(void)ttm_bo_reference(bo);
return (0);
}
static void
ttm_bo_vm_dtor(void *handle)
{
struct ttm_buffer_object *bo = handle;
ttm_bo_unref(&bo);
}
static struct cdev_pager_ops ttm_pager_ops = {
.cdev_pg_fault = ttm_bo_vm_fault,
.cdev_pg_ctor = ttm_bo_vm_ctor,
.cdev_pg_dtor = ttm_bo_vm_dtor
};
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
{
struct ttm_bo_driver *driver;
struct ttm_buffer_object *bo;
struct vm_object *vm_obj;
int ret;
rw_wlock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
if (likely(bo != NULL))
refcount_acquire(&bo->kref);
rw_wunlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
printf("[TTM] Could not find buffer object to map\n");
return (EINVAL);
}
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
ret = EPERM;
goto out_unref;
}
ret = -driver->verify_access(bo);
if (unlikely(ret != 0))
goto out_unref;
vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
size, nprot, 0, curthread->td_ucred);
if (vm_obj == NULL) {
ret = EINVAL;
goto out_unref;
}
/*
* Note: We're transferring the bo reference to vm_obj->handle here.
*/
*offset = 0;
*obj_res = vm_obj;
return 0;
out_unref:
ttm_bo_unref(&bo);
return ret;
}
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
if (vma->vm_pgoff != 0)
return -EACCES;
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
return 0;
}
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf, size_t count,
loff_t *f_pos, bool write)
{
struct ttm_buffer_object *bo;
struct ttm_bo_driver *driver;
struct ttm_bo_kmap_obj map;
unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
unsigned long kmap_offset;
unsigned long kmap_end;
unsigned long kmap_num;
size_t io_size;
unsigned int page_offset;
char *virtual;
int ret;
bool no_wait = false;
bool dummy;
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
if (likely(bo != NULL))
ttm_bo_reference(bo);
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL))
return -EFAULT;
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
ret = driver->verify_access(bo, filp);
if (unlikely(ret != 0))
goto out_unref;
kmap_offset = dev_offset - bo->vm_node->start;
if (unlikely(kmap_offset >= bo->num_pages)) {
ret = -EFBIG;
goto out_unref;
}
page_offset = *f_pos & ~PAGE_MASK;
io_size = bo->num_pages - kmap_offset;
io_size = (io_size << PAGE_SHIFT) - page_offset;
if (count < io_size)
io_size = count;
kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
kmap_num = kmap_end - kmap_offset + 1;
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
switch (ret) {
case 0:
break;
case -EBUSY:
ret = -EAGAIN;
goto out_unref;
default:
goto out_unref;
}
ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0)) {
ttm_bo_unreserve(bo);
goto out_unref;
}
virtual = ttm_kmap_obj_virtual(&map, &dummy);
virtual += page_offset;
if (write)
ret = copy_from_user(virtual, wbuf, io_size);
else
ret = copy_to_user(rbuf, virtual, io_size);
ttm_bo_kunmap(&map);
ttm_bo_unreserve(bo);
ttm_bo_unref(&bo);
if (unlikely(ret != 0))
return -EFBIG;
*f_pos += io_size;
return io_size;
out_unref:
ttm_bo_unref(&bo);
return ret;
}
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
char __user *rbuf, size_t count, loff_t *f_pos,
bool write)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_end;
unsigned long kmap_num;
size_t io_size;
unsigned int page_offset;
char *virtual;
int ret;
bool no_wait = false;
bool dummy;
kmap_offset = (*f_pos >> PAGE_SHIFT);
if (unlikely(kmap_offset >= bo->num_pages))
return -EFBIG;
page_offset = *f_pos & ~PAGE_MASK;
io_size = bo->num_pages - kmap_offset;
io_size = (io_size << PAGE_SHIFT) - page_offset;
if (count < io_size)
io_size = count;
kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
kmap_num = kmap_end - kmap_offset + 1;
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
switch (ret) {
case 0:
break;
case -EBUSY:
return -EAGAIN;
default:
return ret;
}
ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0)) {
ttm_bo_unreserve(bo);
return ret;
}
virtual = ttm_kmap_obj_virtual(&map, &dummy);
virtual += page_offset;
if (write)
ret = copy_from_user(virtual, wbuf, io_size);
else
ret = copy_to_user(rbuf, virtual, io_size);
ttm_bo_kunmap(&map);
ttm_bo_unreserve(bo);
ttm_bo_unref(&bo);
if (unlikely(ret != 0))
return ret;
*f_pos += io_size;
return io_size;
}
#endif

View File

@@ -0,0 +1,230 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_execbuf_util.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (!entry->reserved)
continue;
if (entry->removed) {
ttm_bo_add_to_lru(bo);
entry->removed = false;
}
entry->reserved = false;
atomic_set(&bo->reserved, 0);
wakeup(bo);
}
}
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (!entry->reserved)
continue;
if (!entry->removed) {
entry->put_count = ttm_bo_del_from_lru(bo);
entry->removed = true;
}
}
}
static void ttm_eu_list_ref_sub(struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (entry->put_count) {
ttm_bo_list_ref_sub(bo, entry->put_count, true);
entry->put_count = 0;
}
}
}
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
struct ttm_buffer_object *bo)
{
int ret;
ttm_eu_del_from_lru_locked(list);
ret = ttm_bo_wait_unreserved_locked(bo, true);
if (unlikely(ret != 0))
ttm_eu_backoff_reservation_locked(list);
return ret;
}
void ttm_eu_backoff_reservation(struct list_head *list)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;
if (list_empty(list))
return;
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
mtx_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
mtx_unlock(&glob->lru_lock);
}
/*
* Reserve buffers for validation.
*
* If a buffer in the list is marked for CPU access, we back off and
* wait for that buffer to become free for GPU access.
*
* If a buffer is reserved for another validation, the validator with
* the highest validation sequence backs off and waits for that buffer
* to become unreserved. This prevents deadlocks when validating multiple
* buffers in different orders.
*/
int ttm_eu_reserve_buffers(struct list_head *list)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
uint32_t val_seq;
if (list_empty(list))
return 0;
list_for_each_entry(entry, list, head) {
entry->reserved = false;
entry->put_count = 0;
entry->removed = false;
}
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
mtx_lock(&glob->lru_lock);
retry_locked:
val_seq = entry->bo->bdev->val_seq++;
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
retry_this_bo:
ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
switch (ret) {
case 0:
break;
case -EBUSY:
ret = ttm_eu_wait_unreserved_locked(list, bo);
if (unlikely(ret != 0)) {
mtx_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return ret;
}
goto retry_this_bo;
case -EAGAIN:
ttm_eu_backoff_reservation_locked(list);
ttm_eu_list_ref_sub(list);
ret = ttm_bo_wait_unreserved_locked(bo, true);
if (unlikely(ret != 0)) {
mtx_unlock(&glob->lru_lock);
return ret;
}
goto retry_locked;
default:
ttm_eu_backoff_reservation_locked(list);
mtx_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return ret;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ttm_eu_backoff_reservation_locked(list);
mtx_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return -EBUSY;
}
}
ttm_eu_del_from_lru_locked(list);
mtx_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return 0;
}
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_bo_driver *driver;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
driver = bdev->driver;
glob = bo->glob;
mtx_lock(&glob->lru_lock);
mtx_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
ttm_bo_unreserve_locked(bo);
entry->reserved = false;
}
mtx_unlock(&bdev->fence_lock);
mtx_unlock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
driver->sync_obj_unref(&entry->old_sync_obj);
}
}

View File

@@ -0,0 +1,109 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
#ifndef _TTM_EXECBUF_UTIL_H_
#define _TTM_EXECBUF_UTIL_H_
#include <dev/drm2/ttm/ttm_bo_api.h>
/**
* struct ttm_validate_buffer
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
* @reserved: Indicates whether @bo has been reserved for validation.
* @removed: Indicates whether @bo has been removed from lru lists.
* @put_count: Number of outstanding references on bo::list_kref.
* @old_sync_obj: Pointer to a sync object about to be unreferenced
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
bool reserved;
bool removed;
int put_count;
void *old_sync_obj;
};
/**
* function ttm_eu_backoff_reservation
*
* @list: thread private list of ttm_validate_buffer structs.
*
* Undoes all buffer validation reservations for bos pointed to by
* the list entries.
*/
extern void ttm_eu_backoff_reservation(struct list_head *list);
/**
* function ttm_eu_reserve_buffers
*
* @list: thread private list of ttm_validate_buffer structs.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
* taken off the lru lists and are not synced for write CPU usage.
*
* If the function detects a deadlock due to multiple threads trying to
* reserve the same buffers in reverse order, all threads except one will
* back off and retry. This function may sleep while waiting for
* CPU write reservations to be cleared, and for other threads to
* unreserve their buffers.
*
* This function may return -ERESTART or -EAGAIN if the calling process
* receives a signal while waiting. In that case, no buffers on the list
* will be reserved upon return.
*
* Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or
* ttm_eu_fence_buffer_objects() when command submission is complete or
* has failed.
*/
extern int ttm_eu_reserve_buffers(struct list_head *list);
/**
* function ttm_eu_fence_buffer_objects.
*
* @list: thread private list of ttm_validate_buffer structs.
* @sync_obj: The new sync object for the buffers.
*
* This function should be called when command submission is complete, and
* it will add a new sync object to bos pointed to by entries on @list.
* It also unreserves all buffers, putting them on lru lists.
*
*/
extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
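/*
 * Illustrative sketch (not part of the original header): the expected
 * call sequence builds a thread-private list of ttm_validate_buffer
 * entries, reserves them, submits the command stream and then fences
 * the buffers; on a submission error the reservations are backed off
 * instead. The bo variable, mydrv_submit() and the fence it returns
 * are assumptions; the snippet is meant to live inside a driver's
 * submission function.
 */
#if 0
    struct ttm_validate_buffer val;
    struct list_head list;
    void *fence;
    int ret;

    INIT_LIST_HEAD(&list);
    val.bo = bo;                        /* refcounted buffer object */
    list_add_tail(&val.head, &list);

    ret = ttm_eu_reserve_buffers(&list);
    if (ret != 0)
        return (ret);
    ret = mydrv_submit(&fence);
    if (ret != 0) {
        ttm_eu_backoff_reservation(&list);
        return (ret);
    }
    /* Fences and unreserves every buffer on the list. */
    ttm_eu_fence_buffer_objects(&list, fence);
#endif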
#endif

340
sys/dev/drm2/ttm/ttm_lock.c Normal file
View File

@@ -0,0 +1,340 @@
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/*
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/ttm/ttm_lock.h>
#include <dev/drm2/ttm/ttm_module.h>
#define TTM_WRITE_LOCK_PENDING (1 << 0)
#define TTM_VT_LOCK_PENDING (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
#define TTM_VT_LOCK (1 << 3)
#define TTM_SUSPEND_LOCK (1 << 4)
void ttm_lock_init(struct ttm_lock *lock)
{
mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF);
lock->rw = 0;
lock->flags = 0;
lock->kill_takers = false;
lock->signal = SIGKILL;
}
static void
ttm_lock_send_sig(int signo)
{
struct proc *p;
p = curproc; /* XXXKIB curthread ? */
PROC_LOCK(p);
kern_psignal(p, signo);
PROC_UNLOCK(p);
}
void ttm_read_unlock(struct ttm_lock *lock)
{
mtx_lock(&lock->lock);
if (--lock->rw == 0)
wakeup(lock);
mtx_unlock(&lock->lock);
}
static bool __ttm_read_lock(struct ttm_lock *lock)
{
bool locked = false;
if (unlikely(lock->kill_takers)) {
ttm_lock_send_sig(lock->signal);
return false;
}
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
locked = true;
}
return locked;
}
int
ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
const char *wmsg;
int flags, ret;
ret = 0;
if (interruptible) {
flags = PCATCH;
wmsg = "ttmri";
} else {
flags = 0;
wmsg = "ttmr";
}
mtx_lock(&lock->lock);
while (!__ttm_read_lock(lock)) {
ret = msleep(lock, &lock->lock, flags, wmsg, 0);
if (ret != 0)
break;
}
return (-ret);
}
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
bool block = true;
*locked = false;
if (unlikely(lock->kill_takers)) {
ttm_lock_send_sig(lock->signal);
return false;
}
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
block = false;
*locked = true;
} else if (lock->flags == 0) {
block = false;
}
return !block;
}
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
const char *wmsg;
int flags, ret;
bool locked;
ret = 0;
if (interruptible) {
flags = PCATCH;
wmsg = "ttmrti";
} else {
flags = 0;
wmsg = "ttmrt";
}
mtx_lock(&lock->lock);
while (!__ttm_read_trylock(lock, &locked)) {
ret = msleep(lock, &lock->lock, flags, wmsg, 0);
if (ret != 0)
break;
}
MPASS(!locked || ret == 0);
mtx_unlock(&lock->lock);
return (locked) ? 0 : -EBUSY;
}
void ttm_write_unlock(struct ttm_lock *lock)
{
mtx_lock(&lock->lock);
lock->rw = 0;
wakeup(lock);
mtx_unlock(&lock->lock);
}
static bool __ttm_write_lock(struct ttm_lock *lock)
{
bool locked = false;
if (unlikely(lock->kill_takers)) {
ttm_lock_send_sig(lock->signal);
return false;
}
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
lock->rw = -1;
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
locked = true;
} else {
lock->flags |= TTM_WRITE_LOCK_PENDING;
}
return locked;
}
int
ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
const char *wmsg;
int flags, ret;
ret = 0;
if (interruptible) {
flags = PCATCH;
wmsg = "ttmwi";
} else {
flags = 0;
wmsg = "ttmw";
}
mtx_lock(&lock->lock);
/* XXXKIB: linux uses __ttm_read_lock for uninterruptible sleeps */
while (!__ttm_write_lock(lock)) {
ret = msleep(lock, &lock->lock, flags, wmsg, 0);
if (interruptible && ret != 0) {
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
wakeup(lock);
break;
}
}
mtx_unlock(&lock->lock);
return (-ret);
}
void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
mtx_lock(&lock->lock);
lock->rw = 1;
wakeup(lock);
mtx_unlock(&lock->lock);
}
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
int ret = 0;
mtx_lock(&lock->lock);
if (unlikely(!(lock->flags & TTM_VT_LOCK)))
ret = -EINVAL;
lock->flags &= ~TTM_VT_LOCK;
wakeup(lock);
mtx_unlock(&lock->lock);
return ret;
}
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
int ret;
*p_base = NULL;
ret = __ttm_vt_unlock(lock);
MPASS(ret == 0);
}
static bool __ttm_vt_lock(struct ttm_lock *lock)
{
bool locked = false;
if (lock->rw == 0) {
lock->flags &= ~TTM_VT_LOCK_PENDING;
lock->flags |= TTM_VT_LOCK;
locked = true;
} else {
lock->flags |= TTM_VT_LOCK_PENDING;
}
return locked;
}
int ttm_vt_lock(struct ttm_lock *lock,
bool interruptible,
struct ttm_object_file *tfile)
{
const char *wmsg;
int flags, ret;
ret = 0;
if (interruptible) {
flags = PCATCH;
wmsg = "ttmwi";
} else {
flags = 0;
wmsg = "ttmw";
}
mtx_lock(&lock->lock);
while (!__ttm_vt_lock(lock)) {
ret = msleep(lock, &lock->lock, flags, wmsg, 0);
if (interruptible && ret != 0) {
lock->flags &= ~TTM_VT_LOCK_PENDING;
wakeup(lock);
break;
}
}
/*
* Add a base-object, the destructor of which will
* make sure the lock is released if the client dies
* while holding it.
*/
ret = ttm_base_object_init(tfile, &lock->base, false,
ttm_lock_type, &ttm_vt_lock_remove, NULL);
if (ret)
(void)__ttm_vt_unlock(lock);
else
lock->vt_holder = tfile;
return (-ret);
}
int ttm_vt_unlock(struct ttm_lock *lock)
{
return ttm_ref_object_base_unref(lock->vt_holder,
lock->base.hash.key, TTM_REF_USAGE);
}
void ttm_suspend_unlock(struct ttm_lock *lock)
{
mtx_lock(&lock->lock);
lock->flags &= ~TTM_SUSPEND_LOCK;
wakeup(lock);
mtx_unlock(&lock->lock);
}
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
bool locked = false;
if (lock->rw == 0) {
lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
lock->flags |= TTM_SUSPEND_LOCK;
locked = true;
} else {
lock->flags |= TTM_SUSPEND_LOCK_PENDING;
}
return locked;
}
void ttm_suspend_lock(struct ttm_lock *lock)
{
mtx_lock(&lock->lock);
while (!__ttm_suspend_lock(lock))
msleep(lock, &lock->lock, 0, "ttms", 0);
mtx_unlock(&lock->lock);
}

228
sys/dev/drm2/ttm/ttm_lock.h Normal file
View File

@@ -0,0 +1,228 @@
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
/** @file ttm_lock.h
* This file implements a simple replacement for the buffer manager use
* of the DRM heavyweight hardware lock.
* The lock is a read-write lock. Taking it in read mode and write mode
* is relatively fast, and intended for in-kernel use only.
*
* The vt mode is used only when there is a need to block all
* user-space processes from validating buffers.
* It's allowed to leave kernel space with the vt lock held.
* If a user-space process dies while holding the vt lock,
* it will be released during the file descriptor release. The vt lock
* excludes write lock and read lock.
*
* The suspend mode is used to lock out all TTM users when preparing for
* and executing suspend operations.
*
*/
#ifndef _TTM_LOCK_H_
#define _TTM_LOCK_H_
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/ttm/ttm_object.h>
/**
* struct ttm_lock
*
* @base: ttm base object used solely to release the lock if the client
* holding the lock dies.
* @queue: Queue for processes waiting for lock change-of-status.
* @lock: Spinlock protecting some lock members.
* @rw: Read-write lock counter. Protected by @lock.
* @flags: Lock state. Protected by @lock.
* @kill_takers: Boolean whether to kill takers of the lock.
* @signal: Signal to send when kill_takers is true.
*/
struct ttm_lock {
struct ttm_base_object base;
struct mtx lock;
int32_t rw;
uint32_t flags;
bool kill_takers;
int signal;
struct ttm_object_file *vt_holder;
};
/**
* ttm_lock_init
*
* @lock: Pointer to a struct ttm_lock
* Initializes the lock.
*/
extern void ttm_lock_init(struct ttm_lock *lock);
/**
* ttm_read_unlock
*
* @lock: Pointer to a struct ttm_lock
*
* Releases a read lock.
*/
extern void ttm_read_unlock(struct ttm_lock *lock);
/**
* ttm_read_lock
*
* @lock: Pointer to a struct ttm_lock
* @interruptible: Interruptible sleeping while waiting for a lock.
*
* Takes the lock in read mode.
* Returns:
* -ERESTARTSYS If interrupted by a signal and interruptible is true.
*/
extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
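/*
 * Illustrative sketch (not part of the original header): an ioctl path
 * takes the lock in read mode around buffer validation and command
 * submission, then drops it. The drv->ttm_lock member is an assumption.
 */
#if 0
    ret = ttm_read_lock(&drv->ttm_lock, true);
    if (ret != 0)
        return (ret);
    /* ... validate and submit buffers ... */
    ttm_read_unlock(&drv->ttm_lock);
#endif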
/**
* ttm_read_trylock
*
* @lock: Pointer to a struct ttm_lock
* @interruptible: Interruptible sleeping while waiting for a lock.
*
* Tries to take the lock in read mode. If the lock is already held
* in write mode, the function will return -EBUSY. If the lock is held
* in vt or suspend mode, the function will sleep until these modes
* are unlocked.
*
* Returns:
* -EBUSY The lock was already held in write mode.
* -ERESTARTSYS If interrupted by a signal and interruptible is true.
*/
extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
/**
* ttm_lock_downgrade
*
* @lock: Pointer to a struct ttm_lock
*
* Downgrades a write lock to a read lock.
*/
extern void ttm_lock_downgrade(struct ttm_lock *lock);
/**
* ttm_suspend_lock
*
* @lock: Pointer to a struct ttm_lock
*
* Takes the lock in suspend mode. Excludes read and write mode.
*/
extern void ttm_suspend_lock(struct ttm_lock *lock);
/**
* ttm_suspend_unlock
*
* @lock: Pointer to a struct ttm_lock
*
* Releases a suspend lock
*/
extern void ttm_suspend_unlock(struct ttm_lock *lock);
/**
* ttm_vt_lock
*
* @lock: Pointer to a struct ttm_lock
* @interruptible: Interruptible sleeping while waiting for a lock.
* @tfile: Pointer to a struct ttm_object_file to register the lock with.
*
* Takes the lock in vt mode.
* Returns:
* -ERESTARTSYS If interrupted by a signal and interruptible is true.
* -ENOMEM: Out of memory when locking.
*/
extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
struct ttm_object_file *tfile);
/**
* ttm_vt_unlock
*
* @lock: Pointer to a struct ttm_lock
*
* Releases a vt lock.
* Returns:
* -EINVAL If the lock was not held.
*/
extern int ttm_vt_unlock(struct ttm_lock *lock);
/**
* ttm_write_unlock
*
* @lock: Pointer to a struct ttm_lock
*
* Releases a write lock.
*/
extern void ttm_write_unlock(struct ttm_lock *lock);
/**
* ttm_write_lock
*
* @lock: Pointer to a struct ttm_lock
* @interruptible: Interruptible sleeping while waiting for a lock.
*
* Takes the lock in write mode.
* Returns:
* -ERESTARTSYS If interrupted by a signal and interruptible is true.
*/
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
void ttm_write_lock_downgrade(struct ttm_lock *lock);
/**
* ttm_lock_set_kill
*
* @lock: Pointer to a struct ttm_lock
* @val: Boolean whether to kill processes taking the lock.
* @signal: Signal to send to the process taking the lock.
*
* The kill-when-taking-lock functionality is used to kill processes that keep
* on using the TTM functionality when its resources have been taken down, for
* example when the X server exits. A typical sequence would look like this:
* - X server takes lock in write mode.
* - ttm_lock_set_kill() is called with @val set to true.
* - As part of X server exit, TTM resources are taken down.
* - X server releases the lock on file release.
* - Another dri client wants to render, takes the lock and is killed.
*
*/
static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
int signal)
{
lock->kill_takers = val;
if (val)
lock->signal = signal;
}
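/*
 * Illustrative sketch (not part of the original header) of the teardown
 * sequence described above, roughly as it might look in a driver's
 * lastclose path; drv->ttm_lock, the resource-teardown helper and the
 * chosen signal are assumptions.
 */
#if 0
    (void)ttm_write_lock(&drv->ttm_lock, false);
    ttm_lock_set_kill(&drv->ttm_lock, true, SIGTERM);
    mydrv_takedown_ttm_resources(drv);
    /*
     * The write lock itself is dropped when the holder's file is
     * released; any later taker is now sent SIGTERM by the lock code.
     */
#endif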
#endif

View File

@@ -0,0 +1,471 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_memory.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_zone {
u_int kobj_ref;
struct ttm_mem_global *glob;
const char *name;
uint64_t zone_mem;
uint64_t emer_mem;
uint64_t max_mem;
uint64_t swap_limit;
uint64_t used_mem;
};
MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");
static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
{
printf("pTTM] Zone %7s: Used memory at exit: %llu kiB\n",
zone->name, (unsigned long long)zone->used_mem >> 10);
free(zone, M_TTM_ZONE);
}
#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
struct attribute *attr,
char *buffer)
{
uint64_t val = 0;
mtx_lock(&zone->glob->lock);
if (attr == &ttm_mem_sys)
val = zone->zone_mem;
else if (attr == &ttm_mem_emer)
val = zone->emer_mem;
else if (attr == &ttm_mem_max)
val = zone->max_mem;
else if (attr == &ttm_mem_swap)
val = zone->swap_limit;
else if (attr == &ttm_mem_used)
val = zone->used_mem;
mtx_unlock(&zone->glob->lock);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(unsigned long long) val >> 10);
}
#endif
static void ttm_check_swapping(struct ttm_mem_global *glob);
#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
struct attribute *attr,
const char *buffer,
size_t size)
{
int chars;
unsigned long val;
uint64_t val64;
chars = sscanf(buffer, "%lu", &val);
if (chars == 0)
return size;
val64 = val;
val64 <<= 10;
mtx_lock(&zone->glob->lock);
if (val64 > zone->zone_mem)
val64 = zone->zone_mem;
if (attr == &ttm_mem_emer) {
zone->emer_mem = val64;
if (zone->max_mem > val64)
zone->max_mem = val64;
} else if (attr == &ttm_mem_max) {
zone->max_mem = val64;
if (zone->emer_mem < val64)
zone->emer_mem = val64;
} else if (attr == &ttm_mem_swap)
zone->swap_limit = val64;
mtx_unlock(&zone->glob->lock);
ttm_check_swapping(zone->glob);
return size;
}
#endif
static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{
free(glob, M_TTM_ZONE);
}
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
bool from_wq, uint64_t extra)
{
unsigned int i;
struct ttm_mem_zone *zone;
uint64_t target;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (from_wq)
target = zone->swap_limit;
else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
target = zone->emer_mem;
else
target = zone->max_mem;
target = (extra > target) ? 0ULL : target;
if (zone->used_mem > target)
return true;
}
return false;
}
/**
* At this point we only support a single shrink callback.
* Extend this if needed, perhaps using a linked list of callbacks.
* Note that this function is reentrant:
* many threads may try to swap out at any given time.
*/
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
uint64_t extra)
{
int ret;
struct ttm_mem_shrink *shrink;
mtx_lock(&glob->lock);
if (glob->shrink == NULL)
goto out;
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
shrink = glob->shrink;
mtx_unlock(&glob->lock);
ret = shrink->do_shrink(shrink);
mtx_lock(&glob->lock);
if (unlikely(ret != 0))
goto out;
}
out:
mtx_unlock(&glob->lock);
}
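/*
 * Illustrative sketch (not in the original file): the single shrink
 * callback referenced above is a struct ttm_mem_shrink whose do_shrink
 * hook tries to free one unit of memory (e.g. swap out one buffer
 * object) per call; ttm_shrink() keeps invoking it while a zone stays
 * above its swap target and stops on a non-zero return. The callback
 * body and the registration helper name are assumptions.
 */
#if 0
static int
mydrv_do_shrink(struct ttm_mem_shrink *shrink)
{

    /* Swap out one buffer; non-zero means nothing left (or an error). */
    return (mydrv_swapout_one_buffer());
}

/* Registered once at init time, e.g. via ttm_mem_register_shrink(). */
static struct ttm_mem_shrink mydrv_shrink = {
    .do_shrink = mydrv_do_shrink
};
#endif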
static void ttm_shrink_work(void *arg, int pending __unused)
{
struct ttm_mem_global *glob = arg;
ttm_shrink(glob, true, 0ULL);
}
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
uint64_t mem)
{
struct ttm_mem_zone *zone;
zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
zone->name = "kernel";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
refcount_init(&zone->kobj_ref, 1);
glob->zones[glob->num_zones++] = zone;
return 0;
}
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
uint64_t mem)
{
struct ttm_mem_zone *zone;
zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
/**
* No special dma32 zone needed.
*/
if (mem <= ((uint64_t) 1ULL << 32)) {
free(zone, M_TTM_ZONE);
return 0;
}
/*
* Limit max dma32 memory to 4GB for now
* until we can figure out how big this
* zone really is.
*/
mem = ((uint64_t) 1ULL << 32);
zone->name = "dma32";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
refcount_init(&zone->kobj_ref, 1);
glob->zones[glob->num_zones++] = zone;
return 0;
}
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
u_int64_t mem;
int ret;
int i;
struct ttm_mem_zone *zone;
mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
taskqueue_thread_enqueue, &glob->swap_queue);
taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);
refcount_init(&glob->kobj_ref, 1);
mem = physmem * PAGE_SIZE;
ret = ttm_mem_init_kernel_zone(glob, mem);
if (unlikely(ret != 0))
goto out_no_zone;
ret = ttm_mem_init_dma32_zone(glob, mem);
if (unlikely(ret != 0))
goto out_no_zone;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
return ret;
}
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
unsigned int i;
struct ttm_mem_zone *zone;
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
ttm_dma_page_alloc_fini();
taskqueue_drain(glob->swap_queue, &glob->work);
taskqueue_free(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (refcount_release(&zone->kobj_ref))
ttm_mem_zone_kobj_release(zone);
}
if (refcount_release(&glob->kobj_ref))
ttm_mem_global_kobj_release(glob);
}
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
bool needs_swapping = false;
unsigned int i;
struct ttm_mem_zone *zone;
mtx_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (zone->used_mem > zone->swap_limit) {
needs_swapping = true;
break;
}
}
mtx_unlock(&glob->lock);
if (unlikely(needs_swapping))
taskqueue_enqueue(glob->swap_queue, &glob->work);
}
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount)
{
unsigned int i;
struct ttm_mem_zone *zone;
mtx_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem -= amount;
}
mtx_unlock(&glob->lock);
}
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount, bool reserve)
{
uint64_t limit;
int ret = -ENOMEM;
unsigned int i;
struct ttm_mem_zone *zone;
mtx_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
zone->emer_mem : zone->max_mem;
if (zone->used_mem > limit)
goto out_unlock;
}
if (reserve) {
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem += amount;
}
}
ret = 0;
out_unlock:
mtx_unlock(&glob->lock);
ttm_check_swapping(glob);
return ret;
}
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t memory,
bool no_wait, bool interruptible)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
while (unlikely(ttm_mem_global_reserve(glob,
single_zone,
memory, true)
!= 0)) {
if (no_wait)
return -ENOMEM;
if (unlikely(count-- == 0))
return -ENOMEM;
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
}
return 0;
}
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
#define page_to_pfn(pp) OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct vm_page *page,
bool no_wait, bool interruptible)
{
struct ttm_mem_zone *zone = NULL;
/**
* Page allocations may be registered in a single zone
* only if highmem or !dma32.
*/
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
interruptible);
}
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
{
struct ttm_mem_zone *zone = NULL;
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
return size;
else if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
else {
size_t tmp_size = 4;
while (tmp_size < size)
tmp_size <<= 1;
return tmp_size;
}
return 0;
}
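/*
 * Illustrative sketch (not part of the original source): with a 4 KiB
 * PAGE_SIZE, ttm_round_pot() rounds small sizes up to the next power of
 * two and page-aligns anything larger than a page.  The helper below
 * exists only to demonstrate the expected values.
 */
static void
ttm_round_pot_example(void)
{

	MPASS(ttm_round_pot(100) == 128);	/* next power of two */
	MPASS(ttm_round_pot(4096) == 4096);	/* already a power of two */
	MPASS(ttm_round_pot(5000) == 8192);	/* > PAGE_SIZE: page-aligned */
}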

View File

@ -0,0 +1,149 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* $FreeBSD$ */
#ifndef TTM_MEMORY_H
#define TTM_MEMORY_H
/**
* struct ttm_mem_shrink - callback to shrink TTM memory usage.
*
* @do_shrink: The callback function.
*
* Arguments to the do_shrink functions are intended to be passed using
* inheritance. That is, the argument class derives from struct ttm_mem_shrink,
* and can be accessed using container_of().
*/
struct ttm_mem_shrink {
int (*do_shrink) (struct ttm_mem_shrink *);
};
/**
* struct ttm_mem_global - Global memory accounting structure.
*
* @shrink: A single callback to shrink TTM memory usage. Extend this
* to a linked list to be able to handle multiple callbacks when needed.
* @swap_queue: A workqueue to handle shrinking in low memory situations. We
* need a separate workqueue since it will spend a lot of time waiting
* for the GPU, and this will otherwise block other workqueue tasks(?)
* At this point we use only a single-threaded workqueue.
* @work: The workqueue callback for the shrink queue.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
* @zone_highmem: Pointer to the highmem zone if there is one.
* @zone_dma32: Pointer to the dma32 zone if there is one.
*
* Note that this structure is not per device. It should be global for all
* graphics devices.
*/
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
struct ttm_mem_global {
u_int kobj_ref;
struct ttm_mem_shrink *shrink;
struct taskqueue *swap_queue;
struct task work;
struct mtx lock;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
struct ttm_mem_zone *zone_dma32;
};
/**
* ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
*
* @shrink: The object to initialize.
* @func: The callback function.
*/
static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
int (*func) (struct ttm_mem_shrink *))
{
shrink->do_shrink = func;
}
/**
* ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
*
* @glob: The struct ttm_mem_global object to register with.
* @shrink: An initialized struct ttm_mem_shrink object to register.
*
* Returns:
* -EBUSY: There's already a callback registered. (May change).
*/
static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
struct ttm_mem_shrink *shrink)
{
mtx_lock(&glob->lock);
if (glob->shrink != NULL) {
mtx_unlock(&glob->lock);
return -EBUSY;
}
glob->shrink = shrink;
mtx_unlock(&glob->lock);
return 0;
}
/**
* ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
*
* @glob: The struct ttm_mem_global object to unregister from.
* @shrink: A previously registered struct ttm_mem_shrink object.
*
*/
static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
struct ttm_mem_shrink *shrink)
{
mtx_lock(&glob->lock);
MPASS(glob->shrink == shrink);
glob->shrink = NULL;
mtx_unlock(&glob->lock);
}
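/*
 * Minimal usage sketch (assumption, not part of the original header): a
 * driver wires its own swap-out routine into the accounting object and
 * can later detach it again.  The names my_driver_do_shrink and
 * my_driver_shrink are hypothetical.
 */
static int my_driver_do_shrink(struct ttm_mem_shrink *shrink)
{
	/* Swap out or release driver-owned buffers here; return 0 on success. */
	return (0);
}

static struct ttm_mem_shrink my_driver_shrink;

static inline int my_driver_hook_shrink(struct ttm_mem_global *glob)
{
	ttm_mem_init_shrink(&my_driver_shrink, my_driver_do_shrink);
	return (ttm_mem_register_shrink(glob, &my_driver_shrink));
}

static inline void my_driver_unhook_shrink(struct ttm_mem_global *glob)
{
	ttm_mem_unregister_shrink(glob, &my_driver_shrink);
}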
struct vm_page;
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct vm_page *page,
bool no_wait, bool interruptible);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct vm_page *page);
extern size_t ttm_round_pot(size_t size);
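/*
 * Usage sketch (assumption, not part of the original header): memory that
 * TTM should account for is reserved through ttm_mem_global_alloc() before
 * the backing storage is allocated, and released again with a matching
 * ttm_mem_global_free() call.  The helper name is hypothetical.
 */
static inline int example_account_alloc(struct ttm_mem_global *glob,
    uint64_t size)
{
	int ret;

	ret = ttm_mem_global_alloc(glob, size, false, false);
	if (ret != 0)
		return (ret);	/* -ENOMEM once the zone limits are hit */
	/* ... allocate the 'size' bytes of backing storage here ... */
	return (0);
}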
#endif

View File

@ -0,0 +1,37 @@
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
#ifndef _TTM_MODULE_H_
#define _TTM_MODULE_H_
#define TTM_PFX "[TTM] "
#endif /* _TTM_MODULE_H_ */

View File

@ -0,0 +1,455 @@
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/** @file ttm_ref_object.c
*
* Base- and reference object implementation for the various
* ttm objects. Implements reference counting, minimal security checks
* and release on file close.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/**
* struct ttm_object_file
*
* @tdev: Pointer to the ttm_object_device.
*
* @lock: Lock that protects the ref_list list and the
* ref_hash hash tables.
*
* @ref_list: List of ttm_ref_objects to be destroyed at
* file release.
*
* @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
* for fast lookup of ref objects given a base object.
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <sys/rwlock.h>
#include <dev/drm2/ttm/ttm_object.h>
#include <dev/drm2/ttm/ttm_module.h>
struct ttm_object_file {
struct ttm_object_device *tdev;
struct rwlock lock;
struct list_head ref_list;
struct drm_open_hash ref_hash[TTM_REF_NUM];
u_int refcount;
};
/**
* struct ttm_object_device
*
* @object_lock: lock that protects the object_hash hash table.
*
* @object_hash: hash table for fast lookup of object global names.
*
* @object_count: Per device object count.
*
* This is the per-device data structure needed for ttm object management.
*/
struct ttm_object_device {
struct rwlock object_lock;
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
};
/**
* struct ttm_ref_object
*
* @hash: Hash entry for the per-file object reference hash.
*
* @head: List entry for the per-file list of ref-objects.
*
* @kref: Ref count.
*
* @obj: Base object this ref object is referencing.
*
* @ref_type: Type of ref object.
*
* This is similar to an idr object, but it also has a hash table entry
* that allows lookup with a pointer to the referenced object as a key. In
* that way, one can easily detect whether a base object is referenced by
* a particular ttm_object_file. It also carries a ref count to avoid creating
* multiple ref objects if a ttm_object_file references the same base
* object more than once.
*/
struct ttm_ref_object {
struct drm_hash_item hash;
struct list_head head;
u_int kref;
enum ttm_ref_type ref_type;
struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
MALLOC_DEFINE(M_TTM_OBJ_FILE, "ttm_obj_file", "TTM File Objects");
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
refcount_acquire(&tfile->refcount);
return tfile;
}
static void ttm_object_file_destroy(struct ttm_object_file *tfile)
{
free(tfile, M_TTM_OBJ_FILE);
}
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
if (refcount_release(&tfile->refcount))
ttm_object_file_destroy(tfile);
}
int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type object_type,
void (*rcount_release) (struct ttm_base_object **),
void (*ref_obj_release) (struct ttm_base_object *,
enum ttm_ref_type ref_type))
{
struct ttm_object_device *tdev = tfile->tdev;
int ret;
base->shareable = shareable;
base->tfile = ttm_object_file_ref(tfile);
base->refcount_release = rcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
refcount_init(&base->refcount, 1);
rw_init(&tdev->object_lock, "ttmbao");
rw_wlock(&tdev->object_lock);
ret = drm_ht_just_insert_please(&tdev->object_hash,
&base->hash,
(unsigned long)base, 31, 0, 0);
rw_wunlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0))
goto out_err1;
ttm_base_object_unref(&base);
return 0;
out_err1:
rw_wlock(&tdev->object_lock);
(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
rw_wunlock(&tdev->object_lock);
out_err0:
return ret;
}
static void ttm_release_base(struct ttm_base_object *base)
{
struct ttm_object_device *tdev = base->tfile->tdev;
(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
rw_wunlock(&tdev->object_lock);
/*
* Note: We don't use synchronize_rcu() here because it's far
* too slow. It's up to the user to free the object using
* call_rcu() or ttm_base_object_kfree().
*/
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
}
rw_wlock(&tdev->object_lock);
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct ttm_object_device *tdev = base->tfile->tdev;
*p_base = NULL;
/*
* Need to take the lock here to avoid racing with
* users trying to look up the object.
*/
rw_wlock(&tdev->object_lock);
if (refcount_release(&base->refcount))
ttm_release_base(base);
rw_wunlock(&tdev->object_lock);
}
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_object_device *tdev = tfile->tdev;
struct ttm_base_object *base;
struct drm_hash_item *hash;
int ret;
rw_rlock(&tdev->object_lock);
ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
if (ret == 0) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
refcount_acquire(&base->refcount);
}
rw_runlock(&tdev->object_lock);
if (unlikely(ret != 0))
return NULL;
if (tfile != base->tfile && !base->shareable) {
printf("[TTM] Attempted access of non-shareable object %p\n",
base);
ttm_base_object_unref(&base);
return NULL;
}
return base;
}
MALLOC_DEFINE(M_TTM_OBJ_REF, "ttm_obj_ref", "TTM Ref Objects");
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
struct drm_hash_item *hash;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
int ret = -EINVAL;
if (existed != NULL)
*existed = true;
while (ret == -EINVAL) {
rw_rlock(&tfile->lock);
ret = drm_ht_find_item(ht, base->hash.key, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
refcount_acquire(&ref->kref);
rw_runlock(&tfile->lock);
break;
}
rw_runlock(&tfile->lock);
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
return ret;
ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
if (unlikely(ref == NULL)) {
ttm_mem_global_free(mem_glob, sizeof(*ref));
return -ENOMEM;
}
ref->hash.key = base->hash.key;
ref->obj = base;
ref->tfile = tfile;
ref->ref_type = ref_type;
refcount_init(&ref->kref, 1);
rw_wlock(&tfile->lock);
ret = drm_ht_insert_item(ht, &ref->hash);
if (ret == 0) {
list_add_tail(&ref->head, &tfile->ref_list);
refcount_acquire(&base->refcount);
rw_wunlock(&tfile->lock);
if (existed != NULL)
*existed = false;
break;
}
rw_wunlock(&tfile->lock);
MPASS(ret == -EINVAL);
ttm_mem_global_free(mem_glob, sizeof(*ref));
free(ref, M_TTM_OBJ_REF);
}
return ret;
}
static void ttm_ref_object_release(struct ttm_ref_object *ref)
{
struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile;
struct drm_open_hash *ht;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
ht = &tfile->ref_hash[ref->ref_type];
(void)drm_ht_remove_item(ht, &ref->hash);
list_del(&ref->head);
rw_wunlock(&tfile->lock);
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
base->ref_obj_release(base, ref->ref_type);
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
free(ref, M_TTM_OBJ_REF);
rw_wlock(&tfile->lock);
}
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
unsigned long key, enum ttm_ref_type ref_type)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
struct drm_hash_item *hash;
int ret;
rw_wlock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
rw_wunlock(&tfile->lock);
return -EINVAL;
}
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
if (refcount_release(&ref->kref))
ttm_ref_object_release(ref);
rw_wunlock(&tfile->lock);
return 0;
}
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
struct ttm_ref_object *ref;
struct list_head *list;
unsigned int i;
struct ttm_object_file *tfile = *p_tfile;
*p_tfile = NULL;
rw_wlock(&tfile->lock);
/*
* Since we release the lock within the loop, we have to
* restart it from the beginning each time.
*/
while (!list_empty(&tfile->ref_list)) {
list = tfile->ref_list.next;
ref = list_entry(list, struct ttm_ref_object, head);
ttm_ref_object_release(ref);
}
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
rw_wunlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
{
struct ttm_object_file *tfile;
unsigned int i;
unsigned int j = 0;
int ret;
tfile = malloc(sizeof(*tfile), M_TTM_OBJ_FILE, M_WAITOK);
rw_init(&tfile->lock, "ttmfo");
tfile->tdev = tdev;
refcount_init(&tfile->refcount, 1);
INIT_LIST_HEAD(&tfile->ref_list);
for (i = 0; i < TTM_REF_NUM; ++i) {
ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
if (ret) {
j = i;
goto out_err;
}
}
return tfile;
out_err:
for (i = 0; i < j; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
free(tfile, M_TTM_OBJ_FILE);
return NULL;
}
MALLOC_DEFINE(M_TTM_OBJ_DEV, "ttm_obj_dev", "TTM Device Objects");
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
*mem_glob,
unsigned int hash_order)
{
struct ttm_object_device *tdev;
int ret;
tdev = malloc(sizeof(*tdev), M_TTM_OBJ_DEV, M_WAITOK);
tdev->mem_glob = mem_glob;
rw_init(&tdev->object_lock, "ttmdo");
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
if (ret == 0)
return tdev;
free(tdev, M_TTM_OBJ_DEV);
return NULL;
}
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
struct ttm_object_device *tdev = *p_tdev;
*p_tdev = NULL;
rw_wlock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
rw_wunlock(&tdev->object_lock);
free(tdev, M_TTM_OBJ_DEV);
}

View File

@ -0,0 +1,271 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
/** @file ttm_object.h
*
* Base- and reference object implementation for the various
* ttm objects. Implements reference counting, minimal security checks
* and release on file close.
*/
#ifndef _TTM_OBJECT_H_
#define _TTM_OBJECT_H_
#include <dev/drm2/drm_hashtab.h>
#include <dev/drm2/ttm/ttm_memory.h>
/**
* enum ttm_ref_type
*
* Describes what type of reference a ref object holds.
*
* TTM_REF_USAGE is a simple refcount on a base object.
*
* TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
* buffer object.
*
* TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
* buffer object.
*
*/
enum ttm_ref_type {
TTM_REF_USAGE,
TTM_REF_SYNCCPU_READ,
TTM_REF_SYNCCPU_WRITE,
TTM_REF_NUM
};
/**
* enum ttm_object_type
*
* One entry per ttm object type.
* Device-specific types should use the
* ttm_driver_typex types.
*/
enum ttm_object_type {
ttm_fence_type,
ttm_buffer_type,
ttm_lock_type,
ttm_driver_type0 = 256,
ttm_driver_type1,
ttm_driver_type2,
ttm_driver_type3,
ttm_driver_type4,
ttm_driver_type5
};
struct ttm_object_file;
struct ttm_object_device;
/**
* struct ttm_base_object
*
* @hash: hash entry for the per-device object hash.
* @type: derived type this object is base class for.
* @shareable: Other ttm_object_files can access this object.
*
* @tfile: Pointer to ttm_object_file of the creator.
* NULL if the object was not created by a user request.
* (kernel object).
*
* @refcount: Number of references to this object, not
* including the hash entry. A reference to a base object can
* only be held by a ref object.
*
* @refcount_release: A function to be called when there are
* no more references to this object. This function should
* destroy the object (or make sure destruction eventually happens),
* and when it is called, the object has
* already been taken out of the per-device hash. The parameter
* "base" should be set to NULL by the function.
*
* @ref_obj_release: A function to be called when a reference object
* with another ttm_ref_type than TTM_REF_USAGE is deleted.
* This function may, for example, release a lock held by a user-space
* process.
*
* This struct is intended to be used as a base struct for objects that
* are visible to user-space. It provides a global name, race-safe
* access and refcounting, minimal access control and hooks for unref actions.
*/
struct ttm_base_object {
/* struct rcu_head rhead;XXXKIB */
struct drm_hash_item hash;
enum ttm_object_type object_type;
bool shareable;
struct ttm_object_file *tfile;
u_int refcount;
void (*refcount_release) (struct ttm_base_object **base);
void (*ref_obj_release) (struct ttm_base_object *base,
enum ttm_ref_type ref_type);
};
/**
* ttm_base_object_init
*
* @tfile: Pointer to a struct ttm_object_file.
* @base: The struct ttm_base_object to initialize.
* @shareable: This object is shareable with other applications.
* (different @tfile pointers.)
* @type: The object type.
* @refcount_release: See the struct ttm_base_object description.
* @ref_obj_release: See the struct ttm_base_object description.
*
* Initializes a struct ttm_base_object.
*/
extern int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object
**),
void (*ref_obj_release) (struct ttm_base_object
*,
enum ttm_ref_type
ref_type));
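/*
 * Sketch (assumption, not part of the original header): a driver-private
 * object embeds a ttm_base_object and is published to user-space with
 * ttm_base_object_init().  The type example_obj and its release helper
 * are hypothetical.
 */
struct example_obj {
	struct ttm_base_object base;
	/* driver-private fields ... */
};

static inline void example_obj_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;
	/* free the containing struct example_obj here */
	(void)base;
}

static inline int example_obj_publish(struct ttm_object_file *tfile,
    struct example_obj *obj)
{
	return (ttm_base_object_init(tfile, &obj->base, true,
	    ttm_driver_type0, example_obj_release, NULL));
}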
/**
* ttm_base_object_lookup
*
* @tfile: Pointer to a struct ttm_object_file.
* @key: Hash key
*
* Looks up a struct ttm_base_object with the key @key.
* Also verifies that the object is visible to the application, by
* comparing the @tfile argument and checking the object shareable flag.
*/
extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
*tfile, uint32_t key);
/**
* ttm_base_object_unref
*
* @p_base: Pointer to a pointer referencing a struct ttm_base_object.
*
* Decrements the base object refcount and clears the pointer pointed to by
* p_base.
*/
extern void ttm_base_object_unref(struct ttm_base_object **p_base);
/**
* ttm_ref_object_add.
*
* @tfile: A struct ttm_object_file representing the application owning the
* ref_object.
* @base: The base object to reference.
* @ref_type: The type of reference.
* @existed: Upon completion, indicates that an identical reference object
* already existed, and the refcount was upped on that object instead.
*
* Adding a ref object to a base object is basically like referencing the
* base object, but a user-space application holds the reference. When the
* file corresponding to @tfile is closed, all its reference objects are
* deleted. A reference object can have different types depending on what
* it's intended for. It can be refcounting to prevent object destruction.
* When user-space takes a lock, it can add a ref object to that lock to
* make sure the lock is released if the application dies. A ref object
* will hold a single reference on a base object.
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed);
/**
* ttm_ref_object_base_unref
*
* @key: Key representing the base object.
* @ref_type: Ref type of the ref object to be dereferenced.
*
* Unreference a ref object with type @ref_type
* on the base object identified by @key. If there are no duplicate
* references, the ref object will be destroyed and the base object
* will be unreferenced.
*/
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
unsigned long key,
enum ttm_ref_type ref_type);
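/*
 * Sketch (assumption, not part of the original header): look up a base
 * object by its global name, take a TTM_REF_USAGE reference on behalf of
 * the file, and later drop that reference again by key.  The helper name
 * is hypothetical.
 */
static inline int example_ref_then_unref(struct ttm_object_file *tfile,
    uint32_t key)
{
	struct ttm_base_object *base;
	bool existed;
	int ret;

	base = ttm_base_object_lookup(tfile, key);
	if (base == NULL)
		return (-EINVAL);
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
	ttm_base_object_unref(&base);	/* drop the lookup reference */
	if (ret != 0)
		return (ret);
	return (ttm_ref_object_base_unref(tfile, key, TTM_REF_USAGE));
}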
/**
* ttm_object_file_init - initialize a struct ttm_object file
*
* @tdev: A struct ttm_object device this file is initialized on.
* @hash_order: Order of the hash table used to hold the reference objects.
*
* This is typically called by the file_ops::open function.
*/
extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
*tdev,
unsigned int hash_order);
/**
* ttm_object_file_release - release data held by a ttm_object_file
*
* @p_tfile: Pointer to pointer to the ttm_object_file object to release.
* *p_tfile will be set to NULL by this function.
*
* Releases all data associated with a ttm_object_file.
* Typically called from file_ops::release. The caller must
* ensure that there are no concurrent users of tfile.
*/
extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object_device_init - initialize a struct ttm_object_device
*
* @hash_order: Order of hash table used to hash the base objects.
*
* This function is typically called on device initialization to prepare
* data structures needed for ttm base and ref objects.
*/
extern struct ttm_object_device *ttm_object_device_init
(struct ttm_mem_global *mem_glob, unsigned int hash_order);
/**
* ttm_object_device_release - release data held by a ttm_object_device
*
* @p_tdev: Pointer to pointer to the ttm_object_device object to release.
* *p_tdev will be set to NULL by this function.
*
* Releases all data associated with a ttm_object_device.
* Typically called from driver::unload before the destruction of the
* device private data structure.
*/
extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
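/*
 * Lifecycle sketch (assumption, not part of the original header): a driver
 * typically creates one ttm_object_device at load time and one
 * ttm_object_file per open file descriptor.  The helper names and the
 * hash_order value of 10 are examples only.
 */
static inline int example_ttm_obj_load(struct ttm_mem_global *mem_glob,
    struct ttm_object_device **p_tdev)
{
	*p_tdev = ttm_object_device_init(mem_glob, 10);
	return (*p_tdev != NULL ? 0 : -ENOMEM);
}

static inline struct ttm_object_file *
example_ttm_obj_open(struct ttm_object_device *tdev)
{
	return (ttm_object_file_init(tdev, 10));
}

static inline void example_ttm_obj_close(struct ttm_object_file *tfile)
{
	ttm_object_file_release(&tfile);
}

static inline void example_ttm_obj_unload(struct ttm_object_device **p_tdev)
{
	ttm_object_device_release(p_tdev);
}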
#endif

View File

@ -0,0 +1,900 @@
/*
* Copyright (c) Red Hat Inc.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie <airlied@redhat.com>
* Jerome Glisse <jglisse@redhat.com>
* Pauli Nieminen <suokkos@gmail.com>
*/
/*
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*/
/* simple list based uncached page pool
* - Pool collects resently freed pages for reuse
* - Use page->lru to keep a free list
* - doesn't track currently in use pages
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif
#define VM_ALLOC_DMA32 VM_ALLOC_RESERVED1
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION 16
#define FREE_ALL_PAGES (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL 1000
/**
* struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
*
* @lock: Protects the shared pool from concurrent access. Must be used with
* irqsave/irqrestore variants because the pool allocator may be called from
* delayed work.
* @fill_lock: Prevent concurrent calls to fill.
* @list: Pool of free uc/wc pages for fast reuse.
* @gfp_flags: Flags to pass for alloc_page.
* @npages: Number of pages in pool.
*/
struct ttm_page_pool {
struct mtx lock;
bool fill_lock;
bool dma32;
struct pglist list;
int ttm_page_alloc_flags;
unsigned npages;
char *name;
unsigned long nfrees;
unsigned long nrefills;
};
/**
* Limits for the pool. They are handled without locks because the only place
* where they may change is the sysfs store. They won't have an immediate
* effect anyway, so forcing serialization to access them is pointless.
*/
struct ttm_pool_opts {
unsigned alloc_size;
unsigned max_size;
unsigned small;
};
#define NUM_POOLS 4
/**
* struct ttm_pool_manager - Holds memory pools for fast allocation
*
* Manager is read only object for pool code so it doesn't need locking.
*
* @free_interval: minimum number of jiffies between freeing pages from pool.
* @page_alloc_inited: reference counting for pool allocation.
* @work: Work that is used to shrink the pool. Work is only run when there are
* some pages to free.
* @small_allocation: Limit, in number of pages, below which an allocation is considered small.
*
* @pools: All pool objects in use.
**/
struct ttm_pool_manager {
unsigned int kobj_ref;
eventhandler_tag lowmem_handler;
struct ttm_pool_opts options;
union {
struct ttm_page_pool u_pools[NUM_POOLS];
struct _utag {
struct ttm_page_pool u_wc_pool;
struct ttm_page_pool u_uc_pool;
struct ttm_page_pool u_wc_pool_dma32;
struct ttm_page_pool u_uc_pool_dma32;
} _ut;
} _u;
};
#define pools _u.u_pools
#define wc_pool _u._ut.u_wc_pool
#define uc_pool _u._ut.u_uc_pool
#define wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define uc_pool_dma32 _u._ut.u_uc_pool_dma32
MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");
static void
ttm_vm_page_free(vm_page_t m)
{
KASSERT(m->object == NULL, ("ttm page %p is owned", m));
KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
m->flags &= ~PG_FICTITIOUS;
m->oflags |= VPO_UNMANAGED;
vm_page_unwire(m, 0);
vm_page_free(m);
}
static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{
switch (cstate) {
case tt_uncached:
return (VM_MEMATTR_UNCACHEABLE);
case tt_wc:
return (VM_MEMATTR_WRITE_COMBINING);
case tt_cached:
return (VM_MEMATTR_WRITE_BACK);
}
panic("caching state %d\n", cstate);
}
static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{
free(m, M_TTM_POOLMGR);
}
#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
struct attribute *attr, const char *buffer, size_t size)
{
int chars;
unsigned val;
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
/* Convert kb to number of pages */
val = val / (PAGE_SIZE >> 10);
if (attr == &ttm_page_pool_max)
m->options.max_size = val;
else if (attr == &ttm_page_pool_small)
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
pr_warn("Setting allocation size to larger than %lu is not recommended\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
return size;
}
static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
struct attribute *attr, char *buffer)
{
unsigned val = 0;
if (attr == &ttm_page_pool_max)
val = m->options.max_size;
else if (attr == &ttm_page_pool_small)
val = m->options.small;
else if (attr == &ttm_page_pool_alloc_size)
val = m->options.alloc_size;
val = val * (PAGE_SIZE >> 10);
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif
static struct ttm_pool_manager *_manager;
static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
vm_page_t m;
int i;
for (i = 0; i < addrinarray; i++) {
m = pages[i];
#ifdef TTM_HAS_AGP
unmap_page_from_agp(m);
#endif
pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
}
return 0;
}
static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
vm_page_t m;
int i;
for (i = 0; i < addrinarray; i++) {
m = pages[i];
#ifdef TTM_HAS_AGP
map_page_into_agp(pages[i]);
#endif
pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
}
return 0;
}
static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
vm_page_t m;
int i;
for (i = 0; i < addrinarray; i++) {
m = pages[i];
#ifdef TTM_HAS_AGP
map_page_into_agp(pages[i]);
#endif
pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
}
return 0;
}
/**
* Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
enum ttm_caching_state cstate)
{
int pool_index;
if (cstate == tt_cached)
return NULL;
if (cstate == tt_wc)
pool_index = 0x0;
else
pool_index = 0x1;
if (flags & TTM_PAGE_FLAG_DMA32)
pool_index |= 0x2;
return &_manager->pools[pool_index];
}
/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
unsigned i;
/* Our VM handles vm memattr automatically on the page free. */
if (set_pages_array_wb(pages, npages))
printf("[TTM] Failed to set %d pages to wb!\n", npages);
for (i = 0; i < npages; ++i)
ttm_vm_page_free(pages[i]);
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
unsigned freed_pages)
{
pool->npages -= freed_pages;
pool->nfrees += freed_pages;
}
/**
* Free pages from pool.
*
* To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
* pages in one go.
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
vm_page_t p, p1;
vm_page_t *pages_to_free;
unsigned freed_pages = 0,
npages_to_free = nr_free;
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
M_TEMP, M_WAITOK | M_ZERO);
restart:
mtx_lock(&pool->lock);
TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
if (freed_pages >= npages_to_free)
break;
pages_to_free[freed_pages++] = p;
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
/* remove range of pages from the pool */
TAILQ_REMOVE(&pool->list, p, pageq);
ttm_pool_update_free_locked(pool, freed_pages);
/**
* Because changing page caching is costly
* we unlock the pool to prevent stalling.
*/
mtx_unlock(&pool->lock);
ttm_pages_put(pages_to_free, freed_pages);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
if (NUM_PAGES_TO_ALLOC >= nr_free)
npages_to_free = nr_free;
else
npages_to_free = NUM_PAGES_TO_ALLOC;
freed_pages = 0;
/* free all so restart the processing */
if (nr_free)
goto restart;
/* Not allowed to fall through or break, because the
* code after the loop runs with the pool lock held,
* while we have already dropped it here.
*/
goto out;
}
}
/* remove range of pages from the pool */
if (freed_pages) {
TAILQ_REMOVE(&pool->list, p, pageq);
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
}
mtx_unlock(&pool->lock);
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
free(pages_to_free, M_TEMP);
return nr_free;
}
/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
unsigned i;
int total = 0;
for (i = 0; i < NUM_POOLS; ++i)
total += _manager->pools[i].npages;
return total;
}
/**
* Callback for the VM to request that the pools reduce the number of pages held.
*/
static int ttm_pool_mm_shrink(void *arg)
{
static unsigned int start_pool = 0;
unsigned i;
unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
struct ttm_page_pool *pool;
int shrink_pages = 100; /* XXXKIB */
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
}
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}
static int ttm_set_pages_caching(vm_page_t *pages,
enum ttm_caching_state cstate, unsigned cpages)
{
int r = 0;
/* Set page caching */
switch (cstate) {
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
printf("[TTM] Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
printf("[TTM] Failed to set %d pages to wc!\n", cpages);
break;
default:
break;
}
return r;
}
/**
* Free the pages that failed to change the caching state. If any pages have
* already changed their caching state, put them back in the pool.
*/
static void ttm_handle_caching_state_failure(struct pglist *pages,
int ttm_flags, enum ttm_caching_state cstate,
vm_page_t *failed_pages, unsigned cpages)
{
unsigned i;
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
TAILQ_REMOVE(pages, failed_pages[i], pageq);
ttm_vm_page_free(failed_pages[i]);
}
}
/**
* Allocate new pages with correct caching.
*
* This function is reentrant if caller updates count depending on number of
* pages returned in pages array.
*/
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
vm_page_t *caching_array;
vm_page_t p;
int r = 0;
unsigned i, cpages, aflags;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
VM_ALLOC_ZERO : 0);
/* allocate array for page caching change */
caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
M_WAITOK | M_ZERO);
for (i = 0, cpages = 0; i < count; ++i) {
p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
(ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
VM_MAX_ADDRESS, PAGE_SIZE, 0,
ttm_caching_state_to_vm(cstate));
if (!p) {
printf("[TTM] Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
}
r = -ENOMEM;
goto out;
}
p->oflags &= ~VPO_UNMANAGED;
p->flags |= PG_FICTITIOUS;
#ifdef CONFIG_HIGHMEM /* KIB: nop */
/* gfp flags of highmem page should never be dma32 so
* we should be fine in such case
*/
if (!PageHighMem(p))
#endif
{
caching_array[cpages++] = p;
if (cpages == max_cpages) {
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r) {
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
goto out;
}
cpages = 0;
}
}
TAILQ_INSERT_HEAD(pages, p, pageq);
}
if (cpages) {
r = ttm_set_pages_caching(caching_array, cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
}
out:
free(caching_array, M_TEMP);
return r;
}
/**
* Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
vm_page_t p;
int r;
unsigned cpages = 0;
/**
* Only allow one pool fill operation at a time.
* If the pool doesn't have enough pages for the allocation, new pages are
* allocated from outside the pool.
*/
if (pool->fill_lock)
return;
pool->fill_lock = true;
/* If the allocation request is small and there are not enough
* pages in the pool, we fill the pool up first. */
if (count < _manager->options.small
&& count > pool->npages) {
struct pglist new_pages;
unsigned alloc_size = _manager->options.alloc_size;
/**
* Can't change page caching if in irqsave context. We have to
* drop the pool->lock.
*/
mtx_unlock(&pool->lock);
TAILQ_INIT(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
ttm_flags, cstate, alloc_size);
mtx_lock(&pool->lock);
if (!r) {
TAILQ_CONCAT(&pool->list, &new_pages, pageq);
++pool->nrefills;
pool->npages += alloc_size;
} else {
printf("[TTM] Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
TAILQ_FOREACH(p, &pool->list, pageq) {
++cpages;
}
TAILQ_CONCAT(&pool->list, &new_pages, pageq);
pool->npages += cpages;
}
}
pool->fill_lock = false;
}
/**
* Cut 'count' number of pages from the pool and put them on the return list.
*
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct pglist *pages,
int ttm_flags,
enum ttm_caching_state cstate,
unsigned count)
{
vm_page_t p;
unsigned i;
mtx_lock(&pool->lock);
ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);
if (count >= pool->npages) {
/* take all pages from the pool */
TAILQ_CONCAT(pages, &pool->list, pageq);
count -= pool->npages;
pool->npages = 0;
goto out;
}
for (i = 0; i < count; i++) {
p = TAILQ_FIRST(&pool->list);
TAILQ_REMOVE(&pool->list, p, pageq);
TAILQ_INSERT_TAIL(pages, p, pageq);
}
pool->npages -= count;
count = 0;
out:
mtx_unlock(&pool->lock);
return count;
}
/* Put all pages from the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
for (i = 0; i < npages; i++) {
if (pages[i]) {
ttm_vm_page_free(pages[i]);
pages[i] = NULL;
}
}
return;
}
mtx_lock(&pool->lock);
for (i = 0; i < npages; i++) {
if (pages[i]) {
TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
pages[i] = NULL;
pool->npages++;
}
}
/* Check that we don't go over the pool limit */
npages = 0;
if (pool->npages > _manager->options.max_size) {
npages = pool->npages - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (npages < NUM_PAGES_TO_ALLOC)
npages = NUM_PAGES_TO_ALLOC;
}
mtx_unlock(&pool->lock);
if (npages)
ttm_page_pool_free(pool, npages);
}
/*
* On success pages list will hold count number of correctly
* cached pages.
*/
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct pglist plist;
vm_page_t p = NULL;
int gfp_flags, aflags;
unsigned count;
int r;
aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
/* No pool for cached pages */
if (pool == NULL) {
for (r = 0; r < npages; ++r) {
p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
(flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
VM_MAX_ADDRESS, PAGE_SIZE,
0, ttm_caching_state_to_vm(cstate));
if (!p) {
printf("[TTM] Unable to allocate page\n");
return -ENOMEM;
}
p->oflags &= ~VPO_UNMANAGED;
p->flags |= PG_FICTITIOUS;
pages[r] = p;
}
return 0;
}
/* combine zero flag to pool flags */
gfp_flags = flags | pool->ttm_page_alloc_flags;
/* First we take pages from the pool */
TAILQ_INIT(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
TAILQ_FOREACH(p, &plist, pageq) {
pages[count++] = p;
}
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
TAILQ_FOREACH(p, &plist, pageq) {
pmap_zero_page(p);
}
}
/* If the pool didn't have enough pages, allocate new ones. */
if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
TAILQ_INIT(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
npages);
TAILQ_FOREACH(p, &plist, pageq) {
pages[count++] = p;
}
if (r) {
/* If there are any pages in the list, put them back in
* the pool. */
printf("[TTM] Failed to allocate extra pages for large request\n");
ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
return 0;
}
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
pool->fill_lock = false;
TAILQ_INIT(&pool->list);
pool->npages = pool->nfrees = 0;
pool->ttm_page_alloc_flags = flags;
pool->name = name;
}
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
if (_manager != NULL)
printf("[TTM] manager != NULL\n");
printf("[TTM] Initializing pool allocator\n");
_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);
ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
TTM_PAGE_FLAG_DMA32, "wc dma");
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
TTM_PAGE_FLAG_DMA32, "uc dma");
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
refcount_init(&_manager->kobj_ref, 1);
ttm_pool_mm_shrink_init(_manager);
return 0;
}
void ttm_page_alloc_fini(void)
{
int i;
printf("[TTM] Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
if (refcount_release(&_manager->kobj_ref))
ttm_pool_kobj_release(_manager);
_manager = NULL;
}
int ttm_pool_populate(struct ttm_tt *ttm)
{
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
unsigned i;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_get_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
if (ret != 0) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
false, false);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
}
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return ret;
}
}
ttm->state = tt_unbound;
return 0;
}
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
unsigned i;
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i]) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
ttm_put_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
}
}
ttm->state = tt_unpopulated;
}
#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
unsigned i;
char *h[] = {"pool", "refills", "pages freed", "size"};
if (!_manager) {
seq_printf(m, "No pool allocator running.\n");
return 0;
}
seq_printf(m, "%6s %12s %13s %8s\n",
h[0], h[1], h[2], h[3]);
for (i = 0; i < NUM_POOLS; ++i) {
p = &_manager->pools[i];
seq_printf(m, "%6s %12ld %13ld %8d\n",
p->name, p->nrefills,
p->nfrees, p->npages);
}
return 0;
}
#endif

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) Red Hat Inc.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie <airlied@redhat.com>
* Jerome Glisse <jglisse@redhat.com>
*/
/* $FreeBSD$ */
#ifndef TTM_PAGE_ALLOC
#define TTM_PAGE_ALLOC
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_memory.h>
/**
* Initialize pool allocator.
*/
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
* Free pool allocator.
*/
void ttm_page_alloc_fini(void);
/**
* ttm_pool_populate:
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Add backing pages to all of @ttm
*/
extern int ttm_pool_populate(struct ttm_tt *ttm);
/**
* ttm_pool_unpopulate:
*
* @ttm: The struct ttm_tt from which to free backing pages.
*
* Free all pages of @ttm
*/
extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
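/*
 * Sketch (assumption, not part of the original header): a ttm_tt backend
 * populates its page array before binding and unpopulates it when the
 * backing pages are no longer needed.  The helper names are hypothetical.
 */
static inline int example_backend_populate(struct ttm_tt *ttm)
{
	int ret;

	ret = ttm_pool_populate(ttm);
	if (ret != 0)
		return (ret);
	/* ... bind ttm->pages to the device here ... */
	return (0);
}

static inline void example_backend_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}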
/**
* Output the state of pools to debugfs file
*/
/* XXXKIB
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
*/
#ifdef CONFIG_SWIOTLB
/**
* Initialize pool allocator.
*/
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
* Free pool allocator.
*/
void ttm_dma_page_alloc_fini(void);
/**
* Output the state of pools to debugfs file
*/
extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
unsigned max_pages)
{
return -ENODEV;
}
static inline void ttm_dma_page_alloc_fini(void) { return; }
/* XXXKIB
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
return 0;
}
*/
#endif
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,93 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* $FreeBSD$ */
#ifndef _TTM_PLACEMENT_H_
#define _TTM_PLACEMENT_H_
/*
* Memory regions for data placement.
*/
#define TTM_PL_SYSTEM 0
#define TTM_PL_TT 1
#define TTM_PL_VRAM 2
#define TTM_PL_PRIV0 3
#define TTM_PL_PRIV1 4
#define TTM_PL_PRIV2 5
#define TTM_PL_PRIV3 6
#define TTM_PL_PRIV4 7
#define TTM_PL_PRIV5 8
#define TTM_PL_SWAPPED 15
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM 0x0000FFFF
/*
 * Other flags that affect data placement.
* TTM_PL_FLAG_CACHED indicates cache-coherent mappings
* if available.
* TTM_PL_FLAG_SHARED means that another application may
* reference the buffer.
* TTM_PL_FLAG_NO_EVICT means that the buffer may never
* be evicted to make room for other buffers.
*/
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_SHARED (1 << 20)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
TTM_PL_FLAG_UNCACHED | \
TTM_PL_FLAG_WC)
#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
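/*
 * Illustrative sketch, not part of this header: a buffer object that
 * prefers write-combined VRAM but may fall back to cached system
 * memory would advertise both placements.  The mydrv_vram_placements
 * name is hypothetical, hence the #if 0 guard.
 */
#if 0
static const uint32_t mydrv_vram_placements[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,		/* preferred */
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,	/* fallback */
};
#endif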
/*
* Access flags to be used for CPU- and GPU- mappings.
* The idea is that the TTM synchronization mechanism will
* allow concurrent READ access and exclusive write access.
* Currently GPU- and CPU accesses are exclusive.
*/
#define TTM_ACCESS_READ (1 << 0)
#define TTM_ACCESS_WRITE (1 << 1)
#endif

sys/dev/drm2/ttm/ttm_tt.c Normal file
View File

@ -0,0 +1,370 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/*
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by Konstantin Belousov
* <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
MALLOC_DEFINE(M_TTM_PD, "ttm_pd", "TTM Page Directories");
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = malloc(ttm->num_pages * sizeof(void *),
M_TTM_PD, M_WAITOK | M_ZERO);
}
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
ttm->ttm.pages = malloc(ttm->ttm.num_pages * sizeof(void *),
M_TTM_PD, M_WAITOK | M_ZERO);
ttm->dma_address = malloc(ttm->ttm.num_pages *
sizeof(*ttm->dma_address), M_TTM_PD, M_WAITOK);
}
#if defined(__i386__) || defined(__amd64__)
static inline int ttm_tt_set_page_caching(vm_page_t p,
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
/* XXXKIB our VM does not need this. */
#if 0
if (c_old != tt_cached) {
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
}
#endif
if (c_new == tt_wc)
pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
else if (c_new == tt_uncached)
pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);
return (0);
}
#else
static inline int ttm_tt_set_page_caching(vm_page_t p,
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
return 0;
}
#endif
/*
* Change caching policy for the linear kernel map
* for range of pages in a ttm.
*/
static int ttm_tt_set_caching(struct ttm_tt *ttm,
enum ttm_caching_state c_state)
{
int i, j;
vm_page_t cur_page;
int ret;
if (ttm->caching_state == c_state)
return 0;
if (ttm->state == tt_unpopulated) {
/* Change caching but don't populate */
ttm->caching_state = c_state;
return 0;
}
if (ttm->caching_state == tt_cached)
drm_clflush_pages(ttm->pages, ttm->num_pages);
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
if (likely(cur_page != NULL)) {
ret = ttm_tt_set_page_caching(cur_page,
ttm->caching_state,
c_state);
if (unlikely(ret != 0))
goto out_err;
}
}
ttm->caching_state = c_state;
return 0;
out_err:
for (j = 0; j < i; ++j) {
cur_page = ttm->pages[j];
if (cur_page != NULL) {
(void)ttm_tt_set_page_caching(cur_page, c_state,
ttm->caching_state);
}
}
return ret;
}
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
enum ttm_caching_state state;
if (placement & TTM_PL_FLAG_WC)
state = tt_wc;
else if (placement & TTM_PL_FLAG_UNCACHED)
state = tt_uncached;
else
state = tt_cached;
return ttm_tt_set_caching(ttm, state);
}
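/*
 * Illustrative sketch, not part of this commit: moving a buffer into
 * write-combined memory first switches the backing pages to the
 * matching caching state.  mydrv_make_wc() is a hypothetical helper.
 */
static int mydrv_make_wc(struct ttm_tt *ttm)
{

	return ttm_tt_set_placement_caching(ttm, TTM_PL_FLAG_WC);
}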
void ttm_tt_destroy(struct ttm_tt *ttm)
{
if (unlikely(ttm == NULL))
return;
if (ttm->state == tt_bound) {
ttm_tt_unbind(ttm);
}
if (likely(ttm->pages != NULL)) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
vm_object_deallocate(ttm->swap_storage);
ttm->swap_storage = NULL;
ttm->func->destroy(ttm);
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
vm_page_t dummy_read_page)
{
ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
ttm->swap_storage = NULL;
ttm_tt_alloc_page_directory(ttm);
if (!ttm->pages) {
ttm_tt_destroy(ttm);
printf("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
}
void ttm_tt_fini(struct ttm_tt *ttm)
{
free(ttm->pages, M_TTM_PD);
ttm->pages = NULL;
}
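/*
 * Illustrative sketch, not part of this commit: a driver backend
 * embeds struct ttm_tt in its own structure, calls ttm_tt_init() from
 * its create hook and ttm_tt_fini() from its func->destroy hook.  The
 * mydrv_* names are hypothetical and M_TTM_PD is reused purely for
 * the example.
 */
struct mydrv_ttm_tt {
	struct ttm_tt	ttm;
};

static void mydrv_ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *tt = (struct mydrv_ttm_tt *)ttm;

	ttm_tt_fini(ttm);
	free(tt, M_TTM_PD);
}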
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
vm_page_t dummy_read_page)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
ttm->swap_storage = NULL;
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
printf("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
}
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
free(ttm->pages, M_TTM_PD);
ttm->pages = NULL;
free(ttm_dma->dma_address, M_TTM_PD);
ttm_dma->dma_address = NULL;
}
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
if (ttm->state == tt_bound) {
ret = ttm->func->unbind(ttm);
MPASS(ret == 0);
ttm->state = tt_unbound;
}
}
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
if (!ttm)
return -EINVAL;
if (ttm->state == tt_bound)
return 0;
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
ret = ttm->func->bind(ttm, bo_mem);
if (unlikely(ret != 0))
return ret;
ttm->state = tt_bound;
return 0;
}
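/*
 * Illustrative sketch, not part of this commit: the usual lifecycle is
 * tt_unpopulated -> tt_unbound -> tt_bound and back again.
 * mydrv_bind_to_gtt() is a hypothetical caller.
 */
static int mydrv_bind_to_gtt(struct ttm_buffer_object *bo,
    struct ttm_mem_reg *bo_mem)
{
	int ret;

	/* ttm_tt_bind() populates the pages first if needed. */
	ret = ttm_tt_bind(bo->ttm, bo_mem);
	if (ret != 0)
		return ret;
	/* ttm_tt_unbind()/ttm_tt_destroy() undo this on teardown. */
	return 0;
}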
int ttm_tt_swapin(struct ttm_tt *ttm)
{
vm_object_t obj;
vm_page_t from_page, to_page;
int i, ret, rv;
obj = ttm->swap_storage;
VM_OBJECT_LOCK(obj);
vm_object_pip_add(obj, 1);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
if (from_page->valid != VM_PAGE_BITS_ALL) {
if (vm_pager_has_page(obj, i, NULL, NULL)) {
rv = vm_pager_get_pages(obj, &from_page, 1, 0);
if (rv != VM_PAGER_OK) {
vm_page_lock(from_page);
vm_page_free(from_page);
vm_page_unlock(from_page);
ret = -EIO;
goto err_ret;
}
} else
vm_page_zero_invalid(from_page, TRUE);
}
to_page = ttm->pages[i];
if (unlikely(to_page == NULL)) {
vm_page_wakeup(from_page);
ret = -ENOMEM;
goto err_ret;
}
pmap_copy_page(from_page, to_page);
vm_page_wakeup(from_page);
}
vm_object_pip_wakeup(obj);
VM_OBJECT_UNLOCK(obj);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
vm_object_deallocate(obj);
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
return (0);
err_ret:
vm_object_pip_wakeup(obj);
VM_OBJECT_UNLOCK(obj);
return (ret);
}
int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
vm_object_t obj;
vm_page_t from_page, to_page;
int i;
MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
MPASS(ttm->caching_state == tt_cached);
if (persistent_swap_storage == NULL) {
obj = vm_pager_allocate(OBJT_SWAP, NULL,
IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
curthread->td_ucred);
if (obj == NULL) {
printf("[TTM] Failed allocating swap storage\n");
return (-ENOMEM);
}
} else
obj = persistent_swap_storage;
VM_OBJECT_LOCK(obj);
vm_object_pip_add(obj, 1);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
to_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
pmap_copy_page(from_page, to_page);
vm_page_dirty(to_page);
to_page->valid = VM_PAGE_BITS_ALL;
vm_page_wakeup(to_page);
}
vm_object_pip_wakeup(obj);
VM_OBJECT_UNLOCK(obj);
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = obj;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage != NULL)
ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
return (0);
}
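/*
 * Illustrative sketch, not part of this commit: under memory pressure
 * a caller pushes the backing pages out to an anonymous swap object
 * (persistent_swap_storage == NULL) and pulls them back in with
 * ttm_tt_swapin() before the next bind.  mydrv_evict_tt() is a
 * hypothetical helper.
 */
static int mydrv_evict_tt(struct ttm_tt *ttm)
{

	/* NULL asks ttm_tt_swapout() to allocate a private swap object. */
	return ttm_tt_swapout(ttm, NULL);
}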

View File

@ -45,8 +45,6 @@ __FBSDID("$FreeBSD$");
#define SOFT_CAPS (PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
PMC_CAP_USER | PMC_CAP_SYSTEM)
PMC_SOFT_DECLARE( , , clock, prof);
struct soft_descr {
struct pmc_descr pm_descr; /* "base class" */
};
@ -126,9 +124,10 @@ soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
if (ps == NULL)
return (EINVAL);
pmc_soft_ev_release(ps);
/* Module unload is protected by pmc SX lock. */
if (ps->ps_alloc != NULL)
ps->ps_alloc();
if (ev == pmc___clock_prof.ps_ev.pm_ev_code)
cpu_startprofclock();
return (0);
}
@ -315,6 +314,8 @@ static int
soft_release_pmc(int cpu, int ri, struct pmc *pmc)
{
struct pmc_hw *phw;
enum pmc_event ev;
struct pmc_soft *ps;
(void) pmc;
@ -328,8 +329,16 @@ soft_release_pmc(int cpu, int ri, struct pmc *pmc)
KASSERT(phw->phw_pmc == NULL,
("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
if (pmc->pm_event == pmc___clock_prof.ps_ev.pm_ev_code)
cpu_stopprofclock();
ev = pmc->pm_event;
/* Check if event is registered. */
ps = pmc_soft_ev_acquire(ev);
KASSERT(ps != NULL,
("[soft,%d] unregistered event %d", __LINE__, ev));
pmc_soft_ev_release(ps);
/* Module unload is protected by pmc SX lock. */
if (ps->ps_release != NULL)
ps->ps_release();
return (0);
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2011, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -47,7 +47,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
char ixgbe_driver_version[] = "2.5.0";
char ixgbe_driver_version[] = "2.5.7 - HEAD";
/*********************************************************************
* PCI Device ID Table
@ -83,7 +83,7 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
@ -216,7 +216,6 @@ static device_method_t ixgbe_methods[] = {
DEVMETHOD(device_attach, ixgbe_attach),
DEVMETHOD(device_detach, ixgbe_detach),
DEVMETHOD(device_shutdown, ixgbe_shutdown),
DEVMETHOD_END
};
@ -596,6 +595,9 @@ ixgbe_attach(device_t dev)
"PCIE, or x4 PCIE 2 slot is required.\n");
}
/* Set an initial default flow control value */
adapter->fc = ixgbe_fc_full;
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@ -652,7 +654,7 @@ ixgbe_detach(device_t dev)
for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
if (que->tq) {
#ifdef IXGBE_LEGACY_TX
#ifndef IXGBE_LEGACY_TX
taskqueue_drain(que->tq, &txr->txq_task);
#endif
taskqueue_drain(que->tq, &que->que_task);
@ -1310,7 +1312,7 @@ ixgbe_init_locked(struct adapter *adapter)
tmp = IXGBE_LOW_DV(frame);
hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
adapter->fc = hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.requested_mode = adapter->fc;
hw->fc.pause_time = IXGBE_FC_PAUSE;
hw->fc.send_xon = TRUE;
}
@ -1680,7 +1682,7 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= adapter->optics | IFM_FDX;
ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= adapter->optics | IFM_FDX;
@ -1932,18 +1934,6 @@ ixgbe_set_multi(struct adapter *adapter)
bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
MAX_NUM_MULTICAST_ADDRESSES);
fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (ifp->if_flags & IFF_PROMISC)
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
else if (ifp->if_flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
fctrl &= ~IXGBE_FCTRL_UPE;
} else
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
#if __FreeBSD_version < 800000
IF_ADDR_LOCK(ifp);
#else
@ -1952,6 +1942,8 @@ ixgbe_set_multi(struct adapter *adapter)
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
break;
bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
&mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
@ -1963,9 +1955,24 @@ ixgbe_set_multi(struct adapter *adapter)
if_maddr_runlock(ifp);
#endif
update_ptr = mta;
ixgbe_update_mc_addr_list(&adapter->hw,
update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (ifp->if_flags & IFF_PROMISC)
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
ifp->if_flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
fctrl &= ~IXGBE_FCTRL_UPE;
} else
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
update_ptr = mta;
ixgbe_update_mc_addr_list(&adapter->hw,
update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
}
return;
}
@ -2172,7 +2179,7 @@ ixgbe_setup_optics(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int layer;
layer = ixgbe_get_supported_physical_layer(hw);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
@ -2651,7 +2658,7 @@ ixgbe_config_link(struct adapter *adapter)
taskqueue_enqueue(adapter->tq, &adapter->mod_task);
} else {
if (hw->mac.ops.check_link)
err = ixgbe_check_link(hw, &autoneg,
err = ixgbe_check_link(hw, &adapter->link_speed,
&adapter->link_up, FALSE);
if (err)
goto out;
@ -2662,8 +2669,8 @@ ixgbe_config_link(struct adapter *adapter)
if (err)
goto out;
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
negotiate, adapter->link_up);
err = hw->mac.ops.setup_link(hw,
autoneg, adapter->link_up);
}
out:
return;
@ -3713,6 +3720,8 @@ ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
M_PKTHDR, rxr->mbuf_sz);
if (mp == NULL)
goto update;
if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
m_adj(mp, ETHER_ALIGN);
} else
mp = rxbuf->buf;
@ -4408,7 +4417,6 @@ ixgbe_rxeof(struct ix_queue *que)
/* Make sure bad packets are discarded */
if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
(rxr->discard)) {
ifp->if_ierrors++;
rxr->rx_discarded++;
if (eop)
rxr->discard = FALSE;
@ -4734,14 +4742,25 @@ ixgbe_enable_intr(struct adapter *adapter)
/* Enable Fan Failure detection */
if (hw->device_id == IXGBE_DEV_ID_82598AT)
mask |= IXGBE_EIMS_GPI_SDP1;
else {
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP0;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP0;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
mask |= IXGBE_EIMS_FLOW_DIR;
mask |= IXGBE_EIMS_FLOW_DIR;
#endif
break;
case ixgbe_mac_X540:
mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
mask |= IXGBE_EIMS_FLOW_DIR;
#endif
/* falls through */
default:
break;
}
IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
@ -4969,7 +4988,7 @@ ixgbe_handle_msf(void *context, int pending)
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
hw->mac.ops.setup_link(hw, autoneg, TRUE);
return;
}
@ -5013,6 +5032,11 @@ ixgbe_update_stats_counters(struct adapter *adapter)
adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/*
** Note: these are for the 8 possible traffic classes,
** which are unused in the current implementation,
** so only index 0 should contain real data.
*/
for (int i = 0; i < 8; i++) {
u32 mp;
mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
@ -5022,13 +5046,20 @@ ixgbe_update_stats_counters(struct adapter *adapter)
adapter->stats.mpc[i] += mp;
/* Running comprehensive total for stats display */
total_missed_rx += adapter->stats.mpc[i];
if (hw->mac.type == ixgbe_mac_82598EB)
if (hw->mac.type == ixgbe_mac_82598EB) {
adapter->stats.rnbc[i] +=
IXGBE_READ_REG(hw, IXGBE_RNBC(i));
adapter->stats.qbtc[i] +=
IXGBE_READ_REG(hw, IXGBE_QBTC(i));
adapter->stats.qbrc[i] +=
IXGBE_READ_REG(hw, IXGBE_QBRC(i));
adapter->stats.pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
} else
adapter->stats.pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
adapter->stats.pxontxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
adapter->stats.pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
adapter->stats.pxofftxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
adapter->stats.pxoffrxc[i] +=
@ -5039,12 +5070,6 @@ ixgbe_update_stats_counters(struct adapter *adapter)
for (int i = 0; i < 16; i++) {
adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
adapter->stats.qbrc[i] +=
((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
adapter->stats.qbtc[i] +=
((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
}
adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
@ -5141,8 +5166,8 @@ ixgbe_update_stats_counters(struct adapter *adapter)
ifp->if_collisions = 0;
/* Rx Errors */
ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
adapter->stats.rlec;
ifp->if_iqdrops = total_missed_rx;
ifp->if_ierrors = adapter->stats.crcerrs + adapter->stats.rlec;
}
/** ixgbe_sysctl_tdh_handler - Handler function
@ -5528,10 +5553,13 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
ixgbe_disable_rx_drop(adapter);
break;
case ixgbe_fc_none:
default:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
if (adapter->num_queues > 1)
ixgbe_enable_rx_drop(adapter);
break;
default:
adapter->fc = last;
return (EINVAL);
}
/* Don't autoneg if forcing a value */
adapter->hw.fc.disable_fc_autoneg = TRUE;
@ -5560,7 +5588,7 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
last = adapter->advertise;
error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
if ((error) || (adapter->advertise == -1))
if ((error) || (req->newptr == NULL))
return (error);
if (adapter->advertise == last) /* no change */
@ -5568,11 +5596,11 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)))
return (error);
return (EINVAL);
if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
return (error);
return (EINVAL);
}
if (adapter->advertise == 1)
@ -5582,11 +5610,13 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
else if (adapter->advertise == 3)
speed = IXGBE_LINK_SPEED_1GB_FULL |
IXGBE_LINK_SPEED_10GB_FULL;
else /* bogus value */
return (error);
else { /* bogus value */
adapter->advertise = last;
return (EINVAL);
}
hw->mac.autotry_restart = TRUE;
hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
hw->mac.ops.setup_link(hw, speed, TRUE);
return (error);
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -49,18 +49,17 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
* @hw: pointer to the HW structure
@ -155,6 +154,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
@ -712,15 +712,15 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_setup_mac_link_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
bool autoneg = FALSE;
s32 status = IXGBE_SUCCESS;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@ -766,14 +766,12 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE if waiting is needed to complete
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
@ -781,7 +779,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_copper_link_82598");
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@ -1102,15 +1100,16 @@ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
* ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
* ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to read
* @dev_addr: address to read from
* @byte_offset: byte offset to read from dev_addr
* @eeprom_data: value read
*
 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u8 byte_offset, u8 *eeprom_data)
{
s32 status = IXGBE_SUCCESS;
u16 sfp_addr = 0;
@ -1118,7 +1117,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u16 sfp_stat = 0;
u32 i;
DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
DEBUGFUNC("ixgbe_read_i2c_phy_82598");
if (hw->phy.type == ixgbe_phy_nl) {
/*
@ -1126,7 +1125,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@ -1158,13 +1157,42 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
status = IXGBE_ERR_PHY;
goto out;
}
out:
return status;
}
/**
* ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to read
* @eeprom_data: value read
*
 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
byte_offset, eeprom_data);
}
/**
* ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
* @eeprom_data: value read
*
 * Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
**/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
byte_offset, sff8472_data);
}
/**
* ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
* @hw: pointer to hardware structure

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -40,7 +40,6 @@
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
@ -48,14 +47,37 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
return FALSE;
manc = IXGBE_READ_REG(hw, IXGBE_MANC);
if (!(manc & IXGBE_MANC_RCV_TCO_EN))
return FALSE;
factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
if (factps & IXGBE_FACTPS_MNGCG)
return FALSE;
return TRUE;
}
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
/* enable the laser control functions for SFP+ fiber */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
/*
* enable the laser control functions for SFP+ fiber
* and MNG not enabled
*/
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
!(ixgbe_mng_enabled(hw))) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@ -135,9 +157,8 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 reg_anlp1 = 0;
u32 i = 0;
u16 list_offset, data_offset, data_value;
bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
@ -171,28 +192,39 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
/* Now restart DSP by setting Restart_AN and clearing LMS */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
IXGBE_AUTOC_AN_RESTART));
/* Need SW/FW semaphore around AUTOC writes if LESM on,
* likewise reset_pipeline requires lock as it also writes
* AUTOC.
*/
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
}
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
msec_delay(4);
reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
break;
got_lock = TRUE;
}
if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
/* Restart DSP and set SFI mode */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
IXGBE_AUTOC_LMS_10G_SERIAL));
hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
ret_val = ixgbe_reset_pipeline_82599(hw);
if (got_lock) {
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
got_lock = FALSE;
}
if (ret_val) {
DEBUGOUT("sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
/* Restart DSP by setting Restart_AN and return to SFI mode */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@ -216,7 +248,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_ops_82599");
ret_val = ixgbe_init_phy_ops_generic(hw);
ixgbe_init_phy_ops_generic(hw);
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
@ -289,13 +321,13 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
* ixgbe_get_link_capabilities_82599 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @negotiation: TRUE when autoneg or autotry is enabled
* @autoneg: TRUE when autoneg or autotry is enabled
*
* Determines the link capabilities by reading the AUTOC register.
**/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *negotiation)
bool *autoneg)
{
s32 status = IXGBE_SUCCESS;
u32 autoc = 0;
@ -309,7 +341,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = TRUE;
*autoneg = TRUE;
goto out;
}
@ -326,22 +358,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = FALSE;
*autoneg = FALSE;
break;
case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
*negotiation = FALSE;
*autoneg = FALSE;
break;
case IXGBE_AUTOC_LMS_1G_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = TRUE;
*autoneg = TRUE;
break;
case IXGBE_AUTOC_LMS_10G_SERIAL:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
*negotiation = FALSE;
*autoneg = FALSE;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR:
@ -353,7 +385,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = TRUE;
*autoneg = TRUE;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@ -364,12 +396,12 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = TRUE;
*autoneg = TRUE;
break;
case IXGBE_AUTOC_LMS_SGMII_1G_100M:
*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
*negotiation = FALSE;
*autoneg = FALSE;
break;
default:
@ -381,7 +413,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = TRUE;
*autoneg = TRUE;
}
out:
@ -424,6 +456,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
@ -433,6 +466,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
media_type = ixgbe_media_type_copper;
break;
case IXGBE_DEV_ID_82599_BYPASS:
media_type = ixgbe_media_type_fiber_fixed;
hw->phy.multispeed_fiber = TRUE;
break;
default:
media_type = ixgbe_media_type_unknown;
break;
@ -456,17 +493,32 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
s32 status = IXGBE_SUCCESS;
bool got_lock = FALSE;
DEBUGFUNC("ixgbe_start_mac_link_82599");
/* reset_pipeline requires us to hold this lock as it writes to
* AUTOC.
*/
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (status != IXGBE_SUCCESS)
goto out;
got_lock = TRUE;
}
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@ -490,6 +542,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
/* Add delay to filter out noises during initial link setup */
msec_delay(50);
out:
return status;
}
@ -554,17 +607,85 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
}
/**
* ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
* @hw: pointer to hardware structure
* @speed: link speed to set
*
 * We set the module speed differently for fixed fiber.  For other
 * multi-speed devices we don't have an error value, so if we detect
 * an error here we just log it and exit.
*/
static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
s32 status;
u8 rs, eeprom_data;
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
/* one bit mask same as setting on */
rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
break;
default:
DEBUGOUT("Invalid fixed module speed\n");
return;
}
/* Set RS0 */
status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
&eeprom_data);
if (status) {
DEBUGOUT("Failed to read Rx Rate Select RS0\n");
goto out;
}
eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
eeprom_data);
if (status) {
DEBUGOUT("Failed to write Rx Rate Select RS0\n");
goto out;
}
/* Set RS1 */
status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
&eeprom_data);
if (status) {
DEBUGOUT("Failed to read Rx Rate Select RS1\n");
goto out;
}
eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
IXGBE_I2C_EEPROM_DEV_ADDR2,
eeprom_data);
if (status) {
DEBUGOUT("Failed to write Rx Rate Select RS1\n");
goto out;
}
out:
return;
}
/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
@ -573,13 +694,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
u32 i = 0;
bool link_up = FALSE;
bool negotiation;
bool autoneg, link_up = FALSE;
DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
/* Mask off requested but non-supported speeds */
status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
if (status != IXGBE_SUCCESS)
return status;
@ -602,16 +722,20 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
ixgbe_set_fiber_fixed_speed(hw,
IXGBE_LINK_SPEED_10GB_FULL);
} else {
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
}
/* Allow module to change analog characteristics (1G->10G) */
msec_delay(40);
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_10GB_FULL,
autoneg,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
@ -653,17 +777,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
ixgbe_set_fiber_fixed_speed(hw,
IXGBE_LINK_SPEED_1GB_FULL);
} else {
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
}
/* Allow module to change analog characteristics (10G->1G) */
msec_delay(40);
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_1GB_FULL,
autoneg,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
@ -690,7 +818,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
highest_link_speed, autoneg, autoneg_wait_to_complete);
highest_link_speed, autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@ -709,13 +837,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
* ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Implements the Intel SmartSpeed algorithm.
**/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
@ -748,7 +875,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* First, try to get link with full advertisement */
hw->phy.smart_speed_active = FALSE;
for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
goto out;
@ -783,7 +910,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* Turn SmartSpeed on to disable KR support */
hw->phy.smart_speed_active = TRUE;
status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
goto out;
@ -808,7 +935,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* We didn't get link. Turn SmartSpeed back off. */
hw->phy.smart_speed_active = FALSE;
status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
out:
@ -822,32 +949,30 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
bool autoneg = FALSE;
s32 status = IXGBE_SUCCESS;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc, pma_pmd_1g, link_mode, start_autoc;
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
u32 start_autoc = autoc;
u32 orig_autoc = 0;
u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_mac_link_82599");
/* Check to see if speed passed in is supported. */
status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
if (status != IXGBE_SUCCESS)
if (status)
goto out;
speed &= link_capabilities;
@ -859,9 +984,14 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
orig_autoc = hw->mac.orig_autoc;
autoc = hw->mac.orig_autoc;
else
orig_autoc = autoc;
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
orig_autoc = autoc;
start_autoc = hw->mac.cached_autoc;
link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
@ -900,9 +1030,31 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
/* Need SW/FW semaphore around AUTOC writes if LESM is on,
* likewise reset_pipeline requires us to hold this lock as
* it also writes to AUTOC.
*/
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto out;
}
got_lock = TRUE;
}
/* Restart link */
autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
hw->mac.cached_autoc = autoc;
ixgbe_reset_pipeline_82599(hw);
if (got_lock) {
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
got_lock = FALSE;
}
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@ -937,14 +1089,12 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
* ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
* @autoneg_wait_to_complete: TRUE if waiting is needed to complete
*
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
@ -952,7 +1102,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_copper_link_82599");
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@ -1056,14 +1206,45 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Enable link if disabled in NVM */
if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
IXGBE_WRITE_FLUSH(hw);
}
if (hw->mac.orig_link_settings_stored == FALSE) {
hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.cached_autoc = autoc;
hw->mac.orig_link_settings_stored = TRUE;
} else {
if (autoc != hw->mac.orig_autoc)
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
IXGBE_AUTOC_AN_RESTART));
if (autoc != hw->mac.orig_autoc) {
/* Need SW/FW semaphore around AUTOC writes if LESM is
* on, likewise reset_pipeline requires us to hold
* this lock as it also writes to AUTOC.
*/
bool got_lock = FALSE;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto reset_hw_out;
}
got_lock = TRUE;
}
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
hw->mac.cached_autoc = hw->mac.orig_autoc;
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@ -1168,7 +1349,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
usec_delay(10);
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
DEBUGOUT("Flow Director Signature poll time exceeded!\n");
@ -2094,7 +2275,7 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
* if the FW version is not supported.
**/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
@ -2243,4 +2424,55 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
return ret_val;
}
/**
* ixgbe_reset_pipeline_82599 - perform pipeline reset
*
* @hw: pointer to hardware structure
*
* Reset pipeline by asserting Restart_AN together with LMS change to ensure
* full pipeline reset
**/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 anlp1_reg = 0;
u32 i, autoc_reg, autoc2_reg;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
IXGBE_WRITE_FLUSH(hw);
}
autoc_reg = hw->mac.cached_autoc;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
msec_delay(4);
anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
break;
}
if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
DEBUGOUT("auto negotiation not completed\n");
ret_val = IXGBE_ERR_RESET_FAILED;
goto reset_pipeline_out;
}
ret_val = IXGBE_SUCCESS;
reset_pipeline_out:
/* Write AUTOC register with original LMS field and Restart_AN */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
return ret_val;
}
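/*
 * Illustrative sketch, not part of this commit: the AUTOC writes above
 * all follow the same pattern when LESM firmware is active - take the
 * SW/FW MAC semaphore, write AUTOC, reset the pipeline, then release
 * the semaphore.  ixgbe_write_autoc_locked_sketch() is a hypothetical
 * helper that condenses that sequence.
 */
static s32 ixgbe_write_autoc_locked_sketch(struct ixgbe_hw *hw, u32 autoc)
{
	bool got_lock = FALSE;
	s32 status = IXGBE_SUCCESS;

	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;
		got_lock = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc | IXGBE_AUTOC_AN_RESTART);
	hw->mac.cached_autoc = autoc;
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return status;
}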

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -42,15 +42,15 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
@ -61,5 +61,4 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
#endif /* _IXGBE_82599_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -93,53 +93,53 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_set_mac_type\n");
if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AT:
case IXGBE_DEV_ID_82598AT2:
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
case IXGBE_DEV_ID_82598EB_SFP_LOM:
hw->mac.type = ixgbe_mac_82598EB;
break;
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_XAUI_LOM:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
case IXGBE_DEV_ID_82599_VF:
hw->mac.type = ixgbe_mac_82599_vf;
break;
case IXGBE_DEV_ID_X540_VF:
hw->mac.type = ixgbe_mac_X540_vf;
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
hw->mac.type = ixgbe_mac_X540;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
}
} else {
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AT:
case IXGBE_DEV_ID_82598AT2:
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
case IXGBE_DEV_ID_82598EB_SFP_LOM:
hw->mac.type = ixgbe_mac_82598EB;
break;
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_XAUI_LOM:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_BYPASS:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
case IXGBE_DEV_ID_82599_VF:
case IXGBE_DEV_ID_82599_VF_HV:
hw->mac.type = ixgbe_mac_82599_vf;
break;
case IXGBE_DEV_ID_X540_VF:
case IXGBE_DEV_ID_X540_VF_HV:
hw->mac.type = ixgbe_mac_X540_vf;
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
}
DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
@ -507,16 +507,14 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* ixgbe_setup_phy_link_speed - Set auto advertise
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
*
* Sets the auto advertised capabilities
**/
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
autoneg, autoneg_wait_to_complete),
autoneg_wait_to_complete),
IXGBE_NOT_IMPLEMENTED);
}
@ -576,17 +574,15 @@ void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
* ixgbe_setup_link - Set link speed
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
*
* Configures link settings. Restarts the link.
* Performs autonegotiation if needed.
**/
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
autoneg, autoneg_wait_to_complete),
autoneg_wait_to_complete),
IXGBE_NOT_IMPLEMENTED);
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -72,13 +72,12 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@ -159,6 +158,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common);
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -147,16 +147,14 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
 * function checks the device id to see if the associated phy supports
* autoneg flow control.
**/
static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
switch (hw->device_id) {
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
return IXGBE_SUCCESS;
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
return IXGBE_SUCCESS;
default:
return IXGBE_ERR_FC_NOT_SUPPORTED;
@ -174,6 +172,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = IXGBE_SUCCESS;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_fc");
@ -200,6 +199,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@ -297,7 +297,28 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
reg_bp |= IXGBE_AUTOC_AN_RESTART;
/* Need the SW/FW semaphore around AUTOC writes if 82599 and
* LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
if ((hw->mac.type == ixgbe_mac_82599EB) &&
ixgbe_verify_lesm_fw_enabled_82599(hw)) {
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
got_lock = TRUE;
}
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
if (hw->mac.type == ixgbe_mac_82599EB)
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
@ -679,6 +700,195 @@ s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
return IXGBE_SUCCESS;
}
/**
* ixgbe_read_pba_raw
* @hw: pointer to the HW structure
* @eeprom_buf: optional pointer to EEPROM image
* @eeprom_buf_size: size of EEPROM image in words
* @max_pba_block_size: PBA block size limit
* @pba: pointer to output PBA structure
*
* Reads PBA from EEPROM image when eeprom_buf is not NULL.
* Reads PBA from physical EEPROM device when eeprom_buf is NULL.
*
**/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 max_pba_block_size,
struct ixgbe_pba *pba)
{
s32 ret_val;
u16 pba_block_size;
if (pba == NULL)
return IXGBE_ERR_PARAM;
if (eeprom_buf == NULL) {
ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
&pba->word[0]);
if (ret_val)
return ret_val;
} else {
if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
} else {
return IXGBE_ERR_PARAM;
}
}
if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
if (pba->pba_block == NULL)
return IXGBE_ERR_PARAM;
ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
eeprom_buf_size,
&pba_block_size);
if (ret_val)
return ret_val;
if (pba_block_size > max_pba_block_size)
return IXGBE_ERR_PARAM;
if (eeprom_buf == NULL) {
ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
pba_block_size,
pba->pba_block);
if (ret_val)
return ret_val;
} else {
if (eeprom_buf_size > (u32)(pba->word[1] +
pba->pba_block[0])) {
memcpy(pba->pba_block,
&eeprom_buf[pba->word[1]],
pba_block_size * sizeof(u16));
} else {
return IXGBE_ERR_PARAM;
}
}
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_write_pba_raw
* @hw: pointer to the HW structure
* @eeprom_buf: optional pointer to EEPROM image
* @eeprom_buf_size: size of EEPROM image in words
* @pba: pointer to PBA structure
*
* Writes PBA to EEPROM image when eeprom_buf is not NULL.
* Writes PBA to physical EEPROM device when eeprom_buf is NULL.
*
**/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
s32 ret_val;
if (pba == NULL)
return IXGBE_ERR_PARAM;
if (eeprom_buf == NULL) {
ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
&pba->word[0]);
if (ret_val)
return ret_val;
} else {
if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
} else {
return IXGBE_ERR_PARAM;
}
}
if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
if (pba->pba_block == NULL)
return IXGBE_ERR_PARAM;
if (eeprom_buf == NULL) {
ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
pba->pba_block[0],
pba->pba_block);
if (ret_val)
return ret_val;
} else {
if (eeprom_buf_size > (u32)(pba->word[1] +
pba->pba_block[0])) {
memcpy(&eeprom_buf[pba->word[1]],
pba->pba_block,
pba->pba_block[0] * sizeof(u16));
} else {
return IXGBE_ERR_PARAM;
}
}
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_get_pba_block_size
* @hw: pointer to the HW structure
* @eeprom_buf: optional pointer to EEPROM image
* @eeprom_buf_size: size of EEPROM image in words
* @pba_data_size: pointer to output variable
*
* Returns the size of the PBA block in words. The function operates on the
* EEPROM image if the eeprom_buf pointer is not NULL; otherwise it accesses
* the physical EEPROM device.
*
**/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 *pba_block_size)
{
s32 ret_val;
u16 pba_word[2];
u16 length;
DEBUGFUNC("ixgbe_get_pba_block_size");
if (eeprom_buf == NULL) {
ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
&pba_word[0]);
if (ret_val)
return ret_val;
} else {
if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
} else {
return IXGBE_ERR_PARAM;
}
}
if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
if (eeprom_buf == NULL) {
ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
&length);
if (ret_val)
return ret_val;
} else {
if (eeprom_buf_size > pba_word[1])
length = eeprom_buf[pba_word[1] + 0];
else
return IXGBE_ERR_PARAM;
}
if (length == 0xFFFF || length == 0)
return IXGBE_ERR_PBA_SECTION;
} else {
/* PBA number in legacy format, there is no PBA Block. */
length = 0;
}
if (pba_block_size != NULL)
*pba_block_size = length;
return IXGBE_SUCCESS;
}
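
The three PBA helpers added above compose naturally: size the block first, then hand ixgbe_read_pba_raw a buffer large enough to hold it. A hedged sketch of that flow against the physical EEPROM (eeprom_buf == NULL); the malloc()/free() calls stand in for whatever allocator a real caller would use:

static s32
example_read_pba(struct ixgbe_hw *hw)
{
	struct ixgbe_pba pba;
	u16 block_size;
	s32 ret_val;

	ret_val = ixgbe_get_pba_block_size(hw, NULL, 0, &block_size);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	/* block_size is 0 for a legacy PBA number; no block to allocate then */
	pba.pba_block = NULL;
	if (block_size != 0) {
		pba.pba_block = malloc(block_size * sizeof(u16)); /* hypothetical allocator */
		if (pba.pba_block == NULL)
			return IXGBE_ERR_OUT_OF_MEM;
	}

	/* NULL eeprom_buf => operate on the physical EEPROM device */
	ret_val = ixgbe_read_pba_raw(hw, NULL, 0, block_size, &pba);

	/* ... consume pba.word[] and pba.pba_block here ... */

	free(pba.pba_block);
	return ret_val;
}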
/**
* ixgbe_get_mac_addr_generic - Generic get MAC address
* @hw: pointer to hardware structure
@ -1268,7 +1478,7 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
@ -2719,6 +2929,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@ -2965,6 +3176,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool link_up = 0;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_blink_led_start_generic");
@ -2975,10 +3187,29 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
if (!link_up) {
/* Need the SW/FW semaphore around AUTOC writes if 82599 and
* LESM is on.
*/
bool got_lock = FALSE;
if ((hw->mac.type == ixgbe_mac_82599EB) &&
ixgbe_verify_lesm_fw_enabled_82599(hw)) {
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
got_lock = TRUE;
}
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
msec_delay(10);
}
@ -2987,7 +3218,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
return IXGBE_SUCCESS;
out:
return ret_val;
}
/**
@ -2999,21 +3231,43 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = IXGBE_SUCCESS;
bool got_lock = FALSE;
DEBUGFUNC("ixgbe_blink_led_stop_generic");
/* Need the SW/FW semaphore around AUTOC writes if 82599 and
* LESM is on.
*/
if ((hw->mac.type == ixgbe_mac_82599EB) &&
ixgbe_verify_lesm_fw_enabled_82599(hw)) {
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
got_lock = TRUE;
}
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
if (hw->mac.type == ixgbe_mac_82599EB)
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
return IXGBE_SUCCESS;
out:
return ret_val;
}
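
The lock-around-AUTOC pattern introduced here recurs in ixgbe_setup_fc, ixgbe_blink_led_start_generic and ixgbe_blink_led_stop_generic: take IXGBE_GSSR_MAC_CSR_SM on 82599 parts when LESM firmware is enabled, write AUTOC (plus reset_pipeline where applicable), then release. A condensed sketch of the shared pattern, not a helper that exists in the driver:

static s32
example_write_autoc_locked(struct ixgbe_hw *hw, u32 autoc)
{
	bool got_lock = FALSE;
	s32 ret_val;

	if ((hw->mac.type == ixgbe_mac_82599EB) &&
	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
		    IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;
		got_lock = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	return IXGBE_SUCCESS;
}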
/**
@ -3882,7 +4136,7 @@ void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
* Calculates the checksum of a buffer over the specified length. The
* calculated checksum is returned.
**/
static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
u32 i;
u8 sum = 0;
@ -3908,8 +4162,8 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return IXGBE_SUCCESS
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
{
u32 hicr, i, bi;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
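
ixgbe_calculate_checksum and ixgbe_host_interface_command lose their static qualifiers here so other files can build and submit manageability commands themselves. A trivial hedged use of the exported checksum helper; the buffer contents and the checksum byte at offset 3 are illustrative only:

static void
example_fill_checksum(void)
{
	u8 cmd[8] = { 0x11, 0x04, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF };

	/* checksum field assumed at offset 3 and pre-zeroed for the sum */
	cmd[3] = ixgbe_calculate_checksum(cmd, sizeof(cmd));
}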

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -41,9 +41,14 @@
IXGBE_WRITE_REG(hw, reg, (u32) value); \
IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
} while (0)
#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
struct ixgbe_pba {
u16 word[2];
u16 *pba_block;
};
#endif
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@ -52,6 +57,13 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 max_pba_block_size,
struct ixgbe_pba *pba);
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, struct ixgbe_pba *pba);
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 *pba_block_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@ -96,6 +108,7 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
@ -137,5 +150,11 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#endif /* IXGBE_COMMON */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -84,8 +84,20 @@
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
/* mailbox API, version 1.1 VF requests */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
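
The version 1.1 additions center on IXGBE_VF_GET_QUEUES, whose reply carries the queue counts and port VLAN data at the word indices defined above. A hedged sketch of a VF issuing that request over the posted mailbox ops; the five-word message size mirrors the index definitions, and error handling is minimal:

static s32
example_vf_get_queues(struct ixgbe_hw *hw, u32 *num_tx, u32 *num_rx)
{
	u32 msg[5] = { IXGBE_VF_GET_QUEUES, 0, 0, 0, 0 };
	s32 err;

	err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
	if (err)
		return err;

	msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* reply words are indexed by the defines above */
	*num_tx = msg[IXGBE_VF_TX_QUEUES];
	*num_rx = msg[IXGBE_VF_RX_QUEUES];
	return IXGBE_SUCCESS;
}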

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -91,6 +91,9 @@
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
/* Shared code dropped this define.. */
#define IXGBE_INTEL_VENDOR_ID 0x8086
/* Bunch of defines for shared code bogosity */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -47,6 +47,8 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
@ -71,6 +73,7 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic;
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
@ -563,14 +566,12 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: TRUE if autonegotiation enabled
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
{
UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
@ -969,9 +970,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_IDENTIFIER,
&identifier);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@ -985,26 +984,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
/* ID Module
@ -1102,27 +1095,21 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
if (status == IXGBE_ERR_SWFW_SYNC ||
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
vendor_oui =
@ -1332,6 +1319,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
eeprom_data);
}
/**
* ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
* @eeprom_data: value read
*
* Performs a byte read operation on the SFP module's SFF-8472 data over I2C
**/
static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
IXGBE_I2C_EEPROM_DEV_ADDR2,
sff8472_data);
}
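
With the read_i2c_sff8472 op wired into the PHY ops table, the diagnostic page at I2C address 0xA2 becomes reachable; it pairs with the SFF-8472 defines added to ixgbe_phy.h further below. A hedged sketch that checks the compliance byte on the A0h page first and then pulls one byte from the 0xA2 page; the choice of IXGBE_SFF_SFF_8472_OSCB is illustrative:

static s32
example_probe_sff8472(struct ixgbe_hw *hw)
{
	u8 comp, oscb;
	s32 status;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP,
	    &comp);
	if (status != IXGBE_SUCCESS)
		return status;

	if (comp == IXGBE_SFF_SFF_8472_UNSUP)
		return IXGBE_ERR_FEATURE_NOT_SUPPORTED;

	return hw->phy.ops.read_i2c_sff8472(hw, IXGBE_SFF_SFF_8472_OSCB,
	    &oscb);
}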
/**
* ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
@ -1425,9 +1428,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msec_delay(100);
ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
DEBUGOUT("I2C byte read error - Retrying.\n");

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -36,7 +36,9 @@
#define _IXGBE_PHY_H_
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
@ -48,6 +50,10 @@
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
#define IXGBE_SFF_SFF_8472_OSCB 0x6E
#define IXGBE_SFF_SFF_8472_ESCB 0x76
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@ -58,6 +64,9 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@ -95,6 +104,14 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
#define IXGBE_SFF_SFF_8472_REV_9_3 0x01
#define IXGBE_SFF_SFF_8472_REV_9_5 0x02
#define IXGBE_SFF_SFF_8472_REV_10_2 0x03
#define IXGBE_SFF_SFF_8472_REV_10_4 0x04
#define IXGBE_SFF_SFF_8472_REV_11_0 0x05
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@ -108,7 +125,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -38,9 +38,6 @@
#include "ixgbe_osdep.h"
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
@ -62,18 +59,24 @@
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_82599_BYPASS 0x155D
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
#define IXGBE_DEV_ID_X540_BYPASS 0x155C
/* General Registers */
#define IXGBE_CTRL 0x00000
@ -280,6 +283,7 @@
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
/* Flow Director registers */
#define IXGBE_FDIRCTRL 0x0EE00
#define IXGBE_FDIRHKEY 0x0EE68
@ -360,11 +364,16 @@
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
/* Ext Flexible Host Filter Table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100))
/* Four Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
/* Six Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
/* Each Flexible Filter is at most 128 (0x80) bytes in length */
@ -396,10 +405,11 @@
#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
/* Mask for Ext. flex filters */
#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
/* Wake Up Status */
@ -420,7 +430,6 @@
#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
/* Wake Up Packet Length */
#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
/* DCB registers */
@ -574,6 +583,7 @@
#define IXGBE_RTTBCNRTT 0x05150
#define IXGBE_RTTBCNRD 0x0498C
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
@ -754,11 +764,14 @@
#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
/* Management Bit Fields and Masks */
#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
/* Firmware Semaphore Register */
#define IXGBE_FWSM_MODE_MASK 0xE
#define IXGBE_FWSM_TS_ENABLED 0x1
#define IXGBE_FWSM_FW_MODE_PT 0x4
/* ARC Subsystem registers */
#define IXGBE_HICR 0x15F00
@ -1014,6 +1027,7 @@
#define IXGBE_RSCCTL_MAXDESC_4 0x04
#define IXGBE_RSCCTL_MAXDESC_8 0x08
#define IXGBE_RSCCTL_MAXDESC_16 0x0C
#define IXGBE_RSCCTL_TS_DIS 0x02
/* RSCDBU Bit Masks */
#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
@ -1026,7 +1040,7 @@
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disabl RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
@ -1052,6 +1066,7 @@
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
/* FACTPS */
#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageability Clock Gated */
#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
/* MHADD Bit Masks */
@ -1590,6 +1605,7 @@ enum {
#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */
#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
@ -1668,6 +1684,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
#define IXGBE_MACC_FSV_10G 0x00030000
@ -1838,7 +1855,7 @@ enum {
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* words rd in burst */
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
@ -2524,7 +2541,6 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
@ -2757,6 +2773,7 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
ixgbe_media_type_fiber_fixed,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
@ -2975,7 +2992,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
@ -3026,12 +3043,12 @@ struct ixgbe_phy_operations {
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
bool);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
void (*i2c_bus_clear)(struct ixgbe_hw *);
@ -3069,7 +3086,9 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
u32 cached_autoc;
u8 san_mac_rar_index;
bool get_link_status;
u32 orig_autoc2;
u16 max_msix_vectors;
bool arc_subsystem_valid;
@ -3142,6 +3161,7 @@ struct ixgbe_hw {
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
int api_version;
bool force_full_reset;
bool allow_unsupported_sfp;
};
@ -3185,6 +3205,7 @@ struct ixgbe_hw {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_OUT_OF_MEM -34
#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2013, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -142,6 +142,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
DEBUGOUT("Issuing a function level reset to MAC\n");
ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
@ -272,6 +273,17 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
return vector;
}
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 retmsg[IXGBE_VFMAILBOX_SIZE];
s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
if (!retval)
mbx->ops.read_posted(hw, retmsg, size, 0);
}
/**
* ixgbe_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
@ -463,11 +475,10 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
*
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
return IXGBE_SUCCESS;
}
@ -483,23 +494,26 @@ s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = IXGBE_SUCCESS;
u32 links_reg;
u32 in_msg = 0;
UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
*link_up = FALSE;
*speed = 0;
return -1;
}
/* If we were hit with a reset drop the link */
if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
mac->get_link_status = TRUE;
links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
if (!mac->get_link_status)
goto out;
if (links_reg & IXGBE_LINKS_UP)
*link_up = TRUE;
else
*link_up = FALSE;
/* if link status is down no point in checking to see if pf is up */
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
switch (links_reg & IXGBE_LINKS_SPEED_10G_82599) {
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
@ -511,6 +525,87 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
}
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1, 0))
goto out;
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is a NACK; we must have lost CTS status */
if (in_msg & IXGBE_VT_MSGTYPE_NACK)
ret_val = -1;
goto out;
}
/* the pf is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
ret_val = -1;
goto out;
}
/* if we passed all the tests above then the link is up and we no
* longer need to check for link
*/
mac->get_link_status = FALSE;
out:
*link_up = !mac->get_link_status;
return ret_val;
}
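
The reworked VF link check latches get_link_status and only clears it once the PF has answered over the mailbox. A small hedged sketch of a caller polling it until link comes up:

static bool
example_vf_wait_for_link(struct ixgbe_hw *hw, int tries)
{
	ixgbe_link_speed speed;
	bool link_up = FALSE;

	while (tries-- > 0) {
		if (ixgbe_check_mac_link_vf(hw, &speed, &link_up,
		    FALSE) == IXGBE_SUCCESS && link_up)
			return TRUE;
		msec_delay(100);
	}
	return FALSE;
}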
/**
* ixgbevf_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
}
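
ixgbevf_rlpml_set_vf is a thin wrapper over the write/ack helper above; a one-call hedged sketch for a driver enabling jumbo frames (the 9018-byte value is illustrative):

static void
example_enable_jumbo(struct ixgbe_hw *hw)
{
	/* 9000-byte MTU plus Ethernet overhead; value is illustrative */
	ixgbevf_rlpml_set_vf(hw, 9018);
}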
/**
* ixgbevf_negotiate_api_version - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
/* Negotiate the mailbox API version */
msg[0] = IXGBE_VF_API_NEGOTIATE;
msg[1] = api;
msg[2] = 0;
err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
if (!err)
err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
hw->api_version = api;
return 0;
}
err = IXGBE_ERR_INVALID_ARGUMENT;
}
return err;
}
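
A hedged sketch of an attach path using ixgbevf_negotiate_api_version, trying the 1.1 mailbox API first and falling back to 1.0; the ixgbe_mbox_api_11 and ixgbe_mbox_api_10 enumerators are assumed to exist in ixgbe_mbx.h of this tree:

static void
example_negotiate_mbx_api(struct ixgbe_hw *hw)
{
	/* enumerator names assumed from ixgbe_mbx.h */
	if (ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11) != 0)
		(void) ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_10);
}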
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc);
return IXGBE_SUCCESS;
}

Some files were not shown because too many files have changed in this diff.