1ba4a712dd
This brings a huge amount of changes; I'll enumerate only the user-visible ones:

- Delegated Administration

  Allows regular users to perform ZFS operations, like file system
  creation, snapshot creation, etc.

- L2ARC

  Level 2 cache for ZFS - allows additional disks to be used as cache.
  Huge performance improvements, mostly for random reads of mostly
  static content.

- slog

  Allows additional disks to be used for the ZFS Intent Log to speed up
  operations like fsync(2).

- vfs.zfs.super_owner

  Allows regular users to perform privileged operations on files stored
  on ZFS file systems they own. Be very careful with this one.

- chflags(2)

  Not all of the flags are supported. This still needs work.

- ZFSBoot

  Support for booting off of a ZFS pool. Not finished, AFAIK.

  Submitted by:	dfr

- Snapshot properties

- New failure modes

  Before, if a write request failed, the system panicked. Now one can
  select from one of three failure modes:
  - panic - panic on write error
  - wait - wait for the disk to reappear
  - continue - serve read requests if possible, block write requests

- Refquota, refreservation properties

  Like the quota and reservation properties, but they don't count space
  consumed by child file systems, clones and snapshots.

- Sparse volumes

  ZVOLs that don't reserve space in the pool.

- External attributes

  Compatible with extattr(2); a short usage sketch follows this message.

- NFSv4 ACLs

  Not sure about the status; it might not be complete yet.

  Submitted by:	trasz

- Creation-time properties

- Regression tests for the zpool(8) command.

Obtained from:	OpenSolaris
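As a quick illustration of the extattr(2) compatibility mentioned in the list above, here is a minimal, hypothetical userland sketch; the path /tank/example.txt and the attribute name "comment" are made up, and the program simply stores and reads back a user-namespace extended attribute on a file assumed to live on ZFS:

#include <sys/types.h>
#include <sys/extattr.h>

#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *path = "/tank/example.txt";	/* hypothetical ZFS-backed file */
	const char *value = "example metadata";
	char buf[128];
	ssize_t len;

	/* Attach a "user" namespace attribute named "comment" to the file. */
	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "comment",
	    value, strlen(value)) == -1)
		err(1, "extattr_set_file");

	/* Read it back; the call returns the number of bytes copied into buf. */
	len = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "comment",
	    buf, sizeof(buf));
	if (len == -1)
		err(1, "extattr_get_file");
	printf("comment = %.*s\n", (int)len, buf);

	return (0);
}

This only exercises the syscall surface; per the notes above, related pieces such as chflags(2) support were still incomplete at the time of this commit.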
144 lines
3.3 KiB
C
/*-
 * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/atomic.h>

/*
 * Emulation of the Solaris-style atomic operations used by ZFS.  Updates
 * are serialized by a single global lock: an mtx(9) mutex in the kernel,
 * a pthread mutex in userland.
 */
#ifdef _KERNEL
#include <sys/kernel.h>

struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
#else
#include <pthread.h>

#define	mtx_lock(lock)		pthread_mutex_lock(lock)
#define	mtx_unlock(lock)	pthread_mutex_unlock(lock)

static pthread_mutex_t atomic_mtx;

static __attribute__((constructor)) void
atomic_init(void)
{
	pthread_mutex_init(&atomic_mtx, NULL);
}
#endif

/* 64-bit add/decrement are only emulated on 32-bit (non-LP64) platforms. */
#ifndef __LP64__
void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{

	mtx_lock(&atomic_mtx);
	*target += delta;
	mtx_unlock(&atomic_mtx);
}

void
atomic_dec_64(volatile uint64_t *target)
{

	mtx_lock(&atomic_mtx);
	*target -= 1;
	mtx_unlock(&atomic_mtx);
}
#endif

uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	uint64_t newval;

	mtx_lock(&atomic_mtx);
	newval = (*target += delta);
	mtx_unlock(&atomic_mtx);
	return (newval);
}

#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \
    defined(__mips__)
void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
	mtx_lock(&atomic_mtx);
	*target |= value;
	mtx_unlock(&atomic_mtx);
}
#endif

uint8_t
atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
{
	uint8_t newval;

	mtx_lock(&atomic_mtx);
	newval = (*target |= value);
	mtx_unlock(&atomic_mtx);
	return (newval);
}

#ifndef __LP64__
void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	void *oldval, **trg;

	mtx_lock(&atomic_mtx);
	trg = __DEVOLATILE(void **, target);
	oldval = *trg;
	if (oldval == cmp)
		*trg = newval;
	mtx_unlock(&atomic_mtx);
	return (oldval);
}
#endif

#ifndef __sparc64__
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t oldval;

	mtx_lock(&atomic_mtx);
	oldval = *target;
	if (oldval == cmp)
		*target = newval;
	mtx_unlock(&atomic_mtx);
	return (oldval);
}
#endif

void
membar_producer(void)
{
	/* nothing */
}
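For readers unfamiliar with these Solaris-style primitives, here is a minimal, hypothetical usage sketch (the counter and helper names are made up, and the prototypes are assumed to come from the compat <sys/atomic.h> header included by the file above): a 64-bit statistics counter bumped with atomic_add_64_nv(), and a compare-and-swap retry loop built on atomic_cas_64().

#include <sys/param.h>
#include <sys/atomic.h>		/* compat header, as included by the file above */

static volatile uint64_t example_bytes_read;	/* made-up statistics counter */

/* Add to the counter and return the new value, as the _nv suffix implies. */
static uint64_t
example_account_read(uint64_t nbytes)
{

	return (atomic_add_64_nv(&example_bytes_read, (int64_t)nbytes));
}

/* Raise *maxp to value, if larger, using a classic compare-and-swap retry loop. */
static void
example_update_max(volatile uint64_t *maxp, uint64_t value)
{
	uint64_t old;

	do {
		old = *maxp;
		if (value <= old)
			return;	/* a larger value is already recorded */
	} while (atomic_cas_64(maxp, old, value) != old);
}

As implemented above, atomic_add_64_nv() (and, except on sparc64, atomic_cas_64()) is serialized by the single global atomic_mtx, which is correct but slower than native atomic instructions.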