Make mlx5_cmd_exec_cb() a safe API in mlx5core.
APIs that have deferred callbacks should have some kind of cleanup
function that callers can use to fence the callbacks. Otherwise things
like module unloading can lead to dangling function pointers, or worse.

The IB MR code is the only place that calls this function and had a
really poor attempt at creating this fence. Provide a good version in
the core code as future patches will add more places that need this
fence.

Linux commit:	e355477ed9e4f401e3931043df97325d38552d54

MFC after:	1 week
Sponsored by:	Mellanox Technologies // NVIDIA Networking
commit 7eefcb5eea (parent f34f0a65b2)
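The shape of the new API is easiest to see end to end. Below is a minimal
sketch of the intended lifecycle, with hypothetical caller names (my_done()
and my_usage() are illustrative, not part of this change): a context is
initialized once, any number of commands are issued against it, and the
cleanup call fences all callbacks before the caller's state can go away.

    static void my_done(int status, struct mlx5_async_work *work)
    {
            /* Runs from the command-completion path; "status" is the
             * command status, exactly as with the old callback. */
    }

    static int my_usage(struct mlx5_core_dev *mdev, void *in, int in_size,
                        void *out, int out_size)
    {
            struct mlx5_async_ctx ctx;
            struct mlx5_async_work work;
            int err;

            mlx5_cmd_init_async_ctx(mdev, &ctx);
            err = mlx5_cmd_exec_cb(&ctx, in, in_size, out, out_size,
                                   my_done, &work);

            /* Fence: returns only once my_done() has run for every command
             * submitted against ctx, so "work" may safely go out of scope. */
            mlx5_cmd_cleanup_async_ctx(&ctx);
            return err;
    }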
@@ -41,6 +41,7 @@
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
 #include <linux/idr.h>
+#include <linux/wait.h>
 
 #include <dev/mlx5/device.h>
 #include <dev/mlx5/doorbell.h>
@@ -947,11 +948,30 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
+struct mlx5_async_ctx {
+        struct mlx5_core_dev *dev;
+        atomic_t num_inflight;
+        struct wait_queue_head wait;
+};
+
+struct mlx5_async_work;
+
+typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
+
+struct mlx5_async_work {
+        struct mlx5_async_ctx *ctx;
+        mlx5_async_cbk_t user_callback;
+};
+
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+                             struct mlx5_async_ctx *ctx);
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+                     void *out, int out_size, mlx5_async_cbk_t callback,
+                     struct mlx5_async_work *work);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                   int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
-                     void *out, int out_size, mlx5_cmd_cbk_t callback,
-                     void *context);
 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
                           void *out, int out_size);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
@@ -986,9 +1006,10 @@ void mlx5_init_mr_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
 int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
                              struct mlx5_core_mr *mkey,
-                             u32 *in, int inlen,
-                             u32 *out, int outlen,
-                             mlx5_cmd_cbk_t callback, void *context);
+                             struct mlx5_async_ctx *async_ctx, u32 *in,
+                             int inlen, u32 *out, int outlen,
+                             mlx5_async_cbk_t callback,
+                             struct mlx5_async_work *context);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                           struct mlx5_core_mr *mr,
                           u32 *in, int inlen);
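Note that the callback now receives the struct mlx5_async_work * itself
rather than an opaque void *: consumers are expected to embed the work item
in their own request structure and recover it with container_of(). A sketch,
with an illustrative my_request type (the IB MR hunks below do exactly this
with mlx5_ib_mr.cb_work):

    struct my_request {
            u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
            struct mlx5_async_work cb_work; /* embedded, no extra allocation */
    };

    static void my_request_done(int status, struct mlx5_async_work *context)
    {
            struct my_request *req =
                    container_of(context, struct my_request, cb_work);

            /* req->out now holds the command's output mailbox. */
    }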
@@ -1353,11 +1353,57 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 }
 EXPORT_SYMBOL(mlx5_cmd_exec);
 
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
-                     void *out, int out_size, mlx5_cmd_cbk_t callback,
-                     void *context)
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+                             struct mlx5_async_ctx *ctx)
 {
-        return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context, false);
+        ctx->dev = dev;
+        /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
+        atomic_set(&ctx->num_inflight, 1);
+        init_waitqueue_head(&ctx->wait);
+}
+EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
+
+/**
+ * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
+ * @ctx: The ctx to clean
+ *
+ * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
+ * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
+ * the call to mlx5_cmd_cleanup_async_ctx().
+ */
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
+{
+        atomic_dec(&ctx->num_inflight);
+        wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+}
+EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
+
+static void mlx5_cmd_exec_cb_handler(int status, void *_work)
+{
+        struct mlx5_async_work *work = _work;
+        struct mlx5_async_ctx *ctx = work->ctx;
+
+        work->user_callback(status, work);
+        if (atomic_dec_and_test(&ctx->num_inflight))
+                wake_up(&ctx->wait);
+}
+
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+                     void *out, int out_size, mlx5_async_cbk_t callback,
+                     struct mlx5_async_work *work)
+{
+        int ret;
+
+        work->ctx = ctx;
+        work->user_callback = callback;
+        if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
+                return -EIO;
+        ret = cmd_exec_helper(ctx->dev, in, in_size, out, out_size,
+                              mlx5_cmd_exec_cb_handler, work, false);
+        if (ret && atomic_dec_and_test(&ctx->num_inflight))
+                wake_up(&ctx->wait);
+
+        return ret;
 }
 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
 
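The fence itself is a biased in-flight counter. num_inflight starts at 1 so
that completions never see zero (and never call wake_up()) while the context
is live; each submission takes a reference with atomic_inc_not_zero(), so a
submission racing with teardown finds the count already at zero and trips the
WARN_ON instead of touching a dying context; cleanup drops the bias and
sleeps until the count reaches zero. A standalone userspace model of the same
counting scheme, assuming C11 <stdatomic.h> (illustrative only; the real code
uses kernel atomics and the waitqueue shown above):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int num_inflight = 1;  /* bias held while the ctx is live */

    static bool submit_get(void)         /* mirrors atomic_inc_not_zero() */
    {
            int v = atomic_load(&num_inflight);

            do {
                    if (v == 0)
                            return false;  /* cleanup already ran: refuse */
            } while (!atomic_compare_exchange_weak(&num_inflight, &v, v + 1));
            return true;
    }

    static bool put_is_last(void)        /* mirrors atomic_dec_and_test() */
    {
            return atomic_fetch_sub(&num_inflight, 1) == 1;
    }

    static void complete_one(void)       /* completion path */
    {
            if (put_is_last())
                    ;  /* last ref: wake the cleaner (wake_up() in real code) */
    }

    static void cleanup(void)            /* teardown path */
    {
            complete_one();              /* drop the bias */
            while (atomic_load(&num_inflight) != 0)
                    ;  /* the real code sleeps on a waitqueue, not a spin */
    }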
@@ -50,9 +50,10 @@ void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
 
 int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
                              struct mlx5_core_mr *mkey,
-                             u32 *in, int inlen,
-                             u32 *out, int outlen,
-                             mlx5_cmd_cbk_t callback, void *context)
+                             struct mlx5_async_ctx *async_ctx, u32 *in,
+                             int inlen, u32 *out, int outlen,
+                             mlx5_async_cbk_t callback,
+                             struct mlx5_async_work *context)
 {
         struct mlx5_mr_table *table = &dev->priv.mr_table;
         u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
@@ -77,7 +78,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
         }
 
         if (callback)
-                return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+                return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
                                         callback, context);
 
         err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
@@ -113,7 +114,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                           struct mlx5_core_mr *mkey,
                           u32 *in, int inlen)
 {
-        return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+        return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen,
                                         NULL, 0, NULL, NULL);
 }
 EXPORT_SYMBOL(mlx5_core_create_mkey);
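With the widened signature there is one entry point for both modes: the
synchronous wrapper above passes NULL for the async context and callback and
falls through to the blocking mlx5_cmd_exec(), while an asynchronous caller
supplies a context plus an embedded work item, roughly as below (a sketch
reusing the illustrative my_request type from earlier; mdev, mkey, in, inlen,
and async_ctx are placeholder caller state):

    err = mlx5_core_create_mkey_cb(mdev, &mkey, &async_ctx,
                                   in, inlen, req->out, sizeof(req->out),
                                   my_request_done, &req->cb_work);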
@@ -509,6 +509,7 @@ struct mlx5_ib_mr {
         int                     live;
         void                    *descs_alloc;
         int                     access_flags; /* Needed for rereg MR */
+        struct mlx5_async_work  cb_work;
 };
 
 struct mlx5_ib_mw {
@@ -693,6 +694,8 @@ struct mlx5_ib_dev {
         /* Array with num_ports elements */
         struct mlx5_ib_port     *port;
         struct mlx5_ib_congestion congestion;
+
+        struct mlx5_async_ctx   async_ctx;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -104,9 +104,10 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
 }
 #endif
 
-static void reg_mr_callback(int status, void *context)
+static void reg_mr_callback(int status, struct mlx5_async_work *context)
 {
-        struct mlx5_ib_mr *mr = context;
+        struct mlx5_ib_mr *mr =
+                container_of(context, struct mlx5_ib_mr, cb_work);
         struct mlx5_ib_dev *dev = mr->dev;
         struct mlx5_mr_cache *cache = &dev->cache;
         int c = order2idx(dev, mr->order);
@@ -192,9 +193,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                 ent->pending++;
                 spin_unlock_irq(&ent->lock);
                 err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
-                                               in, inlen,
+                                               &dev->async_ctx, in, inlen,
                                                mr->out, sizeof(mr->out),
-                                               reg_mr_callback, mr);
+                                               reg_mr_callback, &mr->cb_work);
                 if (err) {
                         spin_lock_irq(&ent->lock);
                         ent->pending--;
@@ -429,6 +430,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                 return -ENOMEM;
         }
 
+        mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
         setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
         for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                 INIT_LIST_HEAD(&cache->ent[i].head);
@@ -460,6 +462,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 
         dev->cache.stopped = 1;
         flush_workqueue(dev->cache.wq);
+        mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
         for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                 clean_keys(dev, i);
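The teardown ordering in the last hunk is what satisfies the new API's
contract: producers are stopped before the fence is taken, so no
mlx5_cmd_exec_cb() call can race with the cleanup. In outline:

    dev->cache.stopped = 1;                      /* stop queuing cache work */
    flush_workqueue(dev->cache.wq);              /* no add_keys() running */
    mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); /* all callbacks finished */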