Fib algo: extend KPI by allowing algo to set datapath pointers.

Some algorithms may require updating datapath and control plane
 algo pointers after the (batched) updates.

Export fib_set_datapath_ptr() to allow setting the new datapath
 function or data pointer from the algo.
Add fib_set_algo_ptr() to allow updating algo control plane
 pointer from the algo.
Add fib_epoch_call() epoch(9) wrapper to simplify freeing old
 datapath state.

Reviewed by:		zec
Differential Revision: https://reviews.freebsd.org/D29799
MFC after:		1 week
This commit is contained in:
Alexander V. Chernikov 2021-04-15 18:44:11 +01:00
parent 9e644c2300
commit e2f79d9e51
2 changed files with 42 additions and 21 deletions

View File

@ -197,7 +197,6 @@ static bool rebuild_fd(struct fib_data *fd, const char *reason);
static bool rebuild_fd_flm(struct fib_data *fd, struct fib_lookup_module *flm_new); static bool rebuild_fd_flm(struct fib_data *fd, struct fib_lookup_module *flm_new);
static void handle_fd_callout(void *_data); static void handle_fd_callout(void *_data);
static void destroy_fd_instance_epoch(epoch_context_t ctx); static void destroy_fd_instance_epoch(epoch_context_t ctx);
static enum flm_op_result attach_datapath(struct fib_data *fd);
static bool is_idx_free(struct fib_data *fd, uint32_t index); static bool is_idx_free(struct fib_data *fd, uint32_t index);
static void set_algo_fixed(struct rib_head *rh); static void set_algo_fixed(struct rib_head *rh);
static bool is_algo_fixed(struct rib_head *rh); static bool is_algo_fixed(struct rib_head *rh);
@ -1014,8 +1013,7 @@ schedule_destroy_fd_instance(struct fib_data *fd, bool in_callout)
*/ */
callout_stop(&fd->fd_callout); callout_stop(&fd->fd_callout);
epoch_call(net_epoch_preempt, destroy_fd_instance_epoch, fib_epoch_call(destroy_fd_instance_epoch, &fd->fd_epoch_ctx);
&fd->fd_epoch_ctx);
return (0); return (0);
} }
@ -1237,8 +1235,10 @@ setup_fd_instance(struct fib_lookup_module *flm, struct rib_head *rh,
for (int i = 0; i < FIB_MAX_TRIES; i++) { for (int i = 0; i < FIB_MAX_TRIES; i++) {
result = try_setup_fd_instance(flm, rh, prev_fd, &new_fd); result = try_setup_fd_instance(flm, rh, prev_fd, &new_fd);
if ((result == FLM_SUCCESS) && attach) if ((result == FLM_SUCCESS) && attach) {
result = attach_datapath(new_fd); if (!fib_set_datapath_ptr(new_fd, &new_fd->fd_dp))
result = FLM_REBUILD;
}
if ((prev_fd != NULL) && (prev_fd != orig_fd)) { if ((prev_fd != NULL) && (prev_fd != orig_fd)) {
schedule_destroy_fd_instance(prev_fd, false); schedule_destroy_fd_instance(prev_fd, false);
@ -1546,32 +1546,33 @@ get_fib_dp_header(struct fib_dp *dp)
/* /*
* Replace per-family index pool @pdp with a new one which * Replace per-family index pool @pdp with a new one which
* contains updated callback/algo data from @fd. * contains updated callback/algo data from @fd.
* Returns 0 on success. * Returns true on success.
*/ */
static enum flm_op_result static bool
replace_rtables_family(struct fib_dp **pdp, struct fib_data *fd) replace_rtables_family(struct fib_dp **pdp, struct fib_data *fd, struct fib_dp *dp)
{ {
struct fib_dp_header *new_fdh, *old_fdh; struct fib_dp_header *new_fdh, *old_fdh;
NET_EPOCH_ASSERT(); NET_EPOCH_ASSERT();
FD_PRINTF(LOG_DEBUG, fd, "[vnet %p] replace with f:%p arg:%p", FD_PRINTF(LOG_DEBUG, fd, "[vnet %p] replace with f:%p arg:%p",
curvnet, fd->fd_dp.f, fd->fd_dp.arg); curvnet, dp->f, dp->arg);
FIB_MOD_LOCK(); FIB_MOD_LOCK();
old_fdh = get_fib_dp_header(*pdp); old_fdh = get_fib_dp_header(*pdp);
new_fdh = alloc_fib_dp_array(old_fdh->fdh_num_tables, false); new_fdh = alloc_fib_dp_array(old_fdh->fdh_num_tables, false);
FD_PRINTF(LOG_DEBUG, fd, "OLD FDH: %p NEW FDH: %p", old_fdh, new_fdh); FD_PRINTF(LOG_DEBUG, fd, "OLD FDH: %p NEW FDH: %p", old_fdh, new_fdh);
if (new_fdh == NULL) { if (new_fdh == NULL) {
FIB_MOD_UNLOCK(); FIB_MOD_UNLOCK();
FD_PRINTF(LOG_WARNING, fd, "error attaching datapath"); FD_PRINTF(LOG_WARNING, fd, "error attaching datapath");
return (FLM_REBUILD); return (false);
} }
memcpy(&new_fdh->fdh_idx[0], &old_fdh->fdh_idx[0], memcpy(&new_fdh->fdh_idx[0], &old_fdh->fdh_idx[0],
old_fdh->fdh_num_tables * sizeof(struct fib_dp)); old_fdh->fdh_num_tables * sizeof(struct fib_dp));
/* Update relevant data structure for @fd */ /* Update relevant data structure for @fd */
new_fdh->fdh_idx[fd->fd_fibnum] = fd->fd_dp; new_fdh->fdh_idx[fd->fd_fibnum] = *dp;
/* Ensure memcpy() writes have completed */ /* Ensure memcpy() writes have completed */
atomic_thread_fence_rel(); atomic_thread_fence_rel();
@ -1580,10 +1581,9 @@ replace_rtables_family(struct fib_dp **pdp, struct fib_data *fd)
FIB_MOD_UNLOCK(); FIB_MOD_UNLOCK();
FD_PRINTF(LOG_DEBUG, fd, "update %p -> %p", old_fdh, new_fdh); FD_PRINTF(LOG_DEBUG, fd, "update %p -> %p", old_fdh, new_fdh);
epoch_call(net_epoch_preempt, destroy_fdh_epoch, fib_epoch_call(destroy_fdh_epoch, &old_fdh->fdh_epoch_ctx);
&old_fdh->fdh_epoch_ctx);
return (FLM_SUCCESS); return (true);
} }
static struct fib_dp ** static struct fib_dp **
@ -1601,13 +1601,13 @@ get_family_dp_ptr(int family)
/* /*
* Make datapath use fib instance @fd * Make datapath use fib instance @fd
*/ */
static enum flm_op_result bool
attach_datapath(struct fib_data *fd) fib_set_datapath_ptr(struct fib_data *fd, struct fib_dp *dp)
{ {
struct fib_dp **pdp; struct fib_dp **pdp;
pdp = get_family_dp_ptr(fd->fd_family); pdp = get_family_dp_ptr(fd->fd_family);
return (replace_rtables_family(pdp, fd)); return (replace_rtables_family(pdp, fd, dp));
} }
/* /*
@ -1635,8 +1635,7 @@ grow_rtables_family(struct fib_dp **pdp, uint32_t new_num_tables)
FIB_MOD_UNLOCK(); FIB_MOD_UNLOCK();
if (old_fdh != NULL) if (old_fdh != NULL)
epoch_call(net_epoch_preempt, destroy_fdh_epoch, fib_epoch_call(destroy_fdh_epoch, &old_fdh->fdh_epoch_ctx);
&old_fdh->fdh_epoch_ctx);
} }
/* /*
@ -1667,6 +1666,26 @@ fib_get_rtable_info(struct rib_head *rh, struct rib_rtable_info *rinfo)
#endif #endif
} }
/*
 * Updates pointer to the algo data for the @fd.
 *
 * Stores @algo_data as the algorithm-private control plane pointer of
 *  fib instance @fd.  Exported so a lookup algorithm can swap its own
 *  state after (batched) updates.  The caller must hold the write lock
 *  of the rib head @fd is attached to, as asserted below.
 */
void
fib_set_algo_ptr(struct fib_data *fd, void *algo_data)
{

	RIB_WLOCK_ASSERT(fd->fd_rh);

	fd->fd_algo_data = algo_data;
}
/*
 * Calls @callback with @ctx after the end of a current epoch.
 *
 * Thin wrapper around epoch_call(9) pinned to net_epoch_preempt,
 *  exported so lookup algorithms can defer freeing of old datapath
 *  state without referencing the epoch tracker directly.
 * @ctx must remain valid until @callback runs.
 */
void
fib_epoch_call(epoch_callback_t callback, epoch_context_t ctx)
{

	epoch_call(net_epoch_preempt, callback, ctx);
}
/* /*
* Accessor to get rib instance @fd is attached to. * Accessor to get rib instance @fd is attached to.
*/ */
@ -1765,7 +1784,7 @@ fib_schedule_release_nhop(struct fib_data *fd, struct nhop_object *nh)
nrd = malloc(sizeof(struct nhop_release_data), M_TEMP, M_NOWAIT | M_ZERO); nrd = malloc(sizeof(struct nhop_release_data), M_TEMP, M_NOWAIT | M_ZERO);
if (nrd != NULL) { if (nrd != NULL) {
nrd->nh = nh; nrd->nh = nh;
epoch_call(net_epoch_preempt, release_nhop_epoch, &nrd->ctx); fib_epoch_call(release_nhop_epoch, &nrd->ctx);
} else { } else {
/* /*
* Unable to allocate memory. Leak nexthop to maintain guarantee * Unable to allocate memory. Leak nexthop to maintain guarantee

View File

@ -128,5 +128,7 @@ uint32_t fib_get_nhop_idx(struct fib_data *fd, struct nhop_object *nh);
struct nhop_object **fib_get_nhop_array(struct fib_data *fd); struct nhop_object **fib_get_nhop_array(struct fib_data *fd);
void fib_get_rtable_info(struct rib_head *rh, struct rib_rtable_info *rinfo); void fib_get_rtable_info(struct rib_head *rh, struct rib_rtable_info *rinfo);
struct rib_head *fib_get_rh(struct fib_data *fd); struct rib_head *fib_get_rh(struct fib_data *fd);
bool fib_set_datapath_ptr(struct fib_data *fd, struct fib_dp *dp);
void fib_set_algo_ptr(struct fib_data *fd, void *algo_data);
void fib_epoch_call(epoch_callback_t callback, epoch_context_t ctx);