Add more comments on newly-added functions.

Add back opstate handler function.
Alexander V. Chernikov 2014-09-02 14:27:12 +00:00
parent 0cba2b2802
commit 71af39bf34
3 changed files with 101 additions and 22 deletions

@ -481,8 +481,59 @@ flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
free(ta_buf_m, M_TEMP);
}
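/*
* Per-chain opstate handler for in-flight add operations: invoked, with the
* UH lock held, by code that changes shared table state. If the notification
* concerns our table or chain, roll back the value linkage done so far and
* mark the operation as modified so the caller restarts it.
*/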
static void
rollback_add_entry(void *object, struct op_state *_state)
{
struct ip_fw_chain *ch;
struct tableop_state *ts;
ts = (struct tableop_state *)_state;
if (ts->tc != object && ts->ch != object)
return;
ch = ts->ch;
IPFW_UH_WLOCK_ASSERT(ch);
/* Call the specified unlockers */
rollback_table_values(ts);
/* Indicate we've called */
ts->modified = 1;
}
/*
* Adds/updates one or more entries in table @ti.
*
* The function may drop/reacquire the UH wlock multiple times due to
* item allocation, algorithm callbacks (check_space), value linkage
* (new values, value storage realloc), etc.
* Other operations such as other adds (which may involve storage resize),
* table swaps (which change table data and may change algo type) and
* table modify (which may change the value mask) may be executed
* simultaneously, so we need to deal with that.
*
* The following approach was implemented:
* we have a per-chain linked list, protected by the UH lock.
* add_table_entry prepares a special on-stack structure which is passed
* to its descendants. Users add this structure to the list before unlocking.
* After performing the needed operations and re-acquiring the UH lock, each
* user checks whether the structure has changed. If so, it rolls its local
* state back and returns to the caller without error.
* add_table_entry() itself checks whether the structure has changed and
* restarts its operation from the beginning (goto restart).
*
* Functions which modify the fields of interest (currently
* resize_shared_value_storage() and swap_tables())
* traverse the given list while holding the UH lock immediately before
* performing their operations, calling the function provided by each list
* entry (currently rollback_add_entry), which rolls back all necessary
* state and sets appropriate values in the structure to indicate that a
* rollback has happened.
*
* Algo interaction:
* Function references @ti first to ensure table won't
* disappear or change its type.
* After that, prepare_add callback is called for each @tei entry.
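
The comment above describes the opstate mechanism in prose; the sketch below is illustrative only and not part of this commit. The per-chain list head, the queue macros and the helpers add_toperation_state()/del_toperation_state() are assumptions used for illustration; the lock macros, struct tableop_state and struct op_state fields are the ones visible in this diff.

/* Consumer side (sketch): an add operation registers its on-stack state. */
struct tableop_state ts;

memset(&ts, 0, sizeof(ts));
ts.ch = ch;
ts.tc = tc;
ts.opstate.func = rollback_add_entry;

add_toperation_state(ch, &ts);		/* hypothetical: link into the per-chain list */
IPFW_UH_WUNLOCK(ch);
/* ... allocate items, run algo callbacks, link values ... */
IPFW_UH_WLOCK(ch);
del_toperation_state(ch, &ts);		/* hypothetical: unlink from the list */
if (ts.modified != 0)
	goto restart;			/* another operation rolled our state back */

/* Notifier side (sketch): e.g. a storage resize walking the list. */
struct op_state *os;

IPFW_UH_WLOCK_ASSERT(ch);
TAILQ_FOREACH(os, &ch->fn_list, next)	/* list/field names are assumptions */
	os->func(object, os);		/* e.g. rollback_add_entry(object, os) */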
@ -526,6 +577,7 @@ restart:
}
ta = tc->ta;
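/* Prepare the on-stack tableop state used for rollback notifications. */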
ts.ch = ch;
ts.opstate.func = rollback_add_entry;
ts.tc = tc;
ts.vshared = tc->vshared;
ts.vmask = tc->vmask;
@ -624,7 +676,7 @@ restart:
IPFW_WUNLOCK(ch);
ipfw_finalize_table_values(ch, tc, tei, count, rollback);
ipfw_garbage_table_values(ch, tc, tei, count, rollback);
/* Permit post-add algorithm grow/rehash. */
if (numadd != 0)
@ -714,7 +766,7 @@ del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
IPFW_WUNLOCK(ch);
/* Unlink non-used values */
ipfw_finalize_table_values(ch, tc, tei, count, 0);
ipfw_garbage_table_values(ch, tc, tei, count, 0);
if (numdel != 0) {
/* Run post-del hook to permit shrinking */

@ -198,12 +198,13 @@ struct tableop_state;
void ipfw_table_value_init(struct ip_fw_chain *ch);
void ipfw_table_value_destroy(struct ip_fw_chain *ch);
int ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts);
void ipfw_finalize_table_values(struct ip_fw_chain *ch, struct table_config *tc,
void ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct tentry_info *tei, uint32_t count, int rollback);
void ipfw_import_table_value_v1(ipfw_table_value *iv);
void ipfw_export_table_value_v1(struct table_value *v, ipfw_table_value *iv);
void ipfw_unref_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct table_algo *ta, void *astate, struct table_info *ti);
void rollback_table_values(struct tableop_state *ts);
int ipfw_rewrite_table_uidx(struct ip_fw_chain *chain,
struct rule_check_info *ci);

@ -166,6 +166,9 @@ get_value_ptrs(struct ip_fw_chain *ch, struct table_config *tc, int vshared,
*pvi = vi;
}
/*
* Update pointers to real values after @pval change.
*/
static void
update_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
{
@ -184,9 +187,12 @@ update_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
/*
* Grows value storage shared among all tables.
* Drops/reacquires UH locks.
* Notifies other running adds on @ch about the shared storage resize.
* Note the function does not guarantee that free space
* will be available after invocation, so the caller needs
* to retry the cycle itself.
*
* Returns 0 on success.
* Note the caller has to check the @ts "modified" field.
* Returns 0 in case of no errors.
*/
static int
resize_shared_value_storage(struct ip_fw_chain *ch)
@ -259,6 +265,10 @@ done:
return (0);
}
/*
* Drops reference for table value with index @kidx, stored in @pval and
* @vi. Frees value if it has no references.
*/
static void
unref_table_value(struct namedobj_instance *vi, struct table_value *pval,
uint32_t kidx)
@ -339,21 +349,15 @@ ipfw_unref_table_values(struct ip_fw_chain *ch, struct table_config *tc,
* and set "modified" field to non-zero value to indicate
* that we need to restart original operation.
*/
static void
rollback_table_values(void *object, struct op_state *_state)
void
rollback_table_values(struct tableop_state *ts)
{
struct ip_fw_chain *ch;
struct tableop_state *ts;
struct table_value *pval;
struct tentry_info *ptei;
struct namedobj_instance *vi;
int i;
ts = (struct tableop_state *)_state;
if (ts->tc != object && ts->ch != object)
return;
ch = ts->ch;
IPFW_UH_WLOCK_ASSERT(ch);
@ -369,8 +373,6 @@ rollback_table_values(void *object, struct op_state *_state)
unref_table_value(vi, pval, ptei->value);
}
ts->modified = 1;
}
/*
@ -378,7 +380,6 @@ rollback_table_values(void *object, struct op_state *_state)
* Function may drop/reacquire UH lock.
*
* Returns 0 on success.
* Note that the caller has to check the @ts "modified" value.
*/
static int
alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
@ -397,7 +398,7 @@ alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
* lock/unlock, so we need to check "modified"
* state.
*/
rollback_table_values(ts->tc, &ts->opstate);
ts->opstate.func(ts->tc, &ts->opstate);
error = resize_shared_value_storage(ch);
return (error); /* ts->modified should be set, we will restart */
}
@ -428,11 +429,11 @@ alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
}
/*
* Drops value reference for unused values (updates, partially
* Drops value reference for unused values (updates, deletes, partially
* successful adds or rollbacks).
*/
void
ipfw_finalize_table_values(struct ip_fw_chain *ch, struct table_config *tc,
ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct tentry_info *tei, uint32_t count, int rollback)
{
int i;
@ -441,7 +442,7 @@ ipfw_finalize_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct namedobj_instance *vi;
/*
* We have two slightly different cases here:
* We have two slightly different ADD cases here:
* either (1) we are successful / partially successful,
* in that case we need
* * to ignore ADDED entries values
@ -452,6 +453,8 @@ ipfw_finalize_table_values(struct ip_fw_chain *ch, struct table_config *tc,
* (2): atomic rollback of partially successful operation
* in that case we simply need to unref all entries.
*
* DELETE case is simpler: no atomic support there, so
* we simply unref all non-zero values.
*/
/*
@ -482,6 +485,13 @@ ipfw_finalize_table_values(struct ip_fw_chain *ch, struct table_config *tc,
}
}
/*
* Main function used to link the values of entries being added
* to the index. Since we may perform many UH lock drops/acquires,
* handle changes by checking the tablestate "modified" field.
*
* Returns 0 on success.
*/
int
ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
{
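
A hedged usage sketch, not taken from this commit, of how the add path is expected to consume this function; the cleanup label is an assumption:

error = ipfw_link_table_values(ch, &ts);
if (error != 0)
	goto cleanup;		/* hypothetical error path */
if (ts.modified != 0)
	goto restart;		/* shared state changed while unlocked; redo the add */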
@ -496,7 +506,7 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
/*
* Stage 1: reference all existing values and
* save them inside the bitmask.
* save their indices.
*/
IPFW_UH_WLOCK_ASSERT(ch);
get_value_ptrs(ch, ts->tc, ts->vshared, &pval, &vi);
@ -582,9 +592,10 @@ ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
/* May perform UH unlock/lock */
error = alloc_table_vidx(ch, ts, vi, &vidx);
if (error != 0) {
rollback_table_values(tc, &ts->opstate);
ts->opstate.func(ts->tc, &ts->opstate);
return (error);
}
/* value storage resize has happened, return */
if (ts->modified != 0)
return (0);
@ -625,6 +636,9 @@ ipfw_import_table_value_legacy(uint32_t value, struct table_value *v)
v->limit = value;
}
/*
* Exports data to legacy table dump opcodes.
*/
uint32_t
ipfw_export_table_value_legacy(struct table_value *v)
{
@ -636,6 +650,10 @@ ipfw_export_table_value_legacy(struct table_value *v)
return (v->tag);
}
/*
* Imports table value from current userland format.
* Saves value in kernel format to the same place.
*/
void
ipfw_import_table_value_v1(ipfw_table_value *iv)
{
@ -657,6 +675,10 @@ ipfw_import_table_value_v1(ipfw_table_value *iv)
memcpy(iv, &v, sizeof(ipfw_table_value));
}
/*
* Export real table value @v to current userland format.
* Note that @v and @piv may point to the same memory.
*/
void
ipfw_export_table_value_v1(struct table_value *v, ipfw_table_value *piv)
{
@ -678,6 +700,10 @@ ipfw_export_table_value_v1(struct table_value *v, ipfw_table_value *piv)
memcpy(piv, &iv, sizeof(iv));
}
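
A minimal hedged example of the conversion pair above, assuming a caller that owns the userland buffer and has already looked up a kernel value @v; it is not taken from the source:

ipfw_table_value uv;			/* buffer in the current userland layout */

/* Input path: convert a value received from userland to kernel format, in place. */
ipfw_import_table_value_v1(&uv);

/* Output path: export a kernel struct table_value *v into userland format. */
ipfw_export_table_value_v1(v, &uv);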
/*
* Exports real value data into ipfw_table_value structure.
* Utilizes "spare1" field to store kernel index.
*/
static void
dump_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
{