Remove a couple of Copan's vendor-specific mode pages.

Those pages are highly system- and hardware-specific, the code is incomplete,
and so they can hardly be useful to anybody else.
Alexander Motin 2014-10-14 11:28:25 +00:00
parent 523f047ea2
commit 9a0190c9a1
6 changed files with 0 additions and 563 deletions

View File

@@ -109,56 +109,6 @@ struct ctl_softc *control_softc = NULL;
* Note that these are default values only. The actual values will be
* filled in when the user does a mode sense.
*/
static struct copan_power_subpage power_page_default = {
/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
/*subpage*/ PWR_SUBPAGE_CODE,
/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
(sizeof(struct copan_power_subpage) - 4) & 0x00ff},
/*page_version*/ PWR_VERSION,
/* total_luns */ 26,
/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0}
};
static struct copan_power_subpage power_page_changeable = {
/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
/*subpage*/ PWR_SUBPAGE_CODE,
/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
(sizeof(struct copan_power_subpage) - 4) & 0x00ff},
/*page_version*/ 0,
/* total_luns */ 0,
/* max_active_luns*/ 0,
/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0}
};
static struct copan_aps_subpage aps_page_default = {
APS_PAGE_CODE | SMPH_SPF, //page_code
APS_SUBPAGE_CODE, //subpage
{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
(sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
APS_VERSION, //page_version
0, //lock_active
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0} //reserved
};
static struct copan_aps_subpage aps_page_changeable = {
APS_PAGE_CODE | SMPH_SPF, //page_code
APS_SUBPAGE_CODE, //subpage
{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
(sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
0, //page_version
0, //lock_active
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0} //reserved
};
static struct copan_debugconf_subpage debugconf_page_default = {
DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */
DBGCNF_SUBPAGE_CODE, /* subpage */
@@ -371,7 +321,6 @@ static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single = 1;
static int index_to_aps_page;
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
@@ -949,26 +898,6 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
case CTL_MSG_SYNC_FE:
rcv_sync_msg = 1;
break;
case CTL_MSG_APS_LOCK: {
// It's quicker to execute this then to
// queue it.
struct ctl_lun *lun;
struct ctl_page_index *page_index;
struct copan_aps_subpage *current_sp;
uint32_t targ_lun;
targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
lun = ctl_softc->ctl_luns[targ_lun];
mtx_lock(&lun->lun_lock);
page_index = &lun->mode_pages.index[index_to_aps_page];
current_sp = (struct copan_aps_subpage *)
(page_index->page_data +
(page_index->page_len * CTL_PAGE_CURRENT));
current_sp->lock_active = msg_info.aps.lock_flag;
mtx_unlock(&lun->lun_lock);
break;
}
default:
printf("How did I get here?\n");
}
@@ -4057,156 +3986,6 @@ ctl_copy_io(union ctl_io *src, union ctl_io *dest)
dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
}
#ifdef NEEDTOPORT
static void
ctl_update_power_subpage(struct copan_power_subpage *page)
{
int num_luns, num_partitions, config_type;
struct ctl_softc *softc;
cs_BOOL_t aor_present, shelf_50pct_power;
cs_raidset_personality_t rs_type;
int max_active_luns;
softc = control_softc;
/* subtract out the processor LUN */
num_luns = softc->num_luns - 1;
/*
* Default to 7 LUNs active, which was the only number we allowed
* in the past.
*/
max_active_luns = 7;
num_partitions = config_GetRsPartitionInfo();
config_type = config_GetConfigType();
shelf_50pct_power = config_GetShelfPowerMode();
aor_present = config_IsAorRsPresent();
rs_type = ddb_GetRsRaidType(1);
if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
&& (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
EPRINT(0, "Unsupported RS type %d!", rs_type);
}
page->total_luns = num_luns;
switch (config_type) {
case 40:
/*
* In a 40 drive configuration, it doesn't matter what DC
* cards we have, whether we have AOR enabled or not,
* partitioning or not, or what type of RAIDset we have.
* In that scenario, we can power up every LUN we present
* to the user.
*/
max_active_luns = num_luns;
break;
case 64:
if (shelf_50pct_power == CS_FALSE) {
/* 25% power */
if (aor_present == CS_TRUE) {
if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID5) {
max_active_luns = 7;
} else if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID1){
max_active_luns = 14;
} else {
/* XXX KDM now what?? */
}
} else {
if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID5) {
max_active_luns = 8;
} else if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID1){
max_active_luns = 16;
} else {
/* XXX KDM now what?? */
}
}
} else {
/* 50% power */
/*
* With 50% power in a 64 drive configuration, we
* can power all LUNs we present.
*/
max_active_luns = num_luns;
}
break;
case 112:
if (shelf_50pct_power == CS_FALSE) {
/* 25% power */
if (aor_present == CS_TRUE) {
if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID5) {
max_active_luns = 7;
} else if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID1){
max_active_luns = 14;
} else {
/* XXX KDM now what?? */
}
} else {
if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID5) {
max_active_luns = 8;
} else if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID1){
max_active_luns = 16;
} else {
/* XXX KDM now what?? */
}
}
} else {
/* 50% power */
if (aor_present == CS_TRUE) {
if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID5) {
max_active_luns = 14;
} else if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID1){
/*
* We're assuming here that disk
* caching is enabled, and so we're
* able to power up half of each
* LUN, and cache all writes.
*/
max_active_luns = num_luns;
} else {
/* XXX KDM now what?? */
}
} else {
if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID5) {
max_active_luns = 15;
} else if (rs_type ==
CS_RAIDSET_PERSONALITY_RAID1){
max_active_luns = 30;
} else {
/* XXX KDM now what?? */
}
}
}
break;
default:
/*
* In this case, we have an unknown configuration, so we
* just use the default from above.
*/
break;
}
page->max_active_luns = max_active_luns;
#if 0
printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
page->total_luns, page->max_active_luns);
#endif
}
#endif /* NEEDTOPORT */
/*
* This routine could be used in the future to load default and/or saved
* mode page parameters for a particuar lun.
@@ -4481,77 +4260,6 @@ ctl_init_page_index(struct ctl_lun *lun)
}
case SMS_VENDOR_SPECIFIC_PAGE:{
switch (page_index->subpage) {
case PWR_SUBPAGE_CODE: {
struct copan_power_subpage *current_page,
*saved_page;
memcpy(&lun->mode_pages.power_subpage[
CTL_PAGE_CURRENT],
&power_page_default,
sizeof(power_page_default));
memcpy(&lun->mode_pages.power_subpage[
CTL_PAGE_CHANGEABLE],
&power_page_changeable,
sizeof(power_page_changeable));
memcpy(&lun->mode_pages.power_subpage[
CTL_PAGE_DEFAULT],
&power_page_default,
sizeof(power_page_default));
memcpy(&lun->mode_pages.power_subpage[
CTL_PAGE_SAVED],
&power_page_default,
sizeof(power_page_default));
page_index->page_data =
(uint8_t *)lun->mode_pages.power_subpage;
current_page = (struct copan_power_subpage *)
(page_index->page_data +
(page_index->page_len *
CTL_PAGE_CURRENT));
saved_page = (struct copan_power_subpage *)
(page_index->page_data +
(page_index->page_len *
CTL_PAGE_SAVED));
break;
}
case APS_SUBPAGE_CODE: {
struct copan_aps_subpage *current_page,
*saved_page;
// This gets set multiple times but
// it should always be the same. It's
// only done during init so who cares.
index_to_aps_page = i;
memcpy(&lun->mode_pages.aps_subpage[
CTL_PAGE_CURRENT],
&aps_page_default,
sizeof(aps_page_default));
memcpy(&lun->mode_pages.aps_subpage[
CTL_PAGE_CHANGEABLE],
&aps_page_changeable,
sizeof(aps_page_changeable));
memcpy(&lun->mode_pages.aps_subpage[
CTL_PAGE_DEFAULT],
&aps_page_default,
sizeof(aps_page_default));
memcpy(&lun->mode_pages.aps_subpage[
CTL_PAGE_SAVED],
&aps_page_default,
sizeof(aps_page_default));
page_index->page_data =
(uint8_t *)lun->mode_pages.aps_subpage;
current_page = (struct copan_aps_subpage *)
(page_index->page_data +
(page_index->page_len *
CTL_PAGE_CURRENT));
saved_page = (struct copan_aps_subpage *)
(page_index->page_data +
(page_index->page_len *
CTL_PAGE_SAVED));
break;
}
case DBGCNF_SUBPAGE_CODE: {
struct copan_debugconf_subpage *current_page,
*saved_page;
@@ -5270,95 +4978,6 @@ ctl_lun_operable(struct ctl_be_lun *be_lun)
return (0);
}
int
ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
int lock)
{
struct ctl_softc *softc;
struct ctl_lun *lun;
struct copan_aps_subpage *current_sp;
struct ctl_page_index *page_index;
int i;
softc = control_softc;
mtx_lock(&softc->ctl_lock);
lun = (struct ctl_lun *)be_lun->ctl_lun;
mtx_lock(&lun->lun_lock);
page_index = NULL;
for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
APS_PAGE_CODE)
continue;
if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
continue;
page_index = &lun->mode_pages.index[i];
}
if (page_index == NULL) {
mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
printf("%s: APS subpage not found for lun %ju!\n", __func__,
(uintmax_t)lun->lun);
return (1);
}
#if 0
if ((softc->aps_locked_lun != 0)
&& (softc->aps_locked_lun != lun->lun)) {
printf("%s: attempt to lock LUN %llu when %llu is already "
"locked\n");
mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (1);
}
#endif
current_sp = (struct copan_aps_subpage *)(page_index->page_data +
(page_index->page_len * CTL_PAGE_CURRENT));
if (lock != 0) {
current_sp->lock_active = APS_LOCK_ACTIVE;
softc->aps_locked_lun = lun->lun;
} else {
current_sp->lock_active = 0;
softc->aps_locked_lun = 0;
}
/*
* If we're in HA mode, try to send the lock message to the other
* side.
*/
if (ctl_is_single == 0) {
int isc_retval;
union ctl_ha_msg lock_msg;
lock_msg.hdr.nexus = *nexus;
lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
if (lock != 0)
lock_msg.aps.lock_flag = 1;
else
lock_msg.aps.lock_flag = 0;
isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
sizeof(lock_msg), 0);
if (isc_retval > CTL_HA_STATUS_SUCCESS) {
printf("%s: APS (lock=%d) error returned from "
"ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (1);
}
}
mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
return (0);
}
void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
@@ -6514,121 +6133,6 @@ ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
return (0);
}
int
ctl_power_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
return (0);
}
int
ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, int pc)
{
struct copan_power_subpage *page;
page = (struct copan_power_subpage *)page_index->page_data +
(page_index->page_len * pc);
switch (pc) {
case SMS_PAGE_CTRL_CHANGEABLE >> 6:
/*
* We don't update the changable bits for this page.
*/
break;
case SMS_PAGE_CTRL_CURRENT >> 6:
case SMS_PAGE_CTRL_DEFAULT >> 6:
case SMS_PAGE_CTRL_SAVED >> 6:
#ifdef NEEDTOPORT
ctl_update_power_subpage(page);
#endif
break;
default:
#ifdef NEEDTOPORT
EPRINT(0, "Invalid PC %d!!", pc);
#endif
break;
}
return (0);
}
int
ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr)
{
struct copan_aps_subpage *user_sp;
struct copan_aps_subpage *current_sp;
union ctl_modepage_info *modepage_info;
struct ctl_softc *softc;
struct ctl_lun *lun;
int retval;
retval = CTL_RETVAL_COMPLETE;
current_sp = (struct copan_aps_subpage *)(page_index->page_data +
(page_index->page_len * CTL_PAGE_CURRENT));
softc = control_softc;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
user_sp = (struct copan_aps_subpage *)page_ptr;
modepage_info = (union ctl_modepage_info *)
ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
modepage_info->header.subpage = page_index->subpage;
modepage_info->aps.lock_active = user_sp->lock_active;
mtx_lock(&softc->ctl_lock);
/*
* If there is a request to lock the LUN and another LUN is locked
* this is an error. If the requested LUN is already locked ignore
* the request. If no LUN is locked attempt to lock it.
* if there is a request to unlock the LUN and the LUN is currently
* locked attempt to unlock it. Otherwise ignore the request. i.e.
* if another LUN is locked or no LUN is locked.
*/
if (user_sp->lock_active & APS_LOCK_ACTIVE) {
if (softc->aps_locked_lun == lun->lun) {
/*
* This LUN is already locked, so we're done.
*/
retval = CTL_RETVAL_COMPLETE;
} else if (softc->aps_locked_lun == 0) {
/*
* No one has the lock, pass the request to the
* backend.
*/
retval = lun->backend->config_write(
(union ctl_io *)ctsio);
} else {
/*
* Someone else has the lock, throw out the request.
*/
ctl_set_already_locked(ctsio);
free(ctsio->kern_data_ptr, M_CTL);
ctl_done((union ctl_io *)ctsio);
/*
* Set the return value so that ctl_do_mode_select()
* won't try to complete the command. We already
* completed it here.
*/
retval = CTL_RETVAL_ERROR;
}
} else if (softc->aps_locked_lun == lun->lun) {
/*
* This LUN is locked, so pass the unlock request to the
* backend.
*/
retval = lun->backend->config_write((union ctl_io *)ctsio);
}
mtx_unlock(&softc->ctl_lock);
return (retval);
}
int
ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,

View File

@@ -172,12 +172,6 @@ int ctl_failover_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
uint8_t *page_ptr);
**/
int ctl_power_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr);
int ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, int pc);
int ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index, uint8_t *page_ptr);
int ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc);

View File

@@ -279,14 +279,6 @@ int ctl_stop_lun(struct ctl_be_lun *be_lun);
int ctl_lun_inoperable(struct ctl_be_lun *be_lun);
int ctl_lun_operable(struct ctl_be_lun *be_lun);
/*
* If a LUN is locked on or unlocked from a power/APS standpoint, call
* ctl_lun_power_lock() to update the current status in CTL's APS subpage.
* Set the lock flag to 1 to lock the LUN, set it to 0 to unlock the LUN.
*/
int ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
int lock);
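
For reference, a minimal hypothetical sketch of how a backend could have driven the removed ctl_lun_power_lock() call, based only on the comment and prototype above; the function name and call sequence are illustrative, not taken from any real backend in the tree:

/*
 * Hypothetical usage sketch (assumes the usual CTL backend headers are
 * included): mark the LUN locked in the APS subpage while it must stay
 * powered up, then clear the lock again afterwards.
 */
static int
example_power_cycle(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus)
{

	/* Record the lock in CTL's APS subpage (and on the HA peer). */
	if (ctl_lun_power_lock(be_lun, nexus, /*lock*/ 1) != 0)
		return (1);	/* no APS subpage found, or HA send failed */

	/* ... backend-specific power management work would go here ... */

	/* Drop the lock once the LUN no longer has to stay powered. */
	return (ctl_lun_power_lock(be_lun, nexus, /*lock*/ 0));
}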
/*
* To take a LUN offline, call ctl_lun_offline(). Generally the LUN will
* be online again once the user sends a SCSI START STOP UNIT command with

View File

@@ -234,7 +234,6 @@ typedef enum {
CTL_MSG_MANAGE_TASKS,
CTL_MSG_PERS_ACTION,
CTL_MSG_SYNC_FE,
CTL_MSG_APS_LOCK,
CTL_MSG_DATAMOVE,
CTL_MSG_DATAMOVE_DONE
} ctl_msg_type;
@@ -423,14 +422,6 @@ struct ctl_ha_msg_hdr {
#define CTL_HA_MAX_SG_ENTRIES 16
/*
* Used for CTL_MSG_APS_LOCK.
*/
struct ctl_ha_msg_aps {
struct ctl_ha_msg_hdr hdr;
uint8_t lock_flag;
};
/*
* Used for CTL_MSG_PERS_ACTION.
*/
@@ -500,7 +491,6 @@ union ctl_ha_msg {
struct ctl_ha_msg_scsi scsi;
struct ctl_ha_msg_dt dt;
struct ctl_ha_msg_pr pr;
struct ctl_ha_msg_aps aps;
};

View File

@@ -317,12 +317,6 @@ static const struct ctl_page_index page_index_template[] = {
{SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 0x02,
sizeof(struct scsi_logical_block_provisioning_page), NULL,
CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, PWR_SUBPAGE_CODE,
sizeof(struct copan_power_subpage), NULL, CTL_PAGE_FLAG_NONE,
ctl_power_sp_sense_handler, ctl_power_sp_handler},
{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, APS_SUBPAGE_CODE,
sizeof(struct copan_aps_subpage), NULL, CTL_PAGE_FLAG_NONE,
NULL, ctl_aps_sp_handler},
{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, DBGCNF_SUBPAGE_CODE,
sizeof(struct copan_debugconf_subpage), NULL, CTL_PAGE_FLAG_NONE,
ctl_debugconf_sp_sense_handler, ctl_debugconf_sp_select_handler},
@@ -339,8 +333,6 @@ struct ctl_mode_pages {
struct scsi_control_page control_page[4];
struct scsi_info_exceptions_page ie_page[4];
struct scsi_logical_block_provisioning_page lbp_page[4];
struct copan_power_subpage power_subpage[4];
struct copan_aps_subpage aps_subpage[4];
struct copan_debugconf_subpage debugconf_subpage[4];
struct ctl_page_index index[CTL_NUM_MODE_PAGES];
};
@@ -469,7 +461,6 @@ struct ctl_softc {
uint32_t ctl_lun_mask[(CTL_MAX_LUNS + 31) / 32];
struct ctl_lun *ctl_luns[CTL_MAX_LUNS];
uint32_t ctl_port_mask[(CTL_MAX_PORTS + 31) / 32];
uint64_t aps_locked_lun;
STAILQ_HEAD(, ctl_lun) lun_list;
STAILQ_HEAD(, ctl_be_lun) pending_lun_queue;
uint32_t num_frontends;

View File

@@ -706,40 +706,6 @@ struct scsi_caching_page {
uint8_t non_cache_seg_size[3];
};
/*
* XXX KDM move this off to a vendor shim.
*/
struct copan_power_subpage {
uint8_t page_code;
#define PWR_PAGE_CODE 0x00
uint8_t subpage;
#define PWR_SUBPAGE_CODE 0x02
uint8_t page_length[2];
uint8_t page_version;
#define PWR_VERSION 0x01
uint8_t total_luns;
uint8_t max_active_luns;
#define PWR_DFLT_MAX_LUNS 0x07
uint8_t reserved[25];
};
/*
* XXX KDM move this off to a vendor shim.
*/
struct copan_aps_subpage {
uint8_t page_code;
#define APS_PAGE_CODE 0x00
uint8_t subpage;
#define APS_SUBPAGE_CODE 0x03
uint8_t page_length[2];
uint8_t page_version;
#define APS_VERSION 0x00
uint8_t lock_active;
#define APS_LOCK_ACTIVE 0x01
#define APS_LOCK_INACTIVE 0x00
uint8_t reserved[26];
};
/*
* XXX KDM move this off to a vendor shim.
*/
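
As a worked illustration of the encoding used by these removed subpages: every field is a uint8_t, so sizeof(struct copan_power_subpage) is 1+1+2+1+1+1+25 = 32 bytes and the two page_length bytes hold 32 - 4 = 28, i.e. {0x00, 0x1C} big-endian. The static initializers earlier in this commit compute them as "(len) & 0xff00" and "(len) & 0x00ff", which gives the right bytes here only because the length fits in one byte; a general helper, shown purely as a sketch and not taken from this commit, would be:

/*
 * Illustrative only (assumes <sys/types.h> for uint8_t/size_t): store a
 * mode subpage length, i.e. the number of bytes following the 4-byte
 * subpage header, as a big-endian 16-bit value.
 */
static void
set_subpage_length(uint8_t page_length[2], size_t struct_size)
{
	size_t len = struct_size - 4;

	page_length[0] = (len >> 8) & 0xff;	/* MSB */
	page_length[1] = len & 0xff;		/* LSB */
}

Called as set_subpage_length(page.page_length, sizeof(struct copan_power_subpage)), this would yield the same {0x00, 0x1C} pair that the removed initializers produced.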