.gen2 = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .tx_cmd_queue_size = 32, \
.min_umac_error_event_table = 0x400000
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
* @gen2: 22000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
- * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
- * the regular queues
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
gen2:1,
cdb:1,
dbgc_supported:1;
- u16 tx_cmd_queue_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
- TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size);
+ TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
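For context, the value written to cmd_queue_size here is not a raw slot count: TFD_QUEUE_CB_SIZE() encodes the number of TFDs as a log2-based value for the firmware context info. A sketch of the assumed encoding (the real macro lives in iwl-fh.h and may differ):

	/* Assumed definition: queue size expressed as ilog2(entries) - 3,
	 * which is why the slot count must be a power of two greater than 8. */
	#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)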
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
* @hw_mask: current unmasked hw causes
- * @tx_cmd_queue_size: the size of the tx command queue
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
u32 fh_mask;
u32 hw_mask;
cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
- u16 tx_cmd_queue_size;
};
static inline struct iwl_trans_pcie *
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
struct iwl_txq *cmd_queue;
int txq_id = trans_pcie->cmd_queue, ret;
- iwl_pcie_set_tx_cmd_queue_size(trans);
-
/* alloc and init the command queue */
if (!trans_pcie->txq[txq_id]) {
cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		if (!cmd_queue)
			return -ENOMEM;
trans_pcie->txq[txq_id] = cmd_queue;
- ret = iwl_pcie_txq_alloc(trans, cmd_queue,
- trans_pcie->tx_cmd_queue_size, true);
+ ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
		}
	} else {
		cmd_queue = trans_pcie->txq[txq_id];
}
- ret = iwl_pcie_txq_init(trans, cmd_queue,
- trans_pcie->tx_cmd_queue_size, true);
+ ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
- TFD_TX_CMD_SLOTS;
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
return ret;
}
-void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue_size = TFD_CMD_SLOTS;
-
- if (trans->cfg->tx_cmd_queue_size)
- queue_size = trans->cfg->tx_cmd_queue_size;
-
- if (WARN_ON(!(is_power_of_2(queue_size) &&
- TFD_QUEUE_CB_SIZE(queue_size) > 0)))
- trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS;
- else
- trans_pcie->tx_cmd_queue_size = queue_size;
-}
-
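With the per-config knob and its setup helper gone, the command queue size is fixed at compile time. A sketch of the assumed slot definitions from pcie/internal.h (values shown here are typical for this driver, not taken from the patch itself):

	#define TFD_TX_CMD_SLOTS	256	/* entries in a regular TX queue */
	#define TFD_CMD_SLOTS		32	/* entries in the host command queue */

If TFD_CMD_SLOTS is indeed 32, dropping the .tx_cmd_queue_size = 32 entry from the 22000 config above leaves the effective command queue size unchanged.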
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id, slots_num;
bool alloc = false;
- iwl_pcie_set_tx_cmd_queue_size(trans);
-
if (!trans_pcie->txq_memory) {
ret = iwl_pcie_tx_alloc(trans);
if (ret)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
- TFD_TX_CMD_SLOTS;
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
if (ret) {