// SPDX-License-Identifier: GPL-2.0
#include "softmac_core.h"
#include "utils.h"
#include "img_host_txrx_buffs.h"
#include "img_mac80211_types.h"

#define TX_TO_MACDEV(x) ((struct img_priv *) \
			 (container_of(x, struct img_priv, tx)))

void img_set_buffer_status(struct sk_buff *skb, int tid, bool tx_done,
			   u8 twt_enable);

void wait_for_tx_complete(struct tx_config *tx)
{
	int count = 0;
	struct img_priv *priv = TX_TO_MACDEV(tx);

	/* find_last_bit() returns the bit number of the last set bit,
	 * or the bitmap size if no bit is set.
	 */
	while (find_last_bit(tx->buf_pool_bmp,
			     NUM_TX_DESCS) != NUM_TX_DESCS) {
		count++;

		if (count >= TX_COMPLETE_TIMEOUT_TICKS) {
			RPU_DEBUG_TX("%s-UMACTX:WARNING: ", priv->name);
			RPU_DEBUG_TX("TX complete failed!!\n");
			RPU_DEBUG_TX("%s-UMACTX:After ", priv->name);
			RPU_DEBUG_TX("%ld: bitmap is: 0x%lx\n",
				     TX_COMPLETE_TIMEOUT_TICKS,
				     tx->buf_pool_bmp[0]);
			break;
		}
	}

	if (count && (count < TX_COMPLETE_TIMEOUT_TICKS)) {
		RPU_DEBUG_TX("%s-UMACTX:TX complete after %d timer ticks\n",
			     priv->name, count);
	}
}

int tx_queue_map(int queue)
{
	unsigned int ac[4] = {LMAC_AC_VO, LMAC_AC_VI, LMAC_AC_BE, LMAC_AC_BK};

	if (queue < 4)
		return ac[queue];

	return LMAC_AC_VO;
}

int tx_queue_unmap(int queue)
{
	unsigned int ac[4] = {3, 2, 1, 0};

	return ac[queue];
}

int check_80211_aggregation(struct sk_buff_head *txq,
			    struct img_priv *priv,
			    struct sk_buff *skb,
			    int ac,
			    int peer_id)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *mac_hdr = NULL, *mac_hdr_first = NULL;
	struct sk_buff *skb_first;
	struct sk_buff_head *pend_pkt_q = NULL;
	struct tx_config *tx = &priv->tx;
	bool ampdu = false, is_qos = false, addr = true;

	mac_hdr = (struct ieee80211_hdr *)skb->data;
	pend_pkt_q = &tx->pending_pkt[peer_id][ac];

#ifdef ENABLE_COALESCING_TX_PKTS
	if (txq == NULL)
		return 1;

	skb_first = skb_peek(txq);
	if (skb_first == NULL)
		return 1;
#else /* ENABLE_COALESCING_TX_PKTS */
	skb_first = skb_peek(pend_pkt_q);
#endif /* ENABLE_COALESCING_TX_PKTS */

	if (skb_first)
		mac_hdr_first = (struct ieee80211_hdr *)skb_first->data;

	ampdu = (tx_info->flags & IEEE80211_TX_CTL_AMPDU) ? true : false;
	is_qos = ieee80211_is_data_qos(mac_hdr->frame_control);

	/* The RPU has a limitation: it expects A1/A2/A3 to be the same
	 * for all MPDUs within an AMPDU. This is a temporary workaround;
	 * remove it once the RPU is fixed.
	 */
	if (skb_first &&
	    ((!ether_addr_equal(mac_hdr->addr1, mac_hdr_first->addr1)) ||
	     (!ether_addr_equal(mac_hdr->addr2, mac_hdr_first->addr2)) ||
	     (!ether_addr_equal(mac_hdr->addr3, mac_hdr_first->addr3))))
		addr = false;

	/* Stats and debug */
	if (!is_qos) {
		RPU_DEBUG_TX("Not QoS\n");
		priv->stats->tx_noagg_not_qos++;
	} else if (!ampdu) {
		RPU_DEBUG_TX("Not AMPDU\n");
		priv->stats->tx_noagg_not_ampdu++;
	} else if (!addr) {
		if (skb_first) {
			RPU_DEBUG_TX("first: A1: %pM-A2:%pM-A3:%pM not same\n",
				     mac_hdr_first->addr1,
				     mac_hdr_first->addr2,
				     mac_hdr_first->addr3);
			RPU_DEBUG_TX("curr: A1: %pM-A2:%pM-A3:%pM not same\n",
				     mac_hdr->addr1,
				     mac_hdr->addr2,
				     mac_hdr->addr3);
		}
		priv->stats->tx_noagg_not_addr++;
	}

	return (ampdu && is_qos && addr);
}
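/*
 * Illustrative only (not part of the driver): a minimal self-check for the
 * mapping helpers above.  It assumes, as the tables in tx_queue_map() and
 * tx_queue_unmap() suggest, that LMAC_AC_BK..LMAC_AC_VO equal 0..3, so that
 * mapping a mac80211 queue (0=VO, 1=VI, 2=BE, 3=BK) to an LMAC AC and back
 * is the identity.
 */
static inline bool tx_queue_maps_are_inverse(void)
{
	int q;

	for (q = 0; q < 4; q++)
		if (tx_queue_unmap(tx_queue_map(q)) != q)
			return false;

	return true;
}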
void tx_status(struct sk_buff *skb,
	       struct lmac_event_tx_done *tx_done,
	       unsigned int frame_idx,
	       struct img_priv *priv,
	       struct ieee80211_tx_info *tx_info_1st_mpdu)
{
	int index;
	struct ieee80211_tx_rate *txrate;
	struct ieee80211_tx_rate *tx_inf_rate = NULL;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_supported_band *band = NULL;
	struct umac_vif *uvif = NULL;
	int ret = 0;
	unsigned char tx_done_frm_status;
	unsigned char tx_done_retries_num;
	unsigned char tx_done_rate;

	uvif = (struct umac_vif *)(tx_info->control.vif->drv_priv);

	/* Just inform mac80211; it will free the skb */
	if (tx_done->frm_status[frame_idx] == TX_DONE_STAT_DISCARD) {
		ieee80211_free_txskb(priv->hw, skb);
		priv->stats->tx_dones_to_stack++;
		return;
	}

	tx_done_frm_status = tx_done->frm_status[frame_idx];
	tx_done_retries_num = tx_done->retries_num[frame_idx] + 1;
	tx_done_rate = tx_done->rate[frame_idx];

	/* Rate info will be retained, except the count */
	/* ieee80211_tx_info_clear_status(tx_info); */

	if (tx_done_frm_status == TX_DONE_STAT_SUCCESS)
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	else if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	tx_info->flags &= ~IEEE80211_TX_STAT_AMPDU;
	tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	band = priv->hw->wiphy->bands[tx_info->band];

	for (index = 0; index < 4; index++) {
		tx_inf_rate = &tx_info->status.rates[index];

		/* Populate tx_info based on the 1st MPDU in an AMPDU */
		txrate = (&tx_info_1st_mpdu->control.rates[index]);

		if (txrate->idx < 0)
			break;

		if (tx_done_retries_num > txrate->count) {
			tx_inf_rate->count = txrate->count;
			tx_done_retries_num -= txrate->count;
		} else {
			tx_inf_rate->count = tx_done_retries_num;
			if (tx_inf_rate->count == 0)
				tx_inf_rate->count = 1;
			break;
		}
	}

	/* Invalidate the remaining indices */
	while ((index + 1) < 4) {
		tx_info->status.rates[index + 1].idx = -1;
		tx_info->status.rates[index + 1].count = 0;
		index++;
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) &&
	    (atomic_dec_return(&priv->roc_params.roc_mgmt_tx_count) == 0)) {
		RPU_DEBUG_ROC("%s:%d TXDONE Frame: %d\n",
			      __func__, __LINE__,
			      atomic_read(&priv->roc_params.roc_mgmt_tx_count));
		if (priv->roc_params.roc_in_progress &&
		    priv->roc_params.roc_type == ROC_TYPE_OFFCHANNEL_TX) {
			CALL_RPU(rpu_prog_roc, ROC_STOP, 0, 0, 0);
			RPU_DEBUG_ROC("%s:%d", __func__, __LINE__);
			RPU_DEBUG_ROC("all offchan pending frames cleared\n");
		}
	}

	priv->stats->tx_dones_to_stack++;
prog_rpu_fail:
	return;
}

int get_token(struct img_priv *priv,
	      int queue)
{
	int cnt = 0;
	int curr_bit = 0;
	int pool_id = 0;
	int token_id = NUM_TX_DESCS;
	struct tx_config *tx = &priv->tx;

	/* First search for a reserved token */
	for (cnt = 0; cnt < NUM_TX_DESCS_PER_AC; cnt++) {
		curr_bit = ((queue + (NUM_ACS * cnt)) % TX_DESC_BUCKET_BOUND);
		pool_id = ((queue + (NUM_ACS * cnt)) / TX_DESC_BUCKET_BOUND);

		if (!test_and_set_bit(curr_bit, &tx->buf_pool_bmp[pool_id])) {
			token_id = queue + (NUM_ACS * cnt);
			tx->outstanding_tokens[queue]++;
			break;
		}
	}

	/* If a reserved token is not found, search for a spare token
	 * (only for non-beacon queues)
	 */
	if ((cnt == NUM_TX_DESCS_PER_AC) && (queue != LMAC_AC_BCN)) {
		for (token_id = NUM_TX_DESCS_PER_AC * NUM_ACS;
		     token_id < NUM_TX_DESCS;
		     token_id++) {
			curr_bit = (token_id % TX_DESC_BUCKET_BOUND);
			pool_id = (token_id / TX_DESC_BUCKET_BOUND);

			if (!test_and_set_bit(curr_bit,
					      &tx->buf_pool_bmp[pool_id])) {
				tx->outstanding_tokens[queue]++;
				break;
			}
		}
	}

	return token_id;
}
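/*
 * Illustrative only: how get_token() lays tokens out.  Queue q owns the
 * reserved token ids q, q + NUM_ACS, ..., q + NUM_ACS *
 * (NUM_TX_DESCS_PER_AC - 1); ids from NUM_ACS * NUM_TX_DESCS_PER_AC up to
 * NUM_TX_DESCS - 1 are spares shared by the non-beacon queues.  Each id
 * maps to one bit of buf_pool_bmp[], folded into buckets of
 * TX_DESC_BUCKET_BOUND bits, exactly as below.
 */
static inline void token_to_bmp_pos(int token_id, int *pool_id, int *bit)
{
	*bit = token_id % TX_DESC_BUCKET_BOUND;
	*pool_id = token_id / TX_DESC_BUCKET_BOUND;
}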
void free_token(struct img_priv *priv,
		int token_id,
		int queue)
{
	struct tx_config *tx = &priv->tx;
	int bit = -1;
	int pool_id = -1;
	int test = 0;
	unsigned int old_token = tx->outstanding_tokens[queue];

	bit = (token_id % TX_DESC_BUCKET_BOUND);
	pool_id = (token_id / TX_DESC_BUCKET_BOUND);

	__clear_bit(bit, &tx->buf_pool_bmp[pool_id]);

	tx->outstanding_tokens[queue]--;

	test = tx->outstanding_tokens[queue];
	if (WARN_ON_ONCE(test < 0 || test > 4))
		RPU_DEBUG_TX("%s: invalid outstanding_tokens: %d, old: %d\n",
			     __func__, test, old_token);
}

struct curr_peer_info get_curr_peer_opp(struct img_priv *priv,
					int ac)
{
	unsigned int curr_peer_opp = 0;
	unsigned int curr_vif_op_chan = LMAC_VIF_CHANCTX_TYPE_OPER;
	unsigned int i = 0;
	struct tx_config *tx = NULL;
	unsigned int init_peer_opp = 0;
	struct curr_peer_info peer_info;
	unsigned int pend_q_len;
	struct sk_buff_head *pend_q = NULL;

	tx = &priv->tx;
	init_peer_opp = tx->curr_peer_opp[ac];

	/* TODO: Optimize this loop for the beacon queue */
	for (i = 0; i < MAX_PEND_Q_PER_AC; i++) {
		curr_peer_opp = (init_peer_opp + i) % MAX_PEND_Q_PER_AC;

		pend_q = &tx->pending_pkt[curr_peer_opp][ac];
		pend_q_len = skb_queue_len(pend_q);

		if (pend_q_len) {
			tx->curr_peer_opp[ac] =
				(curr_peer_opp + 1) % MAX_PEND_Q_PER_AC;
			break;
		}
	}

	if (i == MAX_PEND_Q_PER_AC) {
		peer_info.id = -1;
		peer_info.op_chan_idx = -1;
	} else {
		peer_info.id = curr_peer_opp;
		peer_info.op_chan_idx = curr_vif_op_chan;
		RPU_DEBUG_TX("%s: Queue: %d Peer: %d op_chan: %d ",
			     __func__, ac, curr_peer_opp, curr_vif_op_chan);
		RPU_DEBUG_TX("Pending: %d\n", pend_q_len);
	}

	return peer_info;
}

int rpu_tx_proc_pend_frms(struct img_priv *priv,
			  int ac,
			  int token_id)
{
	struct tx_config *tx = &priv->tx;
	unsigned long ampdu_len = 0;
	struct sk_buff *loop_skb = NULL;
	struct sk_buff *tmp = NULL;
	struct ieee80211_hdr *mac_hdr = NULL;
	struct ieee80211_tx_info *tx_info = NULL;
	struct umac_vif *uvif = NULL;
	struct ieee80211_vif *ivif = NULL;
	unsigned char *data = NULL;
	unsigned int max_tx_cmds = priv->params->max_tx_cmds;
	struct sk_buff_head *txq = NULL;
	struct sk_buff_head *pend_pkt_q = NULL;
	unsigned int total_pending_processed = 0;
	int pend_pkt_q_len = 0;
	struct curr_peer_info peer_info;
	int loop_cnt = 0;
	struct tx_pkt_info *pkt_info = NULL;
#ifdef ENABLE_COALESCING_TX_PKTS
	unsigned int txq_pkt_count = 0;
#endif /* ENABLE_COALESCING_TX_PKTS */

	peer_info = get_curr_peer_opp(priv, ac);

	/* No pending frames for any peer in this AC */
	if (peer_info.id == -1)
		return 0;

	pend_pkt_q = &tx->pending_pkt[peer_info.id][ac];

	pkt_info = &priv->tx.pkt_info[token_id];
	txq = &pkt_info->pkt;

	/* Aggregate only MPDUs with the same RA, rate, rate flags
	 * and Tx info flags
	 */
	skb_queue_walk_safe(pend_pkt_q, loop_skb, tmp) {
		data = loop_skb->data;
		mac_hdr = (struct ieee80211_hdr *)data;

		tx_info = IEEE80211_SKB_CB(loop_skb);

		ivif = tx_info->control.vif;
		uvif = (struct umac_vif *)(ivif->drv_priv);

		ampdu_len += loop_skb->len;

		if (vht_support && !loop_cnt)
			if ((tx_info->control.rates[0].flags &
			     IEEE80211_TX_RC_MCS) &&
			    max_tx_cmds > MAX_SUBFRAMES_IN_AMPDU_HT)
				max_tx_cmds = MAX_SUBFRAMES_IN_AMPDU_HT;

		if (!check_80211_aggregation(txq, priv, loop_skb,
					     ac, peer_info.id) ||
		    skb_queue_len(txq) >= max_tx_cmds)
			break;

#ifdef ENABLE_COALESCING_TX_PKTS
		txq_pkt_count += img_data_get_num_tx_buff_pkts(loop_skb);
		if (txq_pkt_count > max_tx_cmds)
			break;
#endif /* ENABLE_COALESCING_TX_PKTS */

		loop_cnt++;
		__skb_unlink(loop_skb, pend_pkt_q);
		skb_queue_tail(txq, loop_skb);
	}

	/* If our criterion rejects all pending frames, or pend_q is
	 * empty, send only one frame
	 */
	if (!skb_queue_len(txq))
		skb_queue_tail(txq, skb_dequeue(pend_pkt_q));

	total_pending_processed = skb_queue_len(txq);

	pend_pkt_q_len = skb_queue_len(pend_pkt_q);
	if ((ac != LMAC_AC_BCN) &&
	    (tx->queue_stopped_bmp & (1 << ac)) &&
	    pend_pkt_q_len < (MAX_TX_QUEUE_LEN / 2)) {
		ieee80211_wake_queue(priv->hw, tx_queue_unmap(ac));
		tx->queue_stopped_bmp &= ~(1 << (ac));
	}

	pkt_info->peer_id = peer_info.id;

	RPU_DEBUG_TX("%s-UMACTX: token_id: %d ", priv->name, token_id);
	RPU_DEBUG_TX("total_pending_packets_process: %d\n",
		     skb_queue_len(txq));

	return total_pending_processed;
}
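/*
 * Illustrative only: a minimal model of the per-AC round-robin performed by
 * get_curr_peer_opp().  'qlen' stands in for the per-peer pending queue
 * lengths, 'start' for the saved tx->curr_peer_opp[ac].  It returns the
 * first peer with pending traffic at or after the cursor (advancing the
 * cursor past it, so peers are served fairly), or -1 if nothing is pending.
 */
static inline int pick_next_peer(const unsigned int *qlen, unsigned int n,
				 unsigned int *start)
{
	unsigned int i, peer;

	for (i = 0; i < n; i++) {
		peer = (*start + i) % n;
		if (qlen[peer]) {
			*start = (peer + 1) % n;
			return peer;
		}
	}

	return -1;
}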
int rpu_tx_alloc_token(struct img_priv *priv,
		       int ac,
		       int peer_id,
		       struct sk_buff *skb)
{
	int token_id = NUM_TX_DESCS;
	struct tx_config *tx = &priv->tx;
	struct sk_buff_head *pend_pkt_q = NULL;
	unsigned int pkts_pend = 0;
	struct ieee80211_tx_info *tx_info;

	spin_lock_bh(&tx->lock);

	pend_pkt_q = &tx->pending_pkt[peer_id][ac];

	RPU_DEBUG_TX("%s-UMACTX:Alloc buf Req q = %d\n", priv->name, ac);
	RPU_DEBUG_TX("peerid: %d,\n", peer_id);

	/* Queue the frame to the pending frames queue */
	skb_queue_tail(pend_pkt_q, skb);

	tx_info = IEEE80211_SKB_CB(skb);

	/* Stop TX traffic once we have reached the queueing limit.
	 *
	 * We don't do this for the ROC queue: otherwise, while off channel,
	 * heavy operating-channel traffic on the shared ROC queue (currently
	 * VO) would block ROC traffic too.
	 */
	if (skb_queue_len(pend_pkt_q) >= MAX_TX_QUEUE_LEN) {
		if ((!priv->roc_params.roc_in_progress) ||
		    (priv->roc_params.roc_in_progress &&
		     (ac != UMAC_ROC_AC))) {
			ieee80211_stop_queue(priv->hw,
					     skb->queue_mapping);
			tx->queue_stopped_bmp |= (1 << ac);
		}
	}

	token_id = get_token(priv, ac);

	RPU_DEBUG_TX("%s-UMACTX:Alloc buf Result *id= %d q = %d out_tok: %d",
		     priv->name, token_id, ac, tx->outstanding_tokens[ac]);
	RPU_DEBUG_TX(", peerid: %d,\n", peer_id);

	if (token_id == NUM_TX_DESCS)
		goto out;

	pkts_pend = rpu_tx_proc_pend_frms(priv, ac, token_id);

	/* We have just added a frame to the pending queue, but the channel
	 * context did not match, so no frame was picked up.
	 */
	if (!pkts_pend) {
		free_token(priv, token_id, ac);
		token_id = NUM_TX_DESCS;
	}

out:
	spin_unlock_bh(&tx->lock);

	RPU_DEBUG_TX("%s-UMACTX:Alloc buf Result *id= %d out_tok:%d\n",
		     priv->name, token_id, tx->outstanding_tokens[ac]);

	/* If a token is available, just return its id; the list will
	 * be sent.
	 */
	return token_id;
}
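/*
 * Illustrative only: the flow-control hysteresis implied by the two sites
 * above.  rpu_tx_alloc_token() stops a mac80211 queue once its pending list
 * reaches MAX_TX_QUEUE_LEN, while rpu_tx_proc_pend_frms() wakes it only
 * after the list drains below half of that, so the stack is not toggled on
 * every frame.
 */
static inline bool tx_pend_q_should_stop(unsigned int qlen)
{
	return qlen >= MAX_TX_QUEUE_LEN;
}

static inline bool tx_pend_q_may_wake(unsigned int qlen)
{
	return qlen < (MAX_TX_QUEUE_LEN / 2);
}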
#ifdef ENABLE_COALESCING_TX_PKTS
int rpu_are_all_tx_dones_rcvd(struct img_priv *priv,
			      int queue,
			      unsigned int desc_id,
			      struct sk_buff *skb)
{
	int cnt;
	int deferred_desc_id;
	struct sk_buff_head *txq;
	struct tx_pkt_info *deferred_pkt_info;

	priv->tx.pkt_info[desc_id].deferred_skb = NULL;

	if (img_data_are_all_tx_dons_rcvd(skb) == 1)
		return 1;

	/* Not all tx dones have been received yet, so wait for the rest.
	 * Keep the skb in the txq of the descriptor from which it was
	 * partially sent; it will be processed once its tx dones arrive.
	 */

	/* Find the descriptor the skb was deferred to.  First check the
	 * reserved descriptors of this queue.
	 */
	for (cnt = 0; cnt < NUM_TX_DESCS_PER_AC; cnt++) {
		deferred_desc_id = queue + (NUM_ACS * cnt);
		deferred_pkt_info = &priv->tx.pkt_info[deferred_desc_id];
		if (deferred_pkt_info->deferred_skb == skb)
			break;
	}

	/* Then check the spare descriptors (non-beacon queues only) */
	if ((cnt == NUM_TX_DESCS_PER_AC) && (queue != LMAC_AC_BCN)) {
		for (deferred_desc_id = NUM_TX_DESCS_PER_AC * NUM_ACS;
		     deferred_desc_id < NUM_TX_DESCS;
		     deferred_desc_id++) {
			deferred_pkt_info =
				&priv->tx.pkt_info[deferred_desc_id];
			if (deferred_pkt_info->deferred_skb == skb)
				break;
		}
	}

	if (deferred_desc_id != NUM_TX_DESCS) {
		txq = &deferred_pkt_info->pkt;
		skb_queue_tail(txq, skb);
		return 0;
	}

	pr_err("ERR: all tx dones not rcvd but skb is not in deferred list, queue: %d desc_id: %d skb: %p\n",
	       queue, desc_id, skb);

	return 1;
}
#endif /* ENABLE_COALESCING_TX_PKTS */
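/*
 * Illustrative only: the bookkeeping rpu_are_all_tx_dones_rcvd() depends
 * on, modeled with a hypothetical counter (the real state lives behind
 * img_data_are_all_tx_dons_rcvd()).  A coalesced skb that was split across
 * N tx buffers may be handed to mac80211 only after the N-th tx done for
 * it has arrived.
 */
struct coalesced_tx_state {
	unsigned int bufs_total;	/* buffers the skb was split into */
	unsigned int dones_rcvd;	/* tx dones received so far */
};

static inline bool coalesced_tx_complete(struct coalesced_tx_state *st)
{
	return ++st->dones_rcvd >= st->bufs_total;
}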
int rpu_tx_free_buff_req(struct img_priv *priv,
			 struct lmac_event_tx_done *tx_done,
			 unsigned char *ac,
			 int *vif_index_bitmap)
{
	int i = 0;
	unsigned int pkts_pend = 0;
	struct tx_config *tx = &priv->tx;
	struct ieee80211_hdr *mac_hdr;
	struct ieee80211_tx_info *tx_info_bcn;
	struct ieee80211_tx_info *tx_info_1st_mpdu;
	struct sk_buff *skb, *tmp, *skb_first = NULL;
	struct sk_buff_head *skb_list, tx_done_list;
	int vif_index = -1;
	unsigned int pkt = 0;
	int cnt = 0;
	unsigned int desc_id = tx_done->descriptor_id;
	struct umac_vif *uvif = NULL;
	struct ieee80211_vif *ivif = NULL;
	unsigned long bcn_int = 0;
	struct tx_pkt_info *pkt_info = NULL;
	int start_ac, end_ac;
	int tid;
	struct sk_buff_head ieee80211_tx_status_call_list;

	skb_queue_head_init(&ieee80211_tx_status_call_list);
	skb_queue_head_init(&tx_done_list);
	update_jiffie();
	spin_lock_bh(&tx->lock);

	pkt_info = &priv->tx.pkt_info[desc_id];

	RPU_DEBUG_TX("%s-UMACTX:Free buf Req q = %d",
		     priv->name, tx_done->queue);
	RPU_DEBUG_TX(", desc_id: %d out_tok: %d\n",
		     desc_id, priv->tx.outstanding_tokens[tx_done->queue]);

#ifdef ENABLE_COALESCING_TX_PKTS
	for (pkt = 0; pkt < pkt_info->tx_buff_pkt_count; pkt++) {
		img_data_rcvd_tx_done(pkt_info->tx_buff_pkt_ref[pkt],
				      tx_done->frm_status[pkt],
				      tx_done->retries_num[pkt],
				      tx_done->rate[pkt],
				      pkt,
				      tx_done->timeStamp_t1,
				      tx_done->timeStamp_t4);
	}

	/* Sometimes a tx done arrives twice; reset the count here to
	 * guard against processing it again.
	 */
	pkt_info->tx_buff_pkt_count = 0;
#endif /* ENABLE_COALESCING_TX_PKTS */

	/* Defer Tx Done processing */
	skb_list = &priv->tx.pkt_info[desc_id].pkt;

	if (skb_queue_len(skb_list)) {
		/* Cut the list over to a local one; tx_pkt will be
		 * re-initialized
		 */
		skb_queue_splice_tail_init(skb_list, &tx_done_list);
	} else {
		RPU_DEBUG_TX("%s-UMACTX:Got Empty List: list_addr: %p\n",
			     priv->name, skb_list);
	}

	if (desc_id < (NUM_TX_DESCS_PER_AC * NUM_ACS)) {
		/* Reserved token */
		start_ac = end_ac = tx_done->queue;
	} else {
		/* Spare token: loop through all ACs */
		start_ac = LMAC_AC_VO;
		end_ac = LMAC_AC_BK;
	}

	for (cnt = start_ac; cnt >= end_ac; cnt--) {
		pkts_pend = rpu_tx_proc_pend_frms(priv, cnt, desc_id);

		if (pkts_pend) {
			*ac = cnt;

			/* Spare token case: adjust the counters */
			if (tx_done->queue != *ac) {
				tx->outstanding_tokens[tx_done->queue]--;
				tx->outstanding_tokens[*ac]++;
			}
			break;
		}
	}

	/* Mark the token as available */
	if (!pkts_pend)
		free_token(priv, desc_id, tx_done->queue);

	/* Unlock: give Tx a chance to add to the pending lists */
	spin_unlock_bh(&tx->lock);

	/* Protection from mac80211 _ops, especially stop */
	if (priv->state != STARTED)
		goto out;

	if (!skb_queue_len(&tx_done_list))
		goto out;

	skb_first = skb_peek(&tx_done_list);

	tx_info_1st_mpdu =
		(struct ieee80211_tx_info *)IEEE80211_SKB_CB(skb_first);

	pkt = 0;

	skb_queue_walk_safe(&tx_done_list, skb, tmp) {
		__skb_unlink(skb, &tx_done_list);

		if (!skb)
			continue;

		/* In the Tx path we move the .11 hdr from the skb to CMD_TX,
		 * hence we push it back here; not required for the loopback
		 * case.
		 */
		skb_push(skb,
			 priv->tx.pkt_info[tx_done->descriptor_id].hdr_len);
		mac_hdr = (struct ieee80211_hdr *)(skb->data);

		if (ieee80211_is_data_qos(mac_hdr->frame_control) &&
		    (priv->vifs[0]->type == NL80211_IFTYPE_STATION) &&
		    (priv->vifs[0]->bss_conf.he_support == 1)) {
			tid = ieee80211_get_tid(mac_hdr);
			img_set_buffer_status(skb, tid, 1,
					      priv->hw->twt_enable);
		}

		if (!ieee80211_is_beacon(mac_hdr->frame_control)) {
			vif_index = vif_addr_to_index(mac_hdr->addr2, priv);
			if (vif_index > -1)
				*vif_index_bitmap |= (1 << vif_index);

			/* Same rate info for all packets */
			tx_status(skb, tx_done, pkt, priv, tx_info_1st_mpdu);
			skb_queue_tail(&ieee80211_tx_status_call_list, skb);
		} else {
			struct ieee80211_bss_conf *bss_conf;
			bool bcn_status;

			if (tx_done->frm_status[pkt] ==
			    TX_DONE_STAT_DISCARD_BCN) {
				/* We did not send the beacon */
				priv->tx_last_beacon = 0;
			} else if (tx_done->frm_status[pkt] ==
				   TX_DONE_STAT_SUCCESS) {
				/* We did send the beacon */
				priv->tx_last_beacon = 1;
			}

			tx_info_bcn = IEEE80211_SKB_CB(skb);
			ivif = tx_info_bcn->control.vif;
			uvif = (struct umac_vif *)(ivif->drv_priv);

			bss_conf = &uvif->vif->bss_conf;
			bcn_status = bss_conf->enable_beacon;
			bcn_int = bss_conf->beacon_int - 10;
			bcn_int = msecs_to_jiffies(bcn_int);

			for (i = 0; i < MAX_VIFS; i++) {
				if ((priv->active_vifs & (1 << i)) &&
				    (priv->vifs[i] == ivif) &&
				    (bcn_status == true)) {
					uvif->bcn_timer.function(
						(struct timer_list *)
						uvif->bcn_timer.data);
				}
			}

			dev_kfree_skb_any(skb);
		}

		pkt++;
	}

	while ((skb = skb_dequeue(&ieee80211_tx_status_call_list))) {
#ifdef ENABLE_COALESCING_TX_PKTS
		if (rpu_are_all_tx_dones_rcvd(priv, tx_done->queue,
					      tx_done->descriptor_id, skb))
			ieee80211_tx_status(priv->hw, skb);
#else /* ENABLE_COALESCING_TX_PKTS */
		ieee80211_tx_status(priv->hw, skb);
#endif /* ENABLE_COALESCING_TX_PKTS */
	}

out:
	return pkts_pend;
}
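/*
 * Illustrative only: the lock/splice pattern rpu_tx_free_buff_req() relies
 * on.  The descriptor's skb list is cut to a private list while the lock is
 * held, then walked without the lock, so ieee80211_tx_status() and friends
 * run without blocking new transmissions.  'process' is a hypothetical
 * per-skb handler standing in for the tx-status work above.
 */
static inline void drain_locked(spinlock_t *lock, struct sk_buff_head *src,
				void (*process)(struct sk_buff *skb))
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	__skb_queue_head_init(&local);

	spin_lock_bh(lock);
	skb_queue_splice_tail_init(src, &local);
	spin_unlock_bh(lock);

	while ((skb = __skb_dequeue(&local)) != NULL)
		process(skb);
}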
void rpu_tx_init(struct img_priv *priv)
{
	int i = 0;
	int j = 0;
	struct tx_config *tx = &priv->tx;

	memset(&tx->buf_pool_bmp,
	       0,
	       sizeof(long) *
	       ((NUM_TX_DESCS / TX_DESC_BUCKET_BOUND) + 1));

	tx->queue_stopped_bmp = 0;
	tx->next_spare_token_ac = LMAC_AC_BE;

	for (i = 0; i < NUM_ACS; i++) {
		for (j = 0; j < MAX_PEND_Q_PER_AC; j++)
			skb_queue_head_init(&tx->pending_pkt[j][i]);

		tx->outstanding_tokens[i] = 0;
	}

	for (i = 0; i < NUM_TX_DESCS; i++)
		skb_queue_head_init(&tx->pkt_info[i].pkt);

	for (j = 0; j < NUM_ACS; j++)
		tx->curr_peer_opp[j] = 0;

	spin_lock_init(&tx->lock);
	ieee80211_wake_queues(priv->hw);

	RPU_DEBUG_TX("%s-UMACTX: initialization successful\n",
		     TX_TO_MACDEV(tx)->name);
}

void rpu_tx_deinit(struct img_priv *priv)
{
	int i = 0;
	int j = 0;
	struct tx_config *tx = &priv->tx;
	struct sk_buff *skb = NULL;
	struct sk_buff_head *pend_q = NULL;

	ieee80211_stop_queues(priv->hw);

	wait_for_tx_complete(tx);

	spin_lock_bh(&tx->lock);

	for (i = 0; i < NUM_TX_DESCS; i++) {
		while ((skb = skb_dequeue(&tx->pkt_info[i].pkt)) != NULL)
			dev_kfree_skb_any(skb);
	}

	for (i = 0; i < NUM_ACS; i++) {
		for (j = 0; j < MAX_PEND_Q_PER_AC; j++) {
			pend_q = &tx->pending_pkt[j][i];

			while ((skb = skb_dequeue(pend_q)) != NULL)
				dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_bh(&tx->lock);

	RPU_DEBUG_TX("%s-UMACTX: deinitialization successful\n",
		     TX_TO_MACDEV(tx)->name);
}

int __rpu_tx_frame(struct img_priv *priv,
		   unsigned int queue,
		   unsigned int token_id,
		   unsigned int more_frames,
		   bool retry)
{
	struct lmac_event_tx_done tx_done;
	struct sk_buff_head *txq = NULL;
	int ret = 0;
	int pkt = 0;

	ret = rpu_prog_tx(queue, more_frames, token_id, retry);

	if (ret < 0) {
		pr_err("%s-UMACTX: Unable to send frame, dropping ..%d\n",
		       priv->name, ret);

		tx_done.descriptor_id = token_id;
		tx_done.queue = queue;

		txq = &priv->tx.pkt_info[token_id].pkt;

		for (pkt = 0; pkt < skb_queue_len(txq); pkt++) {
			tx_done.frm_status[pkt] = TX_DONE_STAT_ERR_RETRY_LIM;
			tx_done.rate[pkt] = 0;
		}

		rpu_tx_complete(&tx_done, priv);
	}

	return ret;
}

void prepare_hdr_ru_transmission(struct img_priv *priv, struct sk_buff *skb)
{
	unsigned long host_tx_jiffie_value;
	u8 *newhdr;
	unsigned int hdrlen;
	struct ieee80211_hdr *ieee80211_hdr;

	ieee80211_hdr = (struct ieee80211_hdr *)skb->data;

	if ((priv->vifs[0]->type == NL80211_IFTYPE_STATION) &&
	    (priv->vifs[0]->bss_conf.he_support == 1)) {
		host_tx_jiffie_value = jiffies;
		if (priv->hw->twt_trigger_enable != 1) {
			/* Keep RU TX enabled only if a trigger was seen
			 * within the last 100 jiffies.
			 */
			if (!(priv->ru_tx &&
			      (host_tx_jiffie_value -
			       priv->rx_trigger_jiffie_value) <= 100))
				priv->ru_tx = 0;
		}
	}

	if (priv->ru_tx == 1) {
		if (ieee80211_is_data_qos(ieee80211_hdr->frame_control) &&
		    !is_broadcast_ether_addr(ieee80211_hdr->addr3)) {
			/* Make room for the HT Control field and set the
			 * Order bit to signal its presence.
			 */
			hdrlen =
			    ieee80211_hdrlen(ieee80211_hdr->frame_control);
			newhdr = skb_push(skb, IEEE80211_HT_CTL_LEN);
			memmove(newhdr, newhdr + IEEE80211_HT_CTL_LEN, hdrlen);
			ieee80211_hdr = (struct ieee80211_hdr *)newhdr;
			ieee80211_hdr->frame_control |=
				cpu_to_le16(IEEE80211_FCTL_ORDER);
		}
	}
}
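/*
 * Illustrative only: the generic "grow the 802.11 header at the front"
 * pattern used by prepare_hdr_ru_transmission() above.  skb_push() claims
 * 'extra' bytes of headroom (the skb is assumed to have that much), and
 * memmove() slides the original hdrlen bytes down so the new room sits
 * after the existing header fields, where fields such as HT Control live.
 * Returns the new header start.
 */
static inline u8 *grow_80211_hdr_tail(struct sk_buff *skb,
				      unsigned int hdrlen,
				      unsigned int extra)
{
	u8 *newhdr = skb_push(skb, extra);

	memmove(newhdr, newhdr + extra, hdrlen);
	return newhdr;
}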
int umac_softmac_prepare_mac80211_hdrs(struct sk_buff *skb);

int rpu_tx_frame(struct sk_buff *skb,
		 struct ieee80211_sta *sta,
		 struct img_priv *priv,
		 bool bcast)
{
	unsigned int queue = 0;
	unsigned int token_id = 0;
	unsigned int more_frames = 0;
	int ret = 0;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *mac_hdr = NULL;
	struct umac_vif *uvif = NULL;
	struct umac_sta *usta = NULL;
	int peer_id = -1;

	prepare_hdr_ru_transmission(priv, skb);
	umac_softmac_prepare_mac80211_hdrs(skb);
	uvif = (struct umac_vif *)(tx_info->control.vif->drv_priv);
	mac_hdr = (struct ieee80211_hdr *)(skb->data);

	if (sta) {
		usta = (struct umac_sta *)sta->drv_priv;
		peer_id = usta->index;
	} else {
		peer_id = MAX_PEERS + uvif->vif_index;
	}

	if (bcast == false) {
		queue = tx_queue_map(skb->queue_mapping);
		more_frames = 0;
	} else {
		queue = LMAC_AC_BCN;
		/* Hack: skb->priority is used to indicate more frames */
		more_frames = skb->priority;
	}

	if (!ieee80211_is_beacon(mac_hdr->frame_control))
		priv->stats->tx_cmds_from_stack++;

	if (priv->params->production_test == 1)
		tx_info->flags |= IEEE80211_TX_CTL_AMPDU;

	RPU_DEBUG_TX("%s-UMACTX:%s:%d ",
		     priv->name, __func__, __LINE__);
	RPU_DEBUG_TX("Wait Alloc:queue: %d qmap: %d is_bcn: %d bcast:%d\n",
		     queue, skb->queue_mapping,
		     ieee80211_is_beacon(mac_hdr->frame_control),
		     is_multicast_ether_addr(mac_hdr->addr1) ? true : false);

	token_id = rpu_tx_alloc_token(priv, queue, peer_id, skb);

	/* The frame was unable to find a reserved token */
	if (token_id == NUM_TX_DESCS) {
		RPU_DEBUG_TX("%s-UMACTX:%s:%d Token Busy Queued:\n",
			     priv->name, __func__, __LINE__);
		return NETDEV_TX_OK;
	}

	ret = __rpu_tx_frame(priv, queue, token_id, more_frames, 0);

	return NETDEV_TX_OK;
}

void rpu_tx_complete(struct lmac_event_tx_done *tx_done,
		     void *context)
{
	struct img_priv *priv = (struct img_priv *)context;
	unsigned int more_frames = 0;
	int vif_index = 0, vif_index_bitmap = 0, ret = 0;
	unsigned int pkts_pending = 0;
	unsigned char queue = 0;
	struct lmac_event_noa noa_event;
	int token_id = 0;
	int qlen = 0;

	IMG_TX_DBG_PARAM_INCR(tx_dones_from_lmac, 1);
	token_id = tx_done->descriptor_id;

	if (token_id < 0 || token_id >= NUM_TX_DESCS) {
		RPU_DEBUG_TX("%s:%d Invalid token_id: %d\n",
			     __func__, __LINE__, token_id);
		RPU_DEBUG_DUMP_TX(DUMP_PREFIX_NONE, 16, 1, tx_done,
				  sizeof(struct lmac_event_tx_done), 1);
		return;
	}

	qlen = skb_queue_len(&priv->tx.pkt_info[token_id].pkt);

	RPU_DEBUG_TX("%s-UMACTX:TX Done Rx for desc_id: %d",
		     priv->name, tx_done->descriptor_id);
	RPU_DEBUG_TX("Q: %d qlen: %d status: %d out_tok: %d\n",
		     tx_done->queue, qlen, tx_done->frm_status[0],
		     priv->tx.outstanding_tokens[tx_done->queue]);

	pkts_pending = rpu_tx_free_buff_req(priv, tx_done, &queue,
					    &vif_index_bitmap);

	if (pkts_pending) {
		/* TODO: Do we need to check each skb for more_frames? */
		more_frames = 0;
		RPU_DEBUG_TX("%s-UMACTX:%s:%d Transfer Pending Frames:\n",
			     priv->name, __func__, __LINE__);
		ret = __rpu_tx_frame(priv, queue, token_id, more_frames, 0);
	} else {
		RPU_DEBUG_TX("%s-UMACTX:No Pending Packets\n", priv->name);
	}

	for (vif_index = 0; vif_index < MAX_VIFS; vif_index++) {
		if (vif_index_bitmap & (1 << vif_index)) {
			memset(&noa_event, 0, sizeof(noa_event));
			noa_event.if_index = vif_index;
			rpu_noa_event(FROM_TX_DONE, &noa_event,
				      (void *)priv, NULL);
		}
	}
}