Update for v366
diff --git a/.compat_autoconf_3.3-OSR-2012-10-11-15-g2bd3ebf b/.compat_autoconf_3.3-OSR-2012-10-11-15-g2bd3ebf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/.compat_autoconf_3.3-OSR-2012-10-11-15-g2bd3ebf
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index cc67ec3..de39a8e 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -26,6 +26,8 @@
/* threshold to re-enable Tx bundling for an AC*/
#define TX_RESUME_BUNDLE_THRESHOLD 1500
+static void ath6kl_htc_tx_from_queue(struct htc_target *, struct htc_endpoint *);
+
/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
struct htc_endpoint_credit_dist *ep_dist,
@@ -402,18 +404,24 @@
struct htc_endpoint *endpoint,
struct htc_packet *packet)
{
+ int packets_in_flight;
+
packet->completion = NULL;
packet->buf += HTC_HDR_LENGTH;
+ spin_lock_bh(&target->tx_lock);
+
+ // One transmit has completed; drop the endpoint's in-flight count.
+ packets_in_flight = --endpoint->tx_proc_cnt;
+
if (!packet->status)
- return;
+ goto done;
ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
packet->status, packet->endpoint, packet->act_len,
packet->info.tx.cred_used);
/* on failure to submit, reclaim credits for this packet */
- spin_lock_bh(&target->tx_lock);
endpoint->cred_dist.cred_to_dist +=
packet->info.tx.cred_used;
endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
@@ -425,7 +433,12 @@
&target->cred_dist_list,
HTC_CREDIT_DIST_SEND_COMPLETE);
+done:
spin_unlock_bh(&target->tx_lock);
+
+ if (packets_in_flight == 0) {
+ ath6kl_htc_tx_from_queue(target, endpoint);
+ }
}
static void htc_tx_complete(struct htc_endpoint *endpoint,
@@ -630,6 +643,9 @@
packet->context = target;
endpoint->ep_st.tx_issued += 1;
+ // Mark the endpoint busy so further packets queue up while we work.
+ endpoint->tx_proc_cnt++;
+
/* save send flags */
packet->info.tx.flags = flags;
packet->info.tx.seqno = endpoint->seqno;
@@ -693,10 +709,14 @@
cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
&len, endpoint);
- if (cred_pad < 0 || rem_scat < len) {
+ if (unlikely(cred_pad < 0)) {
+ // We encountered a packet that could not be bundled.
status = -ENOSPC;
break;
}
+ if (unlikely(rem_scat < len))
+ // We've used up the space in this bundle.
+ break;
rem_scat -= len;
/* now remove it from the queue */
@@ -755,7 +775,7 @@
u32 txb_mask;
u8 ac = WMM_NUM_AC;
- if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
@@ -826,6 +846,8 @@
scat_req->len, scat_req->scat_entries);
ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
+ // We get -ENOSPC in ath6kl_htc_tx_setup_scat_list because we
+ // encountered a packet that could not be bundled.
if (status)
break;
}
@@ -846,16 +868,16 @@
int bundle_sent;
int n_pkts_bundle;
u8 ac = WMM_NUM_AC;
+ int status;
spin_lock_bh(&target->tx_lock);
- endpoint->tx_proc_cnt++;
- if (endpoint->tx_proc_cnt > 1) {
- endpoint->tx_proc_cnt--;
+ if (endpoint->tx_proc_cnt > 0) {
spin_unlock_bh(&target->tx_lock);
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
return;
}
+ endpoint->tx_proc_cnt++;
/*
* drain the endpoint TX queue for transmission as long
@@ -863,7 +885,7 @@
*/
INIT_LIST_HEAD(&txq);
- if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
@@ -882,32 +904,33 @@
bundle_sent = 0;
n_pkts_bundle = 0;
- while (true) {
- /* try to send a bundle on each pass */
- if ((target->tx_bndl_mask) &&
- (get_queue_depth(&txq) >=
- HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ while (!(list_empty(&txq))) {
+ // Try to send everything in bundles.
+ if (likely((target->tx_bndl_mask & (1 << ac)) &&
+ (get_queue_depth(&txq) >= HTC_MIN_HTC_MSGS_TO_BUNDLE))) {
int temp1 = 0, temp2 = 0;
-
- /* check if bundling is enabled for an AC */
- if (target->tx_bndl_mask & (1 << ac)) {
- ath6kl_htc_tx_bundle(endpoint, &txq,
- &temp1, &temp2);
- bundle_sent += temp1;
- n_pkts_bundle += temp2;
- }
+ ath6kl_htc_tx_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
}
-
- if (list_empty(&txq))
+ if (likely(list_empty(&txq)))
break;
+ // We encountered a packet that cannot be sent in a bundle.
+ // Send it individually instead.
packet = list_first_entry(&txq, struct htc_packet,
list);
list_del(&packet->list);
ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
0, packet->info.tx.seqno);
- ath6kl_htc_tx_issue(target, packet);
+ status = ath6kl_htc_tx_issue(target, packet);
+
+ if (status) {
+ packet->status = status;
+ packet->completion(packet->context, packet);
+ }
}
spin_lock_bh(&target->tx_lock);
@@ -934,15 +957,19 @@
if (ac < WMM_NUM_AC)
target->ac_tx_count[ac] = 0;
}
+
+ if (endpoint->tx_proc_cnt > 1)
+ break;
}
- endpoint->tx_proc_cnt = 0;
+ endpoint->tx_proc_cnt--;
spin_unlock_bh(&target->tx_lock);
}
static bool ath6kl_htc_tx_try(struct htc_target *target,
struct htc_endpoint *endpoint,
- struct htc_packet *tx_pkt)
+ struct htc_packet *tx_pkt,
+ int push)
{
struct htc_ep_callbacks ep_cb;
int txq_depth;
@@ -975,7 +1002,8 @@
list_add_tail(&tx_pkt->list, &endpoint->txq);
spin_unlock_bh(&target->tx_lock);
- ath6kl_htc_tx_from_queue(target, endpoint);
+ if (push)
+ ath6kl_htc_tx_from_queue(target, endpoint);
return true;
}
@@ -1098,7 +1126,7 @@
}
}
-int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
+int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet, int push)
{
struct htc_endpoint *endpoint;
struct list_head queue;
@@ -1114,7 +1142,7 @@
endpoint = &target->endpoint[packet->endpoint];
- if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
+ if (!ath6kl_htc_tx_try(target, endpoint, packet, push)) {
packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
-ECANCELED : -ENOSPC;
INIT_LIST_HEAD(&queue);
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 4fe5d17..e8acb5a 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -569,7 +569,7 @@
int ath6kl_htc_conn_service(struct htc_target *target,
struct htc_service_connect_req *req,
struct htc_service_connect_resp *resp);
-int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet);
+int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet, int push);
void ath6kl_htc_stop(struct htc_target *target);
void ath6kl_htc_cleanup(struct htc_target *target);
void ath6kl_htc_flush_txep(struct htc_target *target,
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 21cc7a9..1695de3 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1395,6 +1395,13 @@
sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
+ // We wish to receive oversized TCP skbs so that we can segment them
+ // into multiple packets and bundle-send them.
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_TSO;
+ // TODO(davidgao): handle the IPv6 header and TCP pseudo-header as well.
+ // dev->features |= NETIF_F_TSO6;
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index dbfe461..931ff00 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -551,7 +551,7 @@
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
- if (!bus_req)
+ if (WARN_ON_ONCE(!bus_req))
return -ENOMEM;
bus_req->address = address;
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 0b4b703..c50a992 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -15,6 +15,11 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <net/xfrm.h>
+
#include "core.h"
#include "debug.h"
@@ -354,7 +359,7 @@
* This interface is asynchronous, if there is an error, cleanup
* will happen in the TX completion callback.
*/
- ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
+ ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt, /*push=*/1);
return 0;
@@ -363,6 +368,185 @@
return status;
}
+// Copied from net/core/skbuff.c at 2.6.38
+static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+{
+ new->tstamp = old->tstamp;
+ new->dev = old->dev;
+ new->transport_header = old->transport_header;
+ new->network_header = old->network_header;
+ new->mac_header = old->mac_header;
+ skb_dst_copy(new, old);
+ new->rxhash = old->rxhash;
+#ifdef CONFIG_XFRM
+ new->sp = secpath_get(old->sp);
+#endif
+ memcpy(new->cb, old->cb, sizeof(old->cb));
+ new->csum = old->csum;
+ new->local_df = old->local_df;
+ new->pkt_type = old->pkt_type;
+ new->ip_summed = old->ip_summed;
+ skb_copy_queue_mapping(new, old);
+ new->priority = old->priority;
+ new->deliver_no_wcard = old->deliver_no_wcard;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+ new->ipvs_property = old->ipvs_property;
+#endif
+ new->protocol = old->protocol;
+ new->mark = old->mark;
+ new->skb_iif = old->skb_iif;
+ __nf_copy(new, old);
+#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
+ defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+ new->nf_trace = old->nf_trace;
+#endif
+#ifdef CONFIG_NET_SCHED
+ new->tc_index = old->tc_index;
+#ifdef CONFIG_NET_CLS_ACT
+ new->tc_verd = old->tc_verd;
+#endif
+#endif
+ new->vlan_tci = old->vlan_tci;
+
+ skb_copy_secmark(new, old);
+}
+
+// Adapted from net/core/skbuff.c at 2.6.38
+static void copy_skb_header_for_segment(struct sk_buff *new, const struct sk_buff *old)
+{
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
+ /*
+ * Shift between the two data areas in bytes
+ */
+ unsigned long offset = new->data - old->data;
+#endif
+
+ __copy_skb_header(new, old);
+
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
+ /* {transport,network,mac}_header are relative to skb->head */
+ new->transport_header += offset;
+ new->network_header += offset;
+ if (skb_mac_header_was_set(new))
+ new->mac_header += offset;
+#endif
+ skb_shinfo(new)->gso_size = 0;
+ skb_shinfo(new)->gso_segs = 1;
+ skb_shinfo(new)->gso_type = 0;
+}
+
+static struct sk_buff *preprocess_pskb(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct sk_buff *head = NULL, *nskb;
+ struct tcphdr *th;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ int offset;
+ u32 seq, len_diff, partial_csum;
+ size_t header_size, body_size;
+
+ if (shinfo->gso_size == 0) {
+ // Kernel tells us to send packet as-is, without segmentation.
+ if (unlikely(skb_linearize(skb))) {
+ ath6kl_err("skb_linearize failed\n");
+ return NULL;
+ } else {
+ return skb;
+ }
+ }
+
+ switch (shinfo->gso_type) {
+ case SKB_GSO_TCPV4:
+ // case SKB_GSO_TCPV6:
+ // Find segmentation number
+ th = tcp_hdr(skb);
+ seq = ntohl(th->seq);
+ offset = header_size = (unsigned char *)th - skb->data + th->doff * 4;
+ break;
+ default:
+ // We don't know how to segment this yet.
+ ath6kl_err("Unknown GSO type %x\n", shinfo->gso_type);
+ return NULL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) BUG();
+
+ while (offset < skb->len) {
+ body_size = skb->len - offset;
+ if (body_size > shinfo->gso_size) body_size = shinfo->gso_size;
+ len_diff = skb->len - header_size - body_size; // Positive
+ nskb = alloc_skb(skb_headroom(skb) + header_size + body_size, GFP_ATOMIC);
+ if (nskb == NULL) goto cleanup;
+ if (head == NULL) {
+ head = nskb;
+ nskb->next = nskb->prev = nskb;
+ }
+ else {
+ nskb->next = head;
+ nskb->prev = head->prev;
+ head->prev = nskb->prev->next = nskb;
+ }
+ skb_reserve(nskb, skb_headroom(skb));
+ skb_put(nskb, header_size + body_size);
+ // Copy the header
+ if (skb_copy_bits(skb, 0, nskb->data, header_size)) BUG();
+ // Copy the body
+ if (skb_copy_bits(skb, offset, nskb->data + header_size, body_size)) BUG();
+ // Copy the struct itself
+ copy_skb_header_for_segment(nskb, skb);
+ switch (shinfo->gso_type) {
+ case SKB_GSO_TCPV4:
+ // FIX IP length and checksum
+ iph = ip_hdr(nskb);
+ iph->tot_len = htons(ntohs(iph->tot_len) - len_diff);
+
+ partial_csum = (~ntohs(iph->check)) & 0xFFFF;
+ partial_csum -= len_diff;
+ partial_csum += (partial_csum >> 16);
+ iph->check = htons(~partial_csum);
+ // Fix segmentation number and partial checksum
+ th = tcp_hdr(nskb);
+ th->seq = htonl(seq);
+
+ // TCP partial checksum is not complemented
+ partial_csum = ntohs(th->check);
+ partial_csum -= len_diff;
+ partial_csum += (partial_csum >> 16);
+ th->check = htons(partial_csum);
+ break;
+ /*case SKB_GSO_TCPV6:
+ // FIX IP length
+ ipv6h = ipv6_hdr(nskb);
+ ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) - len_diff);
+ // IPv6 header has no checksum.
+ // Fix segmentation number and partial checksum
+ th = tcp_hdr(nskb);
+ th->seq = htonl(seq);
+ // The length field in the pseudo-header is 4 bytes wide.
+ th->check = htons(ntohs(th->check) + ~len_diff);
+ break;*/
+ }
+
+ offset += body_size;
+ seq += body_size;
+ }
+ head->prev->next = NULL;
+ head->prev = NULL;
+
+ return head;
+
+cleanup:
+ ath6kl_err("preprocess_pskb fails\n");
+ while (head) {
+ struct sk_buff *tmp = head;
+ head = head->next;
+ tmp->prev = tmp->next = NULL;
+ dev_kfree_skb(tmp);
+ }
+ return NULL;
+}
+
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
struct ath6kl *ar = ath6kl_priv(dev);
@@ -374,10 +558,6 @@
u8 ac = 99 ; /* initialize to unmapped ac */
bool chk_adhoc_ps_mapping = false;
int ret;
- struct wmi_tx_meta_v2 meta_v2;
- void *meta;
- u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
- u8 meta_ver = 0;
u32 flags = 0;
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
@@ -404,72 +584,100 @@
return 0;
}
+ {
+ // If we get a pskb, we want to apply segmentation and linearize it
+ // as requested by the kernel.
+ struct sk_buff *nskb = preprocess_pskb(skb);
+ if (nskb == NULL) goto fail_tx;
+ if (nskb != skb) {
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+ }
+
if (test_bit(WMI_ENABLED, &ar->flag)) {
- if ((dev->features & NETIF_F_IP_CSUM) &&
- (csum == CHECKSUM_PARTIAL)) {
- csum_start = skb->csum_start -
- (skb_network_header(skb) - skb->head) +
- sizeof(struct ath6kl_llc_snap_hdr);
- csum_dest = skb->csum_offset + csum_start;
- }
-
- if (skb_headroom(skb) < dev->needed_headroom) {
- struct sk_buff *tmp_skb = skb;
-
- skb = skb_realloc_headroom(skb, dev->needed_headroom);
- kfree_skb(tmp_skb);
- if (skb == NULL) {
- vif->net_stats.tx_dropped++;
- return 0;
+ struct sk_buff *iter;
+ for (iter = skb; iter != NULL; iter = iter->next) {
+ // We do not wish to use the HW for checksum offloading because of
+ // its firmware dependency.
+ if (iter->ip_summed == CHECKSUM_PARTIAL) {
+ skb_checksum_help(iter);
}
- }
- if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
- ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
- goto fail_tx;
- }
+ if (skb_headroom(iter) < dev->needed_headroom) {
+ ath6kl_info("skb headroom too small\n");
+ struct sk_buff *tmp_skb = skb_realloc_headroom(iter, dev->needed_headroom);
+ if (tmp_skb == NULL) goto fail_tx;
+ tmp_skb->next = iter->next;
+ tmp_skb->prev = iter->prev;
+ if (tmp_skb->next) tmp_skb->next->prev = tmp_skb;
+ if (tmp_skb->prev)
+ tmp_skb->prev->next = tmp_skb;
+ else
+ // Replacing the first item.
+ skb = tmp_skb;
+ kfree_skb(iter);
+ iter = tmp_skb;
+ }
- if ((dev->features & NETIF_F_IP_CSUM) &&
- (csum == CHECKSUM_PARTIAL)) {
- meta_v2.csum_start = csum_start;
- meta_v2.csum_dest = csum_dest;
-
- /* instruct target to calculate checksum */
- meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
- meta_ver = WMI_META_VERSION_2;
- meta = &meta_v2;
- } else {
- meta_ver = 0;
- meta = NULL;
- }
-
- ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
- DATA_MSGTYPE, flags, 0,
- meta_ver,
- meta, vif->fw_vif_idx);
-
- if (ret) {
- ath6kl_warn("failed to add wmi data header:%d\n"
- , ret);
- goto fail_tx;
- }
-
- if ((vif->nw_type == ADHOC_NETWORK) &&
- ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
- chk_adhoc_ps_mapping = true;
- else {
- /* get the stream mapping */
- ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
- vif->fw_vif_idx, skb,
- 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
- if (ret)
+ if (ath6kl_wmi_dix_2_dot3(ar->wmi, iter)) {
+ ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
goto fail_tx;
+ }
+
+ ret = ath6kl_wmi_data_hdr_add(ar->wmi, iter,
+ DATA_MSGTYPE, flags, 0,
+ 0, NULL, vif->fw_vif_idx);
+
+ if (ret) {
+ ath6kl_warn("failed to add wmi data header:%d\n"
+ , ret);
+ goto fail_tx;
+ }
+
+ if ((vif->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
+ chk_adhoc_ps_mapping = true;
+ else {
+ /* get the stream mapping */
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+ vif->fw_vif_idx, iter,
+ 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
+ if (ret)
+ goto fail_tx;
+ }
+
+ if (!IS_ALIGNED((unsigned long) iter->data - HTC_HDR_LENGTH, 4) &&
+ skb_cloned(iter)) {
+ /*
+ * We will touch (move the buffer data to align it. Since the
+ * skb buffer is cloned and not only the header is changed, we
+ * have to copy it to allow the changes. Since we are copying
+ * the data here, we may as well align it by reserving suitable
+ * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
+ */
+ struct sk_buff *nskb;
+
+ nskb = skb_copy_expand(iter, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
+ if (nskb == NULL) goto fail_tx;
+ nskb->next = iter->next;
+ nskb->prev = iter->prev;
+ if (nskb->next != NULL) nskb->next->prev = nskb;
+ if (nskb->prev != NULL)
+ nskb->prev->next = nskb;
+ else
+ // Replacing the first item.
+ skb = nskb;
+ kfree_skb(iter);
+ iter = nskb;
+ }
}
} else
goto fail_tx;
spin_lock_bh(&ar->lock);
+ // This only looks up the Ethernet address; no need to loop over segments.
if (chk_adhoc_ps_mapping)
eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
else
@@ -481,56 +689,54 @@
goto fail_tx;
}
- /* allocate resource for this packet */
- cookie = ath6kl_alloc_cookie(ar, eid == ar->ctrl_ep);
+ while (skb != NULL) {
+ struct sk_buff *tmp_skb = skb;
+ /* allocate resource for this packet */
+ cookie = ath6kl_alloc_cookie(ar, eid == ar->ctrl_ep);
- if (!cookie) {
- spin_unlock_bh(&ar->lock);
- goto fail_tx;
- }
-
- /* update counts while the lock is held */
- ar->tx_pending[eid]++;
- ar->total_tx_data_pend++;
-
- spin_unlock_bh(&ar->lock);
-
- if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
- skb_cloned(skb)) {
- /*
- * We will touch (move the buffer data to align it. Since the
- * skb buffer is cloned and not only the header is changed, we
- * have to copy it to allow the changes. Since we are copying
- * the data here, we may as well align it by reserving suitable
- * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
- */
- struct sk_buff *nskb;
-
- nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
- if (nskb == NULL)
+ if (!cookie) {
+ spin_unlock_bh(&ar->lock);
goto fail_tx;
- kfree_skb(skb);
- skb = nskb;
+ }
+
+ /* update counts while the lock is held */
+ ar->tx_pending[eid]++;
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ // Unlink one skb.
+ skb = skb->next;
+ //tmp_skb->next = NULL;
+ //if (skb != NULL) skb->prev = NULL;
+
+ cookie->skb = tmp_skb;
+ cookie->map_no = map_no;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, tmp_skb->data, tmp_skb->len,
+ eid, htc_tag);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
+ tmp_skb->data, tmp_skb->len);
+
+ /*
+ * HTC interface is asynchronous, if this fails, cleanup will
+ * happen in the ath6kl_tx_complete callback.
+ */
+ ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt, skb == NULL);
+
+ spin_lock_bh(&ar->lock);
}
-
- cookie->skb = skb;
- cookie->map_no = map_no;
- set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
- eid, htc_tag);
-
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
- skb->data, skb->len);
-
- /*
- * HTC interface is asynchronous, if this fails, cleanup will
- * happen in the ath6kl_tx_complete callback.
- */
- ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
+ spin_unlock_bh(&ar->lock);
return 0;
fail_tx:
- dev_kfree_skb(skb);
+ ath6kl_err("fail_tx\n");
+ while (skb) {
+ struct sk_buff *tmp = skb;
+ skb = skb->next;
+ dev_kfree_skb(tmp);
+ }
vif->net_stats.tx_dropped++;
vif->net_stats.tx_aborted_errors++;
@@ -1349,7 +1555,7 @@
__func__, ar, ept, skb, packet->buf,
packet->act_len, status);
- if (status || !(skb->data + HTC_HDR_LENGTH)) {
+ if (status || packet->act_len < HTC_HDR_LENGTH) {
dev_kfree_skb(skb);
return;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 49cb0af..427fb4f 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -4144,7 +4144,10 @@
break;
case WMI_REGDOMAIN_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
+#if 0
+ /* remove country code setting based on 11d message */
ath6kl_wmi_regdomain_event(wmi, datap, len);
+#endif
break;
case WMI_PSTREAM_TIMEOUT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f02e0bc..05a7caa 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -508,10 +508,13 @@
* - country_ie + 2, the start of the country ie data, and
* - and country_ie[1] which is the IE length
*/
+#if 0
+ /* remove country code setting based on connected AP's country IE */
regulatory_hint_11d(wdev->wiphy,
bss->channel->band,
country_ie + 2,
country_ie[1]);
+#endif
}
void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,