blob: f8e8c80126764efbefbcfe63f8e6914e55add795 [file] [log] [blame]
Googler298baf22022-08-01 19:58:45 -07001/*
2 ***************************************************************************
3 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 ***************************************************************************
17 */
18
19#include <linux/version.h>
20#include <linux/types.h>
21#include <linux/ip.h>
22#include <linux/module.h>
23#include <linux/skbuff.h>
24#include <linux/debugfs.h>
25#include <linux/string.h>
26#include <linux/netfilter_bridge.h>
27#include <net/ip.h>
28#include <linux/inet.h>
29#include <sp_api.h>
30
31/*
32 * Debug output levels
33 * 0 = OFF
34 * 1 = ASSERTS / ERRORS
35 * 2 = 1 + WARN
36 * 3 = 2 + INFO
37 * 4 = 3 + TRACE
38 */
39#define DEBUG_LEVEL ECM_CLASSIFIER_EMESH_DEBUG_LEVEL
40
41#include "ecm_types.h"
42#include "ecm_db_types.h"
43#include "ecm_state.h"
44#include "ecm_tracker.h"
45#include "ecm_classifier.h"
46#include "ecm_front_end_types.h"
47#include "ecm_db.h"
48#include "ecm_interface.h"
49#include "ecm_classifier_emesh_public.h"
50#include "ecm_front_end_ipv4.h"
51#include "ecm_front_end_ipv6.h"
52#include "ecm_front_end_common.h"
53
54/*
55 * Magic numbers
56 */
57#define ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC 0xFECA
58
59/*
60 * Latency parameter operation
61 */
62#define ECM_CLASSIFIER_EMESH_ADD_LATENCY_PARAMS 0x1
63#define ECM_CLASSIFIER_EMESH_SUB_LATENCY_PARAMS 0x2
64
65/*
66 * Flag to enable SPM rule lookup
67 */
68#define ECM_CLASSIFIER_EMESH_ENABLE_SPM_RULE_LOOKUP 0x1
69#define ECM_CLASSIFIER_EMESH_ENABLE_LATENCY_UPDATE 0x2
70
/*
 * struct ecm_classifier_emesh_instance
 *	State to allow tracking of dynamic qos for a connection
 *
 * Instances are kept in a doubly linked list headed by
 * ecm_classifier_emesh_instances and protected by ecm_classifier_emesh_lock.
 */
struct ecm_classifier_emesh_instance {
	struct ecm_classifier_instance base;			/* Base type */

	struct ecm_classifier_emesh_instance *next;		/* Next classifier state instance (for accounting and reporting purposes) */
	struct ecm_classifier_emesh_instance *prev;		/* Previous classifier state instance (for accounting and reporting purposes) */

	uint32_t ci_serial;					/* RO: Serial of the connection */
	uint32_t pcp[ECM_CONN_DIR_MAX];				/* PCP values for the connections */
	struct ecm_classifier_process_response process_response;/* Last process response computed */

	int refs;						/* Integer to trap we never go negative */
	uint8_t packet_seen[ECM_CONN_DIR_MAX];			/* Per direction packet seen flag */
	uint32_t service_interval_dl;				/* wlan downlink latency parameter: Service interval associated with this connection */
	uint32_t burst_size_dl;					/* wlan downlink latency parameter: Burst Size associated with this connection */
	uint32_t service_interval_ul;				/* wlan uplink latency parameter: Service interval associated with this connection */
	uint32_t burst_size_ul;					/* wlan uplink latency parameter: Burst Size associated with this connection */

#if (DEBUG_LEVEL > 0)
	uint16_t magic;						/* Magic sentinel checked by DEBUG_CHECK_MAGIC when debug is enabled */
#endif
};
96
97/*
98 * Operational control
99 */
100static uint32_t ecm_classifier_emesh_enabled; /* Operational behaviour */
101static uint32_t ecm_classifier_emesh_latency_config_enabled; /* Mesh Latency profile enable flag */
102
103/*
104 * Management thread control
105 */
106static bool ecm_classifier_emesh_terminate_pending = false; /* True when the user wants us to terminate */
107
108/*
109 * Debugfs dentry object.
110 */
111static struct dentry *ecm_classifier_emesh_dentry;
112
113/*
114 * Locking of the classifier structures
115 */
116static DEFINE_SPINLOCK(ecm_classifier_emesh_lock); /* Protect SMP access. */
117
118/*
119 * List of our classifier instances
120 */
121static struct ecm_classifier_emesh_instance *ecm_classifier_emesh_instances = NULL;
122 /* list of all active instances */
123static int ecm_classifier_emesh_count = 0; /* Tracks number of instances allocated */
124
125/*
126 * Callback structure to support Mesh latency param config in WLAN driver
127 */
128static struct ecm_classifier_emesh_callbacks ecm_emesh;
129
130/*
131 * ecm_classifier_emesh_ref()
132 * Ref
133 */
134static void ecm_classifier_emesh_ref(struct ecm_classifier_instance *ci)
135{
136 struct ecm_classifier_emesh_instance *cemi;
137 cemi = (struct ecm_classifier_emesh_instance *)ci;
138
139 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);
140 spin_lock_bh(&ecm_classifier_emesh_lock);
141 cemi->refs++;
142 DEBUG_TRACE("%px: cemi ref %d\n", cemi, cemi->refs);
143 DEBUG_ASSERT(cemi->refs > 0, "%px: ref wrap\n", cemi);
144 spin_unlock_bh(&ecm_classifier_emesh_lock);
145}
146
/*
 * ecm_classifier_emesh_deref()
 *	Release one reference; destroy the instance when the last reference drops.
 *
 * Returns the remaining reference count, 0 when the instance was freed.
 */
static int ecm_classifier_emesh_deref(struct ecm_classifier_instance *ci)
{
	struct ecm_classifier_emesh_instance *cemi;
	cemi = (struct ecm_classifier_emesh_instance *)ci;

	DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);
	spin_lock_bh(&ecm_classifier_emesh_lock);
	cemi->refs--;
	DEBUG_ASSERT(cemi->refs >= 0, "%px: refs wrapped\n", cemi);
	DEBUG_TRACE("%px: EMESH classifier deref %d\n", cemi, cemi->refs);
	if (cemi->refs) {
		/* Still referenced: snapshot the count before dropping the lock */
		int refs = cemi->refs;
		spin_unlock_bh(&ecm_classifier_emesh_lock);
		return refs;
	}

	/*
	 * Object to be destroyed
	 */
	ecm_classifier_emesh_count--;
	DEBUG_ASSERT(ecm_classifier_emesh_count >= 0, "%px: ecm_classifier_emesh_count wrap\n", cemi);

	/*
	 * UnLink the instance from our list.
	 * A NULL prev means this instance is the list head.
	 */
	if (cemi->next) {
		cemi->next->prev = cemi->prev;
	}

	if (cemi->prev) {
		cemi->prev->next = cemi->next;
	} else {
		DEBUG_ASSERT(ecm_classifier_emesh_instances == cemi, "%px: list bad %px\n", cemi, ecm_classifier_emesh_instances);
		ecm_classifier_emesh_instances = cemi->next;
	}
	spin_unlock_bh(&ecm_classifier_emesh_lock);

	/*
	 * Final: free outside the lock, after the instance is unreachable.
	 */
	DEBUG_INFO("%px: Final EMESH classifier instance\n", cemi);
	kfree(cemi);

	return 0;
}
196
197/*
198 * ecm_classifier_emesh_is_bidi_packet_seen()
199 * Return true if both direction packets are seen.
200 */
201static inline bool ecm_classifier_emesh_is_bidi_packet_seen(struct ecm_classifier_emesh_instance *cemi)
202{
203 return ((cemi->packet_seen[ECM_CONN_DIR_FLOW] == true) && (cemi->packet_seen[ECM_CONN_DIR_RETURN] == true));
204}
205
/*
 * ecm_classifier_emesh_fill_pcp()
 *	Save the PCP value in the classifier instance.
 *
 * Records skb->priority for the direction indicated by 'sender' and marks
 * that direction as seen.  'ctinfo' is unused in this function; it is kept
 * to match the call site in ecm_classifier_emesh_process().
 *
 * NOTE(review): the caller invokes this without holding
 * ecm_classifier_emesh_lock — presumably per-connection processing is
 * serialized by the caller; confirm.
 */
static void ecm_classifier_emesh_fill_pcp(struct ecm_classifier_emesh_instance *cemi,
		ecm_tracker_sender_type_t sender, enum ip_conntrack_info ctinfo,
		struct sk_buff *skb)
{
	if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
		cemi->pcp[ECM_CONN_DIR_FLOW] = skb->priority;
		cemi->packet_seen[ECM_CONN_DIR_FLOW] = true;
	} else {
		cemi->pcp[ECM_CONN_DIR_RETURN] = skb->priority;
		cemi->packet_seen[ECM_CONN_DIR_RETURN] = true;
	}
}
222
/*
 * ecm_classifier_emesh_process()
 *	Process new data for connection
 *
 * Decides whether this classifier is relevant to the connection, optionally
 * applies an SPM rule lookup to update skb priority, captures per-direction
 * PCP values, and fills *process_response with the resulting actions.
 *
 * Locking: the emesh_classifier_out label is reached WITH
 * ecm_classifier_emesh_lock held; every goto to it first re-takes the lock.
 */
static void ecm_classifier_emesh_process(struct ecm_classifier_instance *aci, ecm_tracker_sender_type_t sender,
						struct ecm_tracker_ip_header *ip_hdr, struct sk_buff *skb,
						struct ecm_classifier_process_response *process_response)
{
	struct ecm_classifier_emesh_instance *cemi;
	ecm_classifier_relevence_t relevance;
	struct ecm_db_connection_instance *ci = NULL;
	struct ecm_front_end_connection_instance *feci;
	ecm_front_end_acceleration_mode_t accel_mode;
	uint32_t became_relevant = 0;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	int protocol;
	uint64_t slow_pkts;

	cemi = (struct ecm_classifier_emesh_instance *)aci;
	DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);

	spin_lock_bh(&ecm_classifier_emesh_lock);
	relevance = cemi->process_response.relevance;

	/*
	 * Are we relevant?
	 * If the classifier is set as ir-relevant to the connection,
	 * the process response of the classifier instance was set from
	 * the earlier packets.
	 */
	if (relevance == ECM_CLASSIFIER_RELEVANCE_NO) {
		/*
		 * Lock still held
		 */
		goto emesh_classifier_out;
	}

	/*
	 * Yes or maybe relevant.
	 *
	 * Need to decide our relevance to this connection.
	 * We are only relevent to a connection iff:
	 * 1. We are enabled.
	 * 2. Connection can be accelerated.
	 * Any other condition and we are not and will stop analysing this connection.
	 */
	if (!ecm_classifier_emesh_enabled) {
		/*
		 * Lock still held
		 */
		cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_NO;
		goto emesh_classifier_out;
	}
	spin_unlock_bh(&ecm_classifier_emesh_lock);

	/*
	 * Can we accelerate?
	 * Look the connection up by its serial; it may have gone away.
	 */
	ci = ecm_db_connection_serial_find_and_ref(cemi->ci_serial);
	if (!ci) {
		DEBUG_TRACE("%px: No ci found for %u\n", cemi, cemi->ci_serial);
		spin_lock_bh(&ecm_classifier_emesh_lock);
		cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_NO;
		goto emesh_classifier_out;
	}

	/*
	 * Check if SPM rule lookup flag is enabled
	 */
	if (ecm_classifier_emesh_latency_config_enabled & ECM_CLASSIFIER_EMESH_ENABLE_SPM_RULE_LOOKUP) {
		uint8_t dmac[ETH_ALEN];
		uint8_t smac[ETH_ALEN];
		/*
		 * Resolve src/dst MACs relative to the sender's direction.
		 */
		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
			DEBUG_TRACE("%px: sender is SRC\n", aci);
			ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_FROM, smac);
			ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_TO, dmac);
		} else {
			DEBUG_TRACE("%px: sender is DEST\n", aci);
			ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_TO, smac);
			ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_FROM, dmac);
		}

		/*
		 * Invoke SPM rule lookup API for skb priority update
		 * For bridging traffic, it will be matched with the rule table on SPM prerouting hook
		 */
		if (skb->skb_iif != skb->dev->ifindex) {
			sp_mapdb_apply(skb, smac, dmac);
		}
	}

	/*
	 * Snapshot front-end state (accel mode, slow-path packet count) and
	 * the connection protocol, then release the connection references.
	 */
	feci = ecm_db_connection_front_end_get_and_ref(ci);
	accel_mode = feci->accel_state_get(feci);
	slow_pkts = ecm_front_end_get_slow_packet_count(feci);
	feci->deref(feci);
	protocol = ecm_db_connection_protocol_get(ci);
	ecm_db_connection_deref(ci);

	if (ECM_FRONT_END_ACCELERATION_NOT_POSSIBLE(accel_mode)) {
		spin_lock_bh(&ecm_classifier_emesh_lock);
		cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_NO;
		goto emesh_classifier_out;
	}

	/*
	 * Is there a valid conntrack?
	 */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) {
		spin_lock_bh(&ecm_classifier_emesh_lock);
		cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_NO;
		goto emesh_classifier_out;
	}

	/*
	 * We are relevant to the connection.
	 * Set the process response to its default value, that is, to
	 * allow the acceleration.
	 */
	became_relevant = ecm_db_time_get();

	spin_lock_bh(&ecm_classifier_emesh_lock);
	cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_YES;
	cemi->process_response.became_relevant = became_relevant;

	cemi->process_response.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE;
	cemi->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL;
	spin_unlock_bh(&ecm_classifier_emesh_lock);

	/*
	 * NOTE(review): packet_seen[]/pcp[] below are read and written without
	 * holding ecm_classifier_emesh_lock — presumably per-connection packet
	 * processing is serialized by the caller; confirm.
	 */
	if (protocol == IPPROTO_TCP) {
		/*
		 * Stop the processing if both side packets are already seen.
		 * Above the process response is already set to allow the acceleration.
		 */
		if (ecm_classifier_emesh_is_bidi_packet_seen(cemi)) {
			spin_lock_bh(&ecm_classifier_emesh_lock);
			goto emesh_classifier_out;
		}

		/*
		 * Store the PCP value in the classifier instance and deny the
		 * acceleration if both side PCP value is not yet available.
		 */
		ecm_classifier_emesh_fill_pcp(cemi, sender, ctinfo, skb);
		if (!ecm_classifier_emesh_is_bidi_packet_seen(cemi)) {
			DEBUG_TRACE("%px: Both side PCP value is not yet picked\n", cemi);
			spin_lock_bh(&ecm_classifier_emesh_lock);
			cemi->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
			goto emesh_classifier_out;
		}
	} else {
		/*
		 * If the acceleration delay option is enabled, we will wait
		 * until seeing both side traffic.
		 *
		 * There are 2 options:
		 * Option 1: Wait forever until to see the reply direction traffic
		 * Option 2: Wait for seeing N number of packets. If we still don't see reply,
		 * set the uni-directional values.
		 */
		if (ecm_classifier_accel_delay_pkts) {
			/*
			 * Stop the processing if both side packets are already seen.
			 * Above the process response is already set to allow the
			 * acceleration.
			 */
			if (ecm_classifier_emesh_is_bidi_packet_seen(cemi)) {
				spin_lock_bh(&ecm_classifier_emesh_lock);
				goto emesh_classifier_out;
			}

			/*
			 * Store the PCP value in the classifier instance and allow the
			 * acceleration if both side PCP value is not yet available.
			 */
			ecm_classifier_emesh_fill_pcp(cemi, sender, ctinfo, skb);
			if (ecm_classifier_emesh_is_bidi_packet_seen(cemi)) {
				DEBUG_TRACE("%px: Both side PCP value is picked\n", cemi);
				goto done;
			}

			/*
			 * Deny the acceleration if any of the below options holds true.
			 * For option 1, we wait forever
			 * For option 2, we wait until seeing ecm_classifier_accel_delay_pkts.
			 */
			if ((ecm_classifier_accel_delay_pkts == 1) || (slow_pkts < ecm_classifier_accel_delay_pkts)) {
				DEBUG_TRACE("%px: accel_delay_pkts: %d slow_pkts: %llu accel is not allowed yet\n",
						cemi, ecm_classifier_accel_delay_pkts, slow_pkts);
				spin_lock_bh(&ecm_classifier_emesh_lock);
				cemi->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
				goto emesh_classifier_out;
			}
		}

		/*
		 * If we didn't see both direction traffic during the acceleration
		 * delay time, we can allow the acceleration by setting the uni-directional
		 * values to both flow and return PCP.
		 */
		cemi->pcp[ECM_CONN_DIR_FLOW] = skb->priority;
		cemi->pcp[ECM_CONN_DIR_RETURN] = skb->priority;
	}

done:
	DEBUG_TRACE("Protocol: %d, Flow Priority: %d, Return priority: %d, sender: %d\n",
			protocol, cemi->pcp[ECM_CONN_DIR_FLOW],
			cemi->pcp[ECM_CONN_DIR_RETURN], sender);

	spin_lock_bh(&ecm_classifier_emesh_lock);

	cemi->process_response.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG;

	/*
	 * Map stored per-direction PCP onto flow/return QoS tags depending on
	 * whether the sender matches the conntrack original direction.
	 */
	if (((sender == ECM_TRACKER_SENDER_TYPE_SRC) && (IP_CT_DIR_ORIGINAL == CTINFO2DIR(ctinfo))) ||
			((sender == ECM_TRACKER_SENDER_TYPE_DEST) && (IP_CT_DIR_REPLY == CTINFO2DIR(ctinfo)))) {
		cemi->process_response.flow_qos_tag = cemi->pcp[ECM_CONN_DIR_FLOW];
		cemi->process_response.return_qos_tag = cemi->pcp[ECM_CONN_DIR_RETURN];
	} else {
		cemi->process_response.flow_qos_tag = cemi->pcp[ECM_CONN_DIR_RETURN];
		cemi->process_response.return_qos_tag = cemi->pcp[ECM_CONN_DIR_FLOW];
	}

emesh_classifier_out:

	/*
	 * Return our process response (lock is held on entry to this label).
	 */
	*process_response = cemi->process_response;
	spin_unlock_bh(&ecm_classifier_emesh_lock);
}
454
/*
 * ecm_classifier_emesh_update_latency_param_on_conn_decel()
 *	Update mesh latency parameters to wlan host driver when a connection gets decelerated in ECM
 *
 * Sends a SUB (remove) latency-parameter update for both endpoints of the
 * connection so the WLAN driver can withdraw the per-peer configuration.
 * 'sync' is currently unused here; the signature matches the sync_to_v4/v6
 * call sites.
 */
void ecm_classifier_emesh_update_latency_param_on_conn_decel(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_sync *sync)
{
	struct ecm_classifier_emesh_instance *cemi;
	struct ecm_db_connection_instance *ci;
	uint8_t peer_mac[ETH_ALEN];

	cemi = (struct ecm_classifier_emesh_instance *)aci;
	DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed", cemi);

	/*
	 * Return if E-Mesh functionality is not enabled.
	 */
	if (!ecm_classifier_emesh_enabled) {
		return;
	}

	/* Flow-based latency update must be enabled too */
	if (!(ecm_classifier_emesh_latency_config_enabled
				& ECM_CLASSIFIER_EMESH_ENABLE_LATENCY_UPDATE)) {
		return;
	}

	/* Nothing to do if no WLAN driver callback is registered */
	if (!ecm_emesh.update_peer_mesh_latency_params) {
		return;
	}

	ci = ecm_db_connection_serial_find_and_ref(cemi->ci_serial);
	if (!ci) {
		DEBUG_WARN("%px: No ci found for %u\n", cemi, cemi->ci_serial);
		return;
	}

	/*
	 * Get mac address for destination node
	 * NOTE(review): cemi latency/pcp fields are read here without holding
	 * ecm_classifier_emesh_lock — confirm this races benignly with
	 * the accel-side update.
	 */
	ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_TO, peer_mac);
	ecm_emesh.update_peer_mesh_latency_params(peer_mac,
			cemi->service_interval_dl, cemi->burst_size_dl, cemi->service_interval_ul, cemi->burst_size_ul,
			cemi->pcp[ECM_CONN_DIR_FLOW], ECM_CLASSIFIER_EMESH_SUB_LATENCY_PARAMS);

	/*
	 * Get mac address for source node
	 */
	ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_FROM, peer_mac);
	ecm_emesh.update_peer_mesh_latency_params(peer_mac,
			cemi->service_interval_dl, cemi->burst_size_dl, cemi->service_interval_ul, cemi->burst_size_ul,
			cemi->pcp[ECM_CONN_DIR_FLOW], ECM_CLASSIFIER_EMESH_SUB_LATENCY_PARAMS);

	ecm_db_connection_deref(ci);
}
508
509/*
510 * ecm_classifier_emesh_sync_to_v4()
511 * Front end is pushing accel engine state to us
512 */
513static void ecm_classifier_emesh_sync_to_v4(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_sync *sync)
514{
515 struct ecm_classifier_emesh_instance *cemi;
516 cemi = (struct ecm_classifier_emesh_instance *)aci;
517 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed", cemi);
518
519 switch(sync->reason) {
520 case ECM_FRONT_END_IPV4_RULE_SYNC_REASON_FLUSH:
521 case ECM_FRONT_END_IPV4_RULE_SYNC_REASON_EVICT:
522 case ECM_FRONT_END_IPV4_RULE_SYNC_REASON_DESTROY:
523 ecm_classifier_emesh_update_latency_param_on_conn_decel(aci, sync);
524 break;
525 default:
526 break;
527 }
528}
529
/*
 * ecm_classifier_emesh_update_wlan_latency_params_on_conn_accel()
 *	Update wifi latency parameters associated with SP rule to wlan host driver
 *	when a connection getting accelerated in ECM
 *
 * Looks up the SPM latency parameters for both directions of the connection
 * and pushes an ADD update to the registered WLAN driver callback for each
 * endpoint that has usable parameters.
 */
static void ecm_classifier_emesh_update_wlan_latency_params_on_conn_accel(struct ecm_classifier_instance *aci,
		struct ecm_classifier_rule_create *ecrc)
{
	struct ecm_classifier_emesh_instance *cemi;
	struct ecm_db_connection_instance *ci;
	/*
	 * NOTE(review): the locals are uint8_t for service intervals while the
	 * instance fields are uint32_t — presumably sp_mapdb_get_wlan_latency_params()
	 * outputs 8-bit intervals; confirm against the sp_api header.
	 */
	uint8_t service_interval_dl;
	uint32_t burst_size_dl;
	uint8_t service_interval_ul;
	uint32_t burst_size_ul;
	struct sk_buff *skb;
	uint8_t dmac[ETH_ALEN];
	uint8_t smac[ETH_ALEN];

	/*
	 * Return if E-Mesh functionality is not enabled.
	 */
	if (!ecm_classifier_emesh_enabled) {
		return;
	}

	if (!(ecm_classifier_emesh_latency_config_enabled
				& ECM_CLASSIFIER_EMESH_ENABLE_LATENCY_UPDATE)) {
		/*
		 * Flow based latency parameter updation to WLAN host driver not enabled
		 */
		return;
	}

	/*
	 * When mesh low latency feature flags is enabled, ECM gets
	 * latency config parameters associated with a SPM rule and send
	 * to WLAN host driver invoking callback
	 */
	if (!ecm_emesh.update_peer_mesh_latency_params) {
		return;
	}

	skb = ecrc->skb;

	cemi = (struct ecm_classifier_emesh_instance *)aci;
	DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed", cemi);

	ci = ecm_db_connection_serial_find_and_ref(cemi->ci_serial);
	if (!ci) {
		DEBUG_WARN("%px: No ci found for %u\n", cemi, cemi->ci_serial);
		return;
	}

	/*
	 * Invoke SPM rule lookup API to update skb priority
	 * When latency config is enabled, fetch latency parameter
	 * associated with a SPM rule.Since we do not know direction of
	 * connection, we get src and destination mac address of both
	 * connection and let wlan driver find corresponding wlan peer
	 * connected
	 */
	ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_FROM, smac);
	ecm_db_connection_node_address_get(ci, ECM_DB_OBJ_DIR_TO, dmac);
	sp_mapdb_get_wlan_latency_params(skb, &service_interval_dl, &burst_size_dl,
			&service_interval_ul, &burst_size_ul, smac, dmac);

	spin_lock_bh(&ecm_classifier_emesh_lock);

	/*
	 * Update latency parameters to accelerated connection
	 * (cached so the decel path can send the matching SUB update).
	 */
	cemi->service_interval_dl = service_interval_dl;
	cemi->burst_size_dl = burst_size_dl;
	cemi->service_interval_ul = service_interval_ul;
	cemi->burst_size_ul = burst_size_ul;
	spin_unlock_bh(&ecm_classifier_emesh_lock);

	/*
	 * If one of the latency parameters are zero, there could be
	 * 2 possibilities - 1. no rule match 2. sp rule does not have
	 * latency parameter configured.
	 */
	if ((service_interval_ul && burst_size_ul) || (service_interval_dl && burst_size_dl)) {
		/*
		 * Send destination mac address of this connection
		 */
		ecm_emesh.update_peer_mesh_latency_params(dmac,
				service_interval_dl, burst_size_dl, service_interval_ul, burst_size_ul,
				skb->priority, ECM_CLASSIFIER_EMESH_ADD_LATENCY_PARAMS);
	}

	/*
	 * Get latency parameter for other direction
	 */
	sp_mapdb_get_wlan_latency_params(skb, &service_interval_dl, &burst_size_dl,
			&service_interval_ul, &burst_size_ul, dmac, smac);

	if ((service_interval_ul && burst_size_ul) || (service_interval_dl && burst_size_dl)) {
		/*
		 * Send source mac address of this connection
		 */
		ecm_emesh.update_peer_mesh_latency_params(smac,
				service_interval_dl, burst_size_dl, service_interval_ul, burst_size_ul,
				skb->priority, ECM_CLASSIFIER_EMESH_ADD_LATENCY_PARAMS);
	}

	ecm_db_connection_deref(ci);
}
638
/*
 * ecm_classifier_emesh_sync_from_v4()
 *	Front end is retrieving accel engine state from us
 */
static void ecm_classifier_emesh_sync_from_v4(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_create *ecrc)
{
	/*
	 * Push SP-rule latency parameters to the WLAN driver for the newly
	 * accelerated connection.
	 */
	ecm_classifier_emesh_update_wlan_latency_params_on_conn_accel(aci, ecrc);
}
648
649/*
650 * ecm_classifier_emesh_sync_to_v6()
651 * Front end is pushing accel engine state to us
652 */
653static void ecm_classifier_emesh_sync_to_v6(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_sync *sync)
654{
655 struct ecm_classifier_emesh_instance *cemi;
656 cemi = (struct ecm_classifier_emesh_instance *)aci;
657 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed", cemi);
658
659 switch(sync->reason) {
660 case ECM_FRONT_END_IPV6_RULE_SYNC_REASON_FLUSH:
661 case ECM_FRONT_END_IPV6_RULE_SYNC_REASON_EVICT:
662 case ECM_FRONT_END_IPV6_RULE_SYNC_REASON_DESTROY:
663 ecm_classifier_emesh_update_latency_param_on_conn_decel(aci, sync);
664 break;
665 default:
666 break;
667 }
668}
669
/*
 * ecm_classifier_emesh_sync_from_v6()
 *	Front end is retrieving accel engine state from us
 */
static void ecm_classifier_emesh_sync_from_v6(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_create *ecrc)
{
	/*
	 * Same handling as the IPv4 path: push SP-rule latency parameters to
	 * the WLAN driver for the newly accelerated connection.
	 */
	ecm_classifier_emesh_update_wlan_latency_params_on_conn_accel(aci, ecrc);
}
678
679/*
680 * ecm_classifier_emesh_type_get()
681 * Get type of classifier this is
682 */
683static ecm_classifier_type_t ecm_classifier_emesh_type_get(struct ecm_classifier_instance *ci)
684{
685 struct ecm_classifier_emesh_instance *cemi;
686 cemi = (struct ecm_classifier_emesh_instance *)ci;
687
688 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);
689 return ECM_CLASSIFIER_TYPE_EMESH;
690}
691
692/*
693 * ecm_classifier_emesh_last_process_response_get()
694 * Get result code returned by the last process call
695 */
696static void ecm_classifier_emesh_last_process_response_get(struct ecm_classifier_instance *ci,
697 struct ecm_classifier_process_response *process_response)
698{
699 struct ecm_classifier_emesh_instance *cemi;
700
701 cemi = (struct ecm_classifier_emesh_instance *)ci;
702 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);
703
704 spin_lock_bh(&ecm_classifier_emesh_lock);
705 *process_response = cemi->process_response;
706 spin_unlock_bh(&ecm_classifier_emesh_lock);
707}
708
709/*
710 * ecm_classifier_emesh_reclassify_allowed()
711 * Indicate if reclassify is allowed
712 */
713static bool ecm_classifier_emesh_reclassify_allowed(struct ecm_classifier_instance *ci)
714{
715 struct ecm_classifier_emesh_instance *cemi;
716 cemi = (struct ecm_classifier_emesh_instance *)ci;
717 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);
718
719 return true;
720}
721
722/*
723 * ecm_classifier_emesh_reclassify()
724 * Reclassify
725 */
726static void ecm_classifier_emesh_reclassify(struct ecm_classifier_instance *ci)
727{
728 struct ecm_classifier_emesh_instance *cemi;
729 cemi = (struct ecm_classifier_emesh_instance *)ci;
730 DEBUG_CHECK_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed\n", cemi);
731
732 /*
733 * Revert back to MAYBE relevant - we will evaluate when we get the next process() call.
734 */
735 spin_lock_bh(&ecm_classifier_emesh_lock);
736 cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_MAYBE;
737 spin_unlock_bh(&ecm_classifier_emesh_lock);
738}
739
#ifdef ECM_STATE_OUTPUT_ENABLE
/*
 * ecm_classifier_emesh_state_get()
 *	Emit this classifier's state under an "emesh" prefix.
 */
static int ecm_classifier_emesh_state_get(struct ecm_classifier_instance *ci, struct ecm_state_file_instance *sfi)
{
	struct ecm_classifier_emesh_instance *inst = (struct ecm_classifier_emesh_instance *)ci;
	struct ecm_classifier_process_response last_response;
	int rc;

	DEBUG_CHECK_MAGIC(inst, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC, "%px: magic failed", inst);

	rc = ecm_state_prefix_add(sfi, "emesh");
	if (rc) {
		return rc;
	}

	/*
	 * Snapshot the last process response under the lock.
	 */
	spin_lock_bh(&ecm_classifier_emesh_lock);
	last_response = inst->process_response;
	spin_unlock_bh(&ecm_classifier_emesh_lock);

	/*
	 * Output our last process response
	 */
	rc = ecm_classifier_process_response_state_get(sfi, &last_response);
	if (rc) {
		return rc;
	}

	return ecm_state_prefix_remove(sfi);
}
#endif
772
773/*
774 * ecm_classifier_emesh_instance_alloc()
775 * Allocate an instance of the EMESH classifier
776 */
777struct ecm_classifier_emesh_instance *ecm_classifier_emesh_instance_alloc(struct ecm_db_connection_instance *ci)
778{
779 struct ecm_classifier_emesh_instance *cemi;
780
781 /*
782 * Allocate the instance
783 */
784 cemi = (struct ecm_classifier_emesh_instance *)kzalloc(sizeof(struct ecm_classifier_emesh_instance), GFP_ATOMIC | __GFP_NOWARN);
785 if (!cemi) {
786 DEBUG_WARN("Failed to allocate EMESH instance\n");
787 return NULL;
788 }
789
790 DEBUG_SET_MAGIC(cemi, ECM_CLASSIFIER_EMESH_INSTANCE_MAGIC);
791 cemi->refs = 1;
792 cemi->base.process = ecm_classifier_emesh_process;
793 cemi->base.sync_from_v4 = ecm_classifier_emesh_sync_from_v4;
794 cemi->base.sync_to_v4 = ecm_classifier_emesh_sync_to_v4;
795 cemi->base.sync_from_v6 = ecm_classifier_emesh_sync_from_v6;
796 cemi->base.sync_to_v6 = ecm_classifier_emesh_sync_to_v6;
797 cemi->base.type_get = ecm_classifier_emesh_type_get;
798 cemi->base.last_process_response_get = ecm_classifier_emesh_last_process_response_get;
799 cemi->base.reclassify_allowed = ecm_classifier_emesh_reclassify_allowed;
800 cemi->base.reclassify = ecm_classifier_emesh_reclassify;
801#ifdef ECM_STATE_OUTPUT_ENABLE
802 cemi->base.state_get = ecm_classifier_emesh_state_get;
803#endif
804 cemi->base.ref = ecm_classifier_emesh_ref;
805 cemi->base.deref = ecm_classifier_emesh_deref;
806 cemi->ci_serial = ecm_db_connection_serial_get(ci);
807 cemi->process_response.process_actions = 0;
808 cemi->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_MAYBE;
809
810 spin_lock_bh(&ecm_classifier_emesh_lock);
811
812 /*
813 * Final check if we are pending termination
814 */
815 if (ecm_classifier_emesh_terminate_pending) {
816 spin_unlock_bh(&ecm_classifier_emesh_lock);
817 DEBUG_INFO("%px: Terminating\n", ci);
818 kfree(cemi);
819 return NULL;
820 }
821
822 /*
823 * Link the new instance into our list at the head
824 */
825 cemi->next = ecm_classifier_emesh_instances;
826 if (ecm_classifier_emesh_instances) {
827 ecm_classifier_emesh_instances->prev = cemi;
828 }
829 ecm_classifier_emesh_instances = cemi;
830
831 /*
832 * Increment stats
833 */
834 ecm_classifier_emesh_count++;
835 DEBUG_ASSERT(ecm_classifier_emesh_count > 0, "%px: ecm_classifier_emesh_count wrap\n", cemi);
836 spin_unlock_bh(&ecm_classifier_emesh_lock);
837
838 DEBUG_INFO("EMESH instance alloc: %px\n", cemi);
839 return cemi;
840}
841EXPORT_SYMBOL(ecm_classifier_emesh_instance_alloc);
842
843
844/*
845 * ecm_classifier_emesh_rule_update_cb()
846 * Callback for service prioritization notification update.
847 */
848static void ecm_classifier_emesh_rule_update_cb(uint8_t add_rm_md,
849 uint32_t valid_flag, struct sp_rule *r)
850{
851 ip_addr_t ip_addr;
852 struct in6_addr ipv6addr = IN6ADDR_ANY_INIT;
853 /*
854 * Return if E-Mesh functionality is not enabled.
855 */
856 if (!ecm_classifier_emesh_enabled) {
857 return;
858 }
859
860 DEBUG_TRACE("SP rule update notification received\n");
861 /*
862 * Order of priority of rule fields to match and flush connections:
863 * Port ---> IP address ---> Mac Address ---> Protocol
864 * Flush connections for both directions as ECM creates reverse
865 * direction rule as well
866 */
867 if (valid_flag & SP_RULE_FLAG_MATCH_SRC_PORT) {
868 ecm_db_connection_defunct_by_port(r->inner.src_port, ECM_DB_OBJ_DIR_FROM);
869 ecm_db_connection_defunct_by_port(r->inner.src_port, ECM_DB_OBJ_DIR_TO);
870 return;
871 }
872
873 if (valid_flag & SP_RULE_FLAG_MATCH_DST_PORT) {
874 ecm_db_connection_defunct_by_port(r->inner.dst_port, ECM_DB_OBJ_DIR_FROM);
875 ecm_db_connection_defunct_by_port(r->inner.dst_port, ECM_DB_OBJ_DIR_TO);
876 return;
877 }
878
879 if (valid_flag & SP_RULE_FLAG_MATCH_SRC_IPV4) {
880 ECM_NIN4_ADDR_TO_IP_ADDR(ip_addr, r->inner.src_ipv4_addr);
881 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_FROM);
882 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_TO);
883 return;
884 }
885
886 if (valid_flag & SP_RULE_FLAG_MATCH_DST_IPV4) {
887 ECM_NIN4_ADDR_TO_IP_ADDR(ip_addr, r->inner.dst_ipv4_addr);
888 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_FROM);
889 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_TO);
890 return;
891 }
892
893 if (valid_flag & SP_RULE_FLAG_MATCH_SRC_IPV6) {
894 memcpy(ipv6addr.s6_addr32, r->inner.src_ipv6_addr, 4);
895 ECM_NIN6_ADDR_TO_IP_ADDR(ip_addr, ipv6addr);
896 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_FROM);
897 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_TO);
898 return;
899 }
900
901 if (valid_flag & SP_RULE_FLAG_MATCH_DST_IPV6) {
902 memcpy(ipv6addr.s6_addr32, r->inner.dst_ipv6_addr, 4);
903 ECM_NIN6_ADDR_TO_IP_ADDR(ip_addr, ipv6addr);
904 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_FROM);
905 ecm_db_host_connections_defunct_by_dir(ip_addr, ECM_DB_OBJ_DIR_TO);
906 return;
907 }
908
909 if (valid_flag & SP_RULE_FLAG_MATCH_SOURCE_MAC) {
910 ecm_interface_node_connections_defunct((uint8_t *)r->inner.sa, ECM_DB_IP_VERSION_IGNORE);
911 return;
912 }
913
914 if (valid_flag & SP_RULE_FLAG_MATCH_DST_MAC) {
915 ecm_interface_node_connections_defunct((uint8_t *)r->inner.da, ECM_DB_IP_VERSION_IGNORE);
916 return;
917 }
918
919 if (valid_flag & SP_RULE_FLAG_MATCH_PROTOCOL) {
920 ecm_db_connection_defunct_by_protocol(r->inner.protocol_number);
921 return;
922 }
923
924 /*
925 * Destroy all the connections that are currently assigned to Emesh classifier
926 * The usage of the incoming parameters in this service prioritization
927 * callback will be done in future to perform more refined flush of
928 * connections.
929 */
930 ecm_db_connection_make_defunct_by_assignment_type(ECM_CLASSIFIER_TYPE_EMESH);
931}
932
933/*
934 * ecm_classifier_emesh_latency_config_callback_register()
935 */
936int ecm_classifier_emesh_latency_config_callback_register(struct ecm_classifier_emesh_callbacks *emesh_cb)
937{
938 spin_lock_bh(&ecm_classifier_emesh_lock);
939 if (ecm_emesh.update_peer_mesh_latency_params) {
940 spin_unlock_bh(&ecm_classifier_emesh_lock);
941 DEBUG_ERROR("EMESH latency config callbacks are registered\n");
942 return -1;
943 }
944
945 ecm_emesh.update_peer_mesh_latency_params = emesh_cb->update_peer_mesh_latency_params;
946 spin_unlock_bh(&ecm_classifier_emesh_lock);
947 return 0;
948}
949EXPORT_SYMBOL(ecm_classifier_emesh_latency_config_callback_register);
950
951/*
952 * ecm_classifier_emesh_latency_config_callback_unregister()
953 */
954void ecm_classifier_emesh_latency_config_callback_unregister(void)
955{
956 spin_lock_bh(&ecm_classifier_emesh_lock);
957 ecm_emesh.update_peer_mesh_latency_params = NULL;
958 spin_unlock_bh(&ecm_classifier_emesh_lock);
959}
960EXPORT_SYMBOL(ecm_classifier_emesh_latency_config_callback_unregister);
961
962/*
963 * ecm_classifier_emesh_init()
964 */
965int ecm_classifier_emesh_init(struct dentry *dentry)
966{
967 int ret;
968
969 DEBUG_INFO("EMESH classifier Module init\n");
970
971 ecm_classifier_emesh_dentry = debugfs_create_dir("ecm_classifier_emesh", dentry);
972 if (!ecm_classifier_emesh_dentry) {
973 DEBUG_ERROR("Failed to create ecm emesh directory in debugfs\n");
974 return -1;
975 }
976
977 if (!debugfs_create_u32("enabled", S_IRUGO | S_IWUSR, ecm_classifier_emesh_dentry,
978 (u32 *)&ecm_classifier_emesh_enabled)) {
979 DEBUG_ERROR("Failed to create ecm emesh classifier enabled file in debugfs\n");
980 debugfs_remove_recursive(ecm_classifier_emesh_dentry);
981 return -1;
982 }
983
984 if (!debugfs_create_u32("latency_config_enabled", S_IRUGO | S_IWUSR, ecm_classifier_emesh_dentry,
985 (u32 *)&ecm_classifier_emesh_latency_config_enabled)) {
986 DEBUG_ERROR("Failed to create ecm emesh classifier latency config enabled file in debugfs\n");
987 debugfs_remove_recursive(ecm_classifier_emesh_dentry);
988 return -1;
989 }
990
991 /*
992 * Register for service prioritization notification update.
993 */
994 ret = sp_mapdb_rule_update_register_notify(ecm_classifier_emesh_rule_update_cb);
995 if (ret) {
996 DEBUG_ERROR("SP update registration failed: %d\n", ret);
997 debugfs_remove_recursive(ecm_classifier_emesh_dentry);
998 return -1;
999 }
1000
1001 return 0;
1002}
1003EXPORT_SYMBOL(ecm_classifier_emesh_init);
1004
1005/*
1006 * ecm_classifier_emesh_exit()
1007 */
1008void ecm_classifier_emesh_exit(void)
1009{
1010 DEBUG_INFO("Emesh classifier Module exit\n");
1011
1012 spin_lock_bh(&ecm_classifier_emesh_lock);
1013 ecm_classifier_emesh_terminate_pending = true;
1014 spin_unlock_bh(&ecm_classifier_emesh_lock);
1015
1016 /*
1017 * Remove the debugfs files recursively.
1018 */
1019 if (ecm_classifier_emesh_dentry) {
1020 debugfs_remove_recursive(ecm_classifier_emesh_dentry);
1021 }
1022
1023 /*
1024 * De-register service prioritization notification update.
1025 */
1026 sp_mapdb_rule_update_unregister_notify();
1027}
1028EXPORT_SYMBOL(ecm_classifier_emesh_exit);