author     nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>   2013-09-11 14:30:15 +0000
committer  nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>   2013-09-11 14:30:15 +0000
commit     5d385cd6b50e3b570aa6a02f8bb29ad94bd6a750 (patch)
tree       81010cf0bbc64a9e06b1036c5812a6bbb6edc22e /target/linux
parent     eb0f42bb0503a4b7ebb4181b376e4d8f0732a209 (diff)
kernel: align the skb padding to power of two
The skb usually starts with padding which allows the protocols in the network stack to add their headers in front of the payload. The skb may have to be reallocated when the preallocated padding is not large enough. This can happen, for example, in the function __skb_cow, which checks the requested extra headroom and allocates a bigger buffer when the requested headroom exceeds the available one. The extra buffer is again aligned to a multiple of the NET_SKB_PAD of the target architecture.
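For illustration, the rounding step looks roughly like the following simplified paraphrase of __skb_cow() from include/linux/skbuff.h (a sketch, not the verbatim kernel code; the handling of cloned skbs is omitted and the helper name skb_cow_sketch is made up for this example):

/*
 * Sketch of __skb_cow(): the missing headroom delta is rounded up to a
 * multiple of NET_SKB_PAD before the head is reallocated.
 */
#include <linux/kernel.h>   /* ALIGN() */
#include <linux/skbuff.h>   /* struct sk_buff, skb_headroom(), pskb_expand_head() */

static inline int skb_cow_sketch(struct sk_buff *skb, unsigned int headroom)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

ALIGN(x, a) expands to (((x) + (a) - 1) & ~((a) - 1)), a round-up that is only correct when a is a power of two.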
The macro used to round up to a multiple of NET_SKB_PAD is written in a way that only works for power-of-two alignment parameters. The currently used value of 48 bytes cannot be written as 2 ** n; it is 2 ** 4 + 2 ** 5. The extra buffer is therefore not always a multiple of 48 but can be 16, 64, 80, 128, 144 and so on. The generated values are also not monotonic: 48 requested bytes are mapped to 80 allocated bytes, while 49 requested bytes are mapped to only 64 allocated bytes.
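The effect is easy to reproduce in user space. The following small demo (an illustration only; it copies the kernel's power-of-two ALIGN() macro instead of including kernel headers) prints the allocated padding for 47, 48 and 49 requested bytes with alignment values 48 and 64:

#include <stdio.h>

/* Same round-up macro the kernel uses; only valid when 'a' is a power of two. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int x;

	for (x = 47; x <= 49; x++)
		printf("requested %2u -> ALIGN(x, 48) = %3u, ALIGN(x, 64) = %3u\n",
		       x, ALIGN(x, 48), ALIGN(x, 64));
	return 0;
}

With 48 as the alignment the results are 80, 80 and 64 (neither multiples of 48 nor monotonic); with 64 all three map to 64, which is why the patches below raise NET_SKB_PAD from 48 to 64.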
These unexpectedly small values result in more reallocations of the buffer. This was noticed prominently during tests between two QCA9558 720 MHz devices which were connected to PCs via ethernet and had an HT40 802.11n 3x3 link between each other. The PC-to-PC throughput during iperf TCP runs increased reliably from 186 Mibit/s to 214 Mibit/s in one direction and from 195 Mibit/s to 220 Mibit/s in the other direction. This is a performance increase of ~14% just by reducing the number of reallocations.
Signed-off-by: Sven Eckelmann <sven@open-mesh.com>
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@37948 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux')
5 files changed, 5 insertions, 5 deletions
diff --git a/target/linux/generic/patches-3.10/655-increase_skb_pad.patch b/target/linux/generic/patches-3.10/655-increase_skb_pad.patch
index b1a0fb71f8..6e10a1ace4 100644
--- a/target/linux/generic/patches-3.10/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.10/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
-+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
 
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.3/655-increase_skb_pad.patch b/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
index 5d14daadfa..c4c8c3416e 100644
--- a/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
-+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
 
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.6/655-increase_skb_pad.patch b/target/linux/generic/patches-3.6/655-increase_skb_pad.patch
index c67a6a4797..0f856a6cc0 100644
--- a/target/linux/generic/patches-3.6/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.6/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
-+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
 
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.8/655-increase_skb_pad.patch b/target/linux/generic/patches-3.8/655-increase_skb_pad.patch
index 6150d5237d..9f0b3dc931 100644
--- a/target/linux/generic/patches-3.8/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.8/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
-+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
 
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.9/655-increase_skb_pad.patch b/target/linux/generic/patches-3.9/655-increase_skb_pad.patch
index 6d0d3df841..9f669fa8bd 100644
--- a/target/linux/generic/patches-3.9/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.9/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
-+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
 
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);