├── .github
│   └── workflows
│       └── build_kext.yml
├── LICENSE
├── README.md
├── SimpleGBE.xcodeproj
│   ├── project.pbxproj
│   ├── project.xcworkspace
│   │   ├── contents.xcworkspacedata
│   │   ├── xcshareddata
│   │   │   └── IDEWorkspaceChecks.plist
│   │   └── xcuserdata
│   │       ├── laobamac.xcuserdatad
│   │       │   ├── IDEFindNavigatorScopes.plist
│   │       │   └── UserInterfaceState.xcuserstate
│   │       └── shaneee.xcuserdatad
│   │           └── UserInterfaceState.xcuserstate
│   └── xcuserdata
│       ├── don.xcuserdatad
│       │   └── xcschemes
│       │       └── xcschememanagement.plist
│       ├── laobamac.xcuserdatad
│       │   └── xcschemes
│       │       └── xcschememanagement.plist
│       └── shaneee.xcuserdatad
│           └── xcschemes
│               └── xcschememanagement.plist
└── SimpleGBE
    ├── Info.plist
    ├── SimpleGBE.cpp
    ├── SimpleGBE.h
    ├── e1000_82575.c
    ├── e1000_82575.h
    ├── e1000_api.c
    ├── e1000_api.h
    ├── e1000_base.c
    ├── e1000_base.h
    ├── e1000_defines.h
    ├── e1000_hw.h
    ├── e1000_i210.c
    ├── e1000_i210.h
    ├── e1000_mac.c
    ├── e1000_mac.h
    ├── e1000_manage.c
    ├── e1000_manage.h
    ├── e1000_mbx.c
    ├── e1000_mbx.h
    ├── e1000_nvm.c
    ├── e1000_nvm.h
    ├── e1000_osdep.h
    ├── e1000_phy.c
    ├── e1000_phy.h
    ├── e1000_regs.h
    ├── igb.h
    ├── igb_debugfs.c
    ├── igb_ethtool.c
    ├── igb_hwmon.c
    ├── igb_main.c
    ├── igb_param.c
    ├── igb_procfs.c
    ├── igb_ptp.c
    ├── igb_regtest.h
    ├── igb_vmdq.c
    ├── igb_vmdq.h
    ├── kcompat.c
    └── kcompat.h
/.github/workflows/build_kext.yml:
--------------------------------------------------------------------------------
# CI: build the SimpleGBE kext in Debug and Release on macOS, publish the
# zipped products as workflow artifacts, and (except on pull requests)
# upload them to a GitHub release — the "nightly" prerelease for ordinary
# pushes, or the triggering release when the event is a published release.
name: CI

on:
  push:
  pull_request:
  workflow_dispatch:
  release:
    types: [published]

jobs:
  build:
    name: Build
    runs-on: macos-latest
    permissions:
      contents: write  # required by the release-upload step below
    env:
      JOB_TYPE: BUILD
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: "recursive"
      # Start from a clean build directory so stale products are not zipped.
      - run: rm -rf build/*
      - run: xcodebuild -configuration Debug -arch x86_64 build
      - run: xcodebuild -configuration Release -arch x86_64 build
      - run: zip -q -r build/Debug/SimpleGBE-DEBUG-DEV.zip build/Debug/*
      - run: zip -q -r build/Release/SimpleGBE-RELEASE-DEV.zip build/Release/*
      - name: Upload to Artifacts
        uses: actions/upload-artifact@v4
        with:
          name: Artifacts
          path: build/*/*.zip
      - name: Upload build
        uses: svenstaro/upload-release-action@2.9.0
        if: github.event_name != 'pull_request'
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: build/*/*.zip
          tag: ${{ github.event_name == 'release' && github.ref || 'nightly' }}
          body: ${{ github.event_name != 'release' && github.event.head_commit.message || '' }}
          file_glob: true
          prerelease: ${{ github.event_name != 'release' }}
          overwrite: true
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 laobamac
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SimpleGBE
2 | A macOS kext for Intel Gbit Ethernet.
3 |
4 | macOS下英特尔千兆网卡驱动
5 |
6 | 移植自Ubuntu下的网卡驱动:EthernetIgb-5.17.4
7 |
8 | ## 支持列表
9 | * I210 千兆卡
10 | * I211 千兆卡
11 | * 82575/82576/82580 千兆卡
12 | * I350/I350T4 千兆卡
13 |
14 | ## 支持情况
15 | * IPV4、IPV6自动获取(DHCP)
16 | * TCP校验和
17 | * TSO和TSO6
18 | * 网线热插拔&插拔后重分配
19 | * 支持Sequoia
20 | * ~~强制速率受`igb_ethtool_set_link_ksettings()`影响无法生效~~
21 | * 自v1.0.1开始,支持自定义速率和特大MTU 9000
22 |
23 | ## 鸣谢
24 | * Intel为Linux开发的[IGB驱动](https://www.intel.com/content/www/us/en/download/14098/intel-network-adapter-driver-for-82575-6-82580-i350-and-i210-211-based-gigabit-network-connections-for-linux.html)
25 |
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 |
2 |
4 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | IDEDidComputeMac32BitWarning
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/project.xcworkspace/xcuserdata/laobamac.xcuserdatad/IDEFindNavigatorScopes.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/project.xcworkspace/xcuserdata/laobamac.xcuserdatad/UserInterfaceState.xcuserstate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleHac/SimpleGBE/06d2b2066db5925c4e99ace88c86464f988098b5/SimpleGBE.xcodeproj/project.xcworkspace/xcuserdata/laobamac.xcuserdatad/UserInterfaceState.xcuserstate
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/project.xcworkspace/xcuserdata/shaneee.xcuserdatad/UserInterfaceState.xcuserstate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleHac/SimpleGBE/06d2b2066db5925c4e99ace88c86464f988098b5/SimpleGBE.xcodeproj/project.xcworkspace/xcuserdata/shaneee.xcuserdatad/UserInterfaceState.xcuserstate
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/xcuserdata/don.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | SchemeUserState
6 |
7 | AppleIGB.xcscheme_^#shared#^_
8 |
9 | orderHint
10 | 0
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/xcuserdata/laobamac.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | SchemeUserState
6 |
7 | AppleIGB.xcscheme_^#shared#^_
8 |
9 | orderHint
10 | 0
11 |
12 | SimpleGBE.xcscheme_^#shared#^_
13 |
14 | orderHint
15 | 0
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/SimpleGBE.xcodeproj/xcuserdata/shaneee.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | SchemeUserState
6 |
7 | AppleIGB.xcscheme_^#shared#^_
8 |
9 | orderHint
10 | 0
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/SimpleGBE/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | CFBundleDevelopmentRegion
6 | English
7 | CFBundleExecutable
8 | ${EXECUTABLE_NAME}
9 | CFBundleIconFile
10 |
11 | CFBundleIdentifier
12 | $(PRODUCT_BUNDLE_IDENTIFIER)
13 | CFBundleInfoDictionaryVersion
14 | 6.0
15 | CFBundleName
16 | ${PRODUCT_NAME}
17 | CFBundlePackageType
18 | KEXT
19 | CFBundleShortVersionString
20 | $(MARKETING_VERSION)
21 | CFBundleSignature
22 | ????
23 | CFBundleVersion
24 | $(CURRENT_PROJECT_VERSION)
25 | IOKitPersonalities
26 |
27 | 82575
28 |
29 | CFBundleIdentifier
30 | com.laobamac.${PRODUCT_NAME}
31 | IOClass
32 | ${PRODUCT_NAME}
33 | IOPCIMatch
34 | 0x10a78086 0x10a98086 0x10d68086
35 | IOProviderClass
36 | IOPCIDevice
37 | NETIF_F_TSO
38 |
39 |
40 | 82576
41 |
42 | CFBundleIdentifier
43 | com.laobamac.${PRODUCT_NAME}
44 | IOClass
45 | ${PRODUCT_NAME}
46 | IOPCIMatch
47 | 0x10c98086 0x10e68086 0x10e78086 0x10e88086 0x15268086 0x150a8086 0x15188086 0x150d8086
48 | IOProviderClass
49 | IOPCIDevice
50 | NETIF_F_TSO
51 |
52 |
53 | 82580
54 |
55 | CFBundleIdentifier
56 | com.laobamac.${PRODUCT_NAME}
57 | IOClass
58 | ${PRODUCT_NAME}
59 | IOPCIMatch
60 | 0x150e8086 0x150f8086 0x15108086 0x15118086 0x15168086 0x15278086
61 | IOProviderClass
62 | IOPCIDevice
63 | NETIF_F_TSO
64 |
65 |
66 | dh89xxcc
67 |
68 | CFBundleIdentifier
69 | com.laobamac.${PRODUCT_NAME}
70 | IOClass
71 | ${PRODUCT_NAME}
72 | IOPCIMatch
73 | 0x04388086 0x034a8086 0x043c8086 0x04408086
74 | IOProviderClass
75 | IOPCIDevice
76 | NETIF_F_TSO
77 |
78 |
79 | i21x
80 |
81 | CFBundleIdentifier
82 | com.laobamac.${PRODUCT_NAME}
83 | IOClass
84 | ${PRODUCT_NAME}
85 | IOPCIMatch
86 | 0x15338086 0x15348086 0x15358086 0x15368086 0x15378086 0x15388086 0x15398086 0x157B8086 0x157C8086
87 | IOProviderClass
88 | IOPCIDevice
89 | NETIF_F_TSO
90 |
91 |
92 | i350
93 |
94 | CFBundleIdentifier
95 | com.laobamac.${PRODUCT_NAME}
96 | IOClass
97 | ${PRODUCT_NAME}
98 | IOPCIMatch
99 | 0x15218086 0x15228086 0x15238086 0x15248086 0x15468086
100 | IOProviderClass
101 | IOPCIDevice
102 | NETIF_F_TSO
103 |
104 |
105 | i354
106 |
107 | CFBundleIdentifier
108 | com.laobamac.${PRODUCT_NAME}
109 | IOClass
110 | ${PRODUCT_NAME}
111 | IOPCIMatch
112 | 0x1F408086 0x1F418086 0x1F458086
113 | IOProviderClass
114 | IOPCIDevice
115 | NETIF_F_TSO
116 |
117 |
118 |
119 | OSBundleLibraries
120 |
121 | com.apple.iokit.IONetworkingFamily
122 | 1.5.0
123 | com.apple.iokit.IOPCIFamily
124 | 1.7
125 | com.apple.kpi.bsd
126 | 8.10.0
127 | com.apple.kpi.iokit
128 | 8.10.0
129 | com.apple.kpi.libkern
130 | 8.10.0
131 | com.apple.kpi.mach
132 | 8.10.0
133 |
134 | OSBundleRequired
135 | Root
136 |
137 |
138 |
--------------------------------------------------------------------------------
/SimpleGBE/SimpleGBE.h:
--------------------------------------------------------------------------------
1 | /* Copyright 2024 王孝慈(laobamac)*/
2 | /* MIT License */
3 |
4 | #ifndef __APPLE_IGB_H__
5 | #define __APPLE_IGB_H__
6 |
7 | #define MBit 1000000
8 |
// Driver power states reported to the power-management policy maker.
9 | enum {
10 | eePowerStateOff = 0,
11 | eePowerStateOn,
12 | eePowerStateCount
13 | };
14 |
// Indices into mediumTable: each entry is one published speed/duplex
// combination, optionally with flow control (FC) and/or EEE.
15 | enum
16 | {
17 | MEDIUM_INDEX_AUTO = 0,
18 | MEDIUM_INDEX_10HD,
19 | MEDIUM_INDEX_10FD,
20 | MEDIUM_INDEX_100HD,
21 | MEDIUM_INDEX_100FD,
22 | MEDIUM_INDEX_100FDFC,
23 | MEDIUM_INDEX_1000FD,
24 | MEDIUM_INDEX_1000FDFC,
25 | MEDIUM_INDEX_1000FDEEE,
26 | MEDIUM_INDEX_1000FDFCEEE,
27 | MEDIUM_INDEX_100FDEEE,
28 | MEDIUM_INDEX_100FDFCEEE,
29 | MEDIUM_INDEX_COUNT
30 | };
31 |
32 |
// Link speeds in bits per second (MBit is defined above as 1000000).
33 | enum {
34 | kSpeed1000MBit = 1000*MBit,
35 | kSpeed100MBit = 100*MBit,
36 | kSpeed10MBit = 10*MBit,
37 | };
38 |
// Flow-control configuration: bit 0 = Rx pause, bit 1 = Tx pause.
39 | enum {
40 | kFlowControlTypeNone = 0,
41 | kFlowControlTypeRx = 1,
42 | kFlowControlTypeTx = 2,
43 | kFlowControlTypeRxTx = 3,
44 | kFlowControlTypeCount
45 | };
46 |
// Energy-Efficient Ethernet (EEE) enable/disable selector.
47 | enum {
48 | kEEETypeNo = 0,
49 | kEEETypeYes = 1,
50 | kEEETypeCount
51 | };
52 |
53 | #define super IOEthernetController
54 |
// SimpleGBE — IOEthernetController subclass driving Intel gigabit NICs
// (82575/82576/82580, I210/I211, I350/I354 per Info.plist) using the
// ported Linux igb driver sources in this directory. This is the
// declaration only; method bodies live in SimpleGBE.cpp (not visible here).
55 | class SimpleGBE: public super
56 | {
57 |
58 | OSDeclareDefaultStructors(SimpleGBE);
59 |
60 | public:
61 | // --------------------------------------------------
62 | // IOService (or its superclass) methods.
63 | // --------------------------------------------------
64 |
65 | virtual bool start(IOService * provider);
66 | virtual void stop(IOService * provider);
67 | virtual bool init(OSDictionary *properties);
68 | virtual void free();
69 |
70 | // --------------------------------------------------
71 | // Power Management Support
72 | // --------------------------------------------------
73 | virtual IOReturn registerWithPolicyMaker(IOService* policyMaker);
74 | virtual IOReturn setPowerState( unsigned long powerStateOrdinal, IOService *policyMaker );
75 | virtual void systemWillShutdown(IOOptionBits specifier);
76 |
77 | // --------------------------------------------------
78 | // IONetworkController methods.
79 | // --------------------------------------------------
80 |
81 | virtual IOReturn enable(IONetworkInterface * netif);
82 | virtual IOReturn disable(IONetworkInterface * netif);
83 |
84 | virtual UInt32 outputPacket(mbuf_t m, void * param);
85 |
86 | virtual void getPacketBufferConstraints(IOPacketBufferConstraints * constraints) const;
87 |
88 | virtual IOOutputQueue * createOutputQueue();
89 |
90 | virtual const OSString * newVendorString() const;
91 | virtual const OSString * newModelString() const;
92 |
93 | virtual IOReturn selectMedium(const IONetworkMedium * medium);
94 | virtual bool configureInterface(IONetworkInterface * interface);
95 |
96 | virtual bool createWorkLoop();
97 | virtual IOWorkLoop * getWorkLoop() const;
98 |
99 | //-----------------------------------------------------------------------
100 | // Methods inherited from IOEthernetController.
101 | //-----------------------------------------------------------------------
102 |
103 | virtual IOReturn getHardwareAddress(IOEthernetAddress * addr);
104 | virtual IOReturn setHardwareAddress(const IOEthernetAddress * addr);
105 | virtual IOReturn setPromiscuousMode(bool active);
106 | virtual IOReturn setMulticastMode(bool active);
107 | virtual IOReturn setMulticastList(IOEthernetAddress * addrs, UInt32 count);
108 | virtual IOReturn getChecksumSupport(UInt32 *checksumMask, UInt32 checksumFamily, bool isOutput);
109 | virtual IOReturn setMaxPacketSize (UInt32 maxSize);
110 | virtual IOReturn getMaxPacketSize (UInt32 *maxSize) const;
111 | virtual IOReturn getMinPacketSize (UInt32 *minSize) const;
112 | virtual IOReturn setWakeOnMagicPacket(bool active);
113 | virtual IOReturn getPacketFilters(const OSSymbol * group, UInt32 * filters) const;
114 | virtual UInt32 getFeatures() const;
115 |
// IOKit plumbing and cached driver state (event sources, mapped BAR,
// interface/statistics objects, link and feature flags).
116 | private:
117 | IOWorkLoop* workLoop;
118 | IOPCIDevice* pdev;
119 | OSDictionary * mediumDict;
120 | IONetworkMedium * mediumTable[MEDIUM_INDEX_COUNT];
121 | IOOutputQueue * transmitQueue;
122 |
123 | IOInterruptEventSource * interruptSource;
124 | IOTimerEventSource * watchdogSource;
125 | IOTimerEventSource * resetSource;
126 | IOTimerEventSource * dmaErrSource;
127 |
128 | IOEthernetInterface * netif;
129 | IONetworkStats * netStats;
130 | IOEthernetStats * etherStats;
131 |
132 | IOMemoryMap * csrPCIAddress;
133 |
134 | IOMbufNaturalMemoryCursor * txMbufCursor;
135 |
136 | bool enabledForNetif;
137 | bool bSuspended;
138 | bool useTSO;
139 |
140 | bool linkUp;
141 | bool stalled;
142 |
143 | UInt16 eeeMode;
144 |
145 | UInt32 iff_flags;
146 | UInt32 _features;
147 | UInt32 preLinkStatus;
148 | UInt32 powerState;
149 | UInt32 _mtu;
150 | SInt32 txNumFreeDesc;
151 |
152 | UInt32 chip_idx;
153 |
// Embedded Linux-style adapter state shared with the ported igb C code.
154 | struct igb_adapter priv_adapter;
// Small accessors used by the ported igb code paths (defined inline here).
155 | public:
156 | void startTxQueue();
157 | void stopTxQueue();
158 | UInt32 mtu() { return _mtu; }
159 | UInt32 flags(){ return iff_flags;}
160 | UInt32 features() { return _features; }
161 | igb_adapter* adapter(){ return &priv_adapter; }
162 | IONetworkStats* getNetStats(){ return netStats; }
163 | IOEthernetStats* getEtherStats() { return etherStats; }
164 | dma_addr_t mapSingle( mbuf_t );
165 | void receive(mbuf_t skb );
166 | void setVid(mbuf_t skb, UInt16 vid);
167 | IOMbufNaturalMemoryCursor * txCursor(){ return txMbufCursor; }
168 | void rxChecksumOK( mbuf_t, UInt32 flag );
169 | bool running(){return enabledForNetif;}
170 | bool queueStopped(){return txMbufCursor == NULL || stalled;}
171 | bool carrier();
172 | void setCarrier(bool);
173 |
174 | void setTimers(bool enable);
// Internal helpers: interrupt/watchdog handling, link management, option
// parsing, and probe/remove (bodies in SimpleGBE.cpp).
175 | private:
176 | void interruptOccurred(IOInterruptEventSource * src, int count);
177 |
178 | void watchdogTask();
179 | void updatePhyInfoTask();
180 |
181 | void intelRestart();
182 | bool intelCheckLink(struct igb_adapter *adapter);
183 | void setLinkUp();
184 | void setLinkDown();
185 | void checkLinkStatus();
186 |
187 |
188 | UInt16 intelSupportsEEE(struct igb_adapter *adapter);
189 | bool setupMediumDict();
190 |
191 | void intelSetupAdvForMedium(const IONetworkMedium *medium);
192 | bool addNetworkMedium(UInt32 type, UInt32 bps, UInt32 index);
193 |
194 | bool initEventSources( IOService* provider );
195 |
196 | bool igb_probe();
197 | void igb_remove();
198 |
199 | bool getBoolOption(const char *name, bool defVal);
200 | int getIntOption(const char *name, int defVal, int maxVal, int minVal );
201 |
// Static trampolines registered with the event sources; `target` is the
// SimpleGBE instance.
202 | public:
203 | static void interruptHandler(OSObject * target,
204 | IOInterruptEventSource * src,
205 | int count );
206 |
207 |
208 |
209 | static void watchdogHandler(OSObject * target, IOTimerEventSource * src);
210 | static void resetHandler(OSObject * target, IOTimerEventSource * src);
211 |
212 | };
213 |
214 |
215 | #endif //__APPLE_IGB_H__
216 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_82575.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_82575_H_
5 | #define _E1000_82575_H_
6 |
7 | #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
8 | (ID_LED_DEF1_DEF2 << 8) | \
9 | (ID_LED_DEF1_DEF2 << 4) | \
10 | (ID_LED_OFF1_ON2))
11 | /*
12 | * Receive Address Register Count
13 | * Number of high/low register pairs in the RAR. The RAR (Receive Address
14 | * Registers) holds the directed and multicast addresses that we monitor.
15 | * These entries are also used for MAC-based filtering.
16 | */
17 | /*
18 | * For 82576, there are an additional set of RARs that begin at an offset
19 | * separate from the first set of RARs.
20 | */
21 | #define E1000_RAR_ENTRIES_82575 16
22 | #define E1000_RAR_ENTRIES_82576 24
23 | #define E1000_RAR_ENTRIES_82580 24
24 | #define E1000_RAR_ENTRIES_I350 32
25 | #define E1000_SW_SYNCH_MB 0x00000100
26 | #define E1000_STAT_DEV_RST_SET 0x00100000
27 |
/*
 * Advanced transmit data descriptor: a 64-bit buffer address plus two
 * 32-bit words, each accessible either as a raw u32 or as the bit
 * fields below (field widths in each struct sum to 32 bits).
 */
28 | struct e1000_adv_data_desc {
29 | __le64 buffer_addr; /* Address of the descriptor's data buffer */
30 | union {
31 | u32 data;
32 | struct {
33 | u32 datalen:16; /* Data buffer length */
34 | u32 rsvd:4;
35 | u32 dtyp:4; /* Descriptor type */
36 | u32 dcmd:8; /* Descriptor command */
37 | } config;
38 | } lower;
39 | union {
40 | u32 data;
41 | struct {
42 | u32 status:4; /* Descriptor status */
43 | u32 idx:4;
44 | u32 popts:6; /* Packet Options */
45 | u32 paylen:18; /* Payload length */
46 | } options;
47 | } upper;
48 | };
49 |
50 | #define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
51 | #define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
52 | #define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
53 | #define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
54 | #define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
55 | #define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
56 | #define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
57 | #define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
58 | #define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
59 | #define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
60 | #define E1000_ADV_DCMD_RS 0x8 /* Report Status */
61 | #define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
62 | #define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
63 | /* Extended Device Control */
64 | #define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
65 |
/*
 * Advanced transmit context descriptor: carries the per-packet offload
 * parameters (header lengths, VLAN tag, L4 command bits, MSS) that the
 * hardware applies to subsequent data descriptors. ip_setup fields sum
 * to 32 bits and l4_setup fields to 64 bits, overlaying the raw words.
 */
66 | struct e1000_adv_context_desc {
67 | union {
68 | u32 ip_config;
69 | struct {
70 | u32 iplen:9;
71 | u32 maclen:7;
72 | u32 vlan_tag:16;
73 | } fields;
74 | } ip_setup;
75 | u32 seq_num;
76 | union {
77 | u64 l4_config;
78 | struct {
79 | u32 mkrloc:9;
80 | u32 tucmd:11; /* see E1000_ADV_TUCMD_* bits above */
81 | u32 dtyp:4;
82 | u32 adv:8;
83 | u32 rsvd:4;
84 | u32 idx:4;
85 | u32 l4len:8;
86 | u32 mss:16;
87 | } fields;
88 | } l4_setup;
89 | };
90 |
91 | /* SRRCTL bit definitions */
92 | #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
93 | #define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
94 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
95 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
96 | #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
97 | #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
98 | #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
99 | #define E1000_SRRCTL_TIMESTAMP 0x40000000
100 | #define E1000_SRRCTL_DROP_EN 0x80000000
101 |
102 | #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
103 | #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
104 |
105 | #define E1000_TX_HEAD_WB_ENABLE 0x1
106 | #define E1000_TX_SEQNUM_WB_ENABLE 0x2
107 |
108 | #define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
109 | #define E1000_MRQC_ENABLE_VMDQ 0x00000003
110 | #define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
111 | #define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
112 | #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
113 | #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
114 | #define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
115 |
116 | #define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
117 | #define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
118 | E1000_VMRCTL_MIRROR_PORT_SHIFT)
119 | #define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
120 | #define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
121 | #define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
122 |
123 | #define E1000_EICR_TX_QUEUE ( \
124 | E1000_EICR_TX_QUEUE0 | \
125 | E1000_EICR_TX_QUEUE1 | \
126 | E1000_EICR_TX_QUEUE2 | \
127 | E1000_EICR_TX_QUEUE3)
128 |
129 | #define E1000_EICR_RX_QUEUE ( \
130 | E1000_EICR_RX_QUEUE0 | \
131 | E1000_EICR_RX_QUEUE1 | \
132 | E1000_EICR_RX_QUEUE2 | \
133 | E1000_EICR_RX_QUEUE3)
134 |
135 | #define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
136 | #define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
137 |
138 | #define EIMS_ENABLE_MASK ( \
139 | E1000_EIMS_RX_QUEUE | \
140 | E1000_EIMS_TX_QUEUE | \
141 | E1000_EIMS_TCP_TIMER | \
142 | E1000_EIMS_OTHER)
143 |
144 | /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
145 | #define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
146 | #define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
147 | #define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
148 | #define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
149 | #define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
150 | #define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
151 | #define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
152 | #define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
153 |
154 | #define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
155 | #define E1000_RXDADV_RSSTYPE_SHIFT 12
156 | #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
157 | #define E1000_RXDADV_HDRBUFLEN_SHIFT 5
158 | #define E1000_RXDADV_SPLITHEADER_EN 0x00001000
159 | #define E1000_RXDADV_SPH 0x8000
160 | #define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
161 | #define E1000_RXDADV_ERR_HBO 0x00800000
162 |
163 | /* RSS Hash results */
164 | #define E1000_RXDADV_RSSTYPE_NONE 0x00000000
165 | #define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
166 | #define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
167 | #define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
168 | #define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
169 | #define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
170 | #define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
171 | #define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
172 | #define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
173 | #define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
174 |
175 | /* RSS Packet Types as indicated in the receive descriptor */
176 | #define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0
177 | #define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00
178 | #define E1000_RXDADV_PKTTYPE_NONE 0x00000000
179 | #define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
180 | #define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
181 | #define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
182 | #define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
183 | #define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
184 | #define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
185 | #define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
186 | #define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
187 |
188 | #define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
189 | #define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
190 | #define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
191 | #define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
192 | #define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
193 | #define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
194 |
195 | /* LinkSec results */
196 | /* Security Processing bit Indication */
197 | #define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
198 | #define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
199 | #define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
200 | #define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
201 | #define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
202 |
203 | #define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
204 | #define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
205 | #define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
206 | #define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
207 | #define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
208 |
209 | #define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
210 | /* Tx Queue Arbitration Priority 0=low, 1=high */
211 | #define E1000_TXDCTL_PRIORITY 0x08000000
212 |
213 | #define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
214 |
215 | /* Direct Cache Access (DCA) definitions */
216 | #define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
217 | #define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
218 |
219 | #define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
220 | #define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
221 |
222 | #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
223 | #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
224 | #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
225 | #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
226 | #define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
227 |
228 | #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
229 | #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
230 | #define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
231 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
232 | #define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
233 |
234 | #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
235 | #define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
236 | #define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
237 | #define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
238 |
239 | /* Additional interrupt register bit definitions */
240 | #define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
241 | #define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
242 | #define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
243 |
244 | /*
245 | * ETQF filter list: one static filter per filter consumer. This is
246 | * to avoid filter collisions later. Add new filters
247 | * here!!
248 | *
249 | * Current filters:
250 | * EAPOL 802.1x (0x888e): Filter 0
251 | */
252 | #define E1000_ETQF_FILTER_EAPOL 0
253 |
254 | #define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
255 | #define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
256 | #define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
257 |
258 | #define E1000_NVM_APME_82575 0x0400
259 | #define MAX_NUM_VFS 7
260 |
261 | #define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
262 | #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
263 | #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
264 | #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
265 | #define E1000_DTXSWC_LLE_SHIFT 16
266 | #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
267 |
268 | /* Easy defines for setting default pool, would normally be left a zero */
269 | #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
270 | #define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
271 |
272 | /* Other useful VMD_CTL register defines */
273 | #define E1000_VT_CTL_IGNORE_MAC (1 << 28)
274 | #define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
275 | #define E1000_VT_CTL_VM_REPL_EN (1 << 30)
276 |
277 | /* Per VM Offload register setup */
278 | #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
279 | #define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
280 | #define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
281 | #define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
282 | #define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
283 | #define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
284 | #define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
285 | #define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
286 | #define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
287 | #define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
288 |
289 | #define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
290 | #define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */
291 | #define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
292 | #define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
293 | #define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
294 |
295 | #define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
296 | #define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
297 |
298 | #define E1000_VLVF_ARRAY_SIZE 32
299 | #define E1000_VLVF_VLANID_MASK 0x00000FFF
300 | #define E1000_VLVF_POOLSEL_SHIFT 12
301 | #define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
302 | #define E1000_VLVF_LVLAN 0x00100000
303 | #define E1000_VLVF_VLANID_ENABLE 0x80000000
304 |
305 | #define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
306 | #define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
307 |
308 | #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
309 |
310 | #define E1000_IOVCTL 0x05BBC
311 | #define E1000_IOVCTL_REUSE_VFQ 0x00000001
312 |
313 | #define E1000_RPLOLR_STRVLAN 0x40000000
314 | #define E1000_RPLOLR_STRCRC 0x80000000
315 |
316 | #define E1000_TCTL_EXT_COLD 0x000FFC00
317 | #define E1000_TCTL_EXT_COLD_SHIFT 10
318 |
319 | #define E1000_DTXCTL_8023LL 0x0004
320 | #define E1000_DTXCTL_VLAN_ADDED 0x0008
321 | #define E1000_DTXCTL_OOS_ENABLE 0x0010
322 | #define E1000_DTXCTL_MDP_EN 0x0020
323 | #define E1000_DTXCTL_SPOOF_INT 0x0040
324 |
325 | #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
326 |
327 | #define ALL_QUEUES 0xFFFF
328 |
329 | s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
330 | s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
331 |
332 | /* Rx packet buffer size defines */
333 | #define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
334 | void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
335 | void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
336 | void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
337 |
338 | void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
339 | u16 e1000_rxpbs_adjust_82580(u32 data);
340 | s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
341 | s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
342 | s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
343 | s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
344 | s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
345 | s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
346 | #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
347 | #define E1000_EMC_INTERNAL_DATA 0x00
348 | #define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
349 | #define E1000_EMC_DIODE1_DATA 0x01
350 | #define E1000_EMC_DIODE1_THERM_LIMIT 0x19
351 | #define E1000_EMC_DIODE2_DATA 0x23
352 | #define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
353 | #define E1000_EMC_DIODE3_DATA 0x2A
354 | #define E1000_EMC_DIODE3_THERM_LIMIT 0x30
355 |
356 | s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
357 | s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
358 |
359 | /* I2C SDA and SCL timing parameters for standard mode */
360 | #define E1000_I2C_T_HD_STA 4
361 | #define E1000_I2C_T_LOW 5
362 | #define E1000_I2C_T_HIGH 4
363 | #define E1000_I2C_T_SU_STA 5
364 | #define E1000_I2C_T_HD_DATA 5
365 | #define E1000_I2C_T_SU_DATA 1
366 | #define E1000_I2C_T_RISE 1
367 | #define E1000_I2C_T_FALL 1
368 | #define E1000_I2C_T_SU_STO 4
369 | #define E1000_I2C_T_BUF 5
370 |
371 | s32 e1000_set_i2c_bb(struct e1000_hw *hw);
372 | s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
373 | u8 dev_addr, u8 *data);
374 | s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
375 | u8 dev_addr, u8 data);
376 | void e1000_i2c_bus_clear(struct e1000_hw *hw);
377 | #endif /* _E1000_82575_H_ */
378 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_api.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_API_H_
5 | #define _E1000_API_H_
6 |
7 | #include "e1000_hw.h"
8 |
9 | extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
10 | extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
11 | extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
12 | extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
13 | extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
14 | extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
15 |
16 | s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
17 | s32 e1000_set_mac_type(struct e1000_hw *hw);
18 | s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
19 | s32 e1000_init_mac_params(struct e1000_hw *hw);
20 | s32 e1000_init_nvm_params(struct e1000_hw *hw);
21 | s32 e1000_init_phy_params(struct e1000_hw *hw);
22 | s32 e1000_init_mbx_params(struct e1000_hw *hw);
23 | s32 e1000_get_bus_info(struct e1000_hw *hw);
24 | void e1000_clear_vfta(struct e1000_hw *hw);
25 | void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
26 | s32 e1000_force_mac_fc(struct e1000_hw *hw);
27 | s32 e1000_check_for_link(struct e1000_hw *hw);
28 | s32 e1000_reset_hw(struct e1000_hw *hw);
29 | s32 e1000_init_hw(struct e1000_hw *hw);
30 | s32 e1000_setup_link(struct e1000_hw *hw);
31 | s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
32 | s32 e1000_disable_pcie_primary(struct e1000_hw *hw);
33 | void e1000_config_collision_dist(struct e1000_hw *hw);
34 | int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
35 | u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
36 | void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
37 | u32 mc_addr_count);
38 | s32 e1000_setup_led(struct e1000_hw *hw);
39 | s32 e1000_cleanup_led(struct e1000_hw *hw);
40 | s32 e1000_check_reset_block(struct e1000_hw *hw);
41 | s32 e1000_blink_led(struct e1000_hw *hw);
42 | s32 e1000_led_on(struct e1000_hw *hw);
43 | s32 e1000_led_off(struct e1000_hw *hw);
44 | s32 e1000_id_led_init(struct e1000_hw *hw);
45 | void e1000_reset_adaptive(struct e1000_hw *hw);
46 | void e1000_update_adaptive(struct e1000_hw *hw);
47 | s32 e1000_get_cable_length(struct e1000_hw *hw);
48 | s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
49 | s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
50 | s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
51 | s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
52 | u8 data);
53 | s32 e1000_get_phy_info(struct e1000_hw *hw);
54 | void e1000_release_phy(struct e1000_hw *hw);
55 | s32 e1000_acquire_phy(struct e1000_hw *hw);
56 | s32 e1000_phy_hw_reset(struct e1000_hw *hw);
57 | s32 e1000_phy_commit(struct e1000_hw *hw);
58 | void e1000_power_up_phy(struct e1000_hw *hw);
59 | void e1000_power_down_phy(struct e1000_hw *hw);
60 | s32 e1000_read_mac_addr(struct e1000_hw *hw);
61 | s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
62 | s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
63 | void e1000_reload_nvm(struct e1000_hw *hw);
64 | s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
65 | s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
66 | s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
67 | s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
68 | s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
69 | s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
70 | s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
71 | s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
72 | bool e1000_check_mng_mode(struct e1000_hw *hw);
73 | bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
74 | s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
75 | s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
76 | u16 offset, u8 *sum);
77 | s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
78 | struct e1000_host_mng_command_header *hdr);
79 | s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
80 | s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
81 | s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
82 |
83 | /*
84 | * TBI_ACCEPT macro definition:
85 | *
86 | * This macro requires:
87 | * a = a pointer to struct e1000_hw
88 | * status = the 8 bit status field of the Rx descriptor with EOP set
89 | * errors = the 8 bit error field of the Rx descriptor with EOP set
90 | * length = the sum of all the length fields of the Rx descriptors that
91 | * make up the current frame
92 | * last_byte = the last byte of the frame DMAed by the hardware
93 | * min_frame_size = the minimum frame length we want to accept.
94 | * max_frame_size = the maximum frame length we want to accept.
95 | *
96 | * This macro is a conditional that should be used in the interrupt
97 | * handler's Rx processing routine when RxErrors have been detected.
98 | *
99 | * Typical use:
100 | * ...
101 | * if (TBI_ACCEPT) {
102 | * accept_frame = true;
103 | * e1000_tbi_adjust_stats(adapter, MacAddress);
104 | * frame_length--;
105 | * } else {
106 | * accept_frame = false;
107 | * }
108 | * ...
109 | */
110 |
111 | /* The carrier extension symbol, as received by the NIC. */
112 | #define CARRIER_EXTENSION 0x0F
113 |
114 | #define TBI_ACCEPT(a, status, errors, length, last_byte, \
115 | min_frame_size, max_frame_size) \
116 | (e1000_tbi_sbp_enabled_82543(a) && \
117 | (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
118 | ((last_byte) == CARRIER_EXTENSION) && \
119 | (((status) & E1000_RXD_STAT_VP) ? \
120 | (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \
121 | ((length) <= ((max_frame_size) + 1))) : \
122 | (((length) > (min_frame_size)) && \
123 | ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1)))))
124 |
125 | #ifndef E1000_MAX
126 | #define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
127 | #endif
128 | #ifndef E1000_DIVIDE_ROUND_UP
129 | #define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
130 | #endif
131 | #endif /* _E1000_API_H_ */
132 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_base.c:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #include "e1000_hw.h"
5 | #include "e1000_82575.h"
6 | #include "e1000_mac.h"
7 | #include "e1000_base.h"
8 | #include "e1000_manage.h"
9 |
10 | /**
11 | * e1000_acquire_phy_base - Acquire rights to access PHY
12 | * @hw: pointer to the HW structure
13 | *
14 | * Acquire access rights to the correct PHY.
15 | **/
16 | s32 e1000_acquire_phy_base(struct e1000_hw *hw)
17 | {
18 | u16 mask = E1000_SWFW_PHY0_SM;
19 |
20 | DEBUGFUNC("e1000_acquire_phy_base");
21 |
22 | if (hw->bus.func == E1000_FUNC_1)
23 | mask = E1000_SWFW_PHY1_SM;
24 | else if (hw->bus.func == E1000_FUNC_2)
25 | mask = E1000_SWFW_PHY2_SM;
26 | else if (hw->bus.func == E1000_FUNC_3)
27 | mask = E1000_SWFW_PHY3_SM;
28 |
29 | return hw->mac.ops.acquire_swfw_sync(hw, mask);
30 | }
31 |
32 | /**
33 | * e1000_release_phy_base - Release rights to access PHY
34 | * @hw: pointer to the HW structure
35 | *
36 | * A wrapper to release access rights to the correct PHY.
37 | **/
38 | void e1000_release_phy_base(struct e1000_hw *hw)
39 | {
40 | u16 mask = E1000_SWFW_PHY0_SM;
41 |
42 | DEBUGFUNC("e1000_release_phy_base");
43 |
44 | if (hw->bus.func == E1000_FUNC_1)
45 | mask = E1000_SWFW_PHY1_SM;
46 | else if (hw->bus.func == E1000_FUNC_2)
47 | mask = E1000_SWFW_PHY2_SM;
48 | else if (hw->bus.func == E1000_FUNC_3)
49 | mask = E1000_SWFW_PHY3_SM;
50 |
51 | hw->mac.ops.release_swfw_sync(hw, mask);
52 | }
53 |
54 | /**
55 | * e1000_init_hw_base - Initialize hardware
56 | * @hw: pointer to the HW structure
57 | *
58 | * This inits the hardware readying it for operation.
59 | **/
60 | s32 e1000_init_hw_base(struct e1000_hw *hw)
61 | {
62 | struct e1000_mac_info *mac = &hw->mac;
63 | s32 ret_val;
64 | u16 i, rar_count = mac->rar_entry_count;
65 |
66 | DEBUGFUNC("e1000_init_hw_base");
67 |
68 | /* Setup the receive address */
69 | e1000_init_rx_addrs_generic(hw, rar_count);
70 |
71 | /* Zero out the Multicast HASH table */
72 | DEBUGOUT("Zeroing the MTA\n");
73 | for (i = 0; i < mac->mta_reg_count; i++)
74 | E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
75 |
76 | /* Zero out the Unicast HASH table */
77 | DEBUGOUT("Zeroing the UTA\n");
78 | for (i = 0; i < mac->uta_reg_count; i++)
79 | E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
80 |
81 | /* Setup link and flow control */
82 | ret_val = mac->ops.setup_link(hw);
83 |
84 | return ret_val;
85 | }
86 |
87 | /**
88 | * e1000_power_down_phy_copper_base - Remove link during PHY power down
89 | * @hw: pointer to the HW structure
90 | *
91 | * In the case of a PHY power down to save power, or to turn off link during a
92 | * driver unload, or wake on lan is not enabled, remove the link.
93 | **/
94 | void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
95 | {
96 | struct e1000_phy_info *phy = &hw->phy;
97 |
98 | if (!(phy->ops.check_reset_block))
99 | return;
100 |
101 | /* If the management interface is not enabled, then power down */
102 | if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
103 | e1000_power_down_phy_copper(hw);
104 |
105 | return;
106 | }
107 |
/**
 *  e1000_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in as Rx was
 *  being enabled.
 *
 *  The register write ordering below implements a hardware errata
 *  workaround; do not reorder the accesses.
 **/
void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("e1000_rx_fifo_flush_base");

	/* disable IPv6 options as per hardware errata */
	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* The flush workaround below only applies to 82575 parts that have
	 * manageability (TCO) receive enabled; everything else is done.
	 */
	if (hw->mac.type != e1000_82575 ||
	    !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues, saving each RXDCTL so it can be restored */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
				rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	/* Save and zero the long-packet maximum length for the flush window */
	rlpml = E1000_READ_REG(hw, E1000_RLPML);
	E1000_WRITE_REG(hw, E1000_RLPML, 0);

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround.
	 * NOTE(review): assumes these statistics registers are read-to-clear;
	 * confirm against the controller datasheet.
	 */
	E1000_READ_REG(hw, E1000_ROC);
	E1000_READ_REG(hw, E1000_RNBC);
	E1000_READ_REG(hw, E1000_MPC);
}
186 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_base.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 | /* Copyright 2024 王孝慈(laobamac)*/
4 | /* MIT License */
5 | #ifndef _E1000_BASE_H_
6 | #define _E1000_BASE_H_
7 |
8 | /* forward declaration */
9 | s32 e1000_init_hw_base(struct e1000_hw *hw);
10 | void e1000_power_down_phy_copper_base(struct e1000_hw *hw);
11 | extern void e1000_rx_fifo_flush_base(struct e1000_hw *hw);
12 | s32 e1000_acquire_phy_base(struct e1000_hw *hw);
13 | void e1000_release_phy_base(struct e1000_hw *hw);
14 |
/* Transmit Descriptor - Advanced.
 * "read" is the layout software programs; "wb" is the layout the hardware
 * writes back (mirrors the read/wb split used by the Rx descriptors below).
 */
union e1000_adv_tx_desc {
	struct {
		__le64 buffer_addr; /* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd; /* Reserved */
		__le32 nxtseq_seed;
		__le32 status;
	} wb;
};

/* Context descriptors - carry per-packet offload parameters for the
 * advanced Tx path (VLAN/MAC/IP lengths, L4 command, MSS).
 */
struct e1000_adv_tx_context_desc {
	__le32 vlan_macip_lens;
	union {
		/* launch_time and seqnum_seed share this dword; which one is
		 * meaningful depends on the command flags set elsewhere.
		 */
		__le32 launch_time;
		__le32 seqnum_seed;
	};
	__le32 type_tucmd_mlhl;
	__le32 mss_l4len_idx;
};
39 |
40 | /* Adv Transmit Descriptor Config Masks */
41 | #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
42 | #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
43 | #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
44 | #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
45 | #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
46 | #define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
47 | #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
48 | #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
49 | #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
50 | #define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
51 | #define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
52 | #define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
53 | #define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
54 | #define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
55 | #define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
56 | #define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
57 | /* 1st & Last TSO-full iSCSI PDU*/
58 | #define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
59 | #define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
60 | #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
61 |
62 | /* Advanced Transmit Context Descriptor Config */
63 | #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
64 | #define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
65 | #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
66 | #define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
67 | #define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
68 | #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
69 | #define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
70 | #define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
71 | /* IPSec Encrypt Enable for ESP */
72 | #define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
73 | /* Req requires Markers and CRC */
74 | #define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
75 | #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
76 | #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
77 | /* Adv ctxt IPSec SA IDX mask */
78 | #define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
79 | /* Adv ctxt IPSec ESP len mask */
80 | #define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
81 |
82 | #define E1000_RAR_ENTRIES_BASE 16
83 |
/* Receive Descriptor - Advanced.
 * "read" is the layout software programs (two buffer pointers); "wb" is the
 * descriptor as written back by hardware after packet reception.
 */
union e1000_adv_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
	} read;
	struct {
		struct {
			union {
				__le32 data;
				struct {
					__le16 pkt_info; /*RSS type, Pkt type*/
					/* Split Header, header buffer len */
					__le16 hdr_info;
				} hs_rss;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				struct {
					__le16 ip_id; /* IP id */
					__le16 csum; /* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error; /* ext status/error */
			__le16 length; /* Packet length */
			__le16 vlan; /* VLAN tag */
		} upper;
	} wb; /* writeback */
};
115 |
116 | /* Additional Transmit Descriptor Control definitions */
117 | #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
118 |
119 | /* Additional Receive Descriptor Control definitions */
120 | #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
121 |
122 | /* SRRCTL bit definitions */
123 | #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
124 | #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
125 | #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
126 | #endif /* _E1000_BASE_H_ */
127 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_hw.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_HW_H_
5 | #define _E1000_HW_H_
6 |
7 | #include "e1000_osdep.h"
8 | #include "e1000_regs.h"
9 | #include "e1000_defines.h"
10 |
11 | struct e1000_hw;
12 |
13 | #define E1000_DEV_ID_82576 0x10C9
14 | #define E1000_DEV_ID_82576_FIBER 0x10E6
15 | #define E1000_DEV_ID_82576_SERDES 0x10E7
16 | #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
17 | #define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
18 | #define E1000_DEV_ID_82576_NS 0x150A
19 | #define E1000_DEV_ID_82576_NS_SERDES 0x1518
20 | #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
21 | #define E1000_DEV_ID_82575EB_COPPER 0x10A7
22 | #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
23 | #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
24 | #define E1000_DEV_ID_82580_COPPER 0x150E
25 | #define E1000_DEV_ID_82580_FIBER 0x150F
26 | #define E1000_DEV_ID_82580_SERDES 0x1510
27 | #define E1000_DEV_ID_82580_SGMII 0x1511
28 | #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
29 | #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
30 | #define E1000_DEV_ID_I350_COPPER 0x1521
31 | #define E1000_DEV_ID_I350_FIBER 0x1522
32 | #define E1000_DEV_ID_I350_SERDES 0x1523
33 | #define E1000_DEV_ID_I350_SGMII 0x1524
34 | #define E1000_DEV_ID_I350_DA4 0x1546
35 | #define E1000_DEV_ID_I210_COPPER 0x1533
36 | #define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
37 | #define E1000_DEV_ID_I210_COPPER_IT 0x1535
38 | #define E1000_DEV_ID_I210_FIBER 0x1536
39 | #define E1000_DEV_ID_I210_SERDES 0x1537
40 | #define E1000_DEV_ID_I210_SGMII 0x1538
41 | #define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
42 | #define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
43 | #define E1000_DEV_ID_I210_SGMII_FLASHLESS 0x15F6
44 | #define E1000_DEV_ID_I211_COPPER 0x1539
45 | #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
46 | #define E1000_DEV_ID_I354_SGMII 0x1F41
47 | #define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
48 | #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
49 | #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
50 | #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
51 | #define E1000_DEV_ID_DH89XXCC_SFP 0x0440
52 |
53 | #define E1000_REVISION_0 0
54 | #define E1000_REVISION_1 1
55 | #define E1000_REVISION_2 2
56 | #define E1000_REVISION_3 3
57 | #define E1000_REVISION_4 4
58 |
59 | #define E1000_FUNC_0 0
60 | #define E1000_FUNC_1 1
61 | #define E1000_FUNC_2 2
62 | #define E1000_FUNC_3 3
63 |
64 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
65 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
66 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
67 | #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
68 |
/* MAC (controller) generations handled by this driver family. */
enum e1000_mac_type {
	e1000_undefined = 0,
	e1000_82575,
	e1000_82576,
	e1000_82580,
	e1000_i350,
	e1000_i354,
	e1000_i210,
	e1000_i211,
	e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};

/* Physical link media attached to the MAC. */
enum e1000_media_type {
	e1000_media_type_unknown = 0,
	e1000_media_type_copper = 1,
	e1000_media_type_fiber = 2,
	e1000_media_type_internal_serdes = 3,
	e1000_num_media_types
};

/* Kind of non-volatile memory backing the device configuration. */
enum e1000_nvm_type {
	e1000_nvm_unknown = 0,
	e1000_nvm_none,
	e1000_nvm_eeprom_spi,
	e1000_nvm_flash_hw,
	e1000_nvm_invm,
	e1000_nvm_flash_sw
};

/* Override for auto-detected SPI EEPROM sizing. */
enum e1000_nvm_override {
	e1000_nvm_override_none = 0,
	e1000_nvm_override_spi_small,
	e1000_nvm_override_spi_large,
};

/* PHY silicon variants the driver can talk to. */
enum e1000_phy_type {
	e1000_phy_unknown = 0,
	e1000_phy_none,
	e1000_phy_m88,
	e1000_phy_igp,
	e1000_phy_igp_2,
	e1000_phy_gg82563,
	e1000_phy_igp_3,
	e1000_phy_ife,
	e1000_phy_82580,
	e1000_phy_vf,
	e1000_phy_i210,
};

/* Host bus the controller sits on. */
enum e1000_bus_type {
	e1000_bus_type_unknown = 0,
	e1000_bus_type_pci,
	e1000_bus_type_pcix,
	e1000_bus_type_pci_express,
	e1000_bus_type_reserved
};

/* Bus speed; values below 2500 are MHz for PCI/PCI-X, 2500/5000 are
 * PCIe MT/s generations.
 */
enum e1000_bus_speed {
	e1000_bus_speed_unknown = 0,
	e1000_bus_speed_33,
	e1000_bus_speed_66,
	e1000_bus_speed_100,
	e1000_bus_speed_120,
	e1000_bus_speed_133,
	e1000_bus_speed_2500,
	e1000_bus_speed_5000,
	e1000_bus_speed_reserved
};

/* Bus width: PCIe lane counts or legacy PCI data width. */
enum e1000_bus_width {
	e1000_bus_width_unknown = 0,
	e1000_bus_width_pcie_x1,
	e1000_bus_width_pcie_x2,
	e1000_bus_width_pcie_x4 = 4,
	e1000_bus_width_pcie_x8 = 8,
	e1000_bus_width_32,
	e1000_bus_width_64,
	e1000_bus_width_reserved
};

/* 1000BASE-T receiver status as reported by the PHY. */
enum e1000_1000t_rx_status {
	e1000_1000t_rx_status_not_ok = 0,
	e1000_1000t_rx_status_ok,
	e1000_1000t_rx_status_undefined = 0xFF
};

/* Cable polarity detected by the PHY. */
enum e1000_rev_polarity {
	e1000_rev_polarity_normal = 0,
	e1000_rev_polarity_reversed,
	e1000_rev_polarity_undefined = 0xFF
};

/* Flow-control operating mode (which directions honor pause frames). */
enum e1000_fc_mode {
	e1000_fc_none = 0,
	e1000_fc_rx_pause,
	e1000_fc_tx_pause,
	e1000_fc_full,
	e1000_fc_default = 0xFF
};

/* Master/slave role preference for 1000BASE-T negotiation. */
enum e1000_ms_type {
	e1000_ms_hw_default = 0,
	e1000_ms_force_primary,
	e1000_ms_force_secondary,
	e1000_ms_auto
};

/* SmartSpeed (downshift-on-failure) configuration. */
enum e1000_smart_speed {
	e1000_smart_speed_default = 0,
	e1000_smart_speed_on,
	e1000_smart_speed_off
};

/* State machine for SerDes link bring-up. */
enum e1000_serdes_link_state {
	e1000_serdes_link_down = 0,
	e1000_serdes_link_autoneg_progress,
	e1000_serdes_link_autoneg_complete,
	e1000_serdes_link_forced_up
};
188 |
189 | #ifndef __le16
190 | #define __le16 u16
191 | #endif
192 | #ifndef __le32
193 | #define __le32 u32
194 | #endif
195 | #ifndef __le64
196 | #define __le64 u64
197 | #endif
/* Receive Descriptor (legacy format). */
struct e1000_rx_desc {
	__le64 buffer_addr; /* Address of the descriptor's data buffer */
	__le16 length; /* Length of data DMAed into data buffer */
	__le16 csum; /* Packet checksum */
	u8  status; /* Descriptor status */
	u8  errors; /* Descriptor Errors */
	__le16 special;
};

/* Receive Descriptor - Extended.
 * "read" is what software programs; "wb" is the hardware write-back layout.
 */
union e1000_rx_desc_extended {
	struct {
		__le64 buffer_addr;
		__le64 reserved;
	} read;
	struct {
		struct {
			__le32 mrq; /* Multiple Rx Queues */
			union {
				__le32 rss; /* RSS Hash */
				struct {
					__le16 ip_id; /* IP id */
					__le16 csum; /* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error; /* ext status/error */
			__le16 length;
			__le16 vlan; /* VLAN tag */
		} upper;
	} wb; /* writeback */
};

#define MAX_PS_BUFFERS 4

/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS	(MAX_PS_BUFFERS - 1)

/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
	struct {
		/* one buffer for protocol header(s), three data buffers */
		__le64 buffer_addr[MAX_PS_BUFFERS];
	} read;
	struct {
		struct {
			__le32 mrq; /* Multiple Rx Queues */
			union {
				__le32 rss; /* RSS Hash */
				struct {
					__le16 ip_id; /* IP id */
					__le16 csum; /* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error; /* ext status/error */
			__le16 length0; /* length of buffer 0 */
			__le16 vlan; /* VLAN tag */
		} middle;
		struct {
			__le16 header_status;
			/* length of buffers 1-3 */
			__le16 length[PS_PAGE_BUFFERS];
		} upper;
		__le64 reserved;
	} wb; /* writeback */
};

/* Transmit Descriptor (legacy format). */
struct e1000_tx_desc {
	__le64 buffer_addr; /* Address of the descriptor's data buffer */
	union {
		__le32 data;
		struct {
			__le16 length; /* Data buffer length */
			u8 cso; /* Checksum offset */
			u8 cmd; /* Descriptor control */
		} flags;
	} lower;
	union {
		__le32 data;
		struct {
			u8 status; /* Descriptor status */
			u8 css; /* Checksum start */
			__le16 special;
		} fields;
	} upper;
};

/* Offload Context Descriptor - programs checksum/TSO offsets used by
 * subsequent data descriptors.
 */
struct e1000_context_desc {
	union {
		__le32 ip_config;
		struct {
			u8 ipcss; /* IP checksum start */
			u8 ipcso; /* IP checksum offset */
			__le16 ipcse; /* IP checksum end */
		} ip_fields;
	} lower_setup;
	union {
		__le32 tcp_config;
		struct {
			u8 tucss; /* TCP checksum start */
			u8 tucso; /* TCP checksum offset */
			__le16 tucse; /* TCP checksum end */
		} tcp_fields;
	} upper_setup;
	__le32 cmd_and_length;
	union {
		__le32 data;
		struct {
			u8 status; /* Descriptor status */
			u8 hdr_len; /* Header length */
			__le16 mss; /* Maximum segment size */
		} fields;
	} tcp_seg_setup;
};

/* Offload data descriptor - buffer pointer plus offload control flags. */
struct e1000_data_desc {
	__le64 buffer_addr; /* Address of the descriptor's buffer address */
	union {
		__le32 data;
		struct {
			__le16 length; /* Data buffer length */
			u8 typ_len_ext;
			u8 cmd;
		} flags;
	} lower;
	union {
		__le32 data;
		struct {
			u8 status; /* Descriptor status */
			u8 popts; /* Packet Options */
			__le16 special;
		} fields;
	} upper;
};
339 |
/* Statistics counters collected by the MAC.
 *
 * Field names follow the hardware statistics register mnemonics
 * (e.g. crcerrs for CRC errors, mpc for missed packets, prc64/ptc64 and
 * friends for per-size-bucket Rx/Tx packet counts).
 * NOTE(review): individual counter semantics should be confirmed against
 * the controller datasheet; only the register-style names are visible here.
 */
struct e1000_hw_stats {
	u64 crcerrs;
	u64 algnerrc;
	u64 symerrs;
	u64 rxerrc;
	u64 mpc;
	u64 scc;
	u64 ecol;
	u64 mcc;
	u64 latecol;
	u64 colc;
	u64 dc;
	u64 tncrs;
	u64 sec;
	u64 cexterr;
	u64 rlec;
	u64 xonrxc;
	u64 xontxc;
	u64 xoffrxc;
	u64 xofftxc;
	u64 fcruc;
	u64 prc64;
	u64 prc127;
	u64 prc255;
	u64 prc511;
	u64 prc1023;
	u64 prc1522;
	u64 gprc;
	u64 bprc;
	u64 mprc;
	u64 gptc;
	u64 gorc;
	u64 gotc;
	u64 rnbc;
	u64 ruc;
	u64 rfc;
	u64 roc;
	u64 rjc;
	u64 mgprc;
	u64 mgpdc;
	u64 mgptc;
	u64 tor;
	u64 tot;
	u64 tpr;
	u64 tpt;
	u64 ptc64;
	u64 ptc127;
	u64 ptc255;
	u64 ptc511;
	u64 ptc1023;
	u64 ptc1522;
	u64 mptc;
	u64 bptc;
	u64 tsctc;
	u64 tsctfc;
	u64 iac;
	u64 icrxptc;
	u64 icrxatc;
	u64 ictxptc;
	u64 ictxatc;
	u64 ictxqec;
	u64 ictxqmtc;
	u64 icrxdmtc;
	u64 icrxoc;
	u64 cbtmpc;
	u64 htdpmc;
	u64 cbrdpc;
	u64 cbrmpc;
	u64 rpthc;
	u64 hgptc;
	u64 htcbdpc;
	u64 hgorc;
	u64 hgotc;
	u64 lenerrs;
	u64 scvpc;
	u64 hrmpc;
	u64 doosync;
	u64 o2bgptc;
	u64 o2bspc;
	u64 b2ospc;
	u64 b2ogprc;
};
423 |
424 | struct e1000_phy_stats {
425 | u32 idle_errors;
426 | u32 receive_errors;
427 | };
428 |
429 | struct e1000_host_mng_dhcp_cookie {
430 | u32 signature;
431 | u8 status;
432 | u8 reserved0;
433 | u16 vlan_id;
434 | u32 reserved1;
435 | u16 reserved2;
436 | u8 reserved3;
437 | u8 checksum;
438 | };
439 |
440 | /* Host Interface "Rev 1" */
441 | struct e1000_host_command_header {
442 | u8 command_id;
443 | u8 command_length;
444 | u8 command_options;
445 | u8 checksum;
446 | };
447 |
#define E1000_HI_MAX_DATA_LENGTH 252
/* Complete Rev 1 host interface command: header plus payload */
struct e1000_host_command_info {
	struct e1000_host_command_header command_header;
	u8 command_data[E1000_HI_MAX_DATA_LENGTH];	/* command payload */
};
453 |
/* Host Interface "Rev 2" command header — layout fixed by firmware */
struct e1000_host_mng_command_header {
	u8 command_id;		/* which command to execute */
	u8 checksum;		/* byte checksum over the command block */
	u16 reserved1;
	u16 reserved2;
	u16 command_length;	/* length of command_data in bytes */
};
462 |
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
/* Complete Rev 2 host interface command: header plus payload */
struct e1000_host_mng_command_info {
	struct e1000_host_mng_command_header command_header;
	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];	/* command payload */
};
468 |
469 | #include "e1000_mac.h"
470 | #include "e1000_phy.h"
471 | #include "e1000_nvm.h"
472 | #include "e1000_manage.h"
473 | #include "e1000_mbx.h"
474 |
475 | /* NVM Update commands */
476 | #define E1000_NVMUPD_CMD_REG_READ 0x0000000B
477 | #define E1000_NVMUPD_CMD_REG_WRITE 0x0000000C
478 |
479 | /* NVM Update features API */
480 | #define E1000_NVMUPD_FEATURES_API_VER_MAJOR 0
481 | #define E1000_NVMUPD_FEATURES_API_VER_MINOR 0
482 | #define E1000_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
483 | #define E1000_NVMUPD_EXEC_FEATURES 0xE
484 | #define E1000_NVMUPD_FEATURE_FLAT_NVM_SUPPORT (1 << 0)
485 | #define E1000_NVMUPD_FEATURE_REGISTER_ACCESS_SUPPORT (1 << 1)
486 |
487 | #define E1000_NVMUPD_MOD_PNT_MASK 0xFF
488 |
/* NVM update request passed from a userspace update tool.
 * `data[1]` is the C89 trailing-array idiom: the struct is allocated
 * oversized and `data` actually holds `data_size` bytes.
 */
struct e1000_nvm_access {
	u32 command;	/* one of the E1000_NVMUPD_CMD_* values */
	u32 config;
	u32 offset; /* in bytes */
	u32 data_size; /* in bytes */
	u8 data[1];	/* variable-length payload (see note above) */
};
496 |
/* NVM update tool feature/version advertisement (see the
 * E1000_NVMUPD_FEATURES_API_* and E1000_NVMUPD_FEATURE_* defines) */
struct e1000_nvm_features {
	u8 major;	/* API major version */
	u8 minor;	/* API minor version */
	u16 size;
	u8 features[E1000_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];	/* feature flag bytes */
};
503 |
/* Function pointers for the MAC.  Populated by the per-device init code;
 * callers must tolerate individual pointers being NULL/no-op stubs. */
struct e1000_mac_operations {
	s32 (*init_params)(struct e1000_hw *);
	s32 (*id_led_init)(struct e1000_hw *);
	s32 (*blink_led)(struct e1000_hw *);
	bool (*check_mng_mode)(struct e1000_hw *);	/* manageability mode active? */
	s32 (*check_for_link)(struct e1000_hw *);
	s32 (*cleanup_led)(struct e1000_hw *);
	void (*clear_hw_cntrs)(struct e1000_hw *);	/* clear statistics counters */
	void (*clear_vfta)(struct e1000_hw *);		/* clear VLAN filter table */
	s32 (*get_bus_info)(struct e1000_hw *);
	void (*set_lan_id)(struct e1000_hw *);		/* determine PCI function/port id */
	s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);	/* speed, duplex */
	s32 (*led_on)(struct e1000_hw *);
	s32 (*led_off)(struct e1000_hw *);
	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);	/* addr list, count */
	s32 (*reset_hw)(struct e1000_hw *);
	s32 (*init_hw)(struct e1000_hw *);
	void (*shutdown_serdes)(struct e1000_hw *);
	void (*power_up_serdes)(struct e1000_hw *);
	s32 (*setup_link)(struct e1000_hw *);
	s32 (*setup_physical_interface)(struct e1000_hw *);
	s32 (*setup_led)(struct e1000_hw *);
	void (*write_vfta)(struct e1000_hw *, u32, u32);	/* offset, value */
	void (*config_collision_dist)(struct e1000_hw *);
	int (*rar_set)(struct e1000_hw *, u8*, u32);	/* set receive address register: addr, index */
	s32 (*read_mac_addr)(struct e1000_hw *);
	s32 (*validate_mdi_setting)(struct e1000_hw *);
	s32 (*get_thermal_sensor_data)(struct e1000_hw *);
	s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
	s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);	/* take SW/FW semaphore for mask */
	void (*release_swfw_sync)(struct e1000_hw *, u16);	/* release SW/FW semaphore */
};
537 |
538 | /* When to use various PHY register access functions:
539 | *
540 | * Func Caller
541 | * Function Does Does When to use
542 | * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
543 | * X_reg L,P,A n/a for simple PHY reg accesses
544 | * X_reg_locked P,A L for multiple accesses of different regs
545 | * on different pages
546 | * X_reg_page A L,P for multiple accesses of different regs
547 | * on the same page
548 | *
549 | * Where X=[read|write], L=locking, P=sets page, A=register access
550 | *
551 | */
/* Function pointers for the PHY.  See the table above for when to use the
 * plain, _locked, and _page register access variants. */
struct e1000_phy_operations {
	s32 (*init_params)(struct e1000_hw *);
	s32 (*acquire)(struct e1000_hw *);		/* take PHY semaphore */
	s32 (*check_polarity)(struct e1000_hw *);
	s32 (*check_reset_block)(struct e1000_hw *);	/* is PHY reset blocked by FW? */
	s32 (*commit)(struct e1000_hw *);
	s32 (*force_speed_duplex)(struct e1000_hw *);
	s32 (*get_cfg_done)(struct e1000_hw *hw);
	s32 (*get_cable_length)(struct e1000_hw *);
	s32 (*get_info)(struct e1000_hw *);
	s32 (*set_page)(struct e1000_hw *, u16);
	s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
	s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
	s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
	void (*release)(struct e1000_hw *);		/* release PHY semaphore */
	s32 (*reset)(struct e1000_hw *);
	s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);	/* low-power link-up in D0 */
	s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);	/* low-power link-up in D3 */
	s32 (*write_reg)(struct e1000_hw *, u32, u16);
	s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
	s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
	void (*power_up)(struct e1000_hw *);
	void (*power_down)(struct e1000_hw *);
	s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);	/* byte offset, dev addr, data */
	s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);	/* byte offset, dev addr, data */
};
578 |
/* Function pointers for the NVM (EEPROM/flash/iNVM). */
struct e1000_nvm_operations {
	s32 (*init_params)(struct e1000_hw *);
	s32 (*acquire)(struct e1000_hw *);		/* take NVM semaphore */
	s32 (*read)(struct e1000_hw *, u16, u16, u16 *);	/* offset, word count, data */
	void (*release)(struct e1000_hw *);		/* release NVM semaphore */
	void (*reload)(struct e1000_hw *);
	s32 (*update)(struct e1000_hw *);		/* update checksum / commit */
	s32 (*valid_led_default)(struct e1000_hw *, u16 *);
	s32 (*validate)(struct e1000_hw *);		/* verify NVM checksum */
	s32 (*write)(struct e1000_hw *, u16, u16, u16 *);	/* offset, word count, data */
};
591 |
592 | #define E1000_MAX_SENSORS 3
593 |
/* One on-die thermal diode reading and its alarm thresholds */
struct e1000_thermal_diode_data {
	u8 location;		/* sensor location identifier */
	u8 temp;		/* current temperature reading */
	u8 caution_thresh;	/* caution (warning) threshold */
	u8 max_op_thresh;	/* maximum operating threshold */
};
600 |
/* All thermal sensors supported by the device */
struct e1000_thermal_sensor_data {
	struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
};
604 |
/* MAC configuration and runtime state */
struct e1000_mac_info {
	struct e1000_mac_operations ops;
	u8 addr[ETH_ADDR_LEN];		/* current MAC address */
	u8 perm_addr[ETH_ADDR_LEN];	/* permanent (factory) MAC address */

	enum e1000_mac_type type;	/* device family; selects dev_spec member */

	u32 collision_delta;		/* collisions since last adaptive-IFS update */
	u32 ledctl_default;		/* LEDCTL value saved at init */
	u32 ledctl_mode1;
	u32 ledctl_mode2;
	u32 mc_filter_type;		/* multicast hash filter bit-offset selector */
	u32 tx_packet_delta;		/* TX packets since last adaptive-IFS update */
	u32 txcw;			/* transmit config word (fiber/serdes) */

	/* adaptive inter-frame spacing state */
	u16 current_ifs_val;
	u16 ifs_max_val;
	u16 ifs_min_val;
	u16 ifs_ratio;
	u16 ifs_step_size;
	u16 mta_reg_count;		/* number of multicast table array registers */
	u16 uta_reg_count;		/* number of unicast table array registers */

	/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
	u32 mta_shadow[MAX_MTA_REG];	/* software copy of the MTA */
	u16 rar_entry_count;		/* number of receive address registers */

	u8 forced_speed_duplex;

	bool adaptive_ifs;		/* adaptive IFS enabled */
	bool has_fwsm;			/* device exposes the FWSM register */
	bool arc_subsystem_valid;	/* management ARC processor present */
	bool asf_firmware_present;
	bool autoneg;
	bool autoneg_failed;
	bool get_link_status;		/* link status needs re-reading */
	bool in_ifs_mode;
	enum e1000_serdes_link_state serdes_link_state;
	bool serdes_has_link;
	bool tx_pkt_filtering;
	struct e1000_thermal_sensor_data thermal_sensor_data;
};
648 |
/* PHY configuration and runtime state */
struct e1000_phy_info {
	struct e1000_phy_operations ops;
	enum e1000_phy_type type;

	enum e1000_1000t_rx_status local_rx;	/* local receiver status (1000BASE-T) */
	enum e1000_1000t_rx_status remote_rx;	/* remote receiver status (1000BASE-T) */
	enum e1000_ms_type ms_type;		/* resolved master/slave role */
	enum e1000_ms_type original_ms_type;	/* configured master/slave preference */
	enum e1000_rev_polarity cable_polarity;
	enum e1000_smart_speed smart_speed;

	u32 addr;		/* PHY address on the MDIO bus */
	u32 id;			/* PHY identifier read from the PHY */
	u32 reset_delay_us; /* in usec */
	u32 revision;
	u32 current_retry_counter;

	enum e1000_media_type media_type;

	u16 autoneg_advertised;	/* speeds/duplex advertised in autoneg */
	u16 autoneg_mask;	/* speeds/duplex the PHY may advertise */
	u16 cable_length;
	u16 max_cable_length;
	u16 min_cable_length;

	u8 mdix;		/* MDI/MDI-X setting */

	bool disable_polarity_correction;
	bool is_mdix;
	bool polarity_correction;
	bool reset_disable;
	bool speed_downgraded;
	bool autoneg_wait_to_complete;	/* block until autoneg finishes */
};
683 |
/* NVM (EEPROM/flash/iNVM) configuration */
struct e1000_nvm_info {
	struct e1000_nvm_operations ops;
	enum e1000_nvm_type type;
	enum e1000_nvm_override override;

	u32 flash_bank_size;	/* size of one flash bank, when banked */
	u32 flash_base_addr;

	u16 word_size;		/* NVM size in 16-bit words */
	u16 delay_usec;		/* per-access delay */
	u16 address_bits;	/* serial-interface address width */
	u16 opcode_bits;	/* serial-interface opcode width */
	u16 page_size;		/* write page size */
};
698 |
/* PCI/PCIe bus attributes of the device */
struct e1000_bus_info {
	enum e1000_bus_type type;
	enum e1000_bus_speed speed;
	enum e1000_bus_width width;

	u16 func;		/* PCI function number of this port */
	u16 pci_cmd_word;	/* cached PCI command register — presumably saved at init; verify */
};
707 |
/* 802.3x flow-control configuration and state */
struct e1000_fc_info {
	u32 high_water; /* Flow control high-water mark */
	u32 low_water; /* Flow control low-water mark */
	u16 pause_time; /* Flow control pause timer */
	u16 refresh_time; /* Flow control refresh timer */
	bool send_xon; /* Flow control send XON */
	bool strict_ieee; /* Strict IEEE mode */
	enum e1000_fc_mode current_mode; /* FC mode in effect */
	enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
};
718 |
/* Function pointers for the PF/VF mailbox.
 * Common argument order: message buffer, size in u32 words, mailbox id. */
struct e1000_mbx_operations {
	s32 (*init_params)(struct e1000_hw *hw);
	s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
	s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
	s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);	/* wait for msg, then read */
	s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);	/* write, then wait for ack */
	s32 (*check_for_msg)(struct e1000_hw *, u16);
	s32 (*check_for_ack)(struct e1000_hw *, u16);
	s32 (*check_for_rst)(struct e1000_hw *, u16);	/* has the other side reset? */
};
729 |
/* Mailbox traffic counters */
struct e1000_mbx_stats {
	u32 msgs_tx;	/* messages sent */
	u32 msgs_rx;	/* messages received */

	u32 acks;	/* acknowledgements seen */
	u32 reqs;	/* requests seen */
	u32 rsts;	/* resets seen */
};
738 |
/* Mailbox operations, statistics, and polling parameters */
struct e1000_mbx_info {
	struct e1000_mbx_operations ops;
	struct e1000_mbx_stats stats;
	u32 timeout;	/* max poll iterations; 0 disables polling (latched on timeout) */
	u32 usec_delay;	/* delay between poll iterations */
	u16 size;	/* mailbox capacity in u32 words */
};
746 |
/* State specific to 82575-family (incl. i210/i350) devices */
struct e1000_dev_spec_82575 {
	bool sgmii_active;		/* port is running SGMII */
	bool global_device_reset;	/* request full device reset */
	bool eee_disable;		/* Energy Efficient Ethernet disabled */
	bool module_plugged;		/* SFP module present */
	bool clear_semaphore_once;
	u32 mtu;
	struct sfp_e1000_flags eth_flags;	/* SFP capability flags */
	u8 media_port;
	bool media_changed;
};
758 |
/* State specific to virtual-function devices */
struct e1000_dev_spec_vf {
	u32 vf_number;		/* this VF's index */
	u32 v2p_mailbox;	/* cached VF-to-PF mailbox register value */
};
763 |
/* Top-level hardware context shared by all e1000_* code.
 * Aggregates per-subsystem state and the PCI identity of one device.
 */
struct e1000_hw {
	void *back;	/* opaque back-pointer for the OS driver's use — not touched by shared code */

	u8 __iomem *hw_addr;		/* mapped register base */
	u8 __iomem *flash_address;	/* mapped flash region, when present */
	unsigned long io_base;		/* legacy I/O base */

	struct e1000_mac_info mac;
	struct e1000_fc_info fc;
	struct e1000_phy_info phy;
	struct e1000_nvm_info nvm;
	struct e1000_bus_info bus;
	struct e1000_mbx_info mbx;
	struct e1000_host_mng_dhcp_cookie mng_cookie;

	/* Only one member is meaningful; which depends on mac.type */
	union {
		struct e1000_dev_spec_82575 _82575;
		struct e1000_dev_spec_vf vf;
	} dev_spec;

	/* PCI identity */
	u16 device_id;
	u16 subsystem_vendor_id;
	u16 subsystem_device_id;
	u16 vendor_id;

	u8 revision_id;
	/* NVM Update features */
	struct e1000_nvm_features nvmupd_features;
};
793 |
794 | #include "e1000_82575.h"
795 | #include "e1000_i210.h"
796 | #include "e1000_base.h"
797 |
798 | /* These functions must be implemented by drivers */
799 | s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
800 | s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
801 | void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
802 | void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
803 |
804 | #endif
805 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_i210.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_I210_H_
5 | #define _E1000_I210_H_
6 |
7 | bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
8 | s32 e1000_update_flash_i210(struct e1000_hw *hw);
9 | s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
10 | s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
11 | s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
12 | u16 words, u16 *data);
13 | s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
14 | u16 words, u16 *data);
15 | s32 e1000_read_invm_version(struct e1000_hw *hw,
16 | struct e1000_fw_version *invm_ver);
17 | s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
18 | void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
19 | s32 e1000_init_hw_i210(struct e1000_hw *hw);
20 |
21 | #define E1000_STM_OPCODE 0xDB00
22 | #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
23 |
24 | #define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
25 | (u8)((invm_dword) & 0x7)
26 | #define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
27 | (u8)(((invm_dword) & 0x0000FE00) >> 9)
28 | #define INVM_DWORD_TO_WORD_DATA(invm_dword) \
29 | (u16)(((invm_dword) & 0xFFFF0000) >> 16)
30 |
31 | enum E1000_INVM_STRUCTURE_TYPE {
32 | E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
33 | E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
34 | E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
35 | E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
36 | E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
37 | E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
38 | };
39 |
40 | #define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
41 | #define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
42 | #define E1000_INVM_ULT_BYTES_SIZE 8
43 | #define E1000_INVM_RECORD_SIZE_IN_BYTES 4
44 | #define E1000_INVM_VER_FIELD_ONE 0x1FF8
45 | #define E1000_INVM_VER_FIELD_TWO 0x7FE000
46 | #define E1000_INVM_IMGTYPE_FIELD 0x1F800000
47 |
48 | #define E1000_INVM_MAJOR_MASK 0x3F0
49 | #define E1000_INVM_MINOR_MASK 0xF
50 | #define E1000_INVM_MAJOR_SHIFT 4
51 |
52 | #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
53 | (ID_LED_DEF1_DEF2 << 4) | \
54 | (ID_LED_OFF1_OFF2))
55 | #define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
56 | (ID_LED_DEF1_DEF2 << 4) | \
57 | (ID_LED_OFF1_ON2))
58 |
59 | /* NVM offset defaults for I211 devices */
60 | #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
61 | #define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
62 | #define NVM_LED_1_CFG_DEFAULT_I211 0x0184
63 | #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
64 |
65 | /* PLL Defines */
66 | #define E1000_PCI_PMCSR 0x44
67 | #define E1000_PCI_PMCSR_D3 0x03
68 | #define E1000_PCI_PMCSR_PME_EN 0x100
69 | #define E1000_MAX_PLL_TRIES 5
70 | #define E1000_PHY_PLL_UNCONF 0xFF
71 | #define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
72 | #define E1000_PHY_PLL_FREQ_REG 0x000E
73 | #define E1000_INVM_DEFAULT_AL 0x202F
74 | #define E1000_INVM_AUTOLOAD 0x0A
75 | #define E1000_INVM_PLL_WO_VAL 0x0010
76 |
77 | #define E1000_NVM_CTRL_WORD_2 0x0F
78 | #define E1000_NVM_APMPME_ENABLE 0x8000
79 |
80 | #endif
81 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_mac.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_MAC_H_
5 | #define _E1000_MAC_H_
6 |
7 | void e1000_init_mac_ops_generic(struct e1000_hw *hw);
8 | #ifndef E1000_REMOVED
9 | #define E1000_REMOVED(a) (0)
10 | #endif /* E1000_REMOVED */
11 | void e1000_null_mac_generic(struct e1000_hw *hw);
12 | s32 e1000_null_ops_generic(struct e1000_hw *hw);
13 | s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
14 | bool e1000_null_mng_mode(struct e1000_hw *hw);
15 | void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
16 | void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
17 | int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
18 | s32 e1000_blink_led_generic(struct e1000_hw *hw);
19 | s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
20 | s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
21 | s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
22 | s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
23 | s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
24 | s32 e1000_disable_pcie_primary_generic(struct e1000_hw *hw);
25 | s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
26 | s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
27 | s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
28 | void e1000_set_lan_id_single_port(struct e1000_hw *hw);
29 | s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
30 | s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
31 | u16 *duplex);
32 | s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
33 | u16 *speed, u16 *duplex);
34 | s32 e1000_id_led_init_generic(struct e1000_hw *hw);
35 | s32 e1000_led_on_generic(struct e1000_hw *hw);
36 | s32 e1000_led_off_generic(struct e1000_hw *hw);
37 | void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
38 | u8 *mc_addr_list, u32 mc_addr_count);
39 | int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
40 | s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
41 | s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
42 | s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
43 | s32 e1000_setup_led_generic(struct e1000_hw *hw);
44 | s32 e1000_setup_link_generic(struct e1000_hw *hw);
45 | s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
46 | s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
47 | u32 offset, u8 data);
48 |
49 | u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
50 |
51 | void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
52 | void e1000_clear_vfta_generic(struct e1000_hw *hw);
53 | void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
54 | void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
55 | void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
56 | s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
57 | void e1000_reset_adaptive_generic(struct e1000_hw *hw);
58 | void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
59 | void e1000_update_adaptive_generic(struct e1000_hw *hw);
60 | void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
61 |
62 | #endif
63 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_manage.c:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #include "e1000_api.h"
5 | #include "e1000_manage.h"
6 |
7 | /**
8 | * e1000_calculate_checksum - Calculate checksum for buffer
9 | * @buffer: pointer to EEPROM
10 | * @length: size of EEPROM to calculate a checksum for
11 | *
12 | * Calculates the checksum for some buffer on a specified length. The
13 | * checksum calculated is returned.
14 | **/
15 | u8 e1000_calculate_checksum(u8 *buffer, u32 length)
16 | {
17 | u32 i;
18 | u8 sum = 0;
19 |
20 | DEBUGFUNC("e1000_calculate_checksum");
21 |
22 | if (!buffer)
23 | return 0;
24 |
25 | for (i = 0; i < length; i++)
26 | sum += buffer[i];
27 |
28 | return (u8) (0 - sum);
29 | }
30 |
31 | /**
32 | * e1000_enable_mng_pass_thru - Check if management passthrough is needed
33 | * @hw: pointer to the HW structure
34 | *
35 | * Verifies the hardware needs to leave interface enabled so that frames can
36 | * be directed to and from the management interface.
37 | **/
38 | bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
39 | {
40 | u32 manc;
41 | u32 fwsm, factps;
42 |
43 | DEBUGFUNC("e1000_enable_mng_pass_thru");
44 |
45 | if (!hw->mac.asf_firmware_present)
46 | return false;
47 |
48 | manc = E1000_READ_REG(hw, E1000_MANC);
49 |
50 | if (!(manc & E1000_MANC_RCV_TCO_EN))
51 | return false;
52 |
53 | if (hw->mac.has_fwsm) {
54 | fwsm = E1000_READ_REG(hw, E1000_FWSM);
55 | factps = E1000_READ_REG(hw, E1000_FACTPS);
56 |
57 | if (!(factps & E1000_FACTPS_MNGCG) &&
58 | ((fwsm & E1000_FWSM_MODE_MASK) ==
59 | (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
60 | return true;
61 | } else if ((manc & E1000_MANC_SMBUS_EN) &&
62 | !(manc & E1000_MANC_ASF_EN)) {
63 | return true;
64 | }
65 |
66 | return false;
67 | }
68 |
/**
 * e1000_host_interface_command - Writes buffer to host interface
 * @hw: pointer to the HW structure
 * @buffer: contains a command to write
 * @length: the byte length of the buffer, must be multiple of 4 bytes
 *
 * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
 * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
 *
 * On success the command's response is read back into @buffer in place.
 * NOTE(review): @buffer is accessed as u32 words, so it presumably must be
 * 4-byte aligned — confirm at the call sites.
 **/
s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
{
	u32 hicr, i;

	DEBUGFUNC("e1000_host_interface_command");

	/* No ARC subsystem: nothing to talk to, treat as a no-op success. */
	if (!(hw->mac.arc_subsystem_valid)) {
		DEBUGOUT("Hardware doesn't support host interface command.\n");
		return E1000_SUCCESS;
	}

	if (!hw->mac.asf_firmware_present) {
		DEBUGOUT("Firmware is not present.\n");
		return E1000_SUCCESS;
	}

	/* Length must be non-zero, dword-aligned, and fit the HI RAM. */
	if (length == 0 || length & 0x3 ||
	    length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Check that the host interface is enabled. */
	hicr = E1000_READ_REG(hw, E1000_HICR);
	if (!(hicr & E1000_HICR_EN)) {
		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs */
	length >>= 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < length; i++)
		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
					    *((u32 *)buffer + i));

	/* Setting this bit tells the ARC that a new command is pending. */
	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);

	/* The ARC clears HICR_C when it has consumed the command;
	 * poll for that, one millisecond per iteration. */
	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
		hicr = E1000_READ_REG(hw, E1000_HICR);
		if (!(hicr & E1000_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command successful completion: the loop must not have
	 * timed out AND the status-valid (SV) bit must be set. */
	if (i == E1000_HI_COMMAND_TIMEOUT ||
	    (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
		DEBUGOUT("Command has failed with no status valid.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Read the response back into the caller's buffer. */
	for (i = 0; i < length; i++)
		*((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
								  E1000_HOST_IF,
								  i);

	return E1000_SUCCESS;
}
141 |
142 | /**
143 | * e1000_load_firmware - Writes proxy FW code buffer to host interface
144 | * and execute.
145 | * @hw: pointer to the HW structure
146 | * @buffer: contains a firmware to write
147 | * @length: the byte length of the buffer, must be multiple of 4 bytes
148 | *
149 | * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled
150 | * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND.
151 | **/
152 | s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
153 | {
154 | u32 hicr, hibba, fwsm, icr, i;
155 |
156 | DEBUGFUNC("e1000_load_firmware");
157 |
158 | if (hw->mac.type < e1000_i210) {
159 | DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
160 | return -E1000_ERR_CONFIG;
161 | }
162 |
163 | /* Check that the host interface is enabled. */
164 | hicr = E1000_READ_REG(hw, E1000_HICR);
165 | if (!(hicr & E1000_HICR_EN)) {
166 | DEBUGOUT("E1000_HOST_EN bit disabled.\n");
167 | return -E1000_ERR_CONFIG;
168 | }
169 | if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
170 | DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
171 | return -E1000_ERR_CONFIG;
172 | }
173 |
174 | if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
175 | DEBUGOUT("Buffer length failure.\n");
176 | return -E1000_ERR_INVALID_ARGUMENT;
177 | }
178 |
179 | /* Clear notification from ROM-FW by reading ICR register */
180 | icr = E1000_READ_REG(hw, E1000_ICR_V2);
181 |
182 | /* Reset ROM-FW */
183 | hicr = E1000_READ_REG(hw, E1000_HICR);
184 | hicr |= E1000_HICR_FW_RESET_ENABLE;
185 | E1000_WRITE_REG(hw, E1000_HICR, hicr);
186 | hicr |= E1000_HICR_FW_RESET;
187 | E1000_WRITE_REG(hw, E1000_HICR, hicr);
188 | E1000_WRITE_FLUSH(hw);
189 |
190 | /* Wait till MAC notifies about its readiness after ROM-FW reset */
191 | for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
192 | icr = E1000_READ_REG(hw, E1000_ICR_V2);
193 | if (icr & E1000_ICR_MNG)
194 | break;
195 | msec_delay(1);
196 | }
197 |
198 | /* Check for timeout */
199 | if (i == E1000_HI_COMMAND_TIMEOUT) {
200 | DEBUGOUT("FW reset failed.\n");
201 | return -E1000_ERR_HOST_INTERFACE_COMMAND;
202 | }
203 |
204 | /* Wait till MAC is ready to accept new FW code */
205 | for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
206 | fwsm = E1000_READ_REG(hw, E1000_FWSM);
207 | if ((fwsm & E1000_FWSM_FW_VALID) &&
208 | ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
209 | E1000_FWSM_HI_EN_ONLY_MODE))
210 | break;
211 | msec_delay(1);
212 | }
213 |
214 | /* Check for timeout */
215 | if (i == E1000_HI_COMMAND_TIMEOUT) {
216 | DEBUGOUT("FW reset failed.\n");
217 | return -E1000_ERR_HOST_INTERFACE_COMMAND;
218 | }
219 |
220 | /* Calculate length in DWORDs */
221 | length >>= 2;
222 |
223 | /* The device driver writes the relevant FW code block
224 | * into the ram area in DWORDs via 1kB ram addressing window.
225 | */
226 | for (i = 0; i < length; i++) {
227 | if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
228 | /* Point to correct 1kB ram window */
229 | hibba = E1000_HI_FW_BASE_ADDRESS +
230 | ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
231 | (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
232 |
233 | E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
234 | }
235 |
236 | E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
237 | i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
238 | *((u32 *)buffer + i));
239 | }
240 |
241 | /* Setting this bit tells the ARC that a new FW is ready to execute. */
242 | hicr = E1000_READ_REG(hw, E1000_HICR);
243 | E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
244 |
245 | for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
246 | hicr = E1000_READ_REG(hw, E1000_HICR);
247 | if (!(hicr & E1000_HICR_C))
248 | break;
249 | msec_delay(1);
250 | }
251 |
252 | /* Check for successful FW start. */
253 | if (i == E1000_HI_COMMAND_TIMEOUT) {
254 | DEBUGOUT("New FW did not start within timeout period.\n");
255 | return -E1000_ERR_HOST_INTERFACE_COMMAND;
256 | }
257 |
258 | return E1000_SUCCESS;
259 | }
260 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_manage.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_MANAGE_H_
5 | #define _E1000_MANAGE_H_
6 |
7 | bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
8 | u8 e1000_calculate_checksum(u8 *buffer, u32 length);
9 | s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
10 | s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
11 |
12 | enum e1000_mng_mode {
13 | e1000_mng_mode_none = 0,
14 | e1000_mng_mode_asf,
15 | e1000_mng_mode_pt,
16 | e1000_mng_mode_ipmi,
17 | e1000_mng_mode_host_if_only
18 | };
19 |
20 | #define E1000_FACTPS_MNGCG 0x20000000
21 |
22 | #define E1000_FWSM_MODE_MASK 0xE
23 | #define E1000_FWSM_MODE_SHIFT 1
24 | #define E1000_FWSM_FW_VALID 0x00008000
25 | #define E1000_FWSM_HI_EN_ONLY_MODE 0x4
26 |
27 | #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
28 |
29 | #define E1000_VFTA_ENTRY_SHIFT 5
30 | #define E1000_VFTA_ENTRY_MASK 0x7F
31 | #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
32 |
33 | #define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
34 | #define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
35 | #define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
36 | #define E1000_HI_FW_BASE_ADDRESS 0x10000
37 | #define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
38 | #define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
39 | #define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
40 | #define E1000_HICR_EN 0x01 /* Enable bit - RO */
41 | /* Driver sets this bit when done to put command in RAM */
42 | #define E1000_HICR_C 0x02
43 | #define E1000_HICR_SV 0x04 /* Status Validity */
44 | #define E1000_HICR_FW_RESET_ENABLE 0x40
45 | #define E1000_HICR_FW_RESET 0x80
46 |
47 | /* Intel(R) Active Management Technology signature */
48 | #define E1000_IAMT_SIGNATURE 0x544D4149
49 | #endif
50 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_mbx.c:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #include "e1000_mbx.h"
5 |
/**
 * e1000_null_mbx_check_for_flag - No-op function, return 0
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to read
 *
 * Stub that always reports success; presumably installed as the default
 * check_for_msg/check_for_ack/check_for_rst op — confirm in the mailbox
 * init code.
 **/
static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
					 u16 E1000_UNUSEDARG mbx_id)
{
	DEBUGFUNC("e1000_null_mbx_check_flag");

	return E1000_SUCCESS;
}
18 |
/**
 * e1000_null_mbx_transact - No-op function, return 0
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer
 * @mbx_id: id of mailbox to read
 *
 * Stub that always reports success without touching @msg; presumably
 * installed as the default read/write op — confirm in the mailbox init
 * code.
 **/
static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
				   u32 E1000_UNUSEDARG *msg,
				   u16 E1000_UNUSEDARG size,
				   u16 E1000_UNUSEDARG mbx_id)
{
	DEBUGFUNC("e1000_null_mbx_rw_msg");

	return E1000_SUCCESS;
}
35 |
36 | /**
37 | * e1000_read_mbx - Reads a message from the mailbox
38 | * @hw: pointer to the HW structure
39 | * @msg: The message buffer
40 | * @size: Length of buffer
41 | * @mbx_id: id of mailbox to read
42 | *
43 | * returns SUCCESS if it successfuly read message from buffer
44 | **/
45 | s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
46 | {
47 | struct e1000_mbx_info *mbx = &hw->mbx;
48 | s32 ret_val = -E1000_ERR_MBX;
49 |
50 | DEBUGFUNC("e1000_read_mbx");
51 |
52 | /* limit read to size of mailbox */
53 | if (size > mbx->size)
54 | size = mbx->size;
55 |
56 | if (mbx->ops.read)
57 | ret_val = mbx->ops.read(hw, msg, size, mbx_id);
58 |
59 | return ret_val;
60 | }
61 |
62 | /**
63 | * e1000_write_mbx - Write a message to the mailbox
64 | * @hw: pointer to the HW structure
65 | * @msg: The message buffer
66 | * @size: Length of buffer
67 | * @mbx_id: id of mailbox to write
68 | *
69 | * returns SUCCESS if it successfully copied message into the buffer
70 | **/
71 | s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
72 | {
73 | struct e1000_mbx_info *mbx = &hw->mbx;
74 | s32 ret_val = E1000_SUCCESS;
75 |
76 | DEBUGFUNC("e1000_write_mbx");
77 |
78 | if (size > mbx->size)
79 | ret_val = -E1000_ERR_MBX;
80 |
81 | else if (mbx->ops.write)
82 | ret_val = mbx->ops.write(hw, msg, size, mbx_id);
83 |
84 | return ret_val;
85 | }
86 |
87 | /**
88 | * e1000_check_for_msg - checks to see if someone sent us mail
89 | * @hw: pointer to the HW structure
90 | * @mbx_id: id of mailbox to check
91 | *
92 | * returns SUCCESS if the Status bit was found or else ERR_MBX
93 | **/
94 | s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
95 | {
96 | struct e1000_mbx_info *mbx = &hw->mbx;
97 | s32 ret_val = -E1000_ERR_MBX;
98 |
99 | DEBUGFUNC("e1000_check_for_msg");
100 |
101 | if (mbx->ops.check_for_msg)
102 | ret_val = mbx->ops.check_for_msg(hw, mbx_id);
103 |
104 | return ret_val;
105 | }
106 |
107 | /**
108 | * e1000_check_for_ack - checks to see if someone sent us ACK
109 | * @hw: pointer to the HW structure
110 | * @mbx_id: id of mailbox to check
111 | *
112 | * returns SUCCESS if the Status bit was found or else ERR_MBX
113 | **/
114 | s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
115 | {
116 | struct e1000_mbx_info *mbx = &hw->mbx;
117 | s32 ret_val = -E1000_ERR_MBX;
118 |
119 | DEBUGFUNC("e1000_check_for_ack");
120 |
121 | if (mbx->ops.check_for_ack)
122 | ret_val = mbx->ops.check_for_ack(hw, mbx_id);
123 |
124 | return ret_val;
125 | }
126 |
127 | /**
128 | * e1000_check_for_rst - checks to see if other side has reset
129 | * @hw: pointer to the HW structure
130 | * @mbx_id: id of mailbox to check
131 | *
132 | * returns SUCCESS if the Status bit was found or else ERR_MBX
133 | **/
134 | s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
135 | {
136 | struct e1000_mbx_info *mbx = &hw->mbx;
137 | s32 ret_val = -E1000_ERR_MBX;
138 |
139 | DEBUGFUNC("e1000_check_for_rst");
140 |
141 | if (mbx->ops.check_for_rst)
142 | ret_val = mbx->ops.check_for_rst(hw, mbx_id);
143 |
144 | return ret_val;
145 | }
146 |
147 | /**
148 | * e1000_poll_for_msg - Wait for message notification
149 | * @hw: pointer to the HW structure
150 | * @mbx_id: id of mailbox to write
151 | *
152 | * returns SUCCESS if it successfully received a message notification
153 | **/
154 | static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
155 | {
156 | struct e1000_mbx_info *mbx = &hw->mbx;
157 | int countdown = mbx->timeout;
158 |
159 | DEBUGFUNC("e1000_poll_for_msg");
160 |
161 | if (!countdown || !mbx->ops.check_for_msg)
162 | goto out;
163 |
164 | while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
165 | countdown--;
166 | if (!countdown)
167 | break;
168 | usec_delay(mbx->usec_delay);
169 | }
170 |
171 | /* if we failed, all future posted messages fail until reset */
172 | if (!countdown)
173 | mbx->timeout = 0;
174 | out:
175 | return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
176 | }
177 |
178 | /**
179 | * e1000_poll_for_ack - Wait for message acknowledgement
180 | * @hw: pointer to the HW structure
181 | * @mbx_id: id of mailbox to write
182 | *
183 | * returns SUCCESS if it successfully received a message acknowledgement
184 | **/
185 | static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
186 | {
187 | struct e1000_mbx_info *mbx = &hw->mbx;
188 | int countdown = mbx->timeout;
189 |
190 | DEBUGFUNC("e1000_poll_for_ack");
191 |
192 | if (!countdown || !mbx->ops.check_for_ack)
193 | goto out;
194 |
195 | while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
196 | countdown--;
197 | if (!countdown)
198 | break;
199 | usec_delay(mbx->usec_delay);
200 | }
201 |
202 | /* if we failed, all future posted messages fail until reset */
203 | if (!countdown)
204 | mbx->timeout = 0;
205 | out:
206 | return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
207 | }
208 |
209 | /**
210 | * e1000_read_posted_mbx - Wait for message notification and receive message
211 | * @hw: pointer to the HW structure
212 | * @msg: The message buffer
213 | * @size: Length of buffer
214 | * @mbx_id: id of mailbox to write
215 | *
216 | * returns SUCCESS if it successfully received a message notification and
217 | * copied it into the receive buffer.
218 | **/
219 | s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
220 | {
221 | struct e1000_mbx_info *mbx = &hw->mbx;
222 | s32 ret_val = -E1000_ERR_MBX;
223 |
224 | DEBUGFUNC("e1000_read_posted_mbx");
225 |
226 | if (!mbx->ops.read)
227 | goto out;
228 |
229 | ret_val = e1000_poll_for_msg(hw, mbx_id);
230 |
231 | /* if ack received read message, otherwise we timed out */
232 | if (!ret_val)
233 | ret_val = mbx->ops.read(hw, msg, size, mbx_id);
234 | out:
235 | return ret_val;
236 | }
237 |
238 | /**
239 | * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
240 | * @hw: pointer to the HW structure
241 | * @msg: The message buffer
242 | * @size: Length of buffer
243 | * @mbx_id: id of mailbox to write
244 | *
245 | * returns SUCCESS if it successfully copied message into the buffer and
246 | * received an ack to that message within delay * timeout period
247 | **/
248 | s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
249 | {
250 | struct e1000_mbx_info *mbx = &hw->mbx;
251 | s32 ret_val = -E1000_ERR_MBX;
252 |
253 | DEBUGFUNC("e1000_write_posted_mbx");
254 |
255 | /* exit if either we can't write or there isn't a defined timeout */
256 | if (!mbx->ops.write || !mbx->timeout)
257 | goto out;
258 |
259 | /* send msg */
260 | ret_val = mbx->ops.write(hw, msg, size, mbx_id);
261 |
262 | /* if msg sent wait until we receive an ack */
263 | if (!ret_val)
264 | ret_val = e1000_poll_for_ack(hw, mbx_id);
265 | out:
266 | return ret_val;
267 | }
268 |
269 | /**
270 | * e1000_init_mbx_ops_generic - Initialize mbx function pointers
271 | * @hw: pointer to the HW structure
272 | *
273 | * Sets the function pointers to no-op functions
274 | **/
275 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
276 | {
277 | struct e1000_mbx_info *mbx = &hw->mbx;
278 | mbx->ops.init_params = e1000_null_ops_generic;
279 | mbx->ops.read = e1000_null_mbx_transact;
280 | mbx->ops.write = e1000_null_mbx_transact;
281 | mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
282 | mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
283 | mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
284 | mbx->ops.read_posted = e1000_read_posted_mbx;
285 | mbx->ops.write_posted = e1000_write_posted_mbx;
286 | }
287 |
288 | static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
289 | {
290 | u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
291 | s32 ret_val = -E1000_ERR_MBX;
292 |
293 | if (mbvficr & mask) {
294 | ret_val = E1000_SUCCESS;
295 | E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
296 | }
297 |
298 | return ret_val;
299 | }
300 |
301 | /**
302 | * e1000_check_for_msg_pf - checks to see if the VF has sent mail
303 | * @hw: pointer to the HW structure
304 | * @vf_number: the VF index
305 | *
306 | * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
307 | **/
308 | static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
309 | {
310 | s32 ret_val = -E1000_ERR_MBX;
311 |
312 | DEBUGFUNC("e1000_check_for_msg_pf");
313 |
314 | if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
315 | ret_val = E1000_SUCCESS;
316 | hw->mbx.stats.reqs++;
317 | }
318 |
319 | return ret_val;
320 | }
321 |
322 | /**
323 | * e1000_check_for_ack_pf - checks to see if the VF has ACKed
324 | * @hw: pointer to the HW structure
325 | * @vf_number: the VF index
326 | *
327 | * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
328 | **/
329 | static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
330 | {
331 | s32 ret_val = -E1000_ERR_MBX;
332 |
333 | DEBUGFUNC("e1000_check_for_ack_pf");
334 |
335 | if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
336 | ret_val = E1000_SUCCESS;
337 | hw->mbx.stats.acks++;
338 | }
339 |
340 | return ret_val;
341 | }
342 |
343 | /**
344 | * e1000_check_for_rst_pf - checks to see if the VF has reset
345 | * @hw: pointer to the HW structure
346 | * @vf_number: the VF index
347 | *
348 | * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
349 | **/
350 | static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
351 | {
352 | u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
353 | s32 ret_val = -E1000_ERR_MBX;
354 |
355 | DEBUGFUNC("e1000_check_for_rst_pf");
356 |
357 | if (vflre & (1 << vf_number)) {
358 | ret_val = E1000_SUCCESS;
359 | E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
360 | hw->mbx.stats.rsts++;
361 | }
362 |
363 | return ret_val;
364 | }
365 |
366 | /**
367 | * e1000_obtain_mbx_lock_pf - obtain mailbox lock
368 | * @hw: pointer to the HW structure
369 | * @vf_number: the VF index
370 | *
371 | * return SUCCESS if we obtained the mailbox lock
372 | **/
373 | static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
374 | {
375 | s32 ret_val = -E1000_ERR_MBX;
376 | u32 p2v_mailbox;
377 | int count = 10;
378 |
379 | DEBUGFUNC("e1000_obtain_mbx_lock_pf");
380 |
381 | do {
382 | /* Take ownership of the buffer */
383 | E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number),
384 | E1000_P2VMAILBOX_PFU);
385 |
386 | /* reserve mailbox for pf use */
387 | p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
388 | if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
389 | ret_val = E1000_SUCCESS;
390 | break;
391 | }
392 | usec_delay(1000);
393 | } while (count-- > 0);
394 |
395 | return ret_val;
396 |
397 | }
398 |
399 | /**
400 | * e1000_write_mbx_pf - Places a message in the mailbox
401 | * @hw: pointer to the HW structure
402 | * @msg: The message buffer
403 | * @size: Length of buffer
404 | * @vf_number: the VF index
405 | *
406 | * returns SUCCESS if it successfully copied message into the buffer
407 | **/
408 | static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
409 | u16 vf_number)
410 | {
411 | s32 ret_val;
412 | u16 i;
413 |
414 | DEBUGFUNC("e1000_write_mbx_pf");
415 |
416 | /* lock the mailbox to prevent pf/vf race condition */
417 | ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
418 | if (ret_val)
419 | goto out_no_write;
420 |
421 | /* flush msg and acks as we are overwriting the message buffer */
422 | e1000_check_for_msg_pf(hw, vf_number);
423 | e1000_check_for_ack_pf(hw, vf_number);
424 |
425 | /* copy the caller specified message to the mailbox memory buffer */
426 | for (i = 0; i < size; i++)
427 | E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
428 |
429 | /* Interrupt VF to tell it a message has been sent and release buffer*/
430 | E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
431 |
432 | /* update stats */
433 | hw->mbx.stats.msgs_tx++;
434 |
435 | out_no_write:
436 | return ret_val;
437 |
438 | }
439 |
440 | /**
441 | * e1000_read_mbx_pf - Read a message from the mailbox
442 | * @hw: pointer to the HW structure
443 | * @msg: The message buffer
444 | * @size: Length of buffer
445 | * @vf_number: the VF index
446 | *
447 | * This function copies a message from the mailbox buffer to the caller's
448 | * memory buffer. The presumption is that the caller knows that there was
449 | * a message due to a VF request so no polling for message is needed.
450 | **/
451 | static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
452 | u16 vf_number)
453 | {
454 | s32 ret_val;
455 | u16 i;
456 |
457 | DEBUGFUNC("e1000_read_mbx_pf");
458 |
459 | /* lock the mailbox to prevent pf/vf race condition */
460 | ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
461 | if (ret_val)
462 | goto out_no_read;
463 |
464 | /* copy the message to the mailbox memory buffer */
465 | for (i = 0; i < size; i++)
466 | msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
467 |
468 | /* Acknowledge the message and release buffer */
469 | E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
470 |
471 | /* update stats */
472 | hw->mbx.stats.msgs_rx++;
473 |
474 | out_no_read:
475 | return ret_val;
476 | }
477 |
478 | /**
479 | * e1000_init_mbx_params_pf - set initial values for pf mailbox
480 | * @hw: pointer to the HW structure
481 | *
482 | * Initializes the hw->mbx struct to correct values for pf mailbox
483 | */
s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
	case e1000_i354:
		/* a zero timeout/delay makes the posted read/write helpers
		 * bail out immediately (see e1000_write_posted_mbx and the
		 * poll loops, which treat timeout 0 as failure)
		 */
		mbx->timeout = 0;
		mbx->usec_delay = 0;

		/* mailbox payload size in 32-bit words */
		mbx->size = E1000_VFMAILBOX_SIZE;

		/* PF-side accessors; the posted variants reuse the generic
		 * poll-then-transact implementations from this file
		 */
		mbx->ops.read = e1000_read_mbx_pf;
		mbx->ops.write = e1000_write_mbx_pf;
		mbx->ops.read_posted = e1000_read_posted_mbx;
		mbx->ops.write_posted = e1000_write_posted_mbx;
		mbx->ops.check_for_msg = e1000_check_for_msg_pf;
		mbx->ops.check_for_ack = e1000_check_for_ack_pf;
		mbx->ops.check_for_rst = e1000_check_for_rst_pf;

		/* start all mailbox counters from zero */
		mbx->stats.msgs_tx = 0;
		mbx->stats.msgs_rx = 0;
		mbx->stats.reqs = 0;
		mbx->stats.acks = 0;
		mbx->stats.rsts = 0;
#ifdef LINUX_VERSION_CODE
		fallthrough;
#else
		/* Fall through */
#endif /* LINUX_VERSION_CODE */
	default:
		/* non-SR-IOV-capable MACs leave hw->mbx untouched */
		return E1000_SUCCESS;
	}
}
519 |
520 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_mbx.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_MBX_H_
5 | #define _E1000_MBX_H_
6 |
7 | #include "e1000_api.h"
8 |
9 | #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
10 | #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
11 | #define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
12 | #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
13 | #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
14 |
15 | #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
16 | #define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
17 | #define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
18 | #define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
19 |
20 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
21 |
22 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
23 | * PF. The reverse is true if it is E1000_PF_*.
24 | * Message ACK's are the value or'd with 0xF0000000
25 | */
26 | /* Msgs below or'd with this are the ACK */
27 | #define E1000_VT_MSGTYPE_ACK 0x80000000
28 | /* Msgs below or'd with this are the NACK */
29 | #define E1000_VT_MSGTYPE_NACK 0x40000000
30 | /* Indicates that VF is still clear to send requests */
31 | #define E1000_VT_MSGTYPE_CTS 0x20000000
32 | #define E1000_VT_MSGINFO_SHIFT 16
33 | /* bits 23:16 are used for extra info for certain messages */
34 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
35 |
36 | #define E1000_VF_RESET 0x01 /* VF requests reset */
37 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
38 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
39 | #define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
40 | #define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
41 | #define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
42 | #define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
43 | #define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
44 | #define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
45 | #define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
46 | #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
47 |
48 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
49 |
50 | #define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
51 | #define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
52 |
53 | s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
54 | s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
55 | s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
56 | s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
57 | s32 e1000_check_for_msg(struct e1000_hw *, u16);
58 | s32 e1000_check_for_ack(struct e1000_hw *, u16);
59 | s32 e1000_check_for_rst(struct e1000_hw *, u16);
60 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
61 | s32 e1000_init_mbx_params_pf(struct e1000_hw *);
62 |
63 | #endif /* _E1000_MBX_H_ */
64 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_nvm.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_NVM_H_
5 | #define _E1000_NVM_H_
6 |
7 | struct e1000_fw_version {
8 | u32 etrack_id;
9 | u16 eep_major;
10 | u16 eep_minor;
11 | u16 eep_build;
12 |
13 | u8 invm_major;
14 | u8 invm_minor;
15 | u8 invm_img_type;
16 |
17 | bool or_valid;
18 | u16 or_major;
19 | u16 or_build;
20 | u16 or_patch;
21 | };
22 |
23 | void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
24 | s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
25 | void e1000_null_nvm_generic(struct e1000_hw *hw);
26 | s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
27 | s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
28 | s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
29 |
30 | s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
31 | s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
32 | s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
33 | u32 pba_num_size);
34 | s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
35 | s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
36 | s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
37 | u16 *data);
38 | s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
39 | s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
40 | s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
41 | u16 *data);
42 | s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
43 | void e1000_release_nvm_generic(struct e1000_hw *hw);
44 | void e1000_get_fw_version(struct e1000_hw *hw,
45 | struct e1000_fw_version *fw_vers);
46 |
47 | #define E1000_STM_OPCODE 0xDB00
48 |
49 | #endif
50 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_osdep.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | /* glue for the OS independent part of e1000
5 | * includes register access macros
6 | */
7 |
8 | #ifndef _E1000_OSDEP_H_
9 | #define _E1000_OSDEP_H_
10 |
11 | #ifdef __APPLE__
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include
23 | #else
24 | #include
25 | #include
26 | #include
27 | #include
28 | #include
29 | #endif
30 | #include "kcompat.h"
31 |
32 | #define usec_delay(x) udelay(x)
33 | #define usec_delay_irq(x) udelay(x)
34 | #ifndef msec_delay
35 | #define msec_delay(x) do { \
36 | /* Don't mdelay in interrupt context! */ \
37 | if (in_interrupt()) \
38 | BUG(); \
39 | else \
40 | msleep(x); \
41 | } while (0)
42 |
43 | /* Some workarounds require millisecond delays and are run during interrupt
44 | * context. Most notably, when establishing link, the phy may need tweaking
45 | * but cannot process phy register reads/writes faster than millisecond
46 | * intervals...and we establish link due to a "link status change" interrupt.
47 | */
48 | #define msec_delay_irq(x) mdelay(x)
49 |
50 | #define E1000_READ_REG(x, y) e1000_read_reg(x, y)
51 | #define E1000_READ_REG8(h, r) readb(READ_ONCE(h->hw_addr) + r)
52 | #endif
53 |
54 | #define PCI_COMMAND_REGISTER PCI_COMMAND
55 | #define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
56 | #define ETH_ADDR_LEN ETH_ALEN
57 |
58 | #ifdef __BIG_ENDIAN
59 | #define E1000_BIG_ENDIAN __BIG_ENDIAN
60 | #endif
61 |
62 | #ifdef DEBUG
63 | #define DEBUGOUT(S) pr_debug(S)
64 | #define DEBUGOUT1(S, A...) pr_debug(S, ## A)
65 | #else
66 | #define DEBUGOUT(S) do { } while (0)
67 | #define DEBUGOUT1(S, A...) do { } while (0)
68 | #endif
69 |
70 | #ifdef DEBUG_FUNC
71 | #define DEBUGFUNC(F) DEBUGOUT(F "\n")
72 | #else
73 | #define DEBUGFUNC(F)
74 | #endif
75 | #define DEBUGOUT2 DEBUGOUT1
76 | #define DEBUGOUT3 DEBUGOUT2
77 | #define DEBUGOUT7 DEBUGOUT3
78 |
79 | #define E1000_REGISTER(a, reg) reg
80 |
81 | /* forward declaration */
82 | struct e1000_hw;
83 |
84 | /* write operations, indexed using DWORDS */
85 | #define E1000_WRITE_REG(hw, reg, val) \
86 | do { \
87 | u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
88 | if (!E1000_REMOVED(hw_addr)) \
89 | writel((val), &hw_addr[(reg)]); \
90 | } while (0)
91 |
92 | u32 e1000_read_reg(struct e1000_hw *hw, u32 reg);
93 |
94 | #define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \
95 | E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val))
96 |
97 | #define E1000_READ_REG_ARRAY(hw, reg, idx) ( \
98 | e1000_read_reg((hw), (reg) + ((idx) << 2)))
99 |
100 | #define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
101 | #define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
102 |
103 | #define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
104 | writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \
105 | ((offset) << 1))))
106 |
107 | #define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
108 | readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
109 |
110 | #define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
111 | writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
112 |
113 | #define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
114 | readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
115 |
116 | #define E1000_WRITE_REG_IO(a, reg, offset) do { \
117 | outl(reg, ((a)->io_base)); \
118 | outl(offset, ((a)->io_base + 4)); \
119 | } while (0)
120 |
121 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
122 |
123 | #define E1000_WRITE_FLASH_REG(a, reg, value) ( \
124 | writel((value), ((a)->flash_address + reg)))
125 |
126 | #define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
127 | writew((value), ((a)->flash_address + reg)))
128 |
129 | #define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
130 |
131 | #define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
132 |
133 | #define E1000_READ_FLASH_REG8(a, reg) ( \
134 | readb(READ_ONCE((a)->flash_address) + reg))
135 |
136 | #define E1000_REMOVED(h) unlikely(!(h))
137 |
138 | /* VF requests to clear all unicast MAC filters */
139 | #define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
140 | /* VF requests to add unicast MAC filter */
141 | #define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
142 |
143 | #endif /* _E1000_OSDEP_H_ */
144 |
--------------------------------------------------------------------------------
/SimpleGBE/e1000_phy.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | #ifndef _E1000_PHY_H_
5 | #define _E1000_PHY_H_
6 |
7 | void e1000_init_phy_ops_generic(struct e1000_hw *hw);
8 | s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
9 | void e1000_null_phy_generic(struct e1000_hw *hw);
10 | s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
11 | s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
12 | s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
13 | s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
14 | u8 dev_addr, u8 *data);
15 | s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
16 | u8 dev_addr, u8 data);
17 | s32 e1000_check_downshift_generic(struct e1000_hw *hw);
18 | s32 e1000_check_polarity_m88(struct e1000_hw *hw);
19 | s32 e1000_check_polarity_igp(struct e1000_hw *hw);
20 | s32 e1000_check_polarity_ife(struct e1000_hw *hw);
21 | s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
22 | s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
23 | s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
24 | s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
25 | s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
26 | s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
27 | s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
28 | s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
29 | s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
30 | s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
31 | s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
32 | s32 e1000_get_phy_id(struct e1000_hw *hw);
33 | s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
34 | s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
35 | s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
36 | s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
37 | void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
38 | s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
39 | s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
40 | s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
41 | s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
42 | s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
43 | s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
44 | s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
45 | s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
46 | s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
47 | s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
48 | s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
49 | s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
50 | s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
51 | s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
52 | s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
53 | s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
54 | u32 usec_interval, bool *success);
55 | s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
56 | enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
57 | s32 e1000_determine_phy_address(struct e1000_hw *hw);
58 | s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
59 | s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
60 | s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
61 | u16 *data, bool read, bool page_set);
62 | void e1000_power_up_phy_copper(struct e1000_hw *hw);
63 | void e1000_power_down_phy_copper(struct e1000_hw *hw);
64 | void e1000_disable_phy_retry_mechanism(struct e1000_hw *hw, u32 *phy_retries_original);
65 | void e1000_enable_phy_retry_mechanism(struct e1000_hw *hw, u32 phy_retries_original);
66 | s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
67 | s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
68 | s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
69 | s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
70 | s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
71 | s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
72 | s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
73 | s32 e1000_check_polarity_82577(struct e1000_hw *hw);
74 | s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
75 | s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
76 | s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
77 | s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
78 | s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
79 | s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
80 | s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
81 | bool line_override);
82 | bool e1000_is_mphy_ready(struct e1000_hw *hw);
83 |
84 | s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
85 | u16 *data);
86 | s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
87 | u16 data);
88 |
89 | #define E1000_MAX_PHY_ADDR 8
90 |
91 | /* IGP01E1000 Specific Registers */
92 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
93 | #define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
94 | #define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
95 | #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
96 | #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
97 | #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
98 | #define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
99 | #define IGP_PAGE_SHIFT 5
100 | #define PHY_REG_MASK 0x1F
101 |
102 | /* GS40G - I210 PHY defines */
103 | #define GS40G_PAGE_SELECT 0x16
104 | #define GS40G_PAGE_SHIFT 16
105 | #define GS40G_OFFSET_MASK 0xFFFF
106 | #define GS40G_PAGE_2 0x20000
107 | #define GS40G_MAC_REG2 0x15
108 | #define GS40G_MAC_LB 0x4140
109 | #define GS40G_MAC_SPEED_1G 0X0006
110 | #define GS40G_COPPER_SPEC 0x0010
111 |
112 | #define HV_INTC_FC_PAGE_START 768
113 | #define I82578_ADDR_REG 29
114 | #define I82577_ADDR_REG 16
115 | #define I82577_CFG_REG 22
116 | #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
117 | #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
118 | #define I82577_CTRL_REG 23
119 |
120 | /* 82577 specific PHY registers */
121 | #define I82577_PHY_CTRL_2 18
122 | #define I82577_PHY_LBK_CTRL 19
123 | #define I82577_PHY_STATUS_2 26
124 | #define I82577_PHY_DIAG_STATUS 31
125 |
126 | /* I82577 PHY Status 2 */
127 | #define I82577_PHY_STATUS2_REV_POLARITY 0x0400
128 | #define I82577_PHY_STATUS2_MDIX 0x0800
129 | #define I82577_PHY_STATUS2_SPEED_MASK 0x0300
130 | #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
131 |
132 | /* I82577 PHY Control 2 */
133 | #define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
134 | #define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
135 | #define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
136 |
137 | /* I82577 PHY Diagnostics Status */
138 | #define I82577_DSTATUS_CABLE_LENGTH 0x03FC
139 | #define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
140 |
141 | /* 82580 PHY Power Management */
142 | #define E1000_82580_PHY_POWER_MGMT 0xE14
143 | #define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
144 | #define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
145 | #define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
146 | #define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
147 |
148 | #define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
149 | #define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
150 | #define E1000_MPHY_BUSY 0x00010000 /* busy bit */
151 | #define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
152 | #define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
153 |
154 | #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
155 | #define IGP01E1000_PHY_POLARITY_MASK 0x0078
156 |
157 | #define IGP01E1000_PSCR_AUTO_MDIX 0x1000
158 | #define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
159 |
160 | #define IGP01E1000_PSCFR_SMART_SPEED 0x0080
161 |
162 | #define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
163 | #define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
164 | #define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
165 |
166 | #define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
167 |
168 | #define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
169 | #define IGP01E1000_PSSR_MDIX 0x0800
170 | #define IGP01E1000_PSSR_SPEED_MASK 0xC000
171 | #define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
172 |
173 | #define IGP02E1000_PHY_CHANNEL_NUM 4
174 | #define IGP02E1000_PHY_AGC_A 0x11B1
175 | #define IGP02E1000_PHY_AGC_B 0x12B1
176 | #define IGP02E1000_PHY_AGC_C 0x14B1
177 | #define IGP02E1000_PHY_AGC_D 0x18B1
178 |
179 | #define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */
180 | #define IGP02E1000_AGC_LENGTH_MASK 0x7F
181 | #define IGP02E1000_AGC_RANGE 15
182 |
183 | #define E1000_CABLE_LENGTH_UNDEFINED 0xFF
184 |
185 | #define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
186 | #define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
187 | #define E1000_KMRNCTRLSTA_REN 0x00200000
188 | #define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
189 | #define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
190 | #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
191 | #define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
192 | #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
193 |
194 | #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
195 | #define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
196 | #define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
197 | #define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
198 |
199 | /* IFE PHY Extended Status Control */
200 | #define IFE_PESC_POLARITY_REVERSED 0x0100
201 |
202 | /* IFE PHY Special Control */
203 | #define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
204 | #define IFE_PSC_FORCE_POLARITY 0x0020
205 |
206 | /* IFE PHY Special Control and LED Control */
207 | #define IFE_PSCL_PROBE_MODE 0x0020
208 | #define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
209 | #define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
210 |
211 | /* IFE PHY MDIX Control */
212 | #define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
213 | #define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
214 | #define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
215 |
216 | /* SFP modules ID memory locations */
217 | #define E1000_SFF_IDENTIFIER_OFFSET 0x00
218 | #define E1000_SFF_IDENTIFIER_SFF 0x02
219 | #define E1000_SFF_IDENTIFIER_SFP 0x03
220 |
221 | #define E1000_SFF_ETH_FLAGS_OFFSET 0x06
222 | /* Flags for SFP modules compatible with ETH up to 1Gb */
223 | struct sfp_e1000_flags { /* NOTE(review): bit meanings per field names; confirm against SFF-8472 byte 6 */
224 | 	u8 e1000_base_sx:1; /* 1000BASE-SX capable */
225 | 	u8 e1000_base_lx:1; /* 1000BASE-LX capable */
226 | 	u8 e1000_base_cx:1; /* 1000BASE-CX capable */
227 | 	u8 e1000_base_t:1; /* 1000BASE-T capable */
228 | 	u8 e100_base_lx:1; /* 100BASE-LX capable */
229 | 	u8 e100_base_fx:1; /* 100BASE-FX capable */
230 | 	u8 e10_base_bx10:1; /* 10BASE-BX10 capable */
231 | 	u8 e10_base_px:1; /* 10BASE-PX capable */
232 | };
233 |
234 | /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
235 | #define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
236 | #define E1000_SFF_VENDOR_OUI_FTL 0x00906500
237 | #define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
238 | #define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
239 |
240 | #endif
241 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_debugfs.c:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: GPL-2.0
2 | /* Copyright(c) 2007 - 2022 Intel Corporation. */
3 |
4 | #include "igb.h"
5 |
6 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_hwmon.c:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: GPL-2.0
2 | /* Copyright(c) 2007 - 2022 Intel Corporation. */
3 |
4 | #include "igb.h"
5 | #include "e1000_82575.h"
6 | #include "e1000_hw.h"
7 | #ifdef IGB_HWMON
8 | #include <linux/module.h>
9 | #include <linux/types.h>
10 | #include <linux/sysfs.h>
11 | #include <linux/kobject.h>
12 | #include <linux/device.h>
13 | #include <linux/netdevice.h>
14 | #include <linux/hwmon.h>
15 | #include <linux/pci.h>
16 |
17 | #ifdef HAVE_I2C_SUPPORT
18 | static struct i2c_board_info i350_sensor_info = {
19 | I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
20 | };
21 | #endif /* HAVE_I2C_SUPPORT */
22 |
23 | /* hwmon callback functions */
24 | static ssize_t igb_hwmon_show_location(struct device *dev,
25 | 				       struct device_attribute *attr,
26 | 				       char *buf)
27 | {
28 | 	/* report this sensor's location index as "locN" */
29 | 	struct hwmon_attr *hwm_attr = container_of(attr, struct hwmon_attr,
30 | 						   dev_attr);
31 | 	return sprintf(buf, "loc%u\n", hwm_attr->sensor->location);
32 | }
33 |
34 | static ssize_t igb_hwmon_show_temp(struct device *dev,
35 | 				   struct device_attribute *attr,
36 | 				   char *buf)
37 | {
38 | 	struct hwmon_attr *hwm_attr = container_of(attr, struct hwmon_attr,
39 | 						   dev_attr);
40 | 	unsigned int mdeg;
41 | 
42 | 	/* have the MAC refresh the cached sensor reading before use */
43 | 	hwm_attr->hw->mac.ops.get_thermal_sensor_data(hwm_attr->hw);
44 | 
45 | 	mdeg = hwm_attr->sensor->temp;
46 | 
47 | 	/* sensor holds whole degrees, sysfs expects millidegrees */
48 | 	mdeg *= 1000;
49 | 
50 | 	return sprintf(buf, "%u\n", mdeg);
51 | }
52 |
53 | static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
54 | 					    struct device_attribute *attr,
55 | 					    char *buf)
56 | {
57 | 	struct hwmon_attr *hwm_attr = container_of(attr, struct hwmon_attr,
58 | 						   dev_attr);
59 | 	unsigned int mdeg = hwm_attr->sensor->caution_thresh;
60 | 
61 | 	/* threshold is stored in whole degrees, report millidegrees */
62 | 	mdeg *= 1000;
63 | 
64 | 	return sprintf(buf, "%u\n", mdeg);
65 | }
66 |
67 | static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
68 | 					  struct device_attribute *attr,
69 | 					  char *buf)
70 | {
71 | 	struct hwmon_attr *hwm_attr = container_of(attr, struct hwmon_attr,
72 | 						   dev_attr);
73 | 	unsigned int mdeg = hwm_attr->sensor->max_op_thresh;
74 | 
75 | 	/* threshold is stored in whole degrees, report millidegrees */
76 | 	mdeg *= 1000;
77 | 
78 | 	return sprintf(buf, "%u\n", mdeg);
79 | }
80 |
81 | /* igb_add_hwmon_attr - create one hwmon sysfs attribute for a sensor value
82 |  * @adapter: pointer to the adapter structure
83 |  * @offset: offset in the eeprom sensor data table
84 |  * @type: type of sensor data to display
85 |  *
86 |  * Every file exposed through hwmon's sysfs interface needs a
87 |  * device_attribute; it lives inside a hwmon_attr struct together with the
88 |  * references to the data structures needed to produce the value shown.
89 |  */
90 | static int igb_add_hwmon_attr(struct igb_adapter *adapter,
91 | 			      unsigned int offset, int type) {
92 | 	struct hwmon_attr *hwm_attr;
93 | 	unsigned int idx;
94 | 	int rc;
95 | 
96 | 	idx = adapter->igb_hwmon_buff.n_hwmon;
97 | 	hwm_attr = &adapter->igb_hwmon_buff.hwmon_list[idx];
98 | 
99 | 	switch (type) {
100 | 	case IGB_HWMON_TYPE_LOC:
101 | 		hwm_attr->dev_attr.show = igb_hwmon_show_location;
102 | 		snprintf(hwm_attr->name, sizeof(hwm_attr->name),
103 | 			 "temp%u_label", offset);
104 | 		break;
105 | 	case IGB_HWMON_TYPE_TEMP:
106 | 		hwm_attr->dev_attr.show = igb_hwmon_show_temp;
107 | 		snprintf(hwm_attr->name, sizeof(hwm_attr->name),
108 | 			 "temp%u_input", offset);
109 | 		break;
110 | 	case IGB_HWMON_TYPE_CAUTION:
111 | 		hwm_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
112 | 		snprintf(hwm_attr->name, sizeof(hwm_attr->name),
113 | 			 "temp%u_max", offset);
114 | 		break;
115 | 	case IGB_HWMON_TYPE_MAX:
116 | 		hwm_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
117 | 		snprintf(hwm_attr->name, sizeof(hwm_attr->name),
118 | 			 "temp%u_crit", offset);
119 | 		break;
120 | 	default:
121 | 		/* unknown sensor value type - nothing to expose */
122 | 		return -EPERM;
123 | 	}
124 | 
125 | 	/* These fields are the same regardless of type */
126 | 	hwm_attr->sensor =
127 | 		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
128 | 	hwm_attr->hw = &adapter->hw;
129 | 	hwm_attr->dev_attr.store = NULL;
130 | 	hwm_attr->dev_attr.attr.mode = 0444;
131 | 	hwm_attr->dev_attr.attr.name = hwm_attr->name;
132 | 	sysfs_attr_init(&hwm_attr->dev_attr.attr);
133 | 	rc = device_create_file(&adapter->pdev->dev,
134 | 				&hwm_attr->dev_attr);
135 | 	if (rc == 0)
136 | 		++adapter->igb_hwmon_buff.n_hwmon;
137 | 
138 | 	return rc;
139 | }
140 |
141 | static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
142 | {
143 | 	int i;
144 | 
145 | 	if (!adapter)
146 | 		return;
147 | 
148 | 	/* remove every attribute file that was successfully created */
149 | 	for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++)
150 | 		device_remove_file(&adapter->pdev->dev,
151 | 				   &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
152 | 
153 | 	kfree(adapter->igb_hwmon_buff.hwmon_list);
154 | 
155 | 	if (adapter->igb_hwmon_buff.device)
156 | 		hwmon_device_unregister(adapter->igb_hwmon_buff.device);
157 | }
158 |
159 | /* igb_sysfs_exit - tear down the hwmon interface; called from igb_main.c */
160 | void igb_sysfs_exit(struct igb_adapter *adapter)
161 | {
162 | 	igb_sysfs_del_adapter(adapter);
163 | }
164 |
165 | /* igb_sysfs_init - register hwmon thermal entries; called from igb_main.c */
166 | int igb_sysfs_init(struct igb_adapter *adapter)
167 | {
168 | 	struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
169 | 	unsigned int i;
170 | 	int n_attrs;
171 | 	int rc = 0;
172 | #ifdef HAVE_I2C_SUPPORT
173 | 	struct i2c_client *client = NULL;
174 | #endif /* HAVE_I2C_SUPPORT */
175 | 
176 | 	/* If this method isn't defined we don't support thermals */
177 | 	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
178 | 		goto exit;
179 | 
180 | 	/* Don't create thermal hwmon interface if no sensors present */
181 | 	rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
182 | 	if (rc)
183 | 		goto exit;
184 | #ifdef HAVE_I2C_SUPPORT
185 | 	/* init i2c_client */
186 | 	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
187 | 	if (client == NULL) {
188 | 		dev_info(&adapter->pdev->dev,
189 | 			"Failed to create new i2c device..\n");
190 | 		goto exit;
191 | 	}
192 | 	adapter->i2c_client = client;
193 | #endif /* HAVE_I2C_SUPPORT */
194 | 
195 | 	/* Allocation space for max attributes
196 | 	 * max num sensors * values (loc, temp, max, caution)
197 | 	 */
198 | 	n_attrs = E1000_MAX_SENSORS * 4;
199 | 	igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
200 | 					GFP_KERNEL);
201 | 	if (!igb_hwmon->hwmon_list) {
202 | 		rc = -ENOMEM;
203 | 		goto err;
204 | 	}
205 | 
206 | 	igb_hwmon->device =
207 | #ifdef HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
208 | 		hwmon_device_register_with_groups(&adapter->pdev->dev,
209 | 						"igb", NULL, NULL);
210 | #else
211 | 		hwmon_device_register(&adapter->pdev->dev);
212 | #endif /* HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS */
213 | 	if (IS_ERR(igb_hwmon->device)) {
214 | 		rc = PTR_ERR(igb_hwmon->device);
215 | 		goto err;
216 | 	}
217 | 
218 | 	for (i = 0; i < E1000_MAX_SENSORS; i++) {
219 | 
220 | 		/* Only create hwmon sysfs entries for sensors that have
221 | 		 * meaningful data.
222 | 		 */
223 | 		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
224 | 			continue;
225 | 
226 | 		/* Bail on the first failing attr so its errno is preserved */
227 | 		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
228 | 		if (!rc) rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
229 | 		if (!rc) rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
230 | 		if (!rc) rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
231 | 		if (rc)
232 | 			goto err;
233 | 	}
234 | 
235 | 	goto exit;
236 | 
237 | err:
238 | 	igb_sysfs_del_adapter(adapter);
239 | exit:
240 | 	return rc;
241 | }
242 | #endif /* IGB_HWMON */
243 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_param.c:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: GPL-2.0 */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 |
5 | #ifdef __APPLE__
6 | #include <sys/sysctl.h>
7 |
8 | static int num_online_cpus(void)
9 | {
10 | 	int count = 0;
11 | 	size_t len = sizeof(count);
12 | 	/* treat sysctl failure or a bogus value as a single CPU */
13 | 	if (sysctlbyname("hw.physicalcpu", &count, &len, NULL, 0) || count < 1)
14 | 		count = 1;
15 | 	return count;
16 | }
16 |
17 | #else
18 | #include <linux/netdevice.h>
19 | #endif
20 |
21 | #include "igb.h"
22 |
23 | /* This is the only thing that needs to be changed to adjust the
24 | * maximum number of ports that the driver can manage.
25 | */
26 |
27 | #define IGB_MAX_NIC 32
28 |
29 | #define OPTION_UNSET -1
30 | #define OPTION_DISABLED 0
31 | #define OPTION_ENABLED 1
32 | #define MAX_NUM_LIST_OPTS 15
33 |
34 | /* All parameters are treated the same, as an integer array of values.
35 | * This macro just reduces the need to repeat the same declaration code
36 | * over and over (plus this helps to avoid typo bugs).
37 | */
38 |
39 | #define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
40 | #ifndef module_param_array
41 | /* Module Parameters are always initialized to -1, so that the driver
42 | * can tell the difference between no user specified value or the
43 | * user asking for the default value.
44 | * The true default values are loaded in when igb_check_options is called.
45 | *
46 | * This is a GCC extension to ANSI C.
47 | * See the item "Labeled Elements in Initializers" in the section
48 | * "Extensions to the C Language Family" of the GCC documentation.
49 | */
50 |
51 | #define IGB_PARAM(X, desc) \
52 | static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
53 | MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
54 | MODULE_PARM_DESC(X, desc);
55 | #else
56 | #define IGB_PARAM(X, desc) \
57 | static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
58 | static unsigned int num_##X; \
59 | module_param_array_named(X, X, int, &num_##X, 0); \
60 | MODULE_PARM_DESC(X, desc);
61 | #endif
62 | #ifdef __APPLE__
63 | #undef IGB_PARAM
64 | #define IGB_PARAM(X, desc) static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT;
65 | #endif
66 |
67 | /* Interrupt Throttle Rate (interrupts/sec)
68 | *
69 | * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
70 | */
71 | IGB_PARAM(InterruptThrottleRate,
72 | "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive");
73 | #define DEFAULT_ITR 3
74 | #define MAX_ITR 100000
75 | /* #define MIN_ITR 120 */
76 | #define MIN_ITR 0
77 | /* IntMode (Interrupt Mode)
78 | *
79 | * Valid Range: 0 - 2
80 | *
81 | * Default Value: 2 (MSI-X)
82 | */
83 | IGB_PARAM(IntMode,
84 | "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
85 | #define MAX_INTMODE IGB_INT_MODE_MSIX
86 | #define MIN_INTMODE IGB_INT_MODE_LEGACY
87 |
88 | IGB_PARAM(Node, "set the starting node to allocate memory on, default -1");
89 |
90 | /* LLIPort (Low Latency Interrupt TCP Port)
91 | *
92 | * Valid Range: 0 - 65535
93 | *
94 | * Default Value: 0 (disabled)
95 | */
96 | IGB_PARAM(LLIPort,
97 | "Low Latency Interrupt TCP Port (0-65535), default 0=off");
98 |
99 | #define DEFAULT_LLIPORT 0
100 | #define MAX_LLIPORT 0xFFFF
101 | #define MIN_LLIPORT 0
102 |
103 | /* LLIPush (Low Latency Interrupt on TCP Push flag)
104 | *
105 | * Valid Range: 0, 1
106 | *
107 | * Default Value: 0 (disabled)
108 | */
109 | IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off");
110 |
111 | #define DEFAULT_LLIPUSH 0
112 | #define MAX_LLIPUSH 1
113 | #define MIN_LLIPUSH 0
114 |
115 | /* LLISize (Low Latency Interrupt on Packet Size)
116 | *
117 | * Valid Range: 0 - 1500
118 | *
119 | * Default Value: 0 (disabled)
120 | */
121 | IGB_PARAM(LLISize,
122 | "Low Latency Interrupt on Packet Size (0-1500), default 0=off");
123 |
124 | #define DEFAULT_LLISIZE 0
125 | #define MAX_LLISIZE 1500
126 | #define MIN_LLISIZE 0
127 |
128 | /* RSS (Enable RSS multiqueue receive)
129 | *
130 | * Valid Range: 0 - 8
131 | *
132 | * Default Value: 1
133 | */
134 | IGB_PARAM(RSS,
135 | "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
136 |
137 | #define DEFAULT_RSS 1
138 | #define MAX_RSS 8
139 | #define MIN_RSS 0
140 |
141 | /* VMDQ (Enable VMDq multiqueue receive)
142 | *
143 | * Valid Range: 0 - 8
144 | *
145 | * Default Value: 0
146 | */
147 | IGB_PARAM(VMDQ,
148 | "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0");
149 |
150 | #define DEFAULT_VMDQ 0
151 | #define MAX_VMDQ MAX_RSS
152 | #define MIN_VMDQ 0
153 |
154 | /* max_vfs (Enable SR-IOV VF devices)
155 | *
156 | * Valid Range: 0 - 7
157 | *
158 | * Default Value: 0
159 | */
160 | IGB_PARAM(max_vfs,
161 | "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0");
162 |
163 | #define DEFAULT_SRIOV 0
164 | #define MAX_SRIOV 7
165 | #define MIN_SRIOV 0
166 |
167 | /* MDD (Enable Malicious Driver Detection)
168 | *
169 | * Only available when SR-IOV is enabled - max_vfs is greater than 0
170 | *
171 | * Valid Range: 0, 1
172 | *
173 | * Default Value: 1
174 | */
175 | IGB_PARAM(MDD,
176 | "Malicious Driver Detection (0/1), default 1 = enabled. Only available when max_vfs is greater than 0");
177 |
178 | #ifdef DEBUG
179 |
180 | /* Disable Hardware Reset on Tx Hang
181 | *
182 | * Valid Range: 0, 1
183 | *
184 | * Default Value: 0 (disabled, i.e. h/w will reset)
185 | */
186 | IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
187 |
188 | /* Dump Transmit and Receive buffers
189 | *
190 | * Valid Range: 0, 1
191 | *
192 | * Default Value: 0
193 | */
194 | IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
195 |
196 | #endif /* DEBUG */
197 |
198 | /* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
199 | *
200 | * Valid Range: 0 - 1
201 | *
202 | * Default Value: 1
203 | */
204 | IGB_PARAM(QueuePairs,
205 | "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
206 |
207 | #define DEFAULT_QUEUE_PAIRS 1
208 | #define MAX_QUEUE_PAIRS 1
209 | #define MIN_QUEUE_PAIRS 0
210 |
211 | /* Enable/disable EEE (a.k.a. IEEE802.3az)
212 | *
213 | * Valid Range: 0, 1
214 | *
215 | * Default Value: 1
216 | */
217 | IGB_PARAM(EEE,
218 | "Enable/disable on parts that support the feature");
219 |
220 | /* Enable/disable DMA Coalescing
221 | *
222 | * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000,
223 | * 9000, 10000(msec), 250(usec), 500(usec)
224 | *
225 | * Default Value: 0
226 | */
227 | IGB_PARAM(DMAC,
228 | "Disable or set latency for DMA Coalescing ((0=off, 1000-10000(msec), 250, 500 (usec))");
229 |
230 | #ifndef IGB_NO_LRO
231 | /* Enable/disable Large Receive Offload
232 | *
233 | * Valid Values: 0(off), 1(on)
234 | *
235 | * Default Value: 0
236 | */
237 | IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off");
238 |
239 | #endif
240 | struct igb_opt_list {
241 | 	int i;     /* numeric value accepted for the option */
242 | 	char *str; /* message logged when that value is chosen */
243 | };
244 | struct igb_option {
245 | 	enum { enable_option, range_option, list_option } type;
246 | 	const char *name; /* human-readable option name for log output */
247 | 	const char *err;  /* message appended when validation fails */
248 | 	int def;          /* default used when the option is unset or invalid */
249 | 	union {
250 | 		struct { /* range_option info */
251 | 			int min;
252 | 			int max;
253 | 		} r;
254 | 		struct { /* list_option info */
255 | 			int nr; /* number of entries in p */
256 | 			struct igb_opt_list *p;
257 | 		} l;
258 | 	} arg;
259 | };
260 |
261 | static int igb_validate_option(unsigned int *value,
262 | 			       struct igb_option *opt,
263 | 			       struct igb_adapter *adapter)
264 | {
265 | 	/* an unset option silently takes its default */
266 | 	if (*value == OPTION_UNSET) {
267 | 		*value = opt->def;
268 | 		return 0;
269 | 	}
270 | 
271 | 	switch (opt->type) {
272 | 	case enable_option:
273 | 		if (*value == OPTION_ENABLED) {
274 | 			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
275 | 			return 0;
276 | 		}
277 | 		if (*value == OPTION_DISABLED) {
278 | 			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
279 | 			return 0;
280 | 		}
281 | 		break;
282 | 	case range_option:
283 | 		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
284 | 			DPRINTK(PROBE, INFO,
285 | 				"%s set to %d\n", opt->name, *value);
286 | 			return 0;
287 | 		}
288 | 		break;
289 | 	case list_option: {
290 | 		struct igb_opt_list *entry;
291 | 		int idx;
292 | 
293 | 		for (idx = 0; idx < opt->arg.l.nr; idx++) {
294 | 			entry = &opt->arg.l.p[idx];
295 | 			if (*value != entry->i)
296 | 				continue;
297 | 			if (entry->str[0] != '\0')
298 | 				DPRINTK(PROBE, INFO, "%s\n", entry->str);
299 | 			return 0;
300 | 		}
301 | 	}
302 | 		break;
303 | 	default:
304 | 		BUG();
305 | 	}
306 | 
307 | 	DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
308 | 		opt->name, *value, opt->err);
309 | 	*value = opt->def;
310 | 	return -1;
311 | }
311 |
312 | /**
313 | * igb_check_options - Range Checking for Command Line Parameters
314 | * @adapter: board private structure
315 | *
316 | * This routine checks all command line parameters for valid user
317 | * input. If an invalid value is given, or if no user specified
318 | * value exists, a default value is used. The final value is stored
319 | * in a variable in the adapter structure.
320 | **/
321 |
322 | void igb_check_options(struct igb_adapter *adapter)
323 | {
324 | int bd = adapter->bd_number;
325 | struct e1000_hw *hw = &adapter->hw;
326 |
327 | if (bd >= IGB_MAX_NIC) {
328 | DPRINTK(PROBE, NOTICE,
329 | "Warning: no configuration for board #%d\n", bd);
330 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
331 | #ifndef module_param_array
332 | bd = IGB_MAX_NIC;
333 | #endif
334 | }
335 |
336 | { /* Interrupt Throttling Rate */
337 | struct igb_option opt = {
338 | .type = range_option,
339 | .name = "Interrupt Throttling Rate (ints/sec)",
340 | .err = "using default of "__MODULE_STRING(DEFAULT_ITR),
341 | .def = DEFAULT_ITR,
342 | .arg = { .r = { .min = MIN_ITR,
343 | .max = MAX_ITR } }
344 | };
345 |
346 | #ifdef module_param_array
347 | if (num_InterruptThrottleRate > bd) {
348 | #endif
349 | unsigned int itr = InterruptThrottleRate[bd];
350 |
351 | switch (itr) {
352 | case 0:
353 | DPRINTK(PROBE, INFO, "%s turned off\n",
354 | opt.name);
355 | if (hw->mac.type >= e1000_i350)
356 | adapter->dmac = IGB_DMAC_DISABLE;
357 | adapter->rx_itr_setting = itr;
358 | break;
359 | case 1:
360 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
361 | opt.name);
362 | adapter->rx_itr_setting = itr;
363 | break;
364 | case 3:
365 | DPRINTK(PROBE, INFO,
366 | "%s set to dynamic conservative mode\n",
367 | opt.name);
368 | adapter->rx_itr_setting = itr;
369 | break;
370 | default:
371 | igb_validate_option(&itr, &opt, adapter);
372 | /* Save the setting, because the dynamic bits
373 | * change itr. In case of invalid user value,
374 | * default to conservative mode, else need to
375 | * clear the lower two bits because they are
376 | * used as control */
377 | if (itr == 3) {
378 | adapter->rx_itr_setting = itr;
379 | } else {
380 | adapter->rx_itr_setting = 1000000000
381 | / (itr * 256);
382 | adapter->rx_itr_setting &= ~3;
383 | }
384 | break;
385 | }
386 | #ifdef module_param_array
387 | } else {
388 | adapter->rx_itr_setting = opt.def;
389 | }
390 | #endif
391 | adapter->tx_itr_setting = adapter->rx_itr_setting;
392 | }
393 | { /* Interrupt Mode */
394 | struct igb_option opt = {
395 | .type = range_option,
396 | .name = "Interrupt Mode",
397 | .err = "defaulting to 2 (MSI-X)",
398 | .def = IGB_INT_MODE_MSIX,
399 | .arg = { .r = { .min = MIN_INTMODE,
400 | .max = MAX_INTMODE } }
401 | };
402 |
403 | #ifdef module_param_array
404 | if (num_IntMode > bd) {
405 | #endif
406 | unsigned int int_mode = IntMode[bd];
407 | igb_validate_option(&int_mode, &opt, adapter);
408 | adapter->int_mode = int_mode;
409 | #ifdef module_param_array
410 | } else {
411 | adapter->int_mode = opt.def;
412 | }
413 | #endif
414 | }
415 | { /* Low Latency Interrupt TCP Port */
416 | struct igb_option opt = {
417 | .type = range_option,
418 | .name = "Low Latency Interrupt TCP Port",
419 | .err = "using default of "
420 | __MODULE_STRING(DEFAULT_LLIPORT),
421 | .def = DEFAULT_LLIPORT,
422 | .arg = { .r = { .min = MIN_LLIPORT,
423 | .max = MAX_LLIPORT } }
424 | };
425 |
426 | #ifdef module_param_array
427 | if (num_LLIPort > bd) {
428 | #endif
429 | adapter->lli_port = LLIPort[bd];
430 | if (adapter->lli_port) {
431 | igb_validate_option(&adapter->lli_port, &opt,
432 | adapter);
433 | } else {
434 | DPRINTK(PROBE, INFO, "%s turned off\n",
435 | opt.name);
436 | }
437 | #ifdef module_param_array
438 | } else {
439 | adapter->lli_port = opt.def;
440 | }
441 | #endif
442 | }
443 | { /* Low Latency Interrupt on Packet Size */
444 | struct igb_option opt = {
445 | .type = range_option,
446 | .name = "Low Latency Interrupt on Packet Size",
447 | .err = "using default of "
448 | __MODULE_STRING(DEFAULT_LLISIZE),
449 | .def = DEFAULT_LLISIZE,
450 | .arg = { .r = { .min = MIN_LLISIZE,
451 | .max = MAX_LLISIZE } }
452 | };
453 |
454 | #ifdef module_param_array
455 | if (num_LLISize > bd) {
456 | #endif
457 | adapter->lli_size = LLISize[bd];
458 | if (adapter->lli_size) {
459 | igb_validate_option(&adapter->lli_size, &opt,
460 | adapter);
461 | } else {
462 | DPRINTK(PROBE, INFO, "%s turned off\n",
463 | opt.name);
464 | }
465 | #ifdef module_param_array
466 | } else {
467 | adapter->lli_size = opt.def;
468 | }
469 | #endif
470 | }
471 | { /* Low Latency Interrupt on TCP Push flag */
472 | struct igb_option opt = {
473 | .type = enable_option,
474 | .name = "Low Latency Interrupt on TCP Push flag",
475 | .err = "defaulting to Disabled",
476 | .def = OPTION_DISABLED
477 | };
478 |
479 | #ifdef module_param_array
480 | if (num_LLIPush > bd) {
481 | #endif
482 | unsigned int lli_push = LLIPush[bd];
483 | igb_validate_option(&lli_push, &opt, adapter);
484 | adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
485 | #ifdef module_param_array
486 | } else {
487 | adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
488 | }
489 | #endif
490 | }
491 | { /* SRIOV - Enable SR-IOV VF devices */
492 | struct igb_option opt = {
493 | .type = range_option,
494 | .name = "max_vfs - SR-IOV VF devices",
495 | .err = "using default of "
496 | __MODULE_STRING(DEFAULT_SRIOV),
497 | .def = DEFAULT_SRIOV,
498 | .arg = { .r = { .min = MIN_SRIOV,
499 | .max = MAX_SRIOV } }
500 | };
501 |
502 | #ifdef module_param_array
503 | if (num_max_vfs > bd) {
504 | #endif
505 | adapter->vfs_allocated_count = max_vfs[bd];
506 | igb_validate_option(&adapter->vfs_allocated_count,
507 | &opt, adapter);
508 |
509 | #ifdef module_param_array
510 | } else {
511 | adapter->vfs_allocated_count = opt.def;
512 | }
513 | #endif
514 | if (adapter->vfs_allocated_count) {
515 | switch (hw->mac.type) {
516 | case e1000_82575:
517 | case e1000_82580:
518 | case e1000_i210:
519 | case e1000_i211:
520 | case e1000_i354:
521 | adapter->vfs_allocated_count = 0;
522 | DPRINTK(PROBE, INFO,
523 | "SR-IOV option max_vfs not supported.\n");
524 | fallthrough;
525 | default:
526 | break;
527 | }
528 | }
529 | }
530 | { /* VMDQ - Enable VMDq multiqueue receive */
531 | struct igb_option opt = {
532 | .type = range_option,
533 | .name = "VMDQ - VMDq multiqueue queue count",
534 | .err = "using default of "__MODULE_STRING(DEFAULT_VMDQ),
535 | .def = DEFAULT_VMDQ,
536 | .arg = { .r = { .min = MIN_VMDQ,
537 | .max = (MAX_VMDQ
538 | - adapter->vfs_allocated_count)} }
539 | };
540 | if ((hw->mac.type != e1000_i210) &&
541 | (hw->mac.type != e1000_i211)) {
542 | #ifdef module_param_array
543 | if (num_VMDQ > bd) {
544 | #endif
545 | adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]);
546 | if (adapter->vfs_allocated_count &&
547 | !adapter->vmdq_pools) {
548 | DPRINTK(PROBE, INFO,
549 | "Enabling SR-IOV requires VMDq be set to at least 1\n");
550 | adapter->vmdq_pools = 1;
551 | }
552 | igb_validate_option(&adapter->vmdq_pools, &opt,
553 | adapter);
554 |
555 | #ifdef module_param_array
556 | } else {
557 | if (!adapter->vfs_allocated_count)
558 | adapter->vmdq_pools = (opt.def == 1 ? 0
559 | : opt.def);
560 | else
561 | adapter->vmdq_pools = 1;
562 | }
563 | #endif
564 | #ifdef CONFIG_IGB_VMDQ_NETDEV
565 | if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) {
566 | DPRINTK(PROBE, INFO,
567 | "VMDq not supported on this part.\n");
568 | adapter->vmdq_pools = 0;
569 | }
570 | #endif
571 |
572 | #ifdef CONFIG_IGB_VMDQ_NETDEV
573 | } else {
574 | DPRINTK(PROBE, INFO, "VMDq option is not supported.\n");
575 | adapter->vmdq_pools = opt.def;
576 | #endif
577 | }
578 | }
579 | { /* RSS - Enable RSS multiqueue receives */
580 | struct igb_option opt = {
581 | .type = range_option,
582 | .name = "RSS - RSS multiqueue receive count",
583 | .err = "using default of "__MODULE_STRING(DEFAULT_RSS),
584 | .def = DEFAULT_RSS,
585 | .arg = { .r = { .min = MIN_RSS,
586 | .max = MAX_RSS } }
587 | };
588 |
589 | switch (hw->mac.type) {
590 | case e1000_82575:
591 | #ifndef CONFIG_IGB_VMDQ_NETDEV
592 | if (!!adapter->vmdq_pools) {
593 | if (adapter->vmdq_pools <= 2) {
594 | if (adapter->vmdq_pools == 2)
595 | opt.arg.r.max = 3;
596 | } else {
597 | opt.arg.r.max = 1;
598 | }
599 | } else {
600 | opt.arg.r.max = 4;
601 | }
602 | #else
603 | opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
604 | #endif /* CONFIG_IGB_VMDQ_NETDEV */
605 | break;
606 | case e1000_i210:
607 | opt.arg.r.max = 4;
608 | break;
609 | case e1000_i211:
610 | opt.arg.r.max = 2;
611 | break;
612 | case e1000_82576:
613 | #ifndef CONFIG_IGB_VMDQ_NETDEV
614 | if (!!adapter->vmdq_pools)
615 | opt.arg.r.max = 2;
616 | break;
617 | #endif /* CONFIG_IGB_VMDQ_NETDEV */
618 | case e1000_82580:
619 | case e1000_i350:
620 | case e1000_i354:
621 | default:
622 | if (!!adapter->vmdq_pools)
623 | opt.arg.r.max = 1;
624 | break;
625 | }
626 |
627 | if (adapter->int_mode != IGB_INT_MODE_MSIX) {
628 | DPRINTK(PROBE, INFO,
629 | "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n",
630 | opt.err);
631 | opt.arg.r.max = 1;
632 | }
633 |
634 | #ifdef module_param_array
635 | if (num_RSS > bd) {
636 | #endif
637 | adapter->rss_queues = RSS[bd];
638 | switch (adapter->rss_queues) {
639 | case 1:
640 | break;
641 | default:
642 | igb_validate_option(&adapter->rss_queues, &opt,
643 | adapter);
644 | if (adapter->rss_queues)
645 | break;
646 | fallthrough;
647 | case 0:
648 | adapter->rss_queues = min_t(u32, opt.arg.r.max,
649 | num_online_cpus());
650 | break;
651 | }
652 | #ifdef module_param_array
653 | } else {
654 | adapter->rss_queues = opt.def;
655 | }
656 | #endif
657 | }
658 | { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
659 | struct igb_option opt = {
660 | .type = enable_option,
661 | .name =
662 | "QueuePairs - Tx/Rx queue pairs for interrupt handling",
663 | .err = "defaulting to Enabled",
664 | .def = OPTION_ENABLED
665 | };
666 | #ifdef module_param_array
667 | if (num_QueuePairs > bd) {
668 | #endif
669 | unsigned int qp = QueuePairs[bd];
670 | /*
671 | * We must enable queue pairs if the number of queues
672 | * exceeds the number of available interrupts. We are
673 | * limited to 10, or 3 per unallocated vf. On I210 and
674 | * I211 devices, we are limited to 5 interrupts.
675 | * However, since I211 only supports 2 queues, we do not
676 | * need to check and override the user option.
677 | */
678 | if (qp == OPTION_DISABLED) {
679 | if (adapter->rss_queues > 4)
680 | qp = OPTION_ENABLED;
681 |
682 | if (adapter->vmdq_pools > 4)
683 | qp = OPTION_ENABLED;
684 |
685 | if (adapter->rss_queues > 1 &&
686 | (adapter->vmdq_pools > 3 ||
687 | adapter->vfs_allocated_count > 6))
688 | qp = OPTION_ENABLED;
689 |
690 | if (hw->mac.type == e1000_i210 &&
691 | adapter->rss_queues > 2)
692 | qp = OPTION_ENABLED;
693 |
694 | if (qp == OPTION_ENABLED)
695 | DPRINTK(PROBE, INFO,
696 | "Number of queues exceeds available interrupts, %s\n",
697 | opt.err);
698 | }
699 | igb_validate_option(&qp, &opt, adapter);
700 | adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
701 | #ifdef module_param_array
702 | } else {
703 | adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
704 | }
705 | #endif
706 | }
707 | { /* EEE - Enable EEE for capable adapters */
708 |
709 | if (hw->mac.type >= e1000_i350) {
710 | struct igb_option opt = {
711 | .type = enable_option,
712 | .name = "EEE Support",
713 | .err = "defaulting to Enabled",
714 | .def = OPTION_ENABLED
715 | };
716 | #ifdef module_param_array
717 | if (num_EEE > bd) {
718 | #endif
719 | unsigned int eee = EEE[bd];
720 | igb_validate_option(&eee, &opt, adapter);
721 | adapter->flags |= eee ? IGB_FLAG_EEE : 0;
722 | if (eee)
723 | hw->dev_spec._82575.eee_disable = false;
724 | else
725 | hw->dev_spec._82575.eee_disable = true;
726 |
727 | #ifdef module_param_array
728 | } else {
729 | adapter->flags |= opt.def ? IGB_FLAG_EEE : 0;
730 | if (adapter->flags & IGB_FLAG_EEE)
731 | hw->dev_spec._82575.eee_disable = false;
732 | else
733 | hw->dev_spec._82575.eee_disable = true;
734 | }
735 | #endif
736 | }
737 | }
738 | { /* DMAC - Enable DMA Coalescing for capable adapters */
739 |
740 | if (hw->mac.type >= e1000_i350) {
741 | struct igb_opt_list list[] = {
742 | { IGB_DMAC_DISABLE, "DMAC Disable"},
743 | { IGB_DMAC_MIN, "DMAC 250 usec"},
744 | { IGB_DMAC_500, "DMAC 500 usec"},
745 | { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"},
746 | { IGB_DMAC_2000, "DMAC 2000 usec"},
747 | { IGB_DMAC_3000, "DMAC 3000 usec"},
748 | { IGB_DMAC_4000, "DMAC 4000 usec"},
749 | { IGB_DMAC_5000, "DMAC 5000 usec"},
750 | { IGB_DMAC_6000, "DMAC 6000 usec"},
751 | { IGB_DMAC_7000, "DMAC 7000 usec"},
752 | { IGB_DMAC_8000, "DMAC 8000 usec"},
753 | { IGB_DMAC_9000, "DMAC 9000 usec"},
754 | { IGB_DMAC_MAX, "DMAC 10000 usec"}
755 | };
756 | struct igb_option opt = {
757 | .type = list_option,
758 | .name = "DMA Coalescing",
759 | .err = "using default of "
760 | __MODULE_STRING(IGB_DMAC_DISABLE),
761 | .def = IGB_DMAC_DISABLE,
762 | .arg = { .l = { .nr = 13,
763 | .p = list
764 | }
765 | }
766 | };
767 | #ifdef module_param_array
768 | if (num_DMAC > bd) {
769 | #endif
770 | unsigned int dmac = DMAC[bd];
771 | if (adapter->rx_itr_setting == IGB_DMAC_DISABLE)
772 | dmac = IGB_DMAC_DISABLE;
773 | igb_validate_option(&dmac, &opt, adapter);
774 | switch (dmac) {
775 | case IGB_DMAC_DISABLE:
776 | adapter->dmac = dmac;
777 | break;
778 | case IGB_DMAC_MIN:
779 | adapter->dmac = dmac;
780 | break;
781 | case IGB_DMAC_500:
782 | adapter->dmac = dmac;
783 | break;
784 | case IGB_DMAC_EN_DEFAULT:
785 | adapter->dmac = dmac;
786 | break;
787 | case IGB_DMAC_2000:
788 | adapter->dmac = dmac;
789 | break;
790 | case IGB_DMAC_3000:
791 | adapter->dmac = dmac;
792 | break;
793 | case IGB_DMAC_4000:
794 | adapter->dmac = dmac;
795 | break;
796 | case IGB_DMAC_5000:
797 | adapter->dmac = dmac;
798 | break;
799 | case IGB_DMAC_6000:
800 | adapter->dmac = dmac;
801 | break;
802 | case IGB_DMAC_7000:
803 | adapter->dmac = dmac;
804 | break;
805 | case IGB_DMAC_8000:
806 | adapter->dmac = dmac;
807 | break;
808 | case IGB_DMAC_9000:
809 | adapter->dmac = dmac;
810 | break;
811 | case IGB_DMAC_MAX:
812 | adapter->dmac = dmac;
813 | break;
814 | default:
815 | adapter->dmac = opt.def;
816 | DPRINTK(PROBE, INFO,
817 | "Invalid DMAC setting, resetting DMAC to %d\n",
818 | opt.def);
819 | }
820 | #ifdef module_param_array
821 | } else
822 | adapter->dmac = opt.def;
823 | #endif
824 | }
825 | }
826 | #ifndef IGB_NO_LRO
827 | { /* LRO - Enable Large Receive Offload */
828 | struct igb_option opt = {
829 | .type = enable_option,
830 | .name = "LRO - Large Receive Offload",
831 | .err = "defaulting to Disabled",
832 | .def = OPTION_DISABLED
833 | };
834 | struct net_device *netdev = adapter->netdev;
835 | #ifdef module_param_array
836 | if (num_LRO > bd) {
837 | unsigned int lro = LRO[bd];
838 |
839 | igb_validate_option(&lro, &opt, adapter);
840 | netdev->features |= lro ? NETIF_F_LRO : 0;
841 | }
842 | #else
843 | unsigned int lro = LRO[bd];
844 |
845 | igb_validate_option(&lro, &opt, adapter);
846 | netdev->features |= lro ? NETIF_F_LRO : 0;
847 | #endif
848 | }
849 | #endif /* IGB_NO_LRO */
850 | { /* MDD - Enable Malicious Driver Detection. Only available when
851 | SR-IOV is enabled. */
852 | struct igb_option opt = {
853 | .type = enable_option,
854 | .name = "Malicious Driver Detection",
855 | .err = "defaulting to 1",
856 | .def = OPTION_ENABLED,
857 | .arg = { .r = { .min = OPTION_DISABLED,
858 | .max = OPTION_ENABLED } }
859 | };
860 |
861 | #ifdef module_param_array
862 | if (num_MDD > bd) {
863 | #endif
864 | adapter->mdd = MDD[bd];
865 | igb_validate_option((uint *)&adapter->mdd, &opt,
866 | adapter);
867 | #ifdef module_param_array
868 | } else {
869 | adapter->mdd = opt.def;
870 | }
871 | #endif
872 | }
873 | }
874 |
875 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_procfs.c:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: GPL-2.0
2 | /* Copyright(c) 2007 - 2022 Intel Corporation. */
3 |
4 | #include "igb.h"
5 | #include "e1000_82575.h"
6 | #include "e1000_hw.h"
7 |
8 | #ifdef IGB_PROCFS
9 | #ifndef IGB_HWMON
10 |
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 |
17 | static struct proc_dir_entry *igb_top_dir;
18 |
19 | bool igb_thermal_present(struct igb_adapter *adapter)
20 | {
21 | s32 status;
22 | struct e1000_hw *hw;
23 |
24 | if (adapter == NULL)
25 | return false;
26 | hw = &adapter->hw;
27 |
28 | /*
29 | * Only set I2C bit-bang mode if an external thermal sensor is
30 | * supported on this device.
31 | */
32 | if (adapter->ets) {
33 | status = e1000_set_i2c_bb(hw);
34 | if (status != E1000_SUCCESS)
35 | return false;
36 | }
37 |
38 | status = hw->mac.ops.init_thermal_sensor_thresh(hw);
39 | if (status != E1000_SUCCESS)
40 | return false;
41 |
42 | return true;
43 | }
44 |
45 | static int igb_macburn(char *page, char **start, off_t off, int count,
46 | int *eof, void *data)
47 | {
48 | struct e1000_hw *hw;
49 | struct igb_adapter *adapter = (struct igb_adapter *)data;
50 | if (adapter == NULL)
51 | return snprintf(page, count, "error: no adapter\n");
52 |
53 | hw = &adapter->hw;
54 | if (hw == NULL)
55 | return snprintf(page, count, "error: no hw data\n");
56 |
57 | return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
58 | (unsigned int)hw->mac.perm_addr[0],
59 | (unsigned int)hw->mac.perm_addr[1],
60 | (unsigned int)hw->mac.perm_addr[2],
61 | (unsigned int)hw->mac.perm_addr[3],
62 | (unsigned int)hw->mac.perm_addr[4],
63 | (unsigned int)hw->mac.perm_addr[5]);
64 | }
65 |
66 | static int igb_macadmn(char *page, char **start, off_t off,
67 | int count, int *eof, void *data)
68 | {
69 | struct e1000_hw *hw;
70 | struct igb_adapter *adapter = (struct igb_adapter *)data;
71 | if (adapter == NULL)
72 | return snprintf(page, count, "error: no adapter\n");
73 |
74 | hw = &adapter->hw;
75 | if (hw == NULL)
76 | return snprintf(page, count, "error: no hw data\n");
77 |
78 | return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
79 | (unsigned int)hw->mac.addr[0],
80 | (unsigned int)hw->mac.addr[1],
81 | (unsigned int)hw->mac.addr[2],
82 | (unsigned int)hw->mac.addr[3],
83 | (unsigned int)hw->mac.addr[4],
84 | (unsigned int)hw->mac.addr[5]);
85 | }
86 |
87 | static int igb_numeports(char *page, char **start, off_t off, int count,
88 | int *eof, void *data)
89 | {
90 | struct e1000_hw *hw;
91 | int ports;
92 | struct igb_adapter *adapter = (struct igb_adapter *)data;
93 | if (adapter == NULL)
94 | return snprintf(page, count, "error: no adapter\n");
95 |
96 | hw = &adapter->hw;
97 | if (hw == NULL)
98 | return snprintf(page, count, "error: no hw data\n");
99 |
100 | ports = 4;
101 |
102 | return snprintf(page, count, "%d\n", ports);
103 | }
104 |
105 | static int igb_porttype(char *page, char **start, off_t off, int count,
106 | int *eof, void *data)
107 | {
108 | struct igb_adapter *adapter = (struct igb_adapter *)data;
109 | if (adapter == NULL)
110 | return snprintf(page, count, "error: no adapter\n");
111 |
112 | return snprintf(page, count, "%d\n",
113 | test_bit(__IGB_DOWN, &adapter->state));
114 | }
115 |
116 | static int igb_therm_location(char *page, char **start, off_t off,
117 | int count, int *eof, void *data)
118 | {
119 | struct igb_therm_proc_data *therm_data =
120 | (struct igb_therm_proc_data *)data;
121 |
122 | if (therm_data == NULL)
123 | return snprintf(page, count, "error: no therm_data\n");
124 |
125 | return snprintf(page, count, "%d\n", therm_data->sensor_data->location);
126 | }
127 |
128 | static int igb_therm_maxopthresh(char *page, char **start, off_t off,
129 | int count, int *eof, void *data)
130 | {
131 | struct igb_therm_proc_data *therm_data =
132 | (struct igb_therm_proc_data *)data;
133 |
134 | if (therm_data == NULL)
135 | return snprintf(page, count, "error: no therm_data\n");
136 |
137 | return snprintf(page, count, "%d\n",
138 | therm_data->sensor_data->max_op_thresh);
139 | }
140 |
141 | static int igb_therm_cautionthresh(char *page, char **start, off_t off,
142 | int count, int *eof, void *data)
143 | {
144 | struct igb_therm_proc_data *therm_data =
145 | (struct igb_therm_proc_data *)data;
146 |
147 | if (therm_data == NULL)
148 | return snprintf(page, count, "error: no therm_data\n");
149 |
150 | return snprintf(page, count, "%d\n",
151 | therm_data->sensor_data->caution_thresh);
152 | }
153 |
154 | static int igb_therm_temp(char *page, char **start, off_t off,
155 | int count, int *eof, void *data)
156 | {
157 | s32 status;
158 | struct igb_therm_proc_data *therm_data =
159 | (struct igb_therm_proc_data *)data;
160 |
161 | if (therm_data == NULL)
162 | return snprintf(page, count, "error: no therm_data\n");
163 |
164 | status = e1000_get_thermal_sensor_data(therm_data->hw);
165 | if (status != E1000_SUCCESS)
166 | snprintf(page, count, "error: status %d returned\n", status);
167 |
168 | return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
169 | }
170 |
/* Maps a procfs entry name to its legacy read_proc-style handler
 * (page, start, offset, count, eof, private-data signature).
 */
struct igb_proc_type {
	char name[32];	/* entry name as it appears under procfs */
	int (*read)(char*, char**, off_t, int, int*, void*);	/* read handler; NULL marks end of a table */
};
175 |
/* Entries created under driver/igb/<pci name>/info; the adapter pointer is
 * passed as private data.  Terminated by the NULL read handler.
 */
struct igb_proc_type igb_proc_entries[] = {
	{"numeports", &igb_numeports},
	{"porttype", &igb_porttype},
	{"macburn", &igb_macburn},
	{"macadmn", &igb_macadmn},
	{"", NULL}
};
183 |
/* Entries created under each info/sensor_<i> directory; the per-sensor
 * igb_therm_proc_data is passed as private data.  Terminated by the NULL
 * read handler.
 */
struct igb_proc_type igb_internal_entries[] = {
	{"location", &igb_therm_location},
	{"temp", &igb_therm_temp},
	{"cautionthresh", &igb_therm_cautionthresh},
	{"maxopthresh", &igb_therm_maxopthresh},
	{"", NULL}
};
191 |
192 | void igb_del_proc_entries(struct igb_adapter *adapter)
193 | {
194 | int index, i;
195 | char buf[16]; /* much larger than the sensor number will ever be */
196 |
197 | if (igb_top_dir == NULL)
198 | return;
199 |
200 | for (i = 0; i < E1000_MAX_SENSORS; i++) {
201 | if (adapter->therm_dir[i] == NULL)
202 | continue;
203 |
204 | for (index = 0; ; index++) {
205 | if (igb_internal_entries[index].read == NULL)
206 | break;
207 |
208 | remove_proc_entry(igb_internal_entries[index].name,
209 | adapter->therm_dir[i]);
210 | }
211 | snprintf(buf, sizeof(buf), "sensor_%d", i);
212 | remove_proc_entry(buf, adapter->info_dir);
213 | }
214 |
215 | if (adapter->info_dir != NULL) {
216 | for (index = 0; ; index++) {
217 | if (igb_proc_entries[index].read == NULL)
218 | break;
219 | remove_proc_entry(igb_proc_entries[index].name,
220 | adapter->info_dir);
221 | }
222 | remove_proc_entry("info", adapter->eth_dir);
223 | }
224 |
225 | if (adapter->eth_dir != NULL)
226 | remove_proc_entry(pci_name(adapter->pdev), igb_top_dir);
227 | }
228 |
/* called from igb_main.c */
/* igb_procfs_exit - remove all procfs entries for one adapter instance. */
void igb_procfs_exit(struct igb_adapter *adapter)
{
	igb_del_proc_entries(adapter);
}
234 |
235 | int igb_procfs_topdir_init(void)
236 | {
237 | igb_top_dir = proc_mkdir("driver/igb", NULL);
238 | if (igb_top_dir == NULL)
239 | return (-ENOMEM);
240 |
241 | return 0;
242 | }
243 |
/* igb_procfs_topdir_exit - remove the shared "driver/igb" procfs directory.
 * Counterpart of igb_procfs_topdir_init(); call after all adapters have
 * removed their own entries.
 */
void igb_procfs_topdir_exit(void)
{
	remove_proc_entry("driver/igb", NULL);
}
248 |
/* called from igb_main.c */
/* igb_procfs_init - build the procfs tree for one adapter.
 *
 * Layout: driver/igb/<pci name>/info/<igb_proc_entries> plus, when the
 * thermal sensor is usable, info/sensor_<i>/<igb_internal_entries>.
 * Returns 0 on success or -ENOMEM; on any failure everything created so
 * far is removed again via igb_del_proc_entries().
 */
int igb_procfs_init(struct igb_adapter *adapter)
{
	int rc = 0;
	int i;
	int index;
	char buf[16]; /* much larger than the sensor number will ever be */

	/* start from a clean slate so igb_del_proc_entries() can tell
	 * which directories actually exist */
	adapter->eth_dir = NULL;
	adapter->info_dir = NULL;
	for (i = 0; i < E1000_MAX_SENSORS; i++)
		adapter->therm_dir[i] = NULL;

	if (igb_top_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}

	/* driver/igb/<pci name> */
	adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir);
	if (adapter->eth_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}

	/* driver/igb/<pci name>/info */
	adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
	if (adapter->info_dir == NULL) {
		rc = -ENOMEM;
		goto fail;
	}
	/* one read-only (0444) entry per igb_proc_entries[] row */
	for (index = 0; ; index++) {
		if (igb_proc_entries[index].read == NULL)
			break;
		if (!(create_proc_read_entry(igb_proc_entries[index].name,
					   0444,
					   adapter->info_dir,
					   igb_proc_entries[index].read,
					   adapter))) {

			rc = -ENOMEM;
			goto fail;
		}
	}
	/* thermal entries are optional: no sensor still counts as success */
	if (igb_thermal_present(adapter) == false)
		goto exit;

	for (i = 0; i < E1000_MAX_SENSORS; i++) {
		/* location 0 means this sensor slot is unused */
		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
			continue;

		snprintf(buf, sizeof(buf), "sensor_%d", i);
		adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir);
		if (adapter->therm_dir[i] == NULL) {
			rc = -ENOMEM;
			goto fail;
		}
		for (index = 0; ; index++) {
			if (igb_internal_entries[index].read == NULL)
				break;
			/*
			 * therm_data struct contains pointer the read func
			 * will be needing
			 */
			adapter->therm_data[i].hw = &adapter->hw;
			adapter->therm_data[i].sensor_data =
				&adapter->hw.mac.thermal_sensor_data.sensor[i];

			if (!(create_proc_read_entry(
					igb_internal_entries[index].name,
					0444,
					adapter->therm_dir[i],
					igb_internal_entries[index].read,
					&adapter->therm_data[i]))) {
				rc = -ENOMEM;
				goto fail;
			}
		}
	}
	goto exit;

fail:
	igb_del_proc_entries(adapter);
exit:
	return rc;
}
333 |
334 | #endif /* !IGB_HWMON */
335 | #endif /* IGB_PROCFS */
336 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_regtest.h:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 | /* ethtool register test data */
/* ethtool register test data */
struct igb_reg_test {
	u16 reg;	/* register offset (first element for arrays/tables) */
	u16 reg_offset;	/* byte spacing between array elements */
	u16 array_len;	/* element count; 0 terminates a test list */
	u16 test_type;	/* one of the *_TEST values defined below */
	u32 mask;	/* bits under test (consumed by the ethtool reg test) */
	u32 write;	/* value driven during the test */
};
13 |
14 | /* In the hardware, registers are laid out either singly, in arrays
15 | * spaced 0x100 bytes apart, or in contiguous tables. We assume
16 | * most tests take place on arrays or single registers (handled
17 | * as a single-element array) and special-case the tables.
18 | * Table tests are always pattern tests.
19 | *
20 | * We also make provision for some required setup steps by specifying
21 | * registers to be written without any read-back testing.
22 | */
23 |
#define PATTERN_TEST 1		/* walk test patterns through reg under mask */
#define SET_READ_TEST 2		/* write value, read back, compare under mask */
#define WRITE_NO_TEST 3		/* setup write only, no read-back check */
#define TABLE32_TEST 4		/* pattern test over a 32-bit register table */
#define TABLE64_TEST_LO 5	/* pattern test, low dword of a 64-bit table */
#define TABLE64_TEST_HI 6	/* pattern test, high dword of a 64-bit table */
30 |
/* i210 reg test */
/* Walked by the ethtool self-test until the all-zero sentinel entry. */
static struct igb_reg_test reg_test_i210[] = {
	{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* RDH is read-only for i210, only test RDT. */
	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
	{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_HI,
	  0x900FFFFF, 0xFFFFFFFF },
	{ E1000_MTA, 0, 128, TABLE32_TEST,
	  0xFFFFFFFF, 0xFFFFFFFF },
	/* all-zero sentinel terminates the list */
	{ 0, 0, 0, 0 }
};
60 |
/* i350 reg test */
/* Walked by the ethtool self-test until the all-zero sentinel entry. */
static struct igb_reg_test reg_test_i350[] = {
	{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	/* VET is readonly on i350 */
	{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* RDH is read-only for i350, only test RDT. */
	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_HI,
	  0xC3FFFFFF, 0xFFFFFFFF },
	{ E1000_RA2, 0, 16, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA2, 0, 16, TABLE64_TEST_HI,
	  0xC3FFFFFF, 0xFFFFFFFF },
	{ E1000_MTA, 0, 128, TABLE32_TEST,
	  0xFFFFFFFF, 0xFFFFFFFF },
	/* all-zero sentinel terminates the list */
	{ 0, 0, 0, 0 }
};
103 |
/* 82580 reg test */
/* Walked by the ethtool self-test until the all-zero sentinel entry. */
static struct igb_reg_test reg_test_82580[] = {
	{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	/* RDH is read-only for 82580, only test RDT. */
	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_HI,
	  0x83FFFFFF, 0xFFFFFFFF },
	{ E1000_RA2, 0, 8, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA2, 0, 8, TABLE64_TEST_HI,
	  0x83FFFFFF, 0xFFFFFFFF },
	{ E1000_MTA, 0, 128, TABLE32_TEST,
	  0xFFFFFFFF, 0xFFFFFFFF },
	/* all-zero sentinel terminates the list */
	{ 0, 0, 0, 0 }
};
146 |
/* 82576 reg test */
/* Walked by the ethtool self-test until the all-zero sentinel entry. */
static struct igb_reg_test reg_test_82576[] = {
	{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	/* Enable all queues before testing. */
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
	  E1000_RXDCTL_QUEUE_ENABLE },
	{ E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
	  E1000_RXDCTL_QUEUE_ENABLE },
	/* RDH is read-only for 82576, only test RDT. */
	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	/* write RXDCTL back to 0 once the RDT test is done */
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
	{ E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
	{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_HI,
	  0x83FFFFFF, 0xFFFFFFFF },
	{ E1000_RA2, 0, 8, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA2, 0, 8, TABLE64_TEST_HI,
	  0x83FFFFFF, 0xFFFFFFFF },
	{ E1000_MTA, 0, 128, TABLE32_TEST,
	  0xFFFFFFFF, 0xFFFFFFFF },
	/* all-zero sentinel terminates the list */
	{ 0, 0, 0, 0 }
};
194 |
/* 82575 register test */
/* Walked by the ethtool self-test until the all-zero sentinel entry. */
static struct igb_reg_test reg_test_82575[] = {
	{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
	  0xFFFFFFFF },
	{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
	  0xFFFFFFFF },
	{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
	  E1000_RXDCTL_QUEUE_ENABLE },
	/* RDH is read-only for 82575, only test RDT. */
	{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
	{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
	  0xFFFFFFFF },
	{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
	  0xFFFFFFFF },
	{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80,
	  0x000FFFFF },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
	{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
	{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_LO,
	  0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA, 0, 16, TABLE64_TEST_HI,
	  0x800FFFFF, 0xFFFFFFFF },
	{ E1000_MTA, 0, 128, TABLE32_TEST,
	  0xFFFFFFFF, 0xFFFFFFFF },
	/* all-zero sentinel terminates the list */
	{ 0, 0, 0, 0 }
};
234 |
235 |
236 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_vmdq.c:
--------------------------------------------------------------------------------
1 | /* SPDX-License-Identifier: @SPDX@ */
2 | /* Copyright(c) 2007 - 2024 Intel Corporation. */
3 |
4 |
5 | #ifndef __APPLE__
6 | #include
7 | #endif
8 |
9 | #include "igb.h"
10 | #include "igb_vmdq.h"
11 | #ifndef __APPLE__
12 | #include
13 | #endif
14 |
15 | #ifdef CONFIG_IGB_VMDQ_NETDEV
16 | int igb_vmdq_open(struct net_device *dev)
17 | {
18 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
19 | struct igb_adapter *adapter = vadapter->real_adapter;
20 | struct net_device *main_netdev = adapter->netdev;
21 | int hw_queue = vadapter->rx_ring->queue_index +
22 | adapter->vfs_allocated_count;
23 |
24 | if (test_bit(__IGB_DOWN, adapter->state)) {
25 | DPRINTK(DRV, WARNING,
26 | "Open %s before opening this device.\n",
27 | main_netdev->name);
28 | return -EAGAIN;
29 | }
30 | netif_carrier_off(dev);
31 | vadapter->tx_ring->vmdq_netdev = dev;
32 | vadapter->rx_ring->vmdq_netdev = dev;
33 | if (is_valid_ether_addr(dev->dev_addr)) {
34 | igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
35 | igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
36 | }
37 | netif_carrier_on(dev);
38 | return 0;
39 | }
40 |
41 | int igb_vmdq_close(struct net_device *dev)
42 | {
43 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
44 | struct igb_adapter *adapter = vadapter->real_adapter;
45 | int hw_queue = vadapter->rx_ring->queue_index +
46 | adapter->vfs_allocated_count;
47 |
48 | netif_carrier_off(dev);
49 | igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
50 |
51 | vadapter->tx_ring->vmdq_netdev = NULL;
52 | vadapter->rx_ring->vmdq_netdev = NULL;
53 | return 0;
54 | }
55 |
56 | netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
57 | {
58 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
59 |
60 | return igb_xmit_frame_ring(skb, vadapter->tx_ring);
61 | }
62 |
63 | struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
64 | {
65 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
66 | struct igb_adapter *adapter = vadapter->real_adapter;
67 | struct e1000_hw *hw = &adapter->hw;
68 | int hw_queue = vadapter->rx_ring->queue_index +
69 | adapter->vfs_allocated_count;
70 |
71 | vadapter->net_stats.rx_packets +=
72 | E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
73 | E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
74 | vadapter->net_stats.tx_packets +=
75 | E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
76 | E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
77 | vadapter->net_stats.rx_bytes +=
78 | E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
79 | E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
80 | vadapter->net_stats.tx_bytes +=
81 | E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
82 | E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
83 | vadapter->net_stats.multicast +=
84 | E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
85 | E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
86 | /* only return the current stats */
87 | return &vadapter->net_stats;
88 | }
89 |
90 | /**
91 | * igb_write_vm_addr_list - write unicast addresses to RAR table
92 | * @netdev: network interface device structure
93 | *
94 | * Writes unicast address list to the RAR table.
95 | * Returns: -ENOMEM on failure/insufficient address space
96 | * 0 on no addresses written
97 | * X on writing X addresses to the RAR table
98 | **/
99 | static int igb_write_vm_addr_list(struct net_device *netdev)
100 | {
101 | struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
102 | struct igb_adapter *adapter = vadapter->real_adapter;
103 | int count = 0;
104 | int hw_queue = vadapter->rx_ring->queue_index +
105 | adapter->vfs_allocated_count;
106 |
107 | /* return ENOMEM indicating insufficient memory for addresses */
108 | if (netdev_uc_count(netdev) > igb_available_rars(adapter))
109 | return -ENOMEM;
110 |
111 | if (!netdev_uc_empty(netdev)) {
112 | #ifdef NETDEV_HW_ADDR_T_UNICAST
113 | struct netdev_hw_addr *ha;
114 | #else
115 | struct dev_mc_list *ha;
116 | #endif
117 | netdev_for_each_uc_addr(ha, netdev) {
118 | #ifdef NETDEV_HW_ADDR_T_UNICAST
119 | igb_del_mac_filter(adapter, ha->addr, hw_queue);
120 | igb_add_mac_filter(adapter, ha->addr, hw_queue);
121 | #else
122 | igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
123 | igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
124 | #endif
125 | count++;
126 | }
127 | }
128 | return count;
129 | }
130 |
131 |
132 | #define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
133 | void igb_vmdq_set_rx_mode(struct net_device *dev)
134 | {
135 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
136 | struct igb_adapter *adapter = vadapter->real_adapter;
137 | struct e1000_hw *hw = &adapter->hw;
138 | u32 vmolr, rctl;
139 | int hw_queue = vadapter->rx_ring->queue_index +
140 | adapter->vfs_allocated_count;
141 |
142 | /* Check for Promiscuous and All Multicast modes */
143 | vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
144 |
145 | /* clear the affected bits */
146 | vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
147 | E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
148 |
149 | if (dev->flags & IFF_PROMISC) {
150 | vmolr |= E1000_VMOLR_UPE;
151 | rctl = E1000_READ_REG(hw, E1000_RCTL);
152 | rctl |= E1000_RCTL_UPE;
153 | E1000_WRITE_REG(hw, E1000_RCTL, rctl);
154 | } else {
155 | rctl = E1000_READ_REG(hw, E1000_RCTL);
156 | rctl &= ~E1000_RCTL_UPE;
157 | E1000_WRITE_REG(hw, E1000_RCTL, rctl);
158 | if (dev->flags & IFF_ALLMULTI) {
159 | vmolr |= E1000_VMOLR_MPME;
160 | } else {
161 | /*
162 | * Write addresses to the MTA, if the attempt fails
163 | * then we should just turn on promiscous mode so
164 | * that we can at least receive multicast traffic
165 | */
166 | if (igb_write_mc_addr_list(adapter->netdev) != 0)
167 | vmolr |= E1000_VMOLR_ROMPE;
168 | }
169 | #ifdef HAVE_SET_RX_MODE
170 | /*
171 | * Write addresses to available RAR registers, if there is not
172 | * sufficient space to store all the addresses then enable
173 | * unicast promiscous mode
174 | */
175 | if (igb_write_vm_addr_list(dev) < 0)
176 | vmolr |= E1000_VMOLR_UPE;
177 | #endif
178 | }
179 | E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
180 |
181 | return;
182 | }
183 |
184 | int igb_vmdq_set_mac(struct net_device *dev, void *p)
185 | {
186 | struct sockaddr *addr = p;
187 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
188 | struct igb_adapter *adapter = vadapter->real_adapter;
189 | int hw_queue = vadapter->rx_ring->queue_index +
190 | adapter->vfs_allocated_count;
191 |
192 | igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
193 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
194 | return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
195 | }
196 |
197 | int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
198 | {
199 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
200 | struct igb_adapter *adapter = vadapter->real_adapter;
201 |
202 | if (adapter->netdev->mtu < new_mtu) {
203 | DPRINTK(PROBE, INFO,
204 | "Set MTU on %s to >= %d before changing MTU on %s\n",
205 | adapter->netdev->name, new_mtu, dev->name);
206 | return -EINVAL;
207 | }
208 | dev->mtu = new_mtu;
209 | return 0;
210 | }
211 |
/* igb_vmdq_tx_timeout - TX watchdog callback for the pseudo netdev.
 * Intentionally empty: no per-queue recovery is performed here.
 */
void igb_vmdq_tx_timeout(struct net_device *dev)
{
	return;
}
216 |
217 | void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
218 | {
219 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
220 | struct igb_adapter *adapter = vadapter->real_adapter;
221 | struct e1000_hw *hw = &adapter->hw;
222 | int hw_queue = vadapter->rx_ring->queue_index +
223 | adapter->vfs_allocated_count;
224 |
225 | vadapter->vlgrp = grp;
226 |
227 | igb_enable_vlan_tags(adapter);
228 | E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
229 |
230 | return;
231 | }
232 | void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
233 | {
234 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
235 | struct igb_adapter *adapter = vadapter->real_adapter;
236 | #ifndef HAVE_NETDEV_VLAN_FEATURES
237 | struct net_device *v_netdev;
238 | #endif
239 | int hw_queue = vadapter->rx_ring->queue_index +
240 | adapter->vfs_allocated_count;
241 |
242 | /* attempt to add filter to vlvf array */
243 | igb_vlvf_set(adapter, vid, TRUE, hw_queue);
244 |
245 | #ifndef HAVE_NETDEV_VLAN_FEATURES
246 |
247 | /* Copy feature flags from netdev to the vlan netdev for this vid.
248 | * This allows things like TSO to bubble down to our vlan device.
249 | */
250 | v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
251 | v_netdev->features |= adapter->netdev->features;
252 | vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
253 | #endif
254 |
255 | return;
256 | }
257 | void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
258 | {
259 | struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
260 | struct igb_adapter *adapter = vadapter->real_adapter;
261 | int hw_queue = vadapter->rx_ring->queue_index +
262 | adapter->vfs_allocated_count;
263 |
264 | vlan_group_set_device(vadapter->vlgrp, vid, NULL);
265 | /* remove vlan from VLVF table array */
266 | igb_vlvf_set(adapter, vid, FALSE, hw_queue);
267 |
268 |
269 | return;
270 | }
271 |
272 | static int igb_vmdq_get_settings(struct net_device *netdev,
273 | struct ethtool_cmd *ecmd)
274 | {
275 | struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
276 | struct igb_adapter *adapter = vadapter->real_adapter;
277 | struct e1000_hw *hw = &adapter->hw;
278 | u32 status;
279 |
280 | if (hw->phy.media_type == e1000_media_type_copper) {
281 |
282 | ecmd->supported = (SUPPORTED_10baseT_Half |
283 | SUPPORTED_10baseT_Full |
284 | SUPPORTED_100baseT_Half |
285 | SUPPORTED_100baseT_Full |
286 | SUPPORTED_1000baseT_Full|
287 | SUPPORTED_Autoneg |
288 | SUPPORTED_TP);
289 | ecmd->advertising = ADVERTISED_TP;
290 |
291 | if (hw->mac.autoneg == 1) {
292 | ecmd->advertising |= ADVERTISED_Autoneg;
293 | /* the e1000 autoneg seems to match ethtool nicely */
294 | ecmd->advertising |= hw->phy.autoneg_advertised;
295 | }
296 |
297 | ecmd->port = PORT_TP;
298 | ecmd->phy_address = hw->phy.addr;
299 | } else {
300 | ecmd->supported = (SUPPORTED_1000baseT_Full |
301 | SUPPORTED_FIBRE |
302 | SUPPORTED_Autoneg);
303 |
304 | ecmd->advertising = (ADVERTISED_1000baseT_Full |
305 | ADVERTISED_FIBRE |
306 | ADVERTISED_Autoneg);
307 |
308 | ecmd->port = PORT_FIBRE;
309 | }
310 |
311 | ecmd->transceiver = XCVR_INTERNAL;
312 |
313 | status = E1000_READ_REG(hw, E1000_STATUS);
314 |
315 | if (status & E1000_STATUS_LU) {
316 |
317 | if ((status & E1000_STATUS_SPEED_1000) ||
318 | hw->phy.media_type != e1000_media_type_copper)
319 | ethtool_cmd_speed_set(ecmd, SPEED_1000);
320 | else if (status & E1000_STATUS_SPEED_100)
321 | ethtool_cmd_speed_set(ecmd, SPEED_100);
322 | else
323 | ethtool_cmd_speed_set(ecmd, SPEED_10);
324 |
325 | if ((status & E1000_STATUS_FD) ||
326 | hw->phy.media_type != e1000_media_type_copper)
327 | ecmd->duplex = DUPLEX_FULL;
328 | else
329 | ecmd->duplex = DUPLEX_HALF;
330 | } else {
331 | ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
332 | ecmd->duplex = -1;
333 | }
334 |
335 | ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
336 | return 0;
337 | }
338 |
339 |
340 | static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
341 | {
342 | struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
343 | struct igb_adapter *adapter = vadapter->real_adapter;
344 | return adapter->msg_enable;
345 | }
346 |
347 | static void igb_vmdq_get_drvinfo(struct net_device *netdev,
348 | struct ethtool_drvinfo *drvinfo)
349 | {
350 | struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
351 | struct igb_adapter *adapter = vadapter->real_adapter;
352 | struct net_device *main_netdev = adapter->netdev;
353 |
354 | strlcpy(drvinfo->driver, igb_driver_name, 32);
355 | strlcpy(drvinfo->version, igb_driver_version, 32);
356 |
357 | strlcpy(drvinfo->fw_version, "N/A", 4);
358 | snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
359 | vadapter->rx_ring->queue_index);
360 | drvinfo->n_stats = 0;
361 | drvinfo->testinfo_len = 0;
362 | drvinfo->regdump_len = 0;
363 | }
364 |
365 | static void igb_vmdq_get_ringparam(struct net_device *netdev,
366 | struct ethtool_ringparam *ring)
367 | {
368 | struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
369 |
370 | struct igb_ring *tx_ring = vadapter->tx_ring;
371 | struct igb_ring *rx_ring = vadapter->rx_ring;
372 |
373 | ring->rx_max_pending = IGB_MAX_RXD;
374 | ring->tx_max_pending = IGB_MAX_TXD;
375 | ring->rx_mini_max_pending = 0;
376 | ring->rx_jumbo_max_pending = 0;
377 | ring->rx_pending = rx_ring->count;
378 | ring->tx_pending = tx_ring->count;
379 | ring->rx_mini_pending = 0;
380 | ring->rx_jumbo_pending = 0;
381 | }
382 | static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
383 | {
384 | struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
385 | struct igb_adapter *adapter = vadapter->real_adapter;
386 |
387 | return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
388 | }
389 |
390 |
/* ethtool callbacks exposed by VMDq pool netdevs.  Only query-style
 * operations (plus the stock sg set helper) are wired up; everything
 * else falls back to the defaults. */
static struct ethtool_ops igb_vmdq_ethtool_ops = {
	.get_settings = igb_vmdq_get_settings,
	.get_drvinfo = igb_vmdq_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = igb_vmdq_get_ringparam,
	.get_rx_csum = igb_vmdq_get_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_msglevel = igb_vmdq_get_msglevel,
#ifdef NETIF_F_TSO
	.get_tso = ethtool_op_get_tso,
#endif
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
	.get_perm_addr = ethtool_op_get_perm_addr,
#endif
};
408 |
/* Attach the VMDq-specific ethtool callback table to @netdev. */
void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
}
413 |
414 |
415 | #endif /* CONFIG_IGB_VMDQ_NETDEV */
416 |
417 |
--------------------------------------------------------------------------------
/SimpleGBE/igb_vmdq.h:
--------------------------------------------------------------------------------
/* SPDX-License-Identifier: @SPDX@ */
/* Copyright(c) 2007 - 2024 Intel Corporation. */

#ifndef _IGB_VMDQ_H_
#define _IGB_VMDQ_H_

#ifdef CONFIG_IGB_VMDQ_NETDEV
/* net_device / ethtool entry points implemented by igb_vmdq.c for the
 * per-pool VMDq pseudo network devices. */
int igb_vmdq_open(struct net_device *dev);
int igb_vmdq_close(struct net_device *dev);
netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
void igb_vmdq_set_rx_mode(struct net_device *dev);
int igb_vmdq_set_mac(struct net_device *dev, void *addr);
int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
void igb_vmdq_tx_timeout(struct net_device *dev);
void igb_vmdq_vlan_rx_register(struct net_device *dev,
			       struct vlan_group *grp);
void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
#endif /* CONFIG_IGB_VMDQ_NETDEV */
#endif /* _IGB_VMDQ_H_ */
23 |
--------------------------------------------------------------------------------
/SimpleGBE/kcompat.h:
--------------------------------------------------------------------------------
1 | /*******************************************************************************
2 |
3 | Macros to compile Intel PRO/1000 Linux driver almost-as-is for Mac OS X.
4 |
5 | *******************************************************************************/
6 |
#ifndef _KCOMPAT_H_
#define _KCOMPAT_H_

/* NOTE(review): the include target is missing on the next line (it appears
 * to have been lost in a text conversion) -- confirm the original header. */
#include

/* Linux-kernel-style fixed-width integer aliases built on the Darwin
 * compiler-provided __intN_t types. */
typedef __int64_t s64;
typedef __int32_t s32;
typedef __int16_t s16;
typedef __int8_t s8;
typedef __uint64_t u64;
typedef __uint32_t u32;
typedef __uint16_t u16;
typedef __uint8_t u8;

/* Endianness-annotated types: in this port they carry no sparse-style
 * annotation and are simply plain unsigned integers of matching width. */
#ifndef __le16
#define __le16 __uint16_t
#endif
#ifndef __le32
#define __le32 __uint32_t
#endif
#ifndef __le64
#define __le64 __uint64_t
#endif
#ifndef __be16
#define __be16 __uint16_t
#endif
#ifndef __be32
#define __be32 __uint32_t
#endif
#ifndef __be64
#define __be64 __uint64_t
#endif
39 |
/* Map Linux's sk_buff onto the Darwin mbuf used by IOKit networking. */
#define sk_buff __mbuf

#define __iomem volatile

#define dma_addr_t IOPhysicalAddress

/* SMP cacheline alignment annotation compiles away in this port. */
#define ____cacheline_aligned_in_smp

#define netdev_features_t __uint32_t

/* Byte-order conversion mapped onto Darwin's OSByteOrder helpers. */
#define cpu_to_le16(x) OSSwapHostToLittleConstInt16(x)
#define cpu_to_le32(x) OSSwapHostToLittleConstInt32(x)
#define cpu_to_le64(x) OSSwapHostToLittleConstInt64(x)
#define le16_to_cpu(x) OSSwapLittleToHostInt16(x)
#define le32_to_cpu(x) OSSwapLittleToHostInt32(x)
#define be16_to_cpu(x) OSSwapBigToHostInt16(x)

/* Raw MMIO accessors (no byte swapping). */
#define writel(val, reg) _OSWriteInt32(reg, 0, val)
#define writew(val, reg) _OSWriteInt16(reg, 0, val)
#define readl(reg) _OSReadInt32(reg, 0)
#define readw(reg) _OSReadInt16(reg, 0)
#define read_barrier_depends()

/* Little-endian MMIO accessors; these expect a `baseAddr` variable to be
 * in scope at the call site. */
#define intelWriteMem8(reg, val8) _OSWriteInt8((baseAddr), (reg), (val8))
#define intelWriteMem16(reg, val16) OSWriteLittleInt16((baseAddr), (reg), (val16))
#define intelWriteMem32(reg, val32) OSWriteLittleInt32((baseAddr), (reg), (val32))
#define intelReadMem8(reg) _OSReadInt8((baseAddr), (reg))
#define intelReadMem16(reg) OSReadLittleInt16((baseAddr), (reg))
#define intelReadMem32(reg) OSReadLittleInt32((baseAddr), (reg))
#define intelFlush() OSReadLittleInt32((baseAddr), (E1000_STATUS))

/* Round x up to the next multiple of a (a must be a power of two). */
#ifdef ALIGN
#undef ALIGN
#endif
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))

/* NOTE(review): 32 even though the kext builds x86_64 (where unsigned
 * long is 64-bit) -- confirm consumers only use this via BITS_TO_LONGS. */
#define BITS_PER_LONG 32

#define BITS_TO_LONGS(bits) \
	(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)

/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC 0

typedef unsigned int __u32;

/* DMA unmap bookkeeping helpers: store the address/length directly in the
 * owning structure. */
#undef DEFINE_DMA_UNMAP_ADDR
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#undef DEFINE_DMA_UNMAP_LEN
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#undef dma_unmap_addr
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#undef dma_unmap_addr_set
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#undef dma_unmap_len
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#undef dma_unmap_len_set
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
98 |
99 |
/* Linux-compatible interface statistics block, filled in by the driver
 * and surfaced through the IOKit statistics plumbing. */
struct net_device_stats {
	unsigned long rx_packets; /* total packets received */
	unsigned long tx_packets; /* total packets transmitted */
	unsigned long rx_bytes; /* total bytes received */
	unsigned long tx_bytes; /* total bytes transmitted */
	unsigned long rx_errors; /* bad packets received */
	unsigned long tx_errors; /* packet transmit problems */
	unsigned long rx_dropped; /* no space in linux buffers */
	unsigned long tx_dropped; /* no space available in linux */
	unsigned long multicast; /* multicast packets received */
	unsigned long collisions;

	/* detailed rx_errors: */
	unsigned long rx_length_errors;
	unsigned long rx_over_errors; /* receiver ring buff overflow */
	unsigned long rx_crc_errors; /* recved pkt with crc error */
	unsigned long rx_frame_errors; /* recv'd frame alignment error */
	unsigned long rx_fifo_errors; /* recv'r fifo overrun */
	unsigned long rx_missed_errors; /* receiver missed packet */

	/* detailed tx_errors */
	unsigned long tx_aborted_errors;
	unsigned long tx_carrier_errors;
	unsigned long tx_fifo_errors;
	unsigned long tx_heartbeat_errors;
	unsigned long tx_window_errors;

	/* for cslip etc */
	unsigned long rx_compressed;
	unsigned long tx_compressed;
};
131 |
/* Circular doubly-linked list node, Linux style. */
struct list_head {
	struct list_head *next, *prev;
};
135 |
/* Skeleton of Linux's timer_list, kept field-compatible for shared
 * driver code; the lock and base fields of the original are unused. */
struct timer_list {
	struct list_head entry;
	unsigned long expires;

	//spinlock_t lock;
	unsigned long magic;

	void (*function)(unsigned long);
	unsigned long data;

	//struct tvec_t_base_s *base;
};
148 |
/* Skeleton of Linux's deferred-work item, field-compatible for shared
 * driver code. */
struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *);
	void *data;
	void *wq_data;
	struct timer_list timer;
};
157 |
/* Minimal hash-list (hlist) primitives mirroring the Linux kernel API:
 * a single-pointer head plus nodes whose pprev points at the previous
 * node's next pointer (or the head's first pointer). */
struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

/* Unlink @n from its list without clearing its own link fields. */
static inline void __hlist_del(struct hlist_node *n)
{
	*(n->pprev) = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* Unlink @n and reset its link fields so stale pointers are not reused. */
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = NULL;
	n->pprev = NULL;
}

/* Insert @n as the new first element of list @h. */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	n->pprev = &h->first;
	if (n->next)
		n->next->pprev = &n->next;
	h->first = n;
}

/* A list is empty when it has no first element. */
static inline int hlist_empty(const struct hlist_head *h)
{
	return h->first == NULL;
}
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
/* Prepare a node for insertion / mark it unlinked. */
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}
205 |
#ifndef rcu_head
/* Minimal stand-in for the kernel's RCU callback head. */
struct __kc_callback_head {
	struct __kc_callback_head *next;
	void (*func)(struct callback_head* head);
};
#define rcu_head __kc_callback_head
#endif
#ifndef kfree_rcu
/* this is placed here due to a lack of rcu_barrier in previous kernels */
#define kfree_rcu(_ptr, _offset) kfree(_ptr)
#endif /* kfree_rcu */

#ifndef rounddown_pow_of_two
/* Largest power of two <= n.  Fixed to match the Linux definition:
 * rounddown_pow_of_two(1) is 1 (2^0), not 0, and the whole expression
 * is parenthesized so it composes safely inside larger expressions. */
#define rounddown_pow_of_two(n) \
	(__builtin_constant_p(n) ? ( \
	 ((n) == 1) ? 1UL : \
	 (1UL << ilog2(n))) : \
	 (1UL << (fls_long(n) - 1)))
#endif
225 |
226 |
/* Ethernet frame geometry constants (from Linux's if_ether.h). */
#define ETH_ALEN 6 /* Octets in one ethernet addr */
#define ETH_HLEN 14 /* Total octets in header. */
#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
#define ETH_FCS_LEN 4 /* Octets in the FCS */

/* 802.1Q VLAN constants (from Linux's if_vlan.h). */
#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) that VLAN requires. */
#define VLAN_ETH_ALEN 6 /* Octets in one ethernet addr */
#define VLAN_ETH_HLEN 18 /* Total octets in header. */
#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096

/* Interface flags used by the rx-mode code. */
#define IFF_PROMISC 0x100 /* receive all packets */
#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/

#define NET_IP_ALIGN 2

/* netdev feature bits (subset of Linux's netdev_features.h). */
#define NETIF_F_SG 1 /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
#define NETIF_F_GSO 2048 /* Enable software GSO. */

#define NETIF_F_GRO 16384 /* Generic receive offload */
#define NETIF_F_LRO 32768 /* large receive offload */

#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
//#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */

#define DUPLEX_HALF 0x00
#define DUPLEX_FULL 0x01

/* Worst-case scatter/gather list length for a 64KB frame. */
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

/* PCI config-space offsets and bits used by the shared code. */
#define PCI_COMMAND 0x04 /* 16 bits */
#define PCI_EXP_DEVCTL 8
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_LNKCTL 16
#define PCIE_LINK_STATE_L0S 1
#define PCIE_LINK_STATE_L1 2

#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */

/* Energy Efficient Ethernet advertisement bits (MDIO). */
#define MDIO_EEE_100TX 0x0002 /* Advertise 100TX EEE cap */
#define MDIO_EEE_1000T 0x0004 /* Advertise 1000T EEE cap */

/* Single-node NUMA stubs. */
#define MAX_NUMNODES 1
#define first_online_node 0
#define node_online(node) ((node) == 0)
#define ether_crc_le(length, data) _kc_ether_crc_le(length, data)
293 | #ifndef is_zero_ether_addr
294 | #define is_zero_ether_addr _kc_is_zero_ether_addr
295 | static inline int _kc_is_zero_ether_addr(const u8 *addr)
296 | {
297 | return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
298 | }
299 | #endif
300 | #ifndef is_multicast_ether_addr
301 | #define is_multicast_ether_addr _kc_is_multicast_ether_addr
302 | static inline int _kc_is_multicast_ether_addr(const u8 *addr)
303 | {
304 | return addr[0] & 0x01;
305 | }
306 | #endif /* is_multicast_ether_addr */
307 |
/* Bitwise little-endian (reflected) CRC-32 over @length bytes of @data,
 * polynomial 0xedb88320, initial value 0xffffffff, no final inversion.
 * Used for multicast hash filtering. */
static inline unsigned int _kc_ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff; /* Initial value. */
	int i;

	for (i = 0; i < length; i++) {
		unsigned char octet = data[i];
		int bit;

		for (bit = 0; bit < 8; bit++) {
			unsigned int lsb = (crc ^ octet) & 1;

			crc >>= 1;
			if (lsb)
				crc ^= 0xedb88320U;
			octet >>= 1;
		}
	}
	return crc;
}
324 |
/* Linux errno values referenced by the shared driver code. */
#define EIO 5
#define ENOENT 2
#define ENOMEM 12
#define EBUSY 16
#define EINVAL 22 /* Invalid argument */
/* NOTE(review): 524 is Linux's internal ENOTSUPP; Darwin's ENOTSUP is 45.
 * Safe only while callers compare against these macros -- confirm. */
#define ENOTSUP 524
#define EOPNOTSUPP ENOTSUP

/*****************************************************************************/
/* Delay helpers mapped onto IOKit primitives. */
#define msleep(x) IOSleep(x)
#define udelay(x) IODelay(x)

/* Busy-wait for x milliseconds.  Wrapped in do/while(0) with (x)
 * parenthesized so the macro acts as a single statement (safe inside
 * unbraced if/else) and handles composite arguments like a + b. */
#define mdelay(x) do { for (int _kc_i = 0; _kc_i < (x); _kc_i++) udelay(1000); } while (0)
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* Approximate usleep_range by sleeping min microseconds, rounded up to
 * whole milliseconds. */
#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
340 |
341 |
342 | /*****************************************************************************/
343 |
344 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
345 |
346 |
347 | #ifdef __cplusplus
348 | class SimpleGBE;
349 | #else
350 | typedef void IOBufferMemoryDescriptor;
351 | typedef void IOPCIDevice;
352 | typedef void IOEthernetController;
353 | typedef void IOTimerEventSource;
354 | typedef void SimpleGBE;
355 | #endif
356 |
357 | #define prefetch(x)
358 | #define prefetchw(x)
359 | //#define unlikely(x) (x)
360 | #define unlikely(x) __builtin_expect(!!(x), 0)
361 | //#define likely(x) (x)
362 | #define likely(x) __builtin_expect(!!(x), 1)
363 | #define BUG()
364 |
365 | #define wmb() atomic_thread_fence(memory_order_release)
366 | #define rmb() atomic_thread_fence(memory_order_acquire)
367 | #define mmiowb()
368 | #define smp_mb() mb()
369 | #define smp_rmb() rmb()
370 | #define mb() atomic_thread_fence(memory_order_seq_cst)
371 |
372 | #define __MODULE_STRING(s) "x"
373 |
/** DPRINTK specific variables*/
#define DRV 0x00
#define PROBE 0x01

#define PFX "igb: "

#ifdef APPLE_OS_LOG

extern os_log_t igb_logger;

/** Have to redefine log types as macOS log doesn't have warning for DPRINTK*/
#define K_LOG_TYPE_NOTICE OS_LOG_TYPE_DEFAULT
#define K_LOG_TYPE_INFO OS_LOG_TYPE_INFO
#define K_LOG_TYPE_DEBUG OS_LOG_TYPE_DEBUG
#define K_LOG_TYPE_WARNING OS_LOG_TYPE_ERROR
#define K_LOG_TYPE_ERROR OS_LOG_TYPE_FAULT



#define pr_debug(args...) os_log_debug(igb_logger, PFX args)
#define pr_err(args...) os_log_error(igb_logger, PFX args)
/* NOTE(review): PFX##dev token-pastes "PFX" with the dev argument, which
 * only compiles if the pasted token exists -- likely meant plain PFX;
 * confirm whether dev_warn/dev_info are ever used under APPLE_OS_LOG. */
#define dev_warn(dev,args...) os_log_error(igb_logger, PFX##dev args)
#define dev_info(dev,args...) os_log_info(igb_logger, PFX##dev args)

#define IGB_ERR(args...) pr_err("IGBERR " PFX args)

#ifdef __APPLE__
#define DPRINTK(nlevel, klevel, fmt, args...) \
	os_log_with_type(igb_logger, K_LOG_TYPE_##klevel, PFX fmt, args)
#else
/* Non-Apple variant gates on adapter->msg_enable, so `adapter` must be
 * in scope at every call site. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
		__func__ , ## args))
#endif

#else

/* Plain IOLog fallbacks when unified logging is not enabled. */
#ifdef DEBUG
#define pr_debug(args...) IOLog(PFX args)
#else
#define pr_debug(args...)
#endif
#define pr_err(args...) IOLog(PFX args)
#define dev_warn(dev,args...) IOLog(PFX args)
#define dev_info(dev,args...) IOLog(PFX args)

#define IGB_ERR(args...) pr_err("IGBERR " PFX args)

#define DPRINTK(nlevel, klevel, fmt, args...) IOLog(PFX fmt, ##args)

#endif /* APPLE_OS_LOG */
426 |
/* Driver always runs in thread context in this port. */
#define in_interrupt() (0)

#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
#define __devinit
#define __devexit
#define WARN_ON(x)

/* Type-checked minimum of two values. */
#define min_t(type,x,y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })

/* Linux's struct iphdr maps to BSD's struct ip. */
#define iphdr ip
/* Opaque placeholders: the shared code only passes these pointers around. */
struct net_device { void* dummy; };
struct ifreq { void* dummy; };
441 |
/* Interrupt-handler return values, Linux style. */
enum irqreturn {
	IRQ_NONE,
	IRQ_HANDLED,
	IRQ_WAKE_THREAD,
};
typedef enum irqreturn irqreturn_t;

/* Minimal sk_buff queue head (lock elided in this port). */
typedef struct sk_buff_head {
	struct sk_buff *next;
	struct sk_buff *prev;
	u32 qlen;
	//spinlock_t lock;
} sk_buff_head;

/* Skeleton of Linux's NAPI context, field-compatible for shared code. */
typedef struct napi_struct {
	struct list_head poll_list;
	unsigned long state;
	int weight;
	int (*poll)(struct napi_struct *, int);

	unsigned int gro_count;
	//struct net_device *dev;
	struct list_head dev_list;
	struct sk_buff *gro_list;
	struct sk_buff *skb;
} napi_struct;

/* MSI-X vector table entry. */
struct msix_entry {
	u32 vector; /* kernel uses to write allocated vector */
	u16 entry; /* driver uses to specify entry, OS writes */
};

#define IFNAMSIZ 16
#define ____cacheline_internodealigned_in_smp

/* Transmit-path return codes. */
enum netdev_tx {
	__NETDEV_TX_MIN = -100, /* make sure enum is signed */
	NETDEV_TX_OK = 0x00, /* driver took care of packet */
	NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
	NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/* Type-checked maximum of two values. */
#define max_t(type, x, y) ({ \
	type __max1 = (x); \
	type __max2 = (y); \
	__max1 > __max2 ? __max1: __max2; })
489 |
static inline int test_bit(int nr, const volatile unsigned long * addr) {
	return (*addr & (1UL << nr)) != 0;
}

/* NOTE(review): the original text between test_bit and container_of
 * (source lines ~492-556) was lost in extraction; the two visible
 * definitions are reconstructed here from their standard Linux forms --
 * recover the missing span from upstream kcompat.h. */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})
558 |
/* Force a single, volatile-qualified access to x. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#ifndef READ_ONCE
#define READ_ONCE(_x) ACCESS_ONCE(_x)
#endif

/* Annotation only; expands to an empty statement. */
#define fallthrough do {} while (0) /* fallthrough */

#ifndef BIT
#define BIT(nr) (1UL << (nr))
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#endif /* _KCOMPAT_H_ */
575 |
--------------------------------------------------------------------------------