// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
#include <linux/kernel.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/wwan.h>

/* MHI wwan flags */
enum mhi_wwan_flags {
	MHI_WWAN_DL_CAP,
	MHI_WWAN_UL_CAP,
	MHI_WWAN_RX_REFILL,
};

#define MHI_WWAN_MAX_MTU	0x8000
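/* 0x8000 bytes (32 KiB) bounds both the size of the RX buffers queued to the
 * device and the largest packet accepted from the WWAN port on TX (see
 * mhi_wwan_ctrl_tx()).
 */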

struct mhi_wwan_dev {
	/* Lower level is an MHI device, upper level is a WWAN port */
	struct mhi_device *mhi_dev;
	struct wwan_port *wwan_port;

	/* State and capabilities */
	unsigned long flags;
	size_t mtu;

	/* Protect against concurrent TX and TX-completion (bh) */
	spinlock_t tx_lock;

	/* Protect RX budget and rx_refill scheduling */
	spinlock_t rx_lock;
	struct work_struct rx_refill;

	/* RX budget is initially set to the size of the MHI RX queue and is
	 * used to limit the number of allocated and queued packets. It is
	 * decremented on data queueing and incremented on data release.
	 */
	unsigned int rx_budget;
};

/* Increment RX budget and schedule RX refill if necessary */
static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
{
	spin_lock_bh(&mhiwwan->rx_lock);

	mhiwwan->rx_budget++;

	if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
		schedule_work(&mhiwwan->rx_refill);

	spin_unlock_bh(&mhiwwan->rx_lock);
}

/* Decrement RX budget if non-zero and return true on success */
static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
{
	bool ret = false;

	spin_lock_bh(&mhiwwan->rx_lock);

	if (mhiwwan->rx_budget) {
		mhiwwan->rx_budget--;
		if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
			ret = true;
	}

	spin_unlock_bh(&mhiwwan->rx_lock);

	return ret;
}

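/* The skb's destructor_arg carries the mhi_wwan_dev pointer so that the
 * budget can be credited back once the consumer (the WWAN port reader) frees
 * the buffer, which may happen well after the RX completion callback returns.
 */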
static void __mhi_skb_destructor(struct sk_buff *skb)
{
	/* RX buffer has been consumed, increase the allowed budget */
	mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg);
}

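/* Allocate and queue RX buffers while budget remains. On allocation or
 * queueing failure the loop stops; since every credit returned through
 * mhi_wwan_rx_budget_inc() reschedules this work, refilling resumes on its
 * own once buffers are consumed.
 */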
static void mhi_wwan_ctrl_refill_work(struct work_struct *work)
{
	struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill);
	struct mhi_device *mhi_dev = mhiwwan->mhi_dev;

	while (mhi_wwan_rx_budget_dec(mhiwwan)) {
		struct sk_buff *skb;

		skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL);
		if (!skb) {
			mhi_wwan_rx_budget_inc(mhiwwan);
			break;
		}

		/* To prevent unlimited buffer allocation if nothing consumes
		 * the RX buffers (passed to the WWAN core), track their
		 * lifespan so that no more than the allowed budget is ever
		 * allocated.
		 */
		skb->destructor = __mhi_skb_destructor;
		skb_shinfo(skb)->destructor_arg = mhiwwan;

		if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) {
			dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
			kfree_skb(skb);
			break;
		}
	}
}

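/* Port start: bring up the MHI channel(s), size the RX budget from the number
 * of free inbound descriptors, then prime the inbound queue synchronously.
 */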
static int mhi_wwan_ctrl_start(struct wwan_port *port)
{
	struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
	int ret;

	/* Start mhi device's channel(s) */
	ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
	if (ret)
		return ret;

	/* Don't allocate more buffers than MHI channel queue size */
	mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE);

	/* Add buffers to the MHI inbound queue */
	if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) {
		set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
		mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill);
	}

	return 0;
}

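/* Port stop: clearing MHI_WWAN_RX_REFILL under rx_lock before flushing the
 * work guarantees the refill work can neither run nor be re-queued (e.g. by a
 * late skb destructor) once this function returns.
 */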
static void mhi_wwan_ctrl_stop(struct wwan_port *port)
{
	struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);

	spin_lock_bh(&mhiwwan->rx_lock);
	clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
	spin_unlock_bh(&mhiwwan->rx_lock);

	cancel_work_sync(&mhiwwan->rx_refill);

	mhi_unprepare_from_transfer(mhiwwan->mhi_dev);
}

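/* On successful queueing the skb is owned by the MHI core and is released
 * from mhi_ul_xfer_cb(); on error it remains owned by the caller. Holding
 * tx_lock across the queue/fullness check keeps the flow-control decision
 * consistent with the TX-completion path below.
 */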
static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
	struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
	int ret;

	if (skb->len > mhiwwan->mtu)
		return -EMSGSIZE;

	if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags))
		return -EOPNOTSUPP;

	/* Queue the packet for MHI transfer and check fullness of the queue */
	spin_lock_bh(&mhiwwan->tx_lock);
	ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
		wwan_port_txoff(port);
	spin_unlock_bh(&mhiwwan->tx_lock);

	return ret;
}

static const struct wwan_port_ops wwan_pops = {
	.start = mhi_wwan_ctrl_start,
	.stop = mhi_wwan_ctrl_stop,
	.tx = mhi_wwan_ctrl_tx,
};

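/* TX completion: taking tx_lock here pairs with the locking in
 * mhi_wwan_ctrl_tx() so that txon/txoff decisions are serialized and a
 * wakeup cannot be lost between a fullness check and its txoff/txon call.
 */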
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
			   struct mhi_result *mhi_result)
{
	struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
	struct wwan_port *port = mhiwwan->wwan_port;
	struct sk_buff *skb = mhi_result->buf_addr;

	dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
		mhi_result->transaction_status, mhi_result->bytes_xferd);

	/* MHI core is done with the buffer, release it */
	consume_skb(skb);

	/* There is likely a new slot available in the MHI queue, re-allow TX */
	spin_lock_bh(&mhiwwan->tx_lock);
	if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
		wwan_port_txon(port);
	spin_unlock_bh(&mhiwwan->tx_lock);
}

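/* RX completion: the skb is forwarded on success and also on -EOVERFLOW,
 * since in the overflow case the buffer still carries bytes_xferd valid
 * bytes; any other error drops the buffer.
 */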
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
			   struct mhi_result *mhi_result)
{
	struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
	struct wwan_port *port = mhiwwan->wwan_port;
	struct sk_buff *skb = mhi_result->buf_addr;

	dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
		mhi_result->transaction_status, mhi_result->bytes_xferd);

	if (mhi_result->transaction_status &&
	    mhi_result->transaction_status != -EOVERFLOW) {
		kfree_skb(skb);
		return;
	}

	/* MHI core does not update skb->len, do it before forwarding */
	skb_put(skb, mhi_result->bytes_xferd);
	wwan_port_rx(port, skb);

	/* Do not increment the RX budget or refill RX buffers now; wait for
	 * the buffer to be consumed. Done from __mhi_skb_destructor().
	 */
}

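/* Probe: allocate per-port state, record UL/DL channel capabilities, and
 * register a WWAN port whose type is taken from the match table entry's
 * driver_data.
 */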
static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev,
			       const struct mhi_device_id *id)
{
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
	struct mhi_wwan_dev *mhiwwan;
	struct wwan_port *port;

	mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL);
	if (!mhiwwan)
		return -ENOMEM;

	mhiwwan->mhi_dev = mhi_dev;
	mhiwwan->mtu = MHI_WWAN_MAX_MTU;
	INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work);
	spin_lock_init(&mhiwwan->tx_lock);
	spin_lock_init(&mhiwwan->rx_lock);

	if (mhi_dev->dl_chan)
		set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags);
	if (mhi_dev->ul_chan)
		set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags);

	dev_set_drvdata(&mhi_dev->dev, mhiwwan);

	/* Register as a wwan port, id->driver_data contains wwan port type */
	port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
				&wwan_pops, NULL, mhiwwan);
	if (IS_ERR(port)) {
		kfree(mhiwwan);
		return PTR_ERR(port);
	}

	mhiwwan->wwan_port = port;

	return 0;
}

static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
{
	struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);

	wwan_remove_port(mhiwwan->wwan_port);
	kfree(mhiwwan);
}

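/* Each supported MHI channel name is mapped, via driver_data, to the WWAN
 * port type that wwan_create_port() exposes to userspace.
 */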
static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
	{ .chan = "DUN", .driver_data = WWAN_PORT_AT },
	{ .chan = "DUN2", .driver_data = WWAN_PORT_AT },
	{ .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
	{ .chan = "QMI", .driver_data = WWAN_PORT_QMI },
	{ .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
	{ .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE },
	{},
};
MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table);

static struct mhi_driver mhi_wwan_ctrl_driver = {
	.id_table = mhi_wwan_ctrl_match_table,
	.remove = mhi_wwan_ctrl_remove,
	.probe = mhi_wwan_ctrl_probe,
	.ul_xfer_cb = mhi_ul_xfer_cb,
	.dl_xfer_cb = mhi_dl_xfer_cb,
	.driver = {
		.name = "mhi_wwan_ctrl",
	},
};

module_mhi_driver(mhi_wwan_ctrl_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI WWAN CTRL Driver");
MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");