// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * SoundWire AMD Manager driver
 *
 * Copyright 2023-24 Advanced Micro Devices, Inc.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "bus.h"
#include "amd_init.h"
#include "amd_manager.h"

#define DRV_NAME "amd_sdw_manager"

#define to_amd_sdw(b)	container_of(b, struct amd_sdw_manager, bus)

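/*
 * Bring the manager IP to a known state: enable it, request and clear a
 * bus reset, then disable it again. Each step polls the corresponding
 * status register until the hardware acknowledges the request.
 */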
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				 AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* SoundWire manager bus reset */
	writel(AMD_SDW_BUS_RESET_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val,
				 (val & AMD_SDW_BUS_RESET_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	writel(AMD_SDW_BUS_RESET_CLEAR_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val, !val,
				 ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "Failed to reset SoundWire manager instance%d\n",
			amd_manager->instance);
		return ret;
	}

	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}

static int amd_enable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}

static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	/*
	 * After invoking the manager disable sequence, check whether the
	 * manager has already executed the clock stop sequence. If it has,
	 * skip polling the enable status register.
	 */
	val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
	if (val)
		return 0;
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}

static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	val |= sdw_manager_reg_mask_array[amd_manager->instance];
	writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	mutex_unlock(amd_manager->acp_sdw_lock);

	writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
	       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
	       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	writel(AMD_SDW_IRQ_ERROR_MASK, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}

static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	val &= ~sdw_manager_reg_mask_array[amd_manager->instance];
	writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	mutex_unlock(amd_manager->acp_sdw_lock);

	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}

static int amd_deinit_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	amd_disable_sdw_interrupts(amd_manager);
	return amd_disable_sdw_manager(amd_manager);
}

static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
{
	u32 frame_size;

	frame_size = (amd_manager->rows_index << 3) | amd_manager->cols_index;
	writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}

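/*
 * Build the upper and lower command words for one byte of a message.
 * The device number, command type, register address and (for writes)
 * the data byte are packed into the AMD_SDW_MCP_CMD_* fields.
 */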
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
				  int cmd_offset)
{
	u32 upper_data;
	u32 lower_data = 0;
	u16 addr;
	u8 upper_addr, lower_addr;
	u8 data = 0;

	addr = msg->addr + cmd_offset;
	upper_addr = (addr & 0xFF00) >> 8;
	lower_addr = addr & 0xFF;

	if (msg->flags == SDW_MSG_FLAG_WRITE)
		data = msg->buf[cmd_offset];

	upper_data = FIELD_PREP(AMD_SDW_MCP_CMD_DEV_ADDR, msg->dev_num);
	upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_COMMAND, msg->flags + 2);
	upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_HIGH, upper_addr);
	lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_LOW, lower_addr);
	lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_DATA, data);

	*upper_word = upper_data;
	*lower_word = lower_data;
}

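/*
 * Issue one immediate command and wait for its response: wait for the
 * previous command to drain, write the command words, poll for a valid
 * response, read it back and clear the response-valid status. On timeout
 * the negative error code from readl_poll_timeout() is returned in the
 * u64 so that callers can compare the result against -ETIMEDOUT.
 */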
static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lower_data,
				     u32 upper_data)
{
	u64 resp;
	u32 lower_resp, upper_resp;
	u32 sts;
	int ret;

	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_CMD_BUSY), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x previous cmd status clear failed\n",
			amd_manager->instance);
		return ret;
	}

	if (sts & AMD_SDW_IMM_RES_VALID) {
		dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
		writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	}
	writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
	writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);

	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 (sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd response timeout occurred\n",
			amd_manager->instance);
		return ret;
	}
	upper_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_UPPER_WORD);
	lower_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_LOWER_QWORD);

	writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd status retry failed\n",
			amd_manager->instance);
		return ret;
	}
	resp = upper_resp;
	resp = (resp << 32) | lower_resp;
	return resp;
}

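/*
 * Program the SCP address page registers (SCP_AddrPage1/2) with two write
 * commands before a paged transfer, and map the command responses to the
 * generic sdw_command_response codes.
 */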
static enum sdw_command_response
amd_program_scp_addr(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
	struct sdw_msg scp_msg = {0};
	u64 response_buf[2] = {0};
	u32 upper_data = 0, lower_data = 0;
	int index;

	scp_msg.dev_num = msg->dev_num;
	scp_msg.addr = SDW_SCP_ADDRPAGE1;
	scp_msg.buf = &msg->addr_page1;
	scp_msg.flags = SDW_MSG_FLAG_WRITE;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[0] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
	scp_msg.addr = SDW_SCP_ADDRPAGE2;
	scp_msg.buf = &msg->addr_page2;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[1] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);

	for (index = 0; index < 2; index++) {
		if (response_buf[index] == -ETIMEDOUT) {
			dev_err_ratelimited(amd_manager->dev,
					    "SCP_addrpage command timeout for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_TIMEOUT;
		} else if (!(response_buf[index] & AMD_SDW_MCP_RESP_ACK)) {
			if (response_buf[index] & AMD_SDW_MCP_RESP_NACK) {
				dev_err_ratelimited(amd_manager->dev,
						    "SCP_addrpage NACKed for Slave %d\n",
						    msg->dev_num);
				return SDW_CMD_FAIL;
			}
			dev_dbg_ratelimited(amd_manager->dev, "SCP_addrpage ignored for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_IGNORED;
		}
	}
	return SDW_CMD_OK;
}

static int amd_prep_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
	int ret;

	if (msg->page) {
		ret = amd_program_scp_addr(amd_manager, msg);
		if (ret) {
			msg->len = 0;
			return ret;
		}
	}
	switch (msg->flags) {
	case SDW_MSG_FLAG_READ:
	case SDW_MSG_FLAG_WRITE:
		break;
	default:
		dev_err(amd_manager->dev, "Invalid msg cmd: %d\n", msg->flags);
		return -EINVAL;
	}
	return 0;
}

static enum sdw_command_response amd_sdw_fill_msg_resp(struct amd_sdw_manager *amd_manager,
						       struct sdw_msg *msg, u64 response,
						       int offset)
{
	if (response & AMD_SDW_MCP_RESP_ACK) {
		if (msg->flags == SDW_MSG_FLAG_READ)
			msg->buf[offset] = FIELD_GET(AMD_SDW_MCP_RESP_RDATA, response);
	} else {
		if (response == -ETIMEDOUT) {
			dev_err_ratelimited(amd_manager->dev, "command timeout for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_TIMEOUT;
		} else if (response & AMD_SDW_MCP_RESP_NACK) {
			dev_err_ratelimited(amd_manager->dev,
					    "command response NACK received for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_FAIL;
		}
		dev_err_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
				    msg->dev_num);
		return SDW_CMD_IGNORED;
	}
	return SDW_CMD_OK;
}

static unsigned int _amd_sdw_xfer_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg,
				      int cmd_offset)
{
	u64 response;
	u32 upper_data = 0, lower_data = 0;

	amd_sdw_ctl_word_prep(&lower_data, &upper_data, msg, cmd_offset);
	response = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
	return amd_sdw_fill_msg_resp(amd_manager, msg, response, cmd_offset);
}

static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	int ret, i;

	ret = amd_prep_msg(amd_manager, msg);
	if (ret)
		return SDW_CMD_FAIL_OTHER;
	for (i = 0; i < msg->len; i++) {
		ret = _amd_sdw_xfer_msg(amd_manager, msg, i);
		if (ret)
			return ret;
	}
	return SDW_CMD_OK;
}

static void amd_sdw_fill_slave_status(struct amd_sdw_manager *amd_manager, u16 index, u32 status)
{
	switch (status) {
	case SDW_SLAVE_ATTACHED:
	case SDW_SLAVE_UNATTACHED:
	case SDW_SLAVE_ALERT:
		amd_manager->status[index] = status;
		break;
	default:
		amd_manager->status[index] = SDW_SLAVE_RESERVED;
		break;
	}
}

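/*
 * Decode the peripheral status returned in a ping response: the per-device
 * 2-bit status fields are extracted from the two MCP slave-status fields and
 * fed into the local status[] array via amd_sdw_fill_slave_status().
 */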
static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
{
	u64 slave_stat;
	u32 val;
	u16 dev_index;

	/* slave status response */
	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
	dev_dbg(amd_manager->dev, "slave_stat:0x%llx\n", slave_stat);
	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
		dev_dbg(amd_manager->dev, "val:0x%x\n", val);
		amd_sdw_fill_slave_status(amd_manager, dev_index, val);
	}
}

static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
{
	u64 response;

	mutex_lock(&amd_manager->bus.msg_lock);
	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
	mutex_unlock(&amd_manager->bus.msg_lock);
	amd_sdw_process_ping_status(response, amd_manager);
}

static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u64 response;
	u32 slave_stat;

	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
	/* slave status from ping response */
	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
	dev_dbg(amd_manager->dev, "slave_stat:0x%x\n", slave_stat);
	return slave_stat;
}

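/*
 * Compute bus transport parameters for every port runtime on this link:
 * sample interval from the current bus clock and stream rate, a fixed
 * per-port block offset, and the horizontal start/stop columns, then let
 * the core derive the matching peripheral port settings.
 */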
static int amd_sdw_compute_params(struct sdw_bus *bus)
{
	struct sdw_transport_data t_data = {0};
	struct sdw_master_runtime *m_rt;
	struct sdw_port_runtime *p_rt;
	struct sdw_bus_params *b_params = &bus->params;
	int port_bo, hstart, hstop, sample_int;
	unsigned int rate, bps;

	port_bo = 0;
	hstart = 1;
	hstop = bus->params.col - 1;
	t_data.hstop = hstop;
	t_data.hstart = hstart;

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		rate = m_rt->stream->params.rate;
		bps = m_rt->stream->params.bps;
		sample_int = (bus->params.curr_dr_freq / rate);
		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
			port_bo = (p_rt->num * 64) + 1;
			dev_dbg(bus->dev, "p_rt->num=%d hstart=%d hstop=%d port_bo=%d\n",
				p_rt->num, hstart, hstop, port_bo);
			sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
					      false, SDW_BLK_GRP_CNT_1, sample_int,
					      port_bo, port_bo >> 8, hstart, hstop,
					      SDW_BLK_PKG_PER_PORT, 0x0);

			sdw_fill_port_params(&p_rt->port_params,
					     p_rt->num, bps,
					     SDW_PORT_FLOW_MODE_ISOCH,
					     b_params->m_data_mode);
			t_data.hstart = hstart;
			t_data.hstop = hstop;
			t_data.block_offset = port_bo;
			t_data.sub_block_offset = 0;
		}
		sdw_compute_slave_ports(m_rt, &t_data);
	}
	return 0;
}

static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_params,
			       unsigned int bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 frame_fmt_reg, dpn_frame_fmt;

	dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num);
	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg;
		break;
	default:
		return -EINVAL;
	}

	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, p_params->flow_mode, AMD_DPN_FRAME_FMT_PFM);
	u32p_replace_bits(&dpn_frame_fmt, p_params->data_mode, AMD_DPN_FRAME_FMT_PDM);
	u32p_replace_bits(&dpn_frame_fmt, p_params->bps - 1, AMD_DPN_FRAME_FMT_WORD_LEN);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);
	return 0;
}

static int amd_sdw_transport_params(struct sdw_bus *bus,
				    struct sdw_transport_params *params,
				    enum sdw_reg_bank bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 dpn_frame_fmt;
	u32 dpn_sampleinterval;
	u32 dpn_hctrl;
	u32 dpn_offsetctrl;
	u32 dpn_lanectrl;
	u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg;
	u32 offset_reg, lane_ctrl_ch_en_reg;

	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	default:
		return -EINVAL;
	}
	writel(AMD_SDW_SSP_COUNTER_VAL, amd_manager->mmio + ACP_SW_SSP_COUNTER);

	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_pkg_mode, AMD_DPN_FRAME_FMT_BLK_PKG_MODE);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_grp_ctrl, AMD_DPN_FRAME_FMT_BLK_GRP_CTRL);
	u32p_replace_bits(&dpn_frame_fmt, SDW_STREAM_PCM, AMD_DPN_FRAME_FMT_PCM_OR_PDM);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);

	dpn_sampleinterval = params->sample_interval - 1;
	writel(dpn_sampleinterval, amd_manager->mmio + sample_int_reg);

	dpn_hctrl = FIELD_PREP(AMD_DPN_HCTRL_HSTOP, params->hstop);
	dpn_hctrl |= FIELD_PREP(AMD_DPN_HCTRL_HSTART, params->hstart);
	writel(dpn_hctrl, amd_manager->mmio + hctrl_dp0_reg);

	dpn_offsetctrl = FIELD_PREP(AMD_DPN_OFFSET_CTRL_1, params->offset1);
	dpn_offsetctrl |= FIELD_PREP(AMD_DPN_OFFSET_CTRL_2, params->offset2);
	writel(dpn_offsetctrl, amd_manager->mmio + offset_reg);

	/*
	 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
	 * parameters.
	 */
	dpn_lanectrl = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
	u32p_replace_bits(&dpn_lanectrl, params->lane_ctrl, AMD_DPN_CH_EN_LCTRL);
	writel(dpn_lanectrl, amd_manager->mmio + lane_ctrl_ch_en_reg);
	return 0;
}

static int amd_sdw_port_enable(struct sdw_bus *bus,
			       struct sdw_enable_ch *enable_ch,
			       unsigned int bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 dpn_ch_enable;
	u32 lane_ctrl_ch_en_reg;

	switch (amd_manager->instance) {
	case ACP_SDW0:
		lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
		break;
	case ACP_SDW1:
		lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
	 * parameters.
	 */
	dpn_ch_enable = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
	u32p_replace_bits(&dpn_ch_enable, enable_ch->ch_mask, AMD_DPN_CH_EN_CHMASK);
	if (enable_ch->enable)
		writel(dpn_ch_enable, amd_manager->mmio + lane_ctrl_ch_en_reg);
	else
		writel(0, amd_manager->mmio + lane_ctrl_ch_en_reg);
	return 0;
}

static int sdw_master_read_amd_prop(struct sdw_bus *bus)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	struct fwnode_handle *link;
	struct sdw_master_prop *prop;
	u32 quirk_mask = 0;
	u32 wake_en_mask = 0;
	u32 power_mode_mask = 0;
	char name[32];

	prop = &bus->prop;
	/* Find manager handle */
	snprintf(name, sizeof(name), "mipi-sdw-link-%d-subproperties", bus->link_id);
	link = device_get_named_child_node(bus->dev, name);
	if (!link) {
		dev_err(bus->dev, "Manager node %s not found\n", name);
		return -EIO;
	}
	fwnode_property_read_u32(link, "amd-sdw-enable", &quirk_mask);
	if (!(quirk_mask & AMD_SDW_QUIRK_MASK_BUS_ENABLE))
		prop->hw_disabled = true;
	prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
		       SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;

	fwnode_property_read_u32(link, "amd-sdw-wakeup-enable", &wake_en_mask);
	amd_manager->wake_en_mask = wake_en_mask;
	fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
	amd_manager->power_mode_mask = power_mode_mask;
	return 0;
}

static int amd_prop_read(struct sdw_bus *bus)
{
	sdw_master_read_prop(bus);
	sdw_master_read_amd_prop(bus);
	return 0;
}

static const struct sdw_master_port_ops amd_sdw_port_ops = {
	.dpn_set_port_params = amd_sdw_port_params,
	.dpn_set_port_transport_params = amd_sdw_transport_params,
	.dpn_port_enable_ch = amd_sdw_port_enable,
};

static const struct sdw_master_ops amd_sdw_ops = {
	.read_prop = amd_prop_read,
	.xfer_msg = amd_sdw_xfer_msg,
	.read_ping_status = amd_sdw_read_ping_status,
};

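/*
 * DAI hw_params callback: translate the ALSA hw_params (channels, rate,
 * sample width, direction) into a SoundWire stream and port configuration
 * and add this manager to the stream.
 */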
static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
	struct sdw_amd_dai_runtime *dai_runtime;
	struct sdw_stream_config sconfig;
	struct sdw_port_config *pconfig;
	int ch, dir;
	int ret;

	dai_runtime = amd_manager->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ch = params_channels(params);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		dir = SDW_DATA_DIR_RX;
	else
		dir = SDW_DATA_DIR_TX;
	dev_dbg(amd_manager->dev, "dir:%d dai->id:0x%x\n", dir, dai->id);

	sconfig.direction = dir;
	sconfig.ch_count = ch;
	sconfig.frame_rate = params_rate(params);
	sconfig.type = dai_runtime->stream_type;

	sconfig.bps = snd_pcm_format_width(params_format(params));

	/* Port configuration */
	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
	if (!pconfig) {
		ret = -ENOMEM;
		goto error;
	}

	pconfig->num = dai->id;
	pconfig->ch_mask = (1 << ch) - 1;
	ret = sdw_stream_add_master(&amd_manager->bus, &sconfig,
				    pconfig, 1, dai_runtime->stream);
	if (ret)
		dev_err(amd_manager->dev, "add manager to stream failed:%d\n", ret);

	kfree(pconfig);
error:
	return ret;
}

static int amd_sdw_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
	struct sdw_amd_dai_runtime *dai_runtime;
	int ret;

	dai_runtime = amd_manager->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ret = sdw_stream_remove_master(&amd_manager->bus, dai_runtime->stream);
	if (ret < 0)
		dev_err(dai->dev, "remove manager from stream %s failed: %d\n",
			dai_runtime->stream->name, ret);
	return ret;
}

static int amd_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
	struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
	struct sdw_amd_dai_runtime *dai_runtime;

	dai_runtime = amd_manager->dai_runtime_array[dai->id];
	if (stream) {
		/* first paranoia check */
		if (dai_runtime) {
			dev_err(dai->dev, "dai_runtime already allocated for dai %s\n", dai->name);
			return -EINVAL;
		}

		/* allocate and set dai_runtime info */
		dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
		if (!dai_runtime)
			return -ENOMEM;

		dai_runtime->stream_type = SDW_STREAM_PCM;
		dai_runtime->bus = &amd_manager->bus;
		dai_runtime->stream = stream;
		amd_manager->dai_runtime_array[dai->id] = dai_runtime;
	} else {
		/* second paranoia check */
		if (!dai_runtime) {
			dev_err(dai->dev, "dai_runtime not allocated for dai %s\n", dai->name);
			return -EINVAL;
		}

		/* for NULL stream we release allocated dai_runtime */
		kfree(dai_runtime);
		amd_manager->dai_runtime_array[dai->id] = NULL;
	}
	return 0;
}

static int amd_pcm_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
	return amd_set_sdw_stream(dai, stream, direction);
}

static void *amd_get_sdw_stream(struct snd_soc_dai *dai, int direction)
{
	struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
	struct sdw_amd_dai_runtime *dai_runtime;

	dai_runtime = amd_manager->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return ERR_PTR(-EINVAL);

	return dai_runtime->stream;
}

static const struct snd_soc_dai_ops amd_sdw_dai_ops = {
	.hw_params = amd_sdw_hw_params,
	.hw_free = amd_sdw_hw_free,
	.set_stream = amd_pcm_set_sdw_stream,
	.get_stream = amd_get_sdw_stream,
};

static const struct snd_soc_component_driver amd_sdw_dai_component = {
	.name = "soundwire",
};

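/*
 * Register one CPU DAI per data port: output ports first as playback DAIs,
 * then input ports as capture DAIs. The dai_runtime_array allocated here is
 * indexed by DAI id in the stream callbacks.
 */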
static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
{
	struct sdw_amd_dai_runtime **dai_runtime_array;
	struct snd_soc_dai_driver *dais;
	struct snd_soc_pcm_stream *stream;
	struct device *dev;
	int i, num_dais;

	dev = amd_manager->dev;
	num_dais = amd_manager->num_dout_ports + amd_manager->num_din_ports;
	dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	dai_runtime_array = devm_kcalloc(dev, num_dais,
					 sizeof(struct sdw_amd_dai_runtime *),
					 GFP_KERNEL);
	if (!dai_runtime_array)
		return -ENOMEM;
	amd_manager->dai_runtime_array = dai_runtime_array;
	for (i = 0; i < num_dais; i++) {
		dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW%d Pin%d", amd_manager->instance,
					      i);
		if (!dais[i].name)
			return -ENOMEM;
		if (i < amd_manager->num_dout_ports)
			stream = &dais[i].playback;
		else
			stream = &dais[i].capture;

		stream->channels_min = 2;
		stream->channels_max = 2;
		stream->rates = SNDRV_PCM_RATE_48000;
		stream->formats = SNDRV_PCM_FMTBIT_S16_LE;

		dais[i].ops = &amd_sdw_dai_ops;
		dais[i].id = i;
	}

	return devm_snd_soc_register_component(dev, &amd_sdw_dai_component,
					       dais, num_dais);
}

static void amd_sdw_update_slave_status_work(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
		container_of(work, struct amd_sdw_manager, amd_sdw_work);
	int retry_count = 0;

	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
		writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
		writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	}

update_status:
	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
	/*
	 * During the peripheral enumeration sequence, the SoundWire manager
	 * interrupts are masked. Once device number programming is done for
	 * all peripherals, the interrupts are unmasked again and the
	 * peripheral device status is re-read via a ping command, so that
	 * every peripheral device ends up enumerated and initialized properly.
	 */
	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
		if (retry_count++ < SDW_MAX_DEVICES) {
			writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
			       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
			writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
			       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
			amd_sdw_read_and_process_ping_status(amd_manager);
			goto update_status;
		} else {
			dev_err_ratelimited(amd_manager->dev,
					    "Device0 detected after %d iterations\n",
					    retry_count);
		}
	}
}

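/*
 * Fold the two interrupt status registers (devices 0-7 and 8-11) into the
 * per-device status[] array. A Device0-attached event clears the cached
 * status first, since Device0 indicates a not-yet-enumerated peripheral.
 */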
static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
					struct amd_sdw_manager *amd_manager)
{
	u64 slave_stat;
	u32 val;
	int dev_index;

	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
		memset(amd_manager->status, 0, sizeof(amd_manager->status));
	slave_stat = status_change_0to7;
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
	dev_dbg(amd_manager->dev, "status_change_0to7:0x%x status_change_8to11:0x%x\n",
		status_change_0to7, status_change_8to11);
	if (slave_stat) {
		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
				amd_sdw_fill_slave_status(amd_manager, dev_index, val);
			}
		}
	}
}

static void amd_sdw_process_wake_event(struct amd_sdw_manager *amd_manager)
{
	pm_request_resume(amd_manager->dev);
	writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
}

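/*
 * Deferred interrupt handling: read both state-change status registers,
 * handle wake events, PREQ interrupts and peripheral status changes, then
 * schedule the slave-status work and clear the status registers.
 */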
static void amd_sdw_irq_thread(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
		container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
	u32 status_change_8to11;
	u32 status_change_0to7;

	status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
	dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
		amd_manager->instance, status_change_0to7, status_change_8to11);
	if (status_change_8to11 & AMD_SDW_WAKE_STAT_MASK)
		return amd_sdw_process_wake_event(amd_manager);

	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
		amd_sdw_read_and_process_ping_status(amd_manager);
	} else {
		/* Check for the updated status on peripheral device */
		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
	}
	if (status_change_8to11 || status_change_0to7)
		schedule_work(&amd_manager->amd_sdw_work);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}

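/*
 * Start the manager unless firmware marked the link as disabled: initialize
 * the IP, unmask interrupts, enable the manager, program the frame shape,
 * and finally enable runtime PM with autosuspend.
 */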
int amd_sdw_manager_start(struct amd_sdw_manager *amd_manager)
{
	struct sdw_master_prop *prop;
	int ret;

	prop = &amd_manager->bus.prop;
	if (!prop->hw_disabled) {
		ret = amd_init_sdw_manager(amd_manager);
		if (ret)
			return ret;
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return ret;
		amd_sdw_set_frameshape(amd_manager);
	}
	/* Enable runtime PM */
	pm_runtime_set_autosuspend_delay(amd_manager->dev, AMD_SDW_MASTER_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(amd_manager->dev);
	pm_runtime_mark_last_busy(amd_manager->dev);
	pm_runtime_set_active(amd_manager->dev);
	pm_runtime_enable(amd_manager->dev);
	return 0;
}

static int amd_sdw_manager_probe(struct platform_device *pdev)
{
	const struct acp_sdw_pdata *pdata = pdev->dev.platform_data;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct sdw_master_prop *prop;
	struct sdw_bus_params *params;
	struct amd_sdw_manager *amd_manager;
	int ret;

	amd_manager = devm_kzalloc(dev, sizeof(struct amd_sdw_manager), GFP_KERNEL);
	if (!amd_manager)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOMEM;

	amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res));
	if (!amd_manager->acp_mmio) {
		dev_err(dev, "mmio not found\n");
		return -ENOMEM;
	}
	amd_manager->instance = pdata->instance;
	amd_manager->mmio = amd_manager->acp_mmio +
			    (amd_manager->instance * SDW_MANAGER_REG_OFFSET);
	amd_manager->acp_sdw_lock = pdata->acp_sdw_lock;
	amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS);
	amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS);
	amd_manager->dev = dev;
	amd_manager->bus.ops = &amd_sdw_ops;
	amd_manager->bus.port_ops = &amd_sdw_port_ops;
	amd_manager->bus.compute_params = &amd_sdw_compute_params;
	amd_manager->bus.clk_stop_timeout = 200;
	amd_manager->bus.link_id = amd_manager->instance;

	/*
	 * Due to BIOS compatibility, the two links are exposed within
	 * the scope of a single controller. If this changes, the
	 * controller_id will have to be updated with drv_data
	 * information.
	 */
	amd_manager->bus.controller_id = 0;

	switch (amd_manager->instance) {
	case ACP_SDW0:
		amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
		amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS;
		break;
	case ACP_SDW1:
		amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS;
		amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS;
		break;
	default:
		return -EINVAL;
	}

	params = &amd_manager->bus.params;

	params->col = AMD_SDW_DEFAULT_COLUMNS;
	params->row = AMD_SDW_DEFAULT_ROWS;
	prop = &amd_manager->bus.prop;
	prop->clk_freq = &amd_sdw_freq_tbl[0];
	prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;
	prop->max_clk_freq = AMD_SDW_DEFAULT_CLK_FREQ;

	ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
	if (ret) {
		dev_err(dev, "Failed to register SoundWire manager(%d)\n", ret);
		return ret;
	}
	ret = amd_sdw_register_dais(amd_manager);
	if (ret) {
		dev_err(dev, "CPU DAI registration failed\n");
		sdw_bus_master_delete(&amd_manager->bus);
		return ret;
	}
	dev_set_drvdata(dev, amd_manager);
	INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
	INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
	return 0;
}

static void amd_sdw_manager_remove(struct platform_device *pdev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(&pdev->dev);
	int ret;

	pm_runtime_disable(&pdev->dev);
	amd_disable_sdw_interrupts(amd_manager);
	sdw_bus_master_delete(&amd_manager->bus);
	ret = amd_disable_sdw_manager(amd_manager);
	if (ret)
		dev_err(&pdev->dev, "Failed to disable device (%pe)\n", ERR_PTR(ret));
}

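/*
 * Enter clock stop mode: prepare and stop the bus clock via the core
 * helpers, poll for the hardware clock-stop-done indication and, if wake
 * is enabled in firmware, arm the wake-enable register. Errors are logged
 * but not propagated so that suspend can continue.
 */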
static int amd_sdw_clock_stop(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	ret = sdw_bus_prep_clk_stop(&amd_manager->bus);
	if (ret < 0 && ret != -ENODATA) {
		dev_err(amd_manager->dev, "prepare clock stop failed %d", ret);
		return 0;
	}
	ret = sdw_bus_clk_stop(&amd_manager->bus);
	if (ret < 0 && ret != -ENODATA) {
		dev_err(amd_manager->dev, "bus clock stop failed %d", ret);
		return 0;
	}

	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
				 (val & AMD_SDW_CLK_STOP_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x clock stop failed\n", amd_manager->instance);
		return 0;
	}

	amd_manager->clk_stopped = true;
	if (amd_manager->wake_en_mask)
		writel(0x01, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));

	dev_dbg(amd_manager->dev, "SDW%x clock stop successful\n", amd_manager->instance);
	return 0;
}

static int amd_sdw_clock_stop_exit(struct amd_sdw_manager *amd_manager)
{
	int ret;
	u32 val;

	if (amd_manager->clk_stopped) {
		val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		val |= AMD_SDW_CLK_RESUME_REQ;
		writel(val, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
					 (val & AMD_SDW_CLK_RESUME_DONE), ACP_DELAY_US,
					 AMD_SDW_TIMEOUT);
		if (val & AMD_SDW_CLK_RESUME_DONE) {
			writel(0, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
			ret = sdw_bus_exit_clk_stop(&amd_manager->bus);
			if (ret < 0)
				dev_err(amd_manager->dev, "bus failed to exit clock stop %d\n",
					ret);
			amd_manager->clk_stopped = false;
		}
	}
	if (amd_manager->clk_stopped) {
		dev_err(amd_manager->dev, "SDW%x clock stop exit failed\n", amd_manager->instance);
		return 0;
	}
	dev_dbg(amd_manager->dev, "SDW%x clock stop exit successful\n", amd_manager->instance);
	return 0;
}

static int amd_resume_child_device(struct device *dev, void *data)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	int ret;

	if (!slave->probed) {
		dev_dbg(dev, "skipping device, no probed driver\n");
		return 0;
	}
	if (!slave->dev_num_sticky) {
		dev_dbg(dev, "skipping device, never detected on bus\n");
		return 0;
	}
	ret = pm_request_resume(dev);
	if (ret < 0) {
		dev_err(dev, "pm_request_resume failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static int __maybe_unused amd_pm_prepare(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}
	/*
	 * When multiple peripheral devices are connected over the same link
	 * and the SoundWire manager device is not in runtime suspend, device
	 * alerts have been observed to go missing on AMD platforms in clock
	 * stop mode 0 unless the manager is resumed here in pm_prepare.
	 */
	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		ret = pm_request_resume(dev);
		if (ret < 0) {
			dev_err(bus->dev, "pm_request_resume failed: %d\n", ret);
			return 0;
		}
	}
	/*
	 * To force peripheral devices into the system-level suspend state,
	 * resume them from runtime suspend first. Otherwise, alert status
	 * cannot be dispatched to the peripheral drivers during system-level
	 * resume, as the devices would still be in runtime suspend.
	 */
	ret = device_for_each_child(bus->dev, NULL, amd_resume_child_device);
	if (ret < 0)
		dev_err(dev, "amd_resume_child_device failed: %d\n", ret);
	return 0;
}

static int __maybe_unused amd_suspend(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		return amd_sdw_clock_stop(amd_manager);
	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
		/*
		 * As per the hardware programming sequence on AMD platforms,
		 * clock stop must be invoked before powering off the link.
		 */
		ret = amd_sdw_clock_stop(amd_manager);
		if (ret)
			return ret;
		return amd_deinit_sdw_manager(amd_manager);
	}
	return 0;
}

static int __maybe_unused amd_suspend_runtime(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}
	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		return amd_sdw_clock_stop(amd_manager);
	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
		ret = amd_sdw_clock_stop(amd_manager);
		if (ret)
			return ret;
		return amd_deinit_sdw_manager(amd_manager);
	}
	return 0;
}

static int __maybe_unused amd_resume_runtime(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;
	u32 val;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		return amd_sdw_clock_stop_exit(amd_manager);
	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
		val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		if (val) {
			val |= AMD_SDW_CLK_RESUME_REQ;
			writel(val, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
			ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
						 (val & AMD_SDW_CLK_RESUME_DONE), ACP_DELAY_US,
						 AMD_SDW_TIMEOUT);
			if (val & AMD_SDW_CLK_RESUME_DONE) {
				writel(0, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
				amd_manager->clk_stopped = false;
			}
		}
		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
		amd_init_sdw_manager(amd_manager);
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return ret;
		amd_sdw_set_frameshape(amd_manager);
	}
	return 0;
}

static const struct dev_pm_ops amd_pm = {
	.prepare = amd_pm_prepare,
	SET_SYSTEM_SLEEP_PM_OPS(amd_suspend, amd_resume_runtime)
	SET_RUNTIME_PM_OPS(amd_suspend_runtime, amd_resume_runtime, NULL)
};

static struct platform_driver amd_sdw_driver = {
	.probe = &amd_sdw_manager_probe,
	.remove_new = &amd_sdw_manager_remove,
	.driver = {
		.name = "amd_sdw_manager",
		.pm = &amd_pm,
	}
};
module_platform_driver(amd_sdw_driver);

MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_DESCRIPTION("AMD SoundWire driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:" DRV_NAME);
