// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//
#include "ops.h"
#include "sof-priv.h"
static int sof_restore_kcontrols(struct snd_sof_dev *sdev)
{
struct snd_sof_control *scontrol;
int ipc_cmd, ctrl_type;
int ret = 0;
/* restore kcontrol values */
list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
/* reset readback offset for scontrol after resuming */
scontrol->readback_offset = 0;
/* notify DSP of kcontrol values */
switch (scontrol->cmd) {
case SOF_CTRL_CMD_VOLUME:
case SOF_CTRL_CMD_ENUM:
case SOF_CTRL_CMD_SWITCH:
ipc_cmd = SOF_IPC_COMP_SET_VALUE;
ctrl_type = SOF_CTRL_TYPE_VALUE_CHAN_SET;
ret = snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
ipc_cmd, ctrl_type,
scontrol->cmd,
true);
break;
case SOF_CTRL_CMD_BINARY:
ipc_cmd = SOF_IPC_COMP_SET_DATA;
ctrl_type = SOF_CTRL_TYPE_DATA_SET;
ret = snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
ipc_cmd, ctrl_type,
scontrol->cmd,
true);
break;
default:
break;
}
if (ret < 0) {
dev_err(sdev->dev,
"error: failed kcontrol value set for widget: %d\n",
scontrol->comp_id);
return ret;
}
}
return 0;
}
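
/*
 * Rebuild the DSP topology after the firmware has been rebooted: re-create
 * widgets (in reverse order), re-connect routes, re-apply DAI configs,
 * complete the pipelines and finally restore kcontrol values.
 */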
static int sof_restore_pipelines(struct snd_sof_dev *sdev)
{
struct snd_sof_widget *swidget;
struct snd_sof_route *sroute;
struct sof_ipc_pipe_new *pipeline;
struct snd_sof_dai *dai;
struct sof_ipc_comp_dai *comp_dai;
struct sof_ipc_cmd_hdr *hdr;
int ret;
/* restore pipeline components */
list_for_each_entry_reverse(swidget, &sdev->widget_list, list) {
struct sof_ipc_comp_reply r;
/* skip if there is no private data */
if (!swidget->private)
continue;
switch (swidget->id) {
case snd_soc_dapm_dai_in:
case snd_soc_dapm_dai_out:
dai = swidget->private;
comp_dai = &dai->comp_dai;
ret = sof_ipc_tx_message(sdev->ipc,
comp_dai->comp.hdr.cmd,
comp_dai, sizeof(*comp_dai),
&r, sizeof(r));
break;
case snd_soc_dapm_scheduler:
/*
* During suspend, all DSP cores are powered off.
* Therefore upon resume, create the pipeline comp
* and power up the core that the pipeline is
* scheduled on.
*/
pipeline = swidget->private;
ret = sof_load_pipeline_ipc(sdev, pipeline, &r);
break;
default:
hdr = swidget->private;
ret = sof_ipc_tx_message(sdev->ipc, hdr->cmd,
swidget->private, hdr->size,
&r, sizeof(r));
break;
}
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to load widget type %d with ID: %d\n",
swidget->widget->id, swidget->comp_id);
return ret;
}
}
/* restore pipeline connections */
list_for_each_entry_reverse(sroute, &sdev->route_list, list) {
struct sof_ipc_pipe_comp_connect *connect;
struct sof_ipc_reply reply;
/* skip if there's no private data */
if (!sroute->private)
continue;
connect = sroute->private;
/* send ipc */
ret = sof_ipc_tx_message(sdev->ipc,
connect->hdr.cmd,
connect, sizeof(*connect),
&reply, sizeof(reply));
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to load route sink %s control %s source %s\n",
sroute->route->sink,
sroute->route->control ? sroute->route->control
: "none",
sroute->route->source);
return ret;
}
}
/* restore dai links */
list_for_each_entry_reverse(dai, &sdev->dai_list, list) {
struct sof_ipc_reply reply;
struct sof_ipc_dai_config *config = dai->dai_config;
if (!config) {
dev_err(sdev->dev, "error: no config for DAI %s\n",
dai->name);
continue;
}
/*
* The link DMA channel would be invalidated for running
* streams but not for streams that were in the PAUSED
* state during suspend. So invalidate it here before setting
* the dai config in the DSP.
*/
if (config->type == SOF_DAI_INTEL_HDA)
config->hda.link_dma_ch = DMA_CHAN_INVALID;
ret = sof_ipc_tx_message(sdev->ipc,
config->hdr.cmd, config,
config->hdr.size,
&reply, sizeof(reply));
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to set dai config for %s\n",
dai->name);
return ret;
}
}
/* complete pipeline */
list_for_each_entry(swidget, &sdev->widget_list, list) {
switch (swidget->id) {
case snd_soc_dapm_scheduler:
swidget->complete =
snd_sof_complete_pipeline(sdev, swidget);
break;
default:
break;
}
}
/* restore pipeline kcontrols */
ret = sof_restore_kcontrols(sdev);
if (ret < 0)
dev_err(sdev->dev,
"error: restoring kcontrols after resume\n");
return ret;
}
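
/* Send a PM context save or restore IPC message to the DSP firmware */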
static int sof_send_pm_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
{
struct sof_ipc_pm_ctx pm_ctx;
struct sof_ipc_reply reply;
memset(&pm_ctx, 0, sizeof(pm_ctx));
/* configure pm context ipc message */
pm_ctx.hdr.size = sizeof(pm_ctx);
pm_ctx.hdr.cmd = SOF_IPC_GLB_PM_MSG | cmd;
/* send pm context ipc to dsp */
return sof_ipc_tx_message(sdev->ipc, pm_ctx.hdr.cmd, &pm_ctx,
sizeof(pm_ctx), &reply, sizeof(reply));
}
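
/*
 * Flag all suspended FE streams as not prepared so hw_params is applied
 * again on resume, and let the platform layer do the same for BE DAIs.
 */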
static int sof_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
struct snd_pcm_substream *substream;
struct snd_sof_pcm *spcm;
snd_pcm_state_t state;
int dir;
/*
 * SOF requires hw_params to be set up again internally upon resume,
 * so clear the prepared flag for any stream that was suspended to
 * ensure hw_params is re-applied before it restarts.
 */
list_for_each_entry(spcm, &sdev->pcm_list, list) {
for (dir = 0; dir <= SNDRV_PCM_STREAM_CAPTURE; dir++) {
substream = spcm->stream[dir].substream;
if (!substream || !substream->runtime)
continue;
state = substream->runtime->status->state;
if (state == SNDRV_PCM_STATE_SUSPENDED)
spcm->prepared[dir] = false;
}
}
/* set internal flag for BE */
return snd_sof_dsp_hw_params_upon_resume(sdev);
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
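/*
 * Snapshot IO-mapped debugfs entries that are only accessible in D0 so
 * their contents remain readable after the DSP has been suspended.
 */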
static void sof_cache_debugfs(struct snd_sof_dev *sdev)
{
struct snd_sof_dfsentry *dfse;
list_for_each_entry(dfse, &sdev->dfsentry_list, list) {
/* nothing to do if debugfs buffer is not IO mem */
if (dfse->type == SOF_DFSENTRY_TYPE_BUF)
continue;
/* cache memory that is only accessible in D0 */
if (dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY)
memcpy_fromio(dfse->cache_buf, dfse->io_mem,
dfse->size);
}
}
#endif
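
/*
 * Common resume handler for system and runtime resume: power up the DSP,
 * reload and boot the firmware, re-enable DMA tracing, restore the
 * topology and ask the firmware to restore its PM context.
 */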
static int sof_resume(struct device *dev, bool runtime_resume)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
int ret;
/* do nothing if dsp resume callbacks are not set */
if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume)
return 0;
/*
* if the runtime_resume flag is set, call the runtime_resume routine
* or else call the system resume routine
*/
if (runtime_resume)
ret = snd_sof_dsp_runtime_resume(sdev);
else
ret = snd_sof_dsp_resume(sdev);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to power up DSP after resume\n");
return ret;
}
/* load the firmware */
ret = snd_sof_load_firmware(sdev);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to load DSP firmware after resume %d\n",
ret);
return ret;
}
/* boot the firmware */
ret = snd_sof_run_firmware(sdev);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to boot DSP firmware after resume %d\n",
ret);
return ret;
}
/* resume DMA trace; only an IPC message needs to be sent */
ret = snd_sof_init_trace_ipc(sdev);
if (ret < 0) {
/* non-fatal */
dev_warn(sdev->dev,
"warning: failed to init trace after resume %d\n",
ret);
}
/* restore pipelines */
ret = sof_restore_pipelines(sdev);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to restore pipeline after resume %d\n",
ret);
return ret;
}
/* notify DSP of system resume */
ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
if (ret < 0)
dev_err(sdev->dev,
"error: ctx_restore ipc error during resume %d\n",
ret);
/* initialize default D0 sub-state */
sdev->d0_substate = SOF_DSP_D0I0;
return ret;
}
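
/*
 * Common suspend handler for system and runtime suspend: release DMA
 * tracing, flag streams for hw_params re-setup on system suspend, ask the
 * firmware to save its PM context and power down the DSP.
 */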
static int sof_suspend(struct device *dev, bool runtime_suspend)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
int ret;
/* do nothing if dsp suspend callback is not set */
if (!sof_ops(sdev)->suspend)
return 0;
/* release trace */
snd_sof_release_trace(sdev);
/* flag streams to have hw_params set up again after system suspend */
if (!runtime_suspend) {
ret = sof_set_hw_params_upon_resume(sdev);
if (ret < 0) {
dev_err(sdev->dev,
"error: setting hw_params flag during suspend %d\n",
ret);
return ret;
}
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
/* cache debugfs contents during runtime suspend */
if (runtime_suspend)
sof_cache_debugfs(sdev);
#endif
/* notify DSP of upcoming power down */
ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
if (ret == -EBUSY || ret == -EAGAIN) {
/*
* runtime PM has logic to handle -EBUSY/-EAGAIN so
* pass these errors up
*/
dev_err(sdev->dev,
"error: ctx_save ipc error during suspend %d\n",
ret);
return ret;
} else if (ret < 0) {
/* FW in unexpected state, continue to power down */
dev_warn(sdev->dev,
"ctx_save ipc error %d, proceeding with suspend\n",
ret);
}
/* power down all DSP cores */
if (runtime_suspend)
ret = snd_sof_dsp_runtime_suspend(sdev);
else
ret = snd_sof_dsp_suspend(sdev);
if (ret < 0)
dev_err(sdev->dev,
"error: failed to power down DSP during suspend %d\n",
ret);
return ret;
}
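
/* Runtime PM callbacks wrapping the common suspend/idle/resume paths */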
int snd_sof_runtime_suspend(struct device *dev)
{
return sof_suspend(dev, true);
}
EXPORT_SYMBOL(snd_sof_runtime_suspend);
int snd_sof_runtime_idle(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
return snd_sof_dsp_runtime_idle(sdev);
}
EXPORT_SYMBOL(snd_sof_runtime_idle);
int snd_sof_runtime_resume(struct device *dev)
{
return sof_resume(dev, true);
}
EXPORT_SYMBOL(snd_sof_runtime_resume);
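
/* Move the DSP between D0 sub-states (e.g. D0i0 and D0i3) via the platform op */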
int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
enum sof_d0_substate d0_substate)
{
int ret;
/* do platform specific set_state */
ret = snd_sof_dsp_set_power_state(sdev, d0_substate);
if (ret < 0)
return ret;
/* update dsp D0 sub-state */
sdev->d0_substate = d0_substate;
return 0;
}
EXPORT_SYMBOL(snd_sof_set_d0_substate);
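
/*
 * System resume: if the system only suspended to S0 (s0_suspend set in
 * snd_sof_prepare()), try to exit D0i3; fall back to a full resume from
 * D3 when D0i3 is not supported.
 */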
int snd_sof_resume(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
int ret;
if (sdev->s0_suspend) {
/* resume from D0I3 */
dev_dbg(sdev->dev, "DSP will exit from D0i3...\n");
ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I0);
if (ret == -ENOTSUPP) {
/* fallback to resume from D3 */
dev_dbg(sdev->dev, "D0i3 not supported, fall back to resume from D3...\n");
goto d3_resume;
} else if (ret < 0) {
dev_err(sdev->dev, "error: failed to exit from D0I3 %d\n",
ret);
return ret;
}
/* platform-specific resume from D0i3 */
return snd_sof_dsp_resume(sdev);
}
d3_resume:
/* resume from D3 */
return sof_resume(dev, false);
}
EXPORT_SYMBOL(snd_sof_resume);
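
/*
 * System suspend: if the target system state is S0, try to enter D0i3;
 * fall back to a full suspend to D3 when D0i3 is not supported.
 */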
int snd_sof_suspend(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
int ret;
if (sdev->s0_suspend) {
/* suspend to D0i3 */
dev_dbg(sdev->dev, "DSP is trying to enter D0i3...\n");
ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I3);
if (ret == -ENOTSUPP) {
/* fallback to D3 suspend */
dev_dbg(sdev->dev, "D0i3 not supported, fall back to D3...\n");
goto d3_suspend;
} else if (ret < 0) {
dev_err(sdev->dev, "error: failed to enter D0I3, %d\n",
ret);
return ret;
}
/* platform-specific suspend to D0i3 */
return snd_sof_dsp_suspend(sdev);
}
d3_suspend:
/* suspend to D3 */
return sof_suspend(dev, false);
}
EXPORT_SYMBOL(snd_sof_suspend);
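
/* Record at prepare time whether the system will stay in ACPI S0 during suspend */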
int snd_sof_prepare(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
#if defined(CONFIG_ACPI)
sdev->s0_suspend = acpi_target_system_state() == ACPI_STATE_S0;
#else
/* will suspend to S3 by default */
sdev->s0_suspend = false;
#endif
return 0;
}
EXPORT_SYMBOL(snd_sof_prepare);
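
/* Suspend was aborted or resume has finished: clear the S0 suspend flag */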
void snd_sof_complete(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
sdev->s0_suspend = false;
}
EXPORT_SYMBOL(snd_sof_complete);