// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright 2021 Alyssa Rosenzweig */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "dcp.h"
#include "dcp-internal.h"
#include "iomfb.h"
#include "iomfb_internal.h"
#include "parser.h"
#include "trace.h"

static int dcp_tx_offset(enum dcp_context_id id)
{
	switch (id) {
	case DCP_CONTEXT_CB:
	case DCP_CONTEXT_CMD:
		return 0x00000;
	case DCP_CONTEXT_OOBCB:
	case DCP_CONTEXT_OOBCMD:
		return 0x08000;
	default:
		return -EINVAL;
	}
}

static int dcp_channel_offset(enum dcp_context_id id)
{
	switch (id) {
	case DCP_CONTEXT_ASYNC:
		return 0x40000;
	case DCP_CONTEXT_OOBASYNC:
		return 0x48000;
	case DCP_CONTEXT_CB:
		return 0x60000;
	case DCP_CONTEXT_OOBCB:
		return 0x68000;
	default:
		return dcp_tx_offset(id);
	}
}

static inline u64 dcpep_set_shmem(u64 dart_va)
{
	return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_SET_SHMEM) |
	       FIELD_PREP(IOMFB_SHMEM_FLAG, IOMFB_SHMEM_FLAG_VALUE) |
	       FIELD_PREP(IOMFB_SHMEM_DVA, dart_va);
}

static inline u64 dcpep_msg(enum dcp_context_id id, u32 length, u16 offset)
{
	return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_MSG) |
	       FIELD_PREP(IOMFB_MSG_CONTEXT, id) |
	       FIELD_PREP(IOMFB_MSG_OFFSET, offset) |
	       FIELD_PREP(IOMFB_MSG_LENGTH, length);
}

static inline u64 dcpep_ack(enum dcp_context_id id)
{
	return dcpep_msg(id, 0, 0) | IOMFB_MSG_ACK;
}

/*
 * A channel is busy if we have sent a message that has yet to be
 * acked. The driver must not send a message to a busy channel.
 */
static bool dcp_channel_busy(struct dcp_channel *ch)
{
	return (ch->depth != 0);
}

/*
 * Get the context ID passed to the DCP for a command we push. The rule is
 * simple: callback contexts are used when replying to the DCP, command
 * contexts are used otherwise. That corresponds to a non/zero call stack
 * depth. This rule frees the caller from tracking the call context manually.
 */
static enum dcp_context_id dcp_call_context(struct apple_dcp *dcp, bool oob)
{
	u8 depth = oob ? dcp->ch_oobcmd.depth : dcp->ch_cmd.depth;

	if (depth)
		return oob ? DCP_CONTEXT_OOBCB : DCP_CONTEXT_CB;
	else
		return oob ? DCP_CONTEXT_OOBCMD : DCP_CONTEXT_CMD;
}
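/*
 * For example: a call pushed while the in-band call stack is empty
 * (ch_cmd.depth == 0) goes out on DCP_CONTEXT_CMD, whereas a call made while
 * servicing a DCP callback (depth > 0) is a reply and uses DCP_CONTEXT_CB.
 * The OOB variants follow the same rule on the out-of-band channels.
 */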
/* Get a channel for a context */
static struct dcp_channel *dcp_get_channel(struct apple_dcp *dcp,
					   enum dcp_context_id context)
{
	switch (context) {
	case DCP_CONTEXT_CB:
		return &dcp->ch_cb;
	case DCP_CONTEXT_CMD:
		return &dcp->ch_cmd;
	case DCP_CONTEXT_OOBCB:
		return &dcp->ch_oobcb;
	case DCP_CONTEXT_OOBCMD:
		return &dcp->ch_oobcmd;
	case DCP_CONTEXT_ASYNC:
		return &dcp->ch_async;
	case DCP_CONTEXT_OOBASYNC:
		return &dcp->ch_oobasync;
	default:
		return NULL;
	}
}

/* Get the start of a packet: after the end of the previous packet */
static u16 dcp_packet_start(struct dcp_channel *ch, u8 depth)
{
	if (depth > 0)
		return ch->end[depth - 1];
	else
		return 0;
}

/* Pushes and pops the depth of the call stack with safety checks */
static u8 dcp_push_depth(u8 *depth)
{
	u8 ret = (*depth)++;

	WARN_ON(ret >= DCP_MAX_CALL_DEPTH);
	return ret;
}

static u8 dcp_pop_depth(u8 *depth)
{
	WARN_ON((*depth) == 0);

	return --(*depth);
}

/* Call a DCP function given by a tag */
void dcp_push(struct apple_dcp *dcp, bool oob,
	      const struct dcp_method_entry *call, u32 in_len, u32 out_len,
	      void *data, dcp_callback_t cb, void *cookie)
{
	enum dcp_context_id context = dcp_call_context(dcp, oob);
	struct dcp_channel *ch = dcp_get_channel(dcp, context);
	struct dcp_packet_header header = {
		.in_len = in_len,
		.out_len = out_len,

		/* Tag is reversed due to endianness of the fourcc */
		.tag[0] = call->tag[3],
		.tag[1] = call->tag[2],
		.tag[2] = call->tag[1],
		.tag[3] = call->tag[0],
	};
	u8 depth = dcp_push_depth(&ch->depth);
	u16 offset = dcp_packet_start(ch, depth);
	void *out = dcp->shmem + dcp_tx_offset(context) + offset;
	void *out_data = out + sizeof(header);
	size_t data_len = sizeof(header) + in_len + out_len;

	memcpy(out, &header, sizeof(header));

	if (in_len > 0)
		memcpy(out_data, data, in_len);

	trace_iomfb_push(dcp, call, context, offset, depth);

	ch->callbacks[depth] = cb;
	ch->cookies[depth] = cookie;
	ch->output[depth] = out + sizeof(header) + in_len;
	ch->end[depth] = offset + ALIGN(data_len, DCP_PACKET_ALIGNMENT);

	dcp_send_message(dcp, IOMFB_ENDPOINT,
			 dcpep_msg(context, data_len, offset));
}

/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
int dcp_parse_tag(char tag[4])
{
	u32 d[3];
	int i;

	if (tag[3] != 'D')
		return -EINVAL;

	for (i = 0; i < 3; ++i) {
		d[i] = (u32)(tag[i] - '0');

		if (d[i] > 9)
			return -EINVAL;
	}

	return d[0] + (d[1] * 10) + (d[2] * 100);
}
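/*
 * Note that callback tags arrive byte-reversed, just like the tags we push:
 * the callback named "D123" is stored as "321D" in the packet header, so the
 * 'D' is checked in tag[3] and the digits are read least-significant first.
 */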
/* Ack a callback from the DCP */
void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context)
{
	struct dcp_channel *ch = dcp_get_channel(dcp, context);

	dcp_pop_depth(&ch->depth);
	dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_ack(context));
}

/*
 * Helper to send a DRM hotplug event. The DCP is accessed from a single
 * (RTKit) thread. To handle hotplug callbacks, we need to call
 * drm_kms_helper_hotplug_event, which does an atomic commit (via DCP) and
 * waits for vblank (a DCP callback). That means we deadlock if we call from
 * the RTKit thread! Instead, move the call to another thread via a workqueue.
 */
void dcp_hotplug(struct work_struct *work)
{
	struct apple_connector *connector;
	struct apple_dcp *dcp;

	connector = container_of(work, struct apple_connector, hotplug_wq);
	dcp = platform_get_drvdata(connector->dcp);

	dev_info(dcp->dev, "%s() connected:%d valid_mode:%d nr_modes:%u\n",
		 __func__, connector->connected, dcp->valid_mode,
		 dcp->nr_modes);

	/*
	 * DCP defers link training until we set a display mode. But we set
	 * display modes from atomic_flush, so userspace needs to trigger a
	 * flush, or the CRTC gets no signal.
	 */
	if (connector->base.state && !dcp->valid_mode && connector->connected)
		drm_connector_set_link_status_property(&connector->base,
						       DRM_MODE_LINK_STATUS_BAD);

	drm_kms_helper_connector_hotplug_event(&connector->base);
}
EXPORT_SYMBOL_GPL(dcp_hotplug);

static void dcpep_handle_cb(struct apple_dcp *dcp, enum dcp_context_id context,
			    void *data, u32 length, u16 offset)
{
	struct device *dev = dcp->dev;
	struct dcp_packet_header *hdr = data;
	void *in, *out;
	int tag = dcp_parse_tag(hdr->tag);
	struct dcp_channel *ch = dcp_get_channel(dcp, context);
	u8 depth;

	if (tag < 0 || tag >= IOMFB_MAX_CB || !dcp->cb_handlers ||
	    !dcp->cb_handlers[tag]) {
		dev_warn(dev, "received unknown callback %c%c%c%c\n",
			 hdr->tag[3], hdr->tag[2], hdr->tag[1], hdr->tag[0]);
		return;
	}

	in = data + sizeof(*hdr);
	out = in + hdr->in_len;

	// TODO: verify that in_len and out_len match our prototypes
	// for now just clear the out data to have at least consistent results
	if (hdr->out_len)
		memset(out, 0, hdr->out_len);

	depth = dcp_push_depth(&ch->depth);
	ch->output[depth] = out;
	ch->end[depth] = offset + ALIGN(length, DCP_PACKET_ALIGNMENT);

	if (dcp->cb_handlers[tag](dcp, tag, out, in))
		dcp_ack(dcp, context);
}

static void dcpep_handle_ack(struct apple_dcp *dcp, enum dcp_context_id context,
			     void *data, u32 length)
{
	struct dcp_packet_header *header = data;
	struct dcp_channel *ch = dcp_get_channel(dcp, context);
	void *cookie;
	dcp_callback_t cb;

	if (!ch) {
		dev_warn(dcp->dev, "ignoring ack on context %X\n", context);
		return;
	}

	dcp_pop_depth(&ch->depth);

	cb = ch->callbacks[ch->depth];
	cookie = ch->cookies[ch->depth];

	ch->callbacks[ch->depth] = NULL;
	ch->cookies[ch->depth] = NULL;

	if (cb)
		cb(dcp, data + sizeof(*header) + header->in_len, cookie);
}

static void dcpep_got_msg(struct apple_dcp *dcp, u64 message)
{
	enum dcp_context_id ctx_id;
	u16 offset;
	u32 length;
	int channel_offset;
	void *data;

	ctx_id = FIELD_GET(IOMFB_MSG_CONTEXT, message);
	offset = FIELD_GET(IOMFB_MSG_OFFSET, message);
	length = FIELD_GET(IOMFB_MSG_LENGTH, message);

	channel_offset = dcp_channel_offset(ctx_id);

	if (channel_offset < 0) {
		dev_warn(dcp->dev, "invalid context received %u\n", ctx_id);
		return;
	}

	data = dcp->shmem + channel_offset + offset;

	if (FIELD_GET(IOMFB_MSG_ACK, message))
		dcpep_handle_ack(dcp, ctx_id, data, length);
	else
		dcpep_handle_cb(dcp, ctx_id, data, length, offset);
}
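/*
 * In short, an incoming message locates a packet at
 * dcp_channel_offset(context) + offset in shared memory: an ack completes a
 * call we previously pushed on that channel, anything else is a new callback
 * from the DCP dispatched through dcp->cb_handlers[].
 */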
/*
 * DRM specifies rectangles as start and end coordinates. DCP specifies
 * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
 * to a DCP rectangle.
 */
struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect)
{
	return (struct dcp_rect){ .x = rect->x1,
				  .y = rect->y1,
				  .w = drm_rect_width(rect),
				  .h = drm_rect_height(rect) };
}

u32 drm_format_to_dcp(u32 drm)
{
	switch (drm) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return fourcc_code('A', 'R', 'G', 'B');

	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return fourcc_code('A', 'B', 'G', 'R');

	case DRM_FORMAT_XRGB2101010:
		return fourcc_code('r', '0', '3', 'w');
	}

	pr_warn("DRM format %X not supported in DCP\n", drm);
	return 0;
}

int dcp_get_modes(struct drm_connector *connector)
{
	struct apple_connector *apple_connector = to_apple_connector(connector);
	struct platform_device *pdev = apple_connector->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode;
	int i;

	for (i = 0; i < dcp->nr_modes; ++i) {
		mode = drm_mode_duplicate(dev, &dcp->modes[i].mode);

		if (!mode) {
			dev_err(dev->dev, "Failed to duplicate display mode\n");
			return 0;
		}

		drm_mode_probed_add(connector, mode);
	}

	return dcp->nr_modes;
}
EXPORT_SYMBOL_GPL(dcp_get_modes);

/* The user may own drm_display_mode, so we need to search for our copy */
struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
				     const struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < dcp->nr_modes; ++i) {
		if (drm_mode_match(mode, &dcp->modes[i].mode,
				   DRM_MODE_MATCH_TIMINGS |
				   DRM_MODE_MATCH_CLOCK))
			return &dcp->modes[i];
	}

	return NULL;
}

int dcp_mode_valid(struct drm_connector *connector,
		   struct drm_display_mode *mode)
{
	struct apple_connector *apple_connector = to_apple_connector(connector);
	struct platform_device *pdev = apple_connector->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	return lookup_mode(dcp, mode) ? MODE_OK : MODE_BAD;
}
EXPORT_SYMBOL_GPL(dcp_mode_valid);

int dcp_crtc_atomic_modeset(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
	struct apple_dcp *dcp = platform_get_drvdata(apple_crtc->dcp);
	struct drm_crtc_state *crtc_state;
	int ret = -EIO;
	bool modeset;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (!crtc_state)
		return 0;

	modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
	if (!modeset)
		return 0;

	/* ignore no mode, poweroff is handled elsewhere */
	if (crtc_state->mode.hdisplay == 0 && crtc_state->mode.vdisplay == 0)
		return 0;

	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		ret = iomfb_modeset_v12_3(dcp, crtc_state);
		break;
	case DCP_FIRMWARE_V_13_5:
		ret = iomfb_modeset_v13_3(dcp, crtc_state);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n",
			  dcp->fw_compat);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dcp_crtc_atomic_modeset);

bool dcp_crtc_mode_fixup(struct drm_crtc *crtc,
			 const struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{
	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
	struct platform_device *pdev = apple_crtc->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	/* TODO: support synthesized modes through scaling */
	return lookup_mode(dcp, mode) != NULL;
}
EXPORT_SYMBOL(dcp_crtc_mode_fixup);
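/*
 * dcp_crtc_atomic_modeset() above and dcp_flush(), iomfb_start() and
 * iomfb_shutdown() below all dispatch on dcp->fw_compat, since the IOMFB call
 * interface differs between firmware releases. Note that DCP_FIRMWARE_V_13_5
 * is serviced by the iomfb_*_v13_3() entry points.
 */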
void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	if (dcp_channel_busy(&dcp->ch_cmd)) {
		dev_err(dcp->dev, "unexpected busy command channel\n");
		/*
		 * HACK: issue a delayed vblank event to avoid timeouts in
		 * drm_atomic_helper_wait_for_vblanks().
		 */
		schedule_work(&dcp->vblank_wq);
		return;
	}

	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		iomfb_flush_v12_3(dcp, crtc, state);
		break;
	case DCP_FIRMWARE_V_13_5:
		iomfb_flush_v13_3(dcp, crtc, state);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n",
			  dcp->fw_compat);
		break;
	}
}
EXPORT_SYMBOL_GPL(dcp_flush);

static void iomfb_start(struct apple_dcp *dcp)
{
	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		iomfb_start_v12_3(dcp);
		break;
	case DCP_FIRMWARE_V_13_5:
		iomfb_start_v13_3(dcp);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n",
			  dcp->fw_compat);
		break;
	}
}

bool dcp_is_initialized(struct platform_device *pdev)
{
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	return dcp->active;
}
EXPORT_SYMBOL_GPL(dcp_is_initialized);

void iomfb_recv_msg(struct apple_dcp *dcp, u64 message)
{
	enum dcpep_type type = FIELD_GET(IOMFB_MESSAGE_TYPE, message);

	if (type == IOMFB_MESSAGE_TYPE_INITIALIZED)
		iomfb_start(dcp);
	else if (type == IOMFB_MESSAGE_TYPE_MSG)
		dcpep_got_msg(dcp, message);
	else
		dev_warn(dcp->dev, "Ignoring unknown message %llx\n", message);
}

int iomfb_start_rtkit(struct apple_dcp *dcp)
{
	dma_addr_t shmem_iova;

	apple_rtkit_start_ep(dcp->rtk, IOMFB_ENDPOINT);

	dcp->shmem = dma_alloc_coherent(dcp->dev, DCP_SHMEM_SIZE, &shmem_iova,
					GFP_KERNEL);
	if (!dcp->shmem)
		return -ENOMEM;

	dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_set_shmem(shmem_iova));

	return 0;
}

void iomfb_shutdown(struct apple_dcp *dcp)
{
	/* We're going down */
	dcp->active = false;
	dcp->valid_mode = false;

	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		iomfb_shutdown_v12_3(dcp);
		break;
	case DCP_FIRMWARE_V_13_5:
		iomfb_shutdown_v13_3(dcp);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n",
			  dcp->fw_compat);
		break;
	}
}