diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst index 35d673bf9b5695..2a6bc4cec0bdc5 100644 --- a/Documentation/gpu/index.rst +++ b/Documentation/gpu/index.rst @@ -19,6 +19,8 @@ Linux GPU Driver Developer's Guide vga-switcheroo vgaarbiter bridge/dw-hdmi + xen-front + xen-zcopy todo .. only:: subproject and html diff --git a/Documentation/gpu/xen-front.rst b/Documentation/gpu/xen-front.rst new file mode 100644 index 00000000000000..d988da7d198322 --- /dev/null +++ b/Documentation/gpu/xen-front.rst @@ -0,0 +1,31 @@ +==================================================== + drm/xen-front Xen para-virtualized frontend driver +==================================================== + +This frontend driver implements Xen para-virtualized display +according to the display protocol described in +include/xen/interface/io/displif.h + +Driver modes of operation in terms of display buffers used +========================================================== + +.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h + :doc: Driver modes of operation in terms of display buffers used + +Buffers allocated by the frontend driver +---------------------------------------- + +.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h + :doc: Buffers allocated by the frontend driver + +Buffers allocated by the backend +-------------------------------- + +.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h + :doc: Buffers allocated by the backend + +Driver limitations +================== + +.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h + :doc: Driver limitations diff --git a/Documentation/gpu/xen-zcopy.rst b/Documentation/gpu/xen-zcopy.rst new file mode 100644 index 00000000000000..86447b0cc53479 --- /dev/null +++ b/Documentation/gpu/xen-zcopy.rst @@ -0,0 +1,32 @@ +============================================== + drm/xen-zcopy Xen zero-copy helper DRM driver +============================================== + +This helper driver allows implementing zero-copy use-cases +when using the Xen para-virtualized frontend display driver: + + - a dumb buffer created on the backend's side can be shared + with the Xen PV frontend driver, so the frontend writes directly + into the backend domain's memory (into the buffer exported from + the DRM/KMS driver of a physical display device) + - a dumb buffer allocated by the frontend can be imported + into the physical device's DRM/KMS driver, likewise + avoiding any copying + +DRM_XEN_ZCOPY_DUMB_FROM_REFS IOCTL ================================== + +.. kernel-doc:: include/uapi/drm/xen_zcopy_drm.h + :doc: DRM_XEN_ZCOPY_DUMB_FROM_REFS + +DRM_XEN_ZCOPY_DUMB_TO_REFS IOCTL ================================ + +.. kernel-doc:: include/uapi/drm/xen_zcopy_drm.h + :doc: DRM_XEN_ZCOPY_DUMB_TO_REFS + +DRM_XEN_ZCOPY_DUMB_WAIT_FREE IOCTL ================================== + ..
kernel-doc:: include/uapi/drm/xen_zcopy_drm.h + :doc: DRM_XEN_ZCOPY_DUMB_WAIT_FREE diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index e0f74ddc22b73c..594c228d2f0211 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -195,6 +195,7 @@ struct bus_type amba_bustype = { .match = amba_match, .uevent = amba_uevent, .pm = &amba_pm, + .force_dma = true, }; static int __init amba_init(void) diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9045c5f3734e8d..c203fb90c1a01d 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1143,6 +1143,7 @@ struct bus_type platform_bus_type = { .match = platform_match, .uevent = platform_uevent, .pm = &platform_dev_pm_ops, + .force_dma = true, }; EXPORT_SYMBOL_GPL(platform_bus_type); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 83cb2a88c204f7..22faa65368e7b8 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -278,6 +278,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig" source "drivers/gpu/drm/pl111/Kconfig" +source "drivers/gpu/drm/xen/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 8ce07039bb898a..b6223fbf2316dc 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -101,3 +101,4 @@ obj-$(CONFIG_DRM_ZTE) += zte/ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ +obj-$(CONFIG_DRM_XEN) += xen/ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index be38ac7050d473..66ceb0edad2c00 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -74,6 +75,8 @@ static bool drm_core_init_complete = false; static struct dentry *drm_debugfs_root; +DEFINE_STATIC_SRCU(drm_unplug_srcu); + #define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV" void drm_dev_printk(const struct device *dev, const char *level, @@ -364,18 +367,51 @@ void drm_put_dev(struct drm_device *dev) } EXPORT_SYMBOL(drm_put_dev); -static void drm_device_set_unplugged(struct drm_device *dev) +/** + * drm_dev_enter - Enter device critical section + * @dev: DRM device + * @idx: Pointer to index that will be passed to the matching drm_dev_exit() + * + * This function marks and protects the beginning of a section that should not + * be entered after the device has been unplugged. The section end is marked + * with drm_dev_exit(). Calls to this function can be nested. + * + * Returns: + * True if it is OK to enter the section, false otherwise. + */ +bool drm_dev_enter(struct drm_device *dev, int *idx) +{ + *idx = srcu_read_lock(&drm_unplug_srcu); + + if (dev->unplugged) { + srcu_read_unlock(&drm_unplug_srcu, *idx); + return false; + } + + return true; +} +EXPORT_SYMBOL(drm_dev_enter); + +/** + * drm_dev_exit - Exit device critical section + * @idx: index returned from drm_dev_enter() + * + * This function marks the end of a section that should not be entered after + * the device has been unplugged. + */ +void drm_dev_exit(int idx) { - smp_wmb(); - atomic_set(&dev->unplugged, 1); + srcu_read_unlock(&drm_unplug_srcu, idx); } +EXPORT_SYMBOL(drm_dev_exit); /** * drm_dev_unplug - unplug a DRM device * @dev: DRM device * * This unplugs a hotpluggable DRM device, which makes it inaccessible to - * userspace operations. Entry-points can use drm_dev_is_unplugged(). This + * userspace operations. 
Entry-points can use drm_dev_enter() and + * drm_dev_exit() to protect device resources in a race free manner. This * essentially unregisters the device like drm_dev_unregister(), but can be * called while there are still open users of @dev. */ @@ -384,10 +420,18 @@ void drm_dev_unplug(struct drm_device *dev) drm_dev_unregister(dev); mutex_lock(&drm_global_mutex); - drm_device_set_unplugged(dev); if (dev->open_count == 0) drm_dev_unref(dev); mutex_unlock(&drm_global_mutex); + + /* + * After synchronizing any critical read section is guaranteed to see + * the new value of ->unplugged, and any critical section which might + * still have seen the old value of ->unplugged is guaranteed to have + * finished. + */ + dev->unplugged = true; + synchronize_srcu(&drm_unplug_srcu); } EXPORT_SYMBOL(drm_dev_unplug); diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index dc9fd109de14f0..1f7b9166b9f6ec 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -34,6 +34,20 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = { .destroy = drm_encoder_cleanup, }; +static enum drm_mode_status +drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->mode_valid) + /* Anything goes */ + return MODE_OK; + + return pipe->funcs->mode_valid(crtc, mode); +} + static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -50,13 +64,15 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { + struct drm_plane *plane; struct drm_simple_display_pipe *pipe; pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); if (!pipe->funcs || !pipe->funcs->enable) return; - pipe->funcs->enable(pipe, crtc->state); + plane = &pipe->plane; + pipe->funcs->enable(pipe, crtc->state, plane->state); } static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc, @@ -72,6 +88,7 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { + .mode_valid = drm_simple_kms_crtc_mode_valid, .atomic_check = drm_simple_kms_crtc_check, .atomic_enable = drm_simple_kms_crtc_enable, .atomic_disable = drm_simple_kms_crtc_disable, @@ -97,11 +114,11 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, pipe = container_of(plane, struct drm_simple_display_pipe, plane); crtc_state = drm_atomic_get_new_crtc_state(plane_state->state, &pipe->crtc); - if (!crtc_state->enable) - return 0; /* nothing to check when disabling or disabled */ - clip.x2 = crtc_state->adjusted_mode.hdisplay; - clip.y2 = crtc_state->adjusted_mode.vdisplay; + if (crtc_state) { + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + } ret = drm_plane_helper_check_state(plane_state, &clip, DRM_PLANE_HELPER_NO_SCALING, @@ -111,7 +128,9 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, return ret; if (!plane_state->visible) - return -EINVAL; + return 0; + + drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); if (!pipe->funcs || !pipe->funcs->check) return 0; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 7fbad9cb656e70..c31f9749d7cfd4 
100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -98,7 +98,8 @@ static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = { }; static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state) + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) { struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index b58c988d9da011..07265b3d1646d1 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -86,7 +86,8 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe, } static void pl111_display_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *cstate) + struct drm_crtc_state *cstate, + struct drm_plane_state *plane_state) { struct drm_crtc *crtc = &pipe->crtc; struct drm_plane *plane = &pipe->plane; diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c index 30dc97b3ff2188..cff6be392afd22 100644 --- a/drivers/gpu/drm/tinydrm/repaper.c +++ b/drivers/gpu/drm/tinydrm/repaper.c @@ -659,7 +659,8 @@ static void power_off(struct repaper_epd *epd) } static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state) + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) { struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); struct repaper_epd *epd = epd_from_tinydrm(tdev); diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c index b439956a07f417..9ca9579cea5343 100644 --- a/drivers/gpu/drm/tinydrm/st7586.c +++ b/drivers/gpu/drm/tinydrm/st7586.c @@ -173,7 +173,8 @@ static const struct drm_framebuffer_funcs st7586_fb_funcs = { }; static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state) + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) { struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig new file mode 100644 index 00000000000000..ddeff9182e0e58 --- /dev/null +++ b/drivers/gpu/drm/xen/Kconfig @@ -0,0 +1,42 @@ +config DRM_XEN + bool "DRM Support for Xen guest OS" + depends on XEN + help + Choose this option if you want to enable DRM support + for Xen. + +choice + prompt "Xen DRM drivers selection" + depends on DRM_XEN + +config DRM_XEN_FRONTEND + tristate "Para-virtualized frontend driver for Xen guest OS" + depends on DRM_XEN + depends on DRM + select DRM_KMS_HELPER + select VIDEOMODE_HELPERS + select XEN_XENBUS_FRONTEND + help + Choose this option if you want to enable a para-virtualized + frontend DRM/KMS driver for Xen guest OSes. + +config DRM_XEN_ZCOPY + tristate "Zero-copy helper DRM driver for Xen" + depends on DRM_XEN + depends on DRM + select DRM_KMS_HELPER + help + Choose this option if you want to enable a zero-copy + helper DRM driver for Xen. This is implemented by mapping + foreign display buffer pages into the current domain and + exporting a dumb buffer via the PRIME interface. This allows + driver domains to use buffers of unprivileged guests without + additional memory copying. + +config DRM_XEN_ZCOPY_CMA + bool "Use CMA to allocate buffers" + depends on DRM_XEN_ZCOPY + help + Use CMA to allocate display buffers.
+ +endchoice diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile new file mode 100644 index 00000000000000..5dabc429be7a8b --- /dev/null +++ b/drivers/gpu/drm/xen/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 OR MIT + +drm_xen_front-objs := xen_drm_front.o \ + xen_drm_front_kms.o \ + xen_drm_front_conn.o \ + xen_drm_front_evtchnl.o \ + xen_drm_front_shbuf.o \ + xen_drm_front_cfg.o \ + xen_drm_front_gem.o + +obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o + +drm_xen_zcopy-objs := xen_drm_zcopy.o \ + xen_drm_zcopy_balloon.o + +obj-$(CONFIG_DRM_XEN_ZCOPY) += drm_xen_zcopy.o diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c new file mode 100644 index 00000000000000..1b0ea9ac330ebb --- /dev/null +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -0,0 +1,840 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +/* + * Xen para-virtual DRM device + * + * Copyright (C) 2016-2018 EPAM Systems Inc. + * + * Author: Oleksandr Andrushchenko + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include + +#include "xen_drm_front.h" +#include "xen_drm_front_cfg.h" +#include "xen_drm_front_evtchnl.h" +#include "xen_drm_front_gem.h" +#include "xen_drm_front_kms.h" +#include "xen_drm_front_shbuf.h" + +struct xen_drm_front_dbuf { + struct list_head list; + u64 dbuf_cookie; + u64 fb_cookie; + struct xen_drm_front_shbuf *shbuf; +}; + +static int dbuf_add_to_list(struct xen_drm_front_info *front_info, + struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie) +{ + struct xen_drm_front_dbuf *dbuf; + + dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL); + if (!dbuf) + return -ENOMEM; + + dbuf->dbuf_cookie = dbuf_cookie; + dbuf->shbuf = shbuf; + list_add(&dbuf->list, &front_info->dbuf_list); + return 0; +} + +static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list, + u64 dbuf_cookie) +{ + struct xen_drm_front_dbuf *buf, *q; + + list_for_each_entry_safe(buf, q, dbuf_list, list) + if (buf->dbuf_cookie == dbuf_cookie) + return buf; + + return NULL; +} + +static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie) +{ + struct xen_drm_front_dbuf *buf, *q; + + list_for_each_entry_safe(buf, q, dbuf_list, list) + if (buf->fb_cookie == fb_cookie) + xen_drm_front_shbuf_flush(buf->shbuf); +} + +static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie) +{ + struct xen_drm_front_dbuf *buf, *q; + + list_for_each_entry_safe(buf, q, dbuf_list, list) + if (buf->dbuf_cookie == dbuf_cookie) { + list_del(&buf->list); + xen_drm_front_shbuf_unmap(buf->shbuf); + xen_drm_front_shbuf_free(buf->shbuf); + kfree(buf); + break; + } +} + +static void dbuf_free_all(struct list_head *dbuf_list) +{ + struct xen_drm_front_dbuf *buf, *q; + + list_for_each_entry_safe(buf, q, dbuf_list, list) { + list_del(&buf->list); + xen_drm_front_shbuf_unmap(buf->shbuf); + xen_drm_front_shbuf_free(buf->shbuf); + kfree(buf); + } +} + +static struct xendispl_req * +be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation) +{ + struct xendispl_req *req; + + req = RING_GET_REQUEST(&evtchnl->u.req.ring, + evtchnl->u.req.ring.req_prod_pvt); + req->operation = operation; + req->id = evtchnl->evt_next_id++; + evtchnl->evt_id = req->id; + return req; +} + +static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl, + struct xendispl_req *req) +{ + reinit_completion(&evtchnl->u.req.completion); + if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED)) + return -EIO; + + xen_drm_front_evtchnl_flush(evtchnl); + return 0; +} + 
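Every request path in this file follows the same shape: be_stream_do_io() above only posts the request to the shared ring, while be_stream_wait_io() below blocks until the backend's response arrives (the completion and resp_status are presumably filled in by the event channel handling code in xen_drm_front_evtchnl.c, which is not part of this hunk). Callers serialize whole request/response transactions with the req_io_lock mutex and protect ring manipulation with the io_lock spinlock. A condensed sketch of that pattern, using a hypothetical helper name (compare with xen_drm_front_fb_detach() further down)::

    /* Hypothetical helper illustrating the request flow used by all ops. */
    static int be_do_request_and_wait(struct xen_drm_front_info *front_info,
                                      struct xen_drm_front_evtchnl *evtchnl,
                                      u8 op)
    {
            struct xendispl_req *req;
            unsigned long flags;
            int ret;

            /* serialize whole request/response transactions */
            mutex_lock(&evtchnl->u.req.req_io_lock);

            /* the ring itself is shared with the interrupt handler */
            spin_lock_irqsave(&front_info->io_lock, flags);
            req = be_prepare_req(evtchnl, op);
            /* fill the op-specific fields of req->op here */
            ret = be_stream_do_io(evtchnl, req);
            spin_unlock_irqrestore(&front_info->io_lock, flags);

            /* wait for the backend's response outside of the spinlock */
            if (ret == 0)
                    ret = be_stream_wait_io(evtchnl);

            mutex_unlock(&evtchnl->u.req.req_io_lock);
            return ret;
    }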
+static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl) +{ + if (wait_for_completion_timeout(&evtchnl->u.req.completion, + msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0) + return -ETIMEDOUT; + + return evtchnl->u.req.resp_status; +} + +int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, + u32 x, u32 y, u32 width, u32 height, + u32 bpp, u64 fb_cookie) +{ + struct xen_drm_front_evtchnl *evtchnl; + struct xen_drm_front_info *front_info; + struct xendispl_req *req; + unsigned long flags; + int ret; + + front_info = pipeline->drm_info->front_info; + evtchnl = &front_info->evt_pairs[pipeline->index].req; + if (unlikely(!evtchnl)) + return -EIO; + + mutex_lock(&evtchnl->u.req.req_io_lock); + + spin_lock_irqsave(&front_info->io_lock, flags); + req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG); + req->op.set_config.x = x; + req->op.set_config.y = y; + req->op.set_config.width = width; + req->op.set_config.height = height; + req->op.set_config.bpp = bpp; + req->op.set_config.fb_cookie = fb_cookie; + + ret = be_stream_do_io(evtchnl, req); + spin_unlock_irqrestore(&front_info->io_lock, flags); + + if (ret == 0) + ret = be_stream_wait_io(evtchnl); + + mutex_unlock(&evtchnl->u.req.req_io_lock); + return ret; +} + +int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, + u64 dbuf_cookie, u32 width, u32 height, + u32 bpp, u64 size, struct page **pages) +{ + struct xen_drm_front_evtchnl *evtchnl; + struct xen_drm_front_shbuf *shbuf; + struct xendispl_req *req; + struct xen_drm_front_shbuf_cfg buf_cfg; + unsigned long flags; + int ret; + + evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; + if (unlikely(!evtchnl)) + return -EIO; + + memset(&buf_cfg, 0, sizeof(buf_cfg)); + buf_cfg.xb_dev = front_info->xb_dev; + buf_cfg.pages = pages; + buf_cfg.size = size; + buf_cfg.be_alloc = front_info->cfg.be_alloc; + + shbuf = xen_drm_front_shbuf_alloc(&buf_cfg); + if (!shbuf) + return -ENOMEM; + + ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie); + if (ret < 0) { + xen_drm_front_shbuf_free(shbuf); + return ret; + } + + mutex_lock(&evtchnl->u.req.req_io_lock); + + spin_lock_irqsave(&front_info->io_lock, flags); + req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE); + req->op.dbuf_create.gref_directory = + xen_drm_front_shbuf_get_dir_start(shbuf); + req->op.dbuf_create.buffer_sz = size; + req->op.dbuf_create.dbuf_cookie = dbuf_cookie; + req->op.dbuf_create.width = width; + req->op.dbuf_create.height = height; + req->op.dbuf_create.bpp = bpp; + if (buf_cfg.be_alloc) + req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC; + + ret = be_stream_do_io(evtchnl, req); + spin_unlock_irqrestore(&front_info->io_lock, flags); + + if (ret < 0) + goto fail; + + ret = be_stream_wait_io(evtchnl); + if (ret < 0) + goto fail; + + ret = xen_drm_front_shbuf_map(shbuf); + if (ret < 0) + goto fail; + + mutex_unlock(&evtchnl->u.req.req_io_lock); + return 0; + +fail: + mutex_unlock(&evtchnl->u.req.req_io_lock); + dbuf_free(&front_info->dbuf_list, dbuf_cookie); + return ret; +} + +static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info, + u64 dbuf_cookie) +{ + struct xen_drm_front_evtchnl *evtchnl; + struct xendispl_req *req; + unsigned long flags; + bool be_alloc; + int ret; + + evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; + if (unlikely(!evtchnl)) + return -EIO; + + be_alloc = front_info->cfg.be_alloc; + + /* + * For the backend allocated buffer release references now, so backend + * can free the buffer. 
+ */ + if (be_alloc) + dbuf_free(&front_info->dbuf_list, dbuf_cookie); + + mutex_lock(&evtchnl->u.req.req_io_lock); + + spin_lock_irqsave(&front_info->io_lock, flags); + req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY); + req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie; + + ret = be_stream_do_io(evtchnl, req); + spin_unlock_irqrestore(&front_info->io_lock, flags); + + if (ret == 0) + ret = be_stream_wait_io(evtchnl); + + /* + * Do this regardless of communication status with the backend: + * if we cannot remove remote resources remove what we can locally. + */ + if (!be_alloc) + dbuf_free(&front_info->dbuf_list, dbuf_cookie); + + mutex_unlock(&evtchnl->u.req.req_io_lock); + return ret; +} + +int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info, + u64 dbuf_cookie, u64 fb_cookie, u32 width, + u32 height, u32 pixel_format) +{ + struct xen_drm_front_evtchnl *evtchnl; + struct xen_drm_front_dbuf *buf; + struct xendispl_req *req; + unsigned long flags; + int ret; + + evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; + if (unlikely(!evtchnl)) + return -EIO; + + buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie); + if (!buf) + return -EINVAL; + + buf->fb_cookie = fb_cookie; + + mutex_lock(&evtchnl->u.req.req_io_lock); + + spin_lock_irqsave(&front_info->io_lock, flags); + req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH); + req->op.fb_attach.dbuf_cookie = dbuf_cookie; + req->op.fb_attach.fb_cookie = fb_cookie; + req->op.fb_attach.width = width; + req->op.fb_attach.height = height; + req->op.fb_attach.pixel_format = pixel_format; + + ret = be_stream_do_io(evtchnl, req); + spin_unlock_irqrestore(&front_info->io_lock, flags); + + if (ret == 0) + ret = be_stream_wait_io(evtchnl); + + mutex_unlock(&evtchnl->u.req.req_io_lock); + return ret; +} + +int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info, + u64 fb_cookie) +{ + struct xen_drm_front_evtchnl *evtchnl; + struct xendispl_req *req; + unsigned long flags; + int ret; + + evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; + if (unlikely(!evtchnl)) + return -EIO; + + mutex_lock(&evtchnl->u.req.req_io_lock); + + spin_lock_irqsave(&front_info->io_lock, flags); + req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH); + req->op.fb_detach.fb_cookie = fb_cookie; + + ret = be_stream_do_io(evtchnl, req); + spin_unlock_irqrestore(&front_info->io_lock, flags); + + if (ret == 0) + ret = be_stream_wait_io(evtchnl); + + mutex_unlock(&evtchnl->u.req.req_io_lock); + return ret; +} + +int xen_drm_front_page_flip(struct xen_drm_front_info *front_info, + int conn_idx, u64 fb_cookie) +{ + struct xen_drm_front_evtchnl *evtchnl; + struct xendispl_req *req; + unsigned long flags; + int ret; + + if (unlikely(conn_idx >= front_info->num_evt_pairs)) + return -EINVAL; + + dbuf_flush_fb(&front_info->dbuf_list, fb_cookie); + evtchnl = &front_info->evt_pairs[conn_idx].req; + + mutex_lock(&evtchnl->u.req.req_io_lock); + + spin_lock_irqsave(&front_info->io_lock, flags); + req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP); + req->op.pg_flip.fb_cookie = fb_cookie; + + ret = be_stream_do_io(evtchnl, req); + spin_unlock_irqrestore(&front_info->io_lock, flags); + + if (ret == 0) + ret = be_stream_wait_io(evtchnl); + + mutex_unlock(&evtchnl->u.req.req_io_lock); + return ret; +} + +void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info, + int conn_idx, u64 fb_cookie) +{ + struct xen_drm_front_drm_info *drm_info = front_info->drm_info; + + if (unlikely(conn_idx >= front_info->cfg.num_connectors)) + return; + + 
xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx], + fb_cookie); +} + +static int xen_drm_drv_dumb_create(struct drm_file *filp, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct xen_drm_front_drm_info *drm_info = dev->dev_private; + struct drm_gem_object *obj; + int ret; + + /* + * Dumb creation is a two stage process: first we create a fully + * constructed GEM object which is communicated to the backend, and + * only after that we can create GEM's handle. This is done so, + * because of the possible races: once you create a handle it becomes + * immediately visible to user-space, so the latter can try accessing + * object without pages etc. + * For details also see drm_gem_handle_create + */ + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + args->size = args->pitch * args->height; + + obj = xen_drm_front_gem_create(dev, args->size); + if (IS_ERR_OR_NULL(obj)) { + ret = PTR_ERR(obj); + goto fail; + } + + ret = xen_drm_front_dbuf_create(drm_info->front_info, + xen_drm_front_dbuf_to_cookie(obj), + args->width, args->height, args->bpp, + args->size, + xen_drm_front_gem_get_pages(obj)); + if (ret) + goto fail_backend; + + /* This is the tail of GEM object creation */ + ret = drm_gem_handle_create(filp, obj, &args->handle); + if (ret) + goto fail_handle; + + /* Drop reference from allocate - handle holds it now */ + drm_gem_object_put_unlocked(obj); + return 0; + +fail_handle: + xen_drm_front_dbuf_destroy(drm_info->front_info, + xen_drm_front_dbuf_to_cookie(obj)); +fail_backend: + /* drop reference from allocate */ + drm_gem_object_put_unlocked(obj); +fail: + DRM_ERROR("Failed to create dumb buffer: %d\n", ret); + return ret; +} + +static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj) +{ + struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private; + int idx; + + if (drm_dev_enter(obj->dev, &idx)) { + xen_drm_front_dbuf_destroy(drm_info->front_info, + xen_drm_front_dbuf_to_cookie(obj)); + drm_dev_exit(idx); + } else { + dbuf_free(&drm_info->front_info->dbuf_list, + xen_drm_front_dbuf_to_cookie(obj)); + } + + xen_drm_front_gem_free_object_unlocked(obj); +} + +static void xen_drm_drv_release(struct drm_device *dev) +{ + struct xen_drm_front_drm_info *drm_info = dev->dev_private; + struct xen_drm_front_info *front_info = drm_info->front_info; + + xen_drm_front_kms_fini(drm_info); + + drm_atomic_helper_shutdown(dev); + drm_mode_config_cleanup(dev); + + drm_dev_fini(dev); + kfree(dev); + + if (front_info->cfg.be_alloc) + xenbus_switch_state(front_info->xb_dev, + XenbusStateInitialising); + + kfree(drm_info); +} + +static const struct file_operations xen_drm_dev_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = xen_drm_front_gem_mmap, +}; + +static const struct vm_operations_struct xen_drm_drv_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static struct drm_driver xen_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | + DRIVER_PRIME | DRIVER_ATOMIC, + .release = xen_drm_drv_release, + .gem_vm_ops = &xen_drm_drv_vm_ops, + .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + 
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, + .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table, + .gem_prime_vmap = xen_drm_front_gem_prime_vmap, + .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap, + .gem_prime_mmap = xen_drm_front_gem_prime_mmap, + .dumb_create = xen_drm_drv_dumb_create, + .fops = &xen_drm_dev_fops, + .name = "xendrm-du", + .desc = "Xen PV DRM Display Unit", + .date = "20180221", + .major = 1, + .minor = 0, + +}; + +static int xen_drm_drv_init(struct xen_drm_front_info *front_info) +{ + struct device *dev = &front_info->xb_dev->dev; + struct xen_drm_front_drm_info *drm_info; + struct drm_device *drm_dev; + int ret; + + DRM_INFO("Creating %s\n", xen_drm_driver.desc); + + drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL); + if (!drm_info) { + ret = -ENOMEM; + goto fail; + } + + drm_info->front_info = front_info; + front_info->drm_info = drm_info; + + drm_dev = drm_dev_alloc(&xen_drm_driver, dev); + if (IS_ERR(drm_dev)) { + ret = PTR_ERR(drm_dev); + goto fail; + } + + drm_info->drm_dev = drm_dev; + + drm_dev->dev_private = drm_info; + + ret = xen_drm_front_kms_init(drm_info); + if (ret) { + DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret); + goto fail_modeset; + } + + ret = drm_dev_register(drm_dev, 0); + if (ret) + goto fail_modeset; + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + xen_drm_driver.name, xen_drm_driver.major, + xen_drm_driver.minor, xen_drm_driver.patchlevel, + xen_drm_driver.date, drm_dev->primary->index); + + return 0; + +fail_modeset: + drm_kms_helper_poll_fini(drm_dev); + drm_mode_config_cleanup(drm_dev); +fail: + kfree(drm_info); + return ret; +} + +static void xen_drm_drv_fini(struct xen_drm_front_info *front_info) +{ + struct xen_drm_front_drm_info *drm_info = front_info->drm_info; + struct drm_device *dev; + + if (!drm_info) + return; + + dev = drm_info->drm_dev; + if (!dev) + return; + + /* Nothing to do if device is already unplugged */ + if (drm_dev_is_unplugged(dev)) + return; + + drm_kms_helper_poll_fini(dev); + drm_dev_unplug(dev); + + front_info->drm_info = NULL; + + xen_drm_front_evtchnl_free_all(front_info); + dbuf_free_all(&front_info->dbuf_list); + + /* + * If we are not using backend allocated buffers, then tell the + * backend we are ready to (re)initialize. Otherwise, wait for + * drm_driver.release. + */ + if (!front_info->cfg.be_alloc) + xenbus_switch_state(front_info->xb_dev, + XenbusStateInitialising); +} + +static int displback_initwait(struct xen_drm_front_info *front_info) +{ + struct xen_drm_front_cfg *cfg = &front_info->cfg; + int ret; + + cfg->front_info = front_info; + ret = xen_drm_front_cfg_card(front_info, cfg); + if (ret < 0) + return ret; + + DRM_INFO("Have %d connector(s)\n", cfg->num_connectors); + /* Create event channels for all connectors and publish */ + ret = xen_drm_front_evtchnl_create_all(front_info); + if (ret < 0) + return ret; + + return xen_drm_front_evtchnl_publish_all(front_info); +} + +static int displback_connect(struct xen_drm_front_info *front_info) +{ + xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED); + return xen_drm_drv_init(front_info); +} + +static void displback_disconnect(struct xen_drm_front_info *front_info) +{ + if (!front_info->drm_info) + return; + + /* Tell the backend to wait until we release the DRM driver.
*/ + xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring); + + xen_drm_drv_fini(front_info); +} + +static void displback_changed(struct xenbus_device *xb_dev, + enum xenbus_state backend_state) +{ + struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev); + int ret; + + DRM_DEBUG("Backend state is %s, front is %s\n", + xenbus_strstate(backend_state), + xenbus_strstate(xb_dev->state)); + + switch (backend_state) { + case XenbusStateReconfiguring: + /* fall through */ + case XenbusStateReconfigured: + /* fall through */ + case XenbusStateInitialised: + break; + + case XenbusStateInitialising: + if (xb_dev->state == XenbusStateReconfiguring) + break; + + /* recovering after backend unexpected closure */ + displback_disconnect(front_info); + break; + + case XenbusStateInitWait: + if (xb_dev->state == XenbusStateReconfiguring) + break; + + /* recovering after backend unexpected closure */ + displback_disconnect(front_info); + if (xb_dev->state != XenbusStateInitialising) + break; + + ret = displback_initwait(front_info); + if (ret < 0) + xenbus_dev_fatal(xb_dev, ret, "initializing frontend"); + else + xenbus_switch_state(xb_dev, XenbusStateInitialised); + break; + + case XenbusStateConnected: + if (xb_dev->state != XenbusStateInitialised) + break; + + ret = displback_connect(front_info); + if (ret < 0) { + displback_disconnect(front_info); + xenbus_dev_fatal(xb_dev, ret, "connecting backend"); + } else { + xenbus_switch_state(xb_dev, XenbusStateConnected); + } + break; + + case XenbusStateClosing: + /* + * The backend starts freeing resources in this state, so + * let it go into the closed state, so that we can also + * remove ours. + */ + break; + + case XenbusStateUnknown: + /* fall through */ + case XenbusStateClosed: + if (xb_dev->state == XenbusStateClosed) + break; + + displback_disconnect(front_info); + break; + } +} + +static int xen_drv_probe(struct xenbus_device *xb_dev, + const struct xenbus_device_id *id) +{ + struct xen_drm_front_info *front_info; + struct device *dev = &xb_dev->dev; + int ret; + + /* + * The device is not spawned from a device tree, so arch_setup_dma_ops + * is not called, thus leaving the device with dummy DMA ops. + * This makes the device return an error on PRIME buffer import, which + * is not correct: to fix this, call of_dma_configure() with a NULL + * node to set the default DMA ops. + */ + dev->bus->force_dma = true; + dev->coherent_dma_mask = DMA_BIT_MASK(32); + ret = of_dma_configure(dev, NULL); + if (ret < 0) { + DRM_ERROR("Cannot set up DMA ops, ret %d", ret); + return ret; + } + + front_info = devm_kzalloc(&xb_dev->dev, + sizeof(*front_info), GFP_KERNEL); + if (!front_info) + return -ENOMEM; + + front_info->xb_dev = xb_dev; + spin_lock_init(&front_info->io_lock); + INIT_LIST_HEAD(&front_info->dbuf_list); + dev_set_drvdata(&xb_dev->dev, front_info); + + return xenbus_switch_state(xb_dev, XenbusStateInitialising); +} + +static int xen_drv_remove(struct xenbus_device *dev) +{ + struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev); + int to = 100; + + xenbus_switch_state(dev, XenbusStateClosing); + + /* + * On driver removal it is disconnected from XenBus, + * so no backend state change events come via the .otherend_changed + * callback. This prevents us from exiting gracefully, e.g. + * signaling the backend to free event channels, waiting for its + * state to change to XenbusStateClosed and cleaning up at our end. + * Normally, when the frontend driver is removed, the backend will + * finally go into the XenbusStateInitWait state.
+ * + * Workaround: read backend's state manually and wait with time-out. + */ + while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state", + XenbusStateUnknown) != XenbusStateInitWait) && + to--) + msleep(10); + + if (!to) { + unsigned int state; + + state = xenbus_read_unsigned(front_info->xb_dev->otherend, + "state", XenbusStateUnknown); + DRM_ERROR("Backend state is %s while removing driver\n", + xenbus_strstate(state)); + } + + xen_drm_drv_fini(front_info); + xenbus_frontend_closed(dev); + return 0; +} + +static const struct xenbus_device_id xen_driver_ids[] = { + { XENDISPL_DRIVER_NAME }, + { "" } +}; + +static struct xenbus_driver xen_driver = { + .ids = xen_driver_ids, + .probe = xen_drv_probe, + .remove = xen_drv_remove, + .otherend_changed = displback_changed, +}; + +static int __init xen_drv_init(void) +{ + /* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE */ + if (XEN_PAGE_SIZE != PAGE_SIZE) { + DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n", + XEN_PAGE_SIZE, PAGE_SIZE); + return -ENODEV; + } + + if (!xen_domain()) + return -ENODEV; + + if (!xen_has_pv_devices()) + return -ENODEV; + + DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n"); + return xenbus_register_frontend(&xen_driver); +} + +static void __exit xen_drv_fini(void) +{ + DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n"); + xenbus_unregister_driver(&xen_driver); +} + +module_init(xen_drv_init); +module_exit(xen_drv_fini); + +MODULE_DESCRIPTION("Xen para-virtualized display device frontend"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME); diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h new file mode 100644 index 00000000000000..2c2479b571ae6b --- /dev/null +++ b/drivers/gpu/drm/xen/xen_drm_front.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +/* + * Xen para-virtual DRM device + * + * Copyright (C) 2016-2018 EPAM Systems Inc. + * + * Author: Oleksandr Andrushchenko + */ + +#ifndef __XEN_DRM_FRONT_H_ +#define __XEN_DRM_FRONT_H_ + +#include +#include + +#include + +#include "xen_drm_front_cfg.h" + +/** + * DOC: Driver modes of operation in terms of display buffers used + * + * Depending on the requirements for the para-virtualized environment, namely + * requirements dictated by the accompanying DRM/(v)GPU drivers running in both + * host and guest environments, display buffers can be allocated by either + * the frontend driver or the backend. + */ + +/** + * DOC: Buffers allocated by the frontend driver + * + * In this mode of operation the driver allocates buffers from system memory. + * + * Note! If used with accompanying DRM/(v)GPU drivers, this mode of operation + * may require IOMMU support on the platform, so the accompanying DRM/vGPU + * hardware can still reach display buffer memory while importing PRIME + * buffers from the frontend driver. + */ + +/** + * DOC: Buffers allocated by the backend + * + * This mode of operation is run-time configured via guest domain configuration + * through XenStore entries. + * + * For systems which do not provide IOMMU support but have specific + * requirements for display buffers, it is possible to allocate such buffers + * on the backend side and share those with the frontend. + * For example, if the host domain is 1:1 mapped and has DRM/GPU hardware expecting + * physically contiguous memory, this allows implementing zero-copy + * use-cases.
+ * + * Note: while using this scenario, the following should be considered: + * + * #. If the guest domain dies, then pages/grants received from the backend + * cannot be claimed back + * + * #. A misbehaving guest may send too many requests to the + * backend, exhausting its grant references and memory + * (consider this from a security POV) + */ + +/** + * DOC: Driver limitations + * + * #. Only a primary plane without additional properties is supported. + * + * #. Only one video mode per connector is supported, which is configured + * via XenStore. + * + * #. All CRTCs operate at a fixed frequency of 60 Hz. + */ + +/* timeout in ms to wait for backend to respond */ +#define XEN_DRM_FRONT_WAIT_BACK_MS 3000 + +#ifndef GRANT_INVALID_REF +/* + * Note on usage of grant reference 0 as invalid grant reference: + * grant reference 0 is valid, but never exposed to a PV driver, + * because it is already in use/reserved by the PV console. + */ +#define GRANT_INVALID_REF 0 +#endif + +struct xen_drm_front_info { + struct xenbus_device *xb_dev; + struct xen_drm_front_drm_info *drm_info; + + /* to protect data between backend IO code and interrupt handler */ + spinlock_t io_lock; + + int num_evt_pairs; + struct xen_drm_front_evtchnl_pair *evt_pairs; + struct xen_drm_front_cfg cfg; + + /* display buffers */ + struct list_head dbuf_list; +}; + +struct xen_drm_front_drm_pipeline { + struct xen_drm_front_drm_info *drm_info; + + int index; + + struct drm_simple_display_pipe pipe; + + struct drm_connector conn; + /* These are only for connector mode checking */ + int width, height; + + struct drm_pending_vblank_event *pending_event; + + struct delayed_work pflip_to_worker; + + bool conn_connected; +}; + +struct xen_drm_front_drm_info { + struct xen_drm_front_info *front_info; + struct drm_device *drm_dev; + + struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS]; +}; + +static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb) +{ + return (uintptr_t)fb; +} + +static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj) +{ + return (uintptr_t)gem_obj; +} + +int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, + u32 x, u32 y, u32 width, u32 height, + u32 bpp, u64 fb_cookie); + +int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, + u64 dbuf_cookie, u32 width, u32 height, + u32 bpp, u64 size, struct page **pages); + +int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info, + u64 dbuf_cookie, u64 fb_cookie, u32 width, + u32 height, u32 pixel_format); + +int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info, + u64 fb_cookie); + +int xen_drm_front_page_flip(struct xen_drm_front_info *front_info, + int conn_idx, u64 fb_cookie); + +void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info, + int conn_idx, u64 fb_cookie); + +#endif /* __XEN_DRM_FRONT_H_ */ diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c new file mode 100644 index 00000000000000..5baf2b9de93c43 --- /dev/null +++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +/* + * Xen para-virtual DRM device + * + * Copyright (C) 2016-2018 EPAM Systems Inc.
+ * + * Author: Oleksandr Andrushchenko + */ + +#include + +#include + +#include +#include + +#include "xen_drm_front.h" +#include "xen_drm_front_cfg.h" + +static int cfg_connector(struct xen_drm_front_info *front_info, + struct xen_drm_front_cfg_connector *connector, + const char *path, int index) +{ + char *connector_path; + + connector_path = devm_kasprintf(&front_info->xb_dev->dev, + GFP_KERNEL, "%s/%d", path, index); + if (!connector_path) + return -ENOMEM; + + if (xenbus_scanf(XBT_NIL, connector_path, XENDISPL_FIELD_RESOLUTION, + "%d" XENDISPL_RESOLUTION_SEPARATOR "%d", + &connector->width, &connector->height) < 0) { + /* either no entry configured or wrong resolution set */ + connector->width = 0; + connector->height = 0; + return -EINVAL; + } + + connector->xenstore_path = connector_path; + + DRM_INFO("Connector %s: resolution %dx%d\n", + connector_path, connector->width, connector->height); + return 0; +} + +int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info, + struct xen_drm_front_cfg *cfg) +{ + struct xenbus_device *xb_dev = front_info->xb_dev; + int ret, i; + + if (xenbus_read_unsigned(front_info->xb_dev->nodename, + XENDISPL_FIELD_BE_ALLOC, 0)) { + DRM_INFO("Backend can provide display buffers\n"); + cfg->be_alloc = true; + } + + cfg->num_connectors = 0; + for (i = 0; i < ARRAY_SIZE(cfg->connectors); i++) { + ret = cfg_connector(front_info, &cfg->connectors[i], + xb_dev->nodename, i); + if (ret < 0) + break; + cfg->num_connectors++; + } + + if (!cfg->num_connectors) { + DRM_ERROR("No connector(s) configured at %s\n", + xb_dev->nodename); + return -ENODEV; + } + + return 0; +} + diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.h b/drivers/gpu/drm/xen/xen_drm_front_cfg.h new file mode 100644 index 00000000000000..aa8490ba914690 --- /dev/null +++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +/* + * Xen para-virtual DRM device + * + * Copyright (C) 2016-2018 EPAM Systems Inc. + * + * Author: Oleksandr Andrushchenko + */ + +#ifndef __XEN_DRM_FRONT_CFG_H_ +#define __XEN_DRM_FRONT_CFG_H_ + +#include + +#define XEN_DRM_FRONT_MAX_CRTCS 4 + +struct xen_drm_front_cfg_connector { + int width; + int height; + char *xenstore_path; +}; + +struct xen_drm_front_cfg { + struct xen_drm_front_info *front_info; + /* number of connectors in this configuration */ + int num_connectors; + /* connector configurations */ + struct xen_drm_front_cfg_connector connectors[XEN_DRM_FRONT_MAX_CRTCS]; + /* set if dumb buffers are allocated externally on backend side */ + bool be_alloc; +}; + +int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info, + struct xen_drm_front_cfg *cfg); + +#endif /* __XEN_DRM_FRONT_CFG_H_ */ diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c new file mode 100644 index 00000000000000..c91ae532fa55ed --- /dev/null +++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +/* + * Xen para-virtual DRM device + * + * Copyright (C) 2016-2018 EPAM Systems Inc. + * + * Author: Oleksandr Andrushchenko + */ + +#include +#include + +#include
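For reference, the configuration code in xen_drm_front_cfg.c above reads a per-device XenStore layout roughly along these lines (paths are illustrative; the actual field names and the "x" resolution separator come from include/xen/interface/io/displif.h)::

    /local/domain/<domid>/device/vdispl/<devid>/be-alloc = "1"
    /local/domain/<domid>/device/vdispl/<devid>/0/resolution = "1920x1080"
    /local/domain/<domid>/device/vdispl/<devid>/1/resolution = "800x600"

Connectors are probed from index 0 upwards, and enumeration stops at the first index without a valid resolution entry, so connectors must be configured contiguously; if none are found, xen_drm_front_cfg_card() fails with -ENODEV.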