VirtualBox

Changeset 67395 in vbox


Timestamp:
Jun 14, 2017, 12:53:00 PM (7 years ago)
Author:
vboxsync
Message:

bugref:8524: Additions/linux: play nicely with distribution-installed Additions
Additions: linux/drm: Change vbox_drv.c to kernel coding style

This is the result of running linux/scripts/Lindent + manual cleanups.
After this the file passes linux/scripts/checkpatch -f
except for the LINUX_VERSION_CODE checks.

This patch contains no functional changes, only coding style fixes,
including changing uintXX_t types to uXX.

Signed-off-by: Hans de Goede <hdegoede@…>
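
As an illustration of the "uintXX_t types to uXX" change described above, here is a minimal sketch of what such a conversion looks like in kernel coding style. The struct and field names are hypothetical and are not taken from vbox_drv.c:

    /*
     * Hypothetical example; the names below are not from vbox_drv.c.
     *
     * Before the style pass a declaration might have looked like:
     *
     *     struct mode_info
     *     {
     *         uint32_t width;
     *         uint16_t bpp;
     *     };
     *
     * After linux/scripts/Lindent plus manual cleanup it becomes:
     */
    #include <linux/types.h>

    struct mode_info {
            u32 width;      /* was uint32_t */
            u16 bpp;        /* was uint16_t */
    };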

Files:
1 edited

  • trunk/src/VBox/Additions/linux/drm/vbox_drv.c

--- trunk/src/VBox/Additions/linux/drm/vbox_drv.c	(r67177)
+++ trunk/src/VBox/Additions/linux/drm/vbox_drv.c	(r67395)
@@ -52,131 +52,126 @@
 static struct drm_driver driver;
 
-static const struct pci_device_id pciidlist[] =
-{
-    {0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-    {0, 0, 0},
-};
-
+static const struct pci_device_id pciidlist[] = {
+        { 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+        { 0, 0, 0},
+};
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-    return drm_get_pci_dev(pdev, ent, &driver);
-}
-
+        return drm_get_pci_dev(pdev, ent, &driver);
+}
 
 static void vbox_pci_remove(struct pci_dev *pdev)
 {
-    struct drm_device *dev = pci_get_drvdata(pdev);
-
-    drm_put_dev(dev);
-}
-
-
+        struct drm_device *dev = pci_get_drvdata(pdev);
+
+        drm_put_dev(dev);
+}
 
 static int vbox_drm_freeze(struct drm_device *dev)
 {
-    drm_kms_helper_poll_disable(dev);
-
-    pci_save_state(dev->pdev);
-
-    console_lock();
-    vbox_fbdev_set_suspend(dev, 1);
-    console_unlock();
-    return 0;
+        drm_kms_helper_poll_disable(dev);
+
+        pci_save_state(dev->pdev);
+
+        console_lock();
+        vbox_fbdev_set_suspend(dev, 1);
+        console_unlock();
+
+        return 0;
 }
 
 static int vbox_drm_thaw(struct drm_device *dev)
 {
-    int error = 0;
-
-    drm_mode_config_reset(dev);
-    drm_helper_resume_force_mode(dev);
-
-    console_lock();
-    vbox_fbdev_set_suspend(dev, 0);
-    console_unlock();
-    return error;
+        drm_mode_config_reset(dev);
+        drm_helper_resume_force_mode(dev);
+
+        console_lock();
+        vbox_fbdev_set_suspend(dev, 0);
+        console_unlock();
+
+        return 0;
 }
 
 static int vbox_drm_resume(struct drm_device *dev)
 {
-    int ret;
-
-    if (pci_enable_device(dev->pdev))
-        return -EIO;
-
-    ret = vbox_drm_thaw(dev);
-    if (ret)
-        return ret;
-
-    drm_kms_helper_poll_enable(dev);
-    return 0;
+        int ret;
+
+        if (pci_enable_device(dev->pdev))
+                return -EIO;
+
+        ret = vbox_drm_thaw(dev);
+        if (ret)
+                return ret;
+
+        drm_kms_helper_poll_enable(dev);
+
+        return 0;
 }
 
 static int vbox_pm_suspend(struct device *dev)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct drm_device *ddev = pci_get_drvdata(pdev);
-    int error;
-
-    error = vbox_drm_freeze(ddev);
-    if (error)
-        return error;
-
-    pci_disable_device(pdev);
-    pci_set_power_state(pdev, PCI_D3hot);
-    return 0;
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *ddev = pci_get_drvdata(pdev);
+        int error;
+
+        error = vbox_drm_freeze(ddev);
+        if (error)
+                return error;
+
+        pci_disable_device(pdev);
+        pci_set_power_state(pdev, PCI_D3hot);
+
+        return 0;
 }
 
 static int vbox_pm_resume(struct device *dev)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct drm_device *ddev = pci_get_drvdata(pdev);
-    return vbox_drm_resume(ddev);
+        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+        return vbox_drm_resume(ddev);
 }
 
 static int vbox_pm_freeze(struct device *dev)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct drm_device *ddev = pci_get_drvdata(pdev);
-
-    if (!ddev || !ddev->dev_private)
-        return -ENODEV;
-    return vbox_drm_freeze(ddev);
-
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *ddev = pci_get_drvdata(pdev);
+
+        if (!ddev || !ddev->dev_private)
+                return -ENODEV;
+
+        return vbox_drm_freeze(ddev);
 }
 
 static int vbox_pm_thaw(struct device *dev)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct drm_device *ddev = pci_get_drvdata(pdev);
-    return vbox_drm_thaw(ddev);
+        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+        return vbox_drm_thaw(ddev);
 }
 
 static int vbox_pm_poweroff(struct device *dev)
 {
-    struct pci_dev *pdev = to_pci_dev(dev);
-    struct drm_device *ddev = pci_get_drvdata(pdev);
-
-    return vbox_drm_freeze(ddev);
+        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+        return vbox_drm_freeze(ddev);
 }
 
 static const struct dev_pm_ops vbox_pm_ops = {
-    .suspend = vbox_pm_suspend,
-    .resume = vbox_pm_resume,
-    .freeze = vbox_pm_freeze,
-    .thaw = vbox_pm_thaw,
-    .poweroff = vbox_pm_poweroff,
-    .restore = vbox_pm_resume,
-};
-
-static struct pci_driver vbox_pci_driver =
-{
-    .name = DRIVER_NAME,
-    .id_table = pciidlist,
-    .probe = vbox_pci_probe,
-    .remove = vbox_pci_remove,
-    .driver.pm = &vbox_pm_ops,
+        .suspend = vbox_pm_suspend,
+        .resume = vbox_pm_resume,
+        .freeze = vbox_pm_freeze,
+        .thaw = vbox_pm_thaw,
+        .poweroff = vbox_pm_poweroff,
+        .restore = vbox_pm_resume,
+};
+
+static struct pci_driver vbox_pci_driver = {
+        .name = DRIVER_NAME,
+        .id_table = pciidlist,
+        .probe = vbox_pci_probe,
+        .remove = vbox_pci_remove,
+        .driver.pm = &vbox_pm_ops,
 };
 
@@ -188,117 +183,125 @@
  * why I am limiting it to certain kernel versions.  We can increase the
  * limit if some distributions uses old X servers with new kernels. */
-long vbox_ioctl(struct file *filp,
-                unsigned int cmd, unsigned long arg)
-{
-    long rc = drm_ioctl(filp, cmd, arg);
-    if (cmd == DRM_IOCTL_MODE_DIRTYFB && rc == -EINVAL)
-        return -EOVERFLOW;
-    return rc;
+long vbox_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+        long rc = drm_ioctl(filp, cmd, arg);
+
+        if (cmd == DRM_IOCTL_MODE_DIRTYFB && rc == -EINVAL)
+                return -EOVERFLOW;
+
+        return rc;
 }
 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) */
 
-static const struct file_operations vbox_fops =
-{
-    .owner = THIS_MODULE,
-    .open = drm_open,
-    .release = drm_release,
+static const struct file_operations vbox_fops = {
+        .owner = THIS_MODULE,
+        .open = drm_open,
+        .release = drm_release,
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
-    .unlocked_ioctl = vbox_ioctl,
+        .unlocked_ioctl = vbox_ioctl,
 #else
-    .unlocked_ioctl = drm_ioctl,
-#endif
-    .mmap = vbox_mmap,
-    .poll = drm_poll,
+        .unlocked_ioctl = drm_ioctl,
+#endif
+        .mmap = vbox_mmap,
+        .poll = drm_poll,
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
-    .fasync = drm_fasync,
+        .fasync = drm_fasync,
 #endif
 #ifdef CONFIG_COMPAT
-    .compat_ioctl = drm_compat_ioctl,
-#endif
-    .read = drm_read,
+        .compat_ioctl = drm_compat_ioctl,
+#endif
+        .read = drm_read,
 };
 
 static int vbox_master_set(struct drm_device *dev,
-                           struct drm_file *file_priv,
-                           bool from_open)
-{
-    struct vbox_private *vbox = dev->dev_private;
-    /* We do not yet know whether the new owner can handle hotplug, so we
-     * do not advertise dynamic modes on the first query and send a
-     * tentative hotplug notification after that to see if they query again. */
-    vbox->initial_mode_queried = false;
-    mutex_lock(&vbox->hw_mutex);
-    /* Disable VBVA when someone releases master in case the next person tries
-     * to do VESA. */
-    /** @todo work out if anyone is likely to and whether it will even work. */
-    /* Update: we also disable it because if the new master does not do dirty
-     * rectangle reporting (e.g. old versions of Plymouth) then at least the
-     * first screen will still be updated.  We enable it as soon as we
-     * receive a dirty rectangle report. */
-    vbox_disable_accel(vbox);
-    mutex_unlock(&vbox->hw_mutex);
-    return 0;
+                           struct drm_file *file_priv, bool from_open)
+{
+        struct vbox_private *vbox = dev->dev_private;
+
+        /*
+         * We do not yet know whether the new owner can handle hotplug, so we
+         * do not advertise dynamic modes on the first query and send a
+         * tentative hotplug notification after that to see if they query again.
+         */
+        vbox->initial_mode_queried = false;
+
+        mutex_lock(&vbox->hw_mutex);
+        /*
+         * Disable VBVA when someone releases master in case the next person
+         * tries tries to do VESA.
+         */
+        /** @todo work out if anyone is likely to and whether it will work. */
+        /*
+         * Update: we also disable it because if the new master does not do
+         * dirty rectangle reporting (e.g. old versions of Plymouth) then at
+         * least the first screen will still be updated. We enable it as soon
+         * as we receive a dirty rectangle report.
+         */
+        vbox_disable_accel(vbox);
+        mutex_unlock(&vbox->hw_mutex);
+
+        return 0;
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
 static void vbox_master_drop(struct drm_device *dev,
-                             struct drm_file *file_priv,
-                             bool from_release)
+                             struct drm_file *file_priv, bool from_release)
 #else
-static void vbox_master_drop(struct drm_device *dev,
-                             struct drm_file *file_priv)
-#endif
-{
-    struct vbox_private *vbox = dev->dev_private;
-    /* See vbox_master_set() */
-    vbox->initial_mode_queried = false;
-    mutex_lock(&vbox->hw_mutex);
-    vbox_disable_accel(vbox);
-    mutex_unlock(&vbox->hw_mutex);
-}
-
-static struct drm_driver driver =
-{
-    .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_PRIME,
-    .dev_priv_size = 0,
-
-    .load = vbox_driver_load,
-    .unload = vbox_driver_unload,
-    .lastclose = vbox_driver_lastclose,
-    .master_set = vbox_master_set,
-    .master_drop = vbox_master_drop,
+static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+#endif
+{
+        struct vbox_private *vbox = dev->dev_private;
+
+        /* See vbox_master_set() */
+        vbox->initial_mode_queried = false;
+
+        mutex_lock(&vbox->hw_mutex);
+        vbox_disable_accel(vbox);
+        mutex_unlock(&vbox->hw_mutex);
+}
+
+static struct drm_driver driver = {
+        .driver_features =
+            DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+            DRIVER_PRIME,
+        .dev_priv_size = 0,
+
+        .load = vbox_driver_load,
+        .unload = vbox_driver_unload,
+        .lastclose = vbox_driver_lastclose,
+        .master_set = vbox_master_set,
+        .master_drop = vbox_master_drop,
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
-    .set_busid = drm_pci_set_busid,
-#endif
-
-    .fops = &vbox_fops,
-    .irq_handler = vbox_irq_handler,
-    .name = DRIVER_NAME,
-    .desc = DRIVER_DESC,
-    .date = DRIVER_DATE,
-    .major = DRIVER_MAJOR,
-    .minor = DRIVER_MINOR,
-    .patchlevel = DRIVER_PATCHLEVEL,
-
-    .gem_free_object = vbox_gem_free_object,
-    .dumb_create = vbox_dumb_create,
-    .dumb_map_offset = vbox_dumb_mmap_offset,
+        .set_busid = drm_pci_set_busid,
+#endif
+
+        .fops = &vbox_fops,
+        .irq_handler = vbox_irq_handler,
+        .name = DRIVER_NAME,
+        .desc = DRIVER_DESC,
+        .date = DRIVER_DATE,
+        .major = DRIVER_MAJOR,
+        .minor = DRIVER_MINOR,
+        .patchlevel = DRIVER_PATCHLEVEL,
+
+        .gem_free_object = vbox_gem_free_object,
+        .dumb_create = vbox_dumb_create,
+        .dumb_map_offset = vbox_dumb_mmap_offset,
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
-    .dumb_destroy = vbox_dumb_destroy,
+        .dumb_destroy = vbox_dumb_destroy,
 #else
-    .dumb_destroy = drm_gem_dumb_destroy,
-#endif
-    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-    .gem_prime_export = drm_gem_prime_export,
-    .gem_prime_import = drm_gem_prime_import,
-    .gem_prime_pin = vbox_gem_prime_pin,
-    .gem_prime_unpin = vbox_gem_prime_unpin,
-    .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
-    .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
-    .gem_prime_vmap = vbox_gem_prime_vmap,
-    .gem_prime_vunmap = vbox_gem_prime_vunmap,
-    .gem_prime_mmap = vbox_gem_prime_mmap,
-
+        .dumb_destroy = drm_gem_dumb_destroy,
+#endif
+        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+        .gem_prime_export = drm_gem_prime_export,
+        .gem_prime_import = drm_gem_prime_import,
+        .gem_prime_pin = vbox_gem_prime_pin,
+        .gem_prime_unpin = vbox_gem_prime_unpin,
+        .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+        .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+        .gem_prime_vmap = vbox_gem_prime_vmap,
+        .gem_prime_vunmap = vbox_gem_prime_vunmap,
+        .gem_prime_mmap = vbox_gem_prime_mmap,
 };
 
@@ -306,16 +309,17 @@
 {
 #ifdef CONFIG_VGA_CONSOLE
-    if (vgacon_text_force() && vbox_modeset == -1)
-        return -EINVAL;
-#endif
-
-    if (vbox_modeset == 0)
-        return -EINVAL;
-
-    return drm_pci_init(&driver, &vbox_pci_driver);
-}
+        if (vgacon_text_force() && vbox_modeset == -1)
+                return -EINVAL;
+#endif
+
+        if (vbox_modeset == 0)
+                return -EINVAL;
+
+        return drm_pci_init(&driver, &vbox_pci_driver);
+}
+
 static void __exit vbox_exit(void)
 {
-    drm_pci_exit(&driver, &vbox_pci_driver);
+        drm_pci_exit(&driver, &vbox_pci_driver);
 }
 