
Commit 433fc58

Asias He authored and Michael S. Tsirkin committed on Aug 1, 2016
VSOCK: Introduce vhost_vsock.ko
VM sockets vhost transport implementation. This driver runs on the host.

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
1 parent 0ea9e1d commit 433fc58
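
For context, here is a minimal, hypothetical host-side sketch (not part of this commit) of the two new ioctls the patch adds: userspace opens /dev/vhost-vsock, takes ownership, assigns the guest a CID, and sets the device running. A real VMM such as QEMU must also configure guest memory and the vrings (VHOST_SET_MEM_TABLE, VHOST_SET_VRING_*) before VHOST_VSOCK_SET_RUNNING can succeed; that setup is elided here.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
        uint64_t guest_cid = 3; /* lowest CID the patch does not refuse */
        int running = 1;
        int fd = open("/dev/vhost-vsock", O_RDWR);

        if (fd < 0) {
                perror("open /dev/vhost-vsock");
                return 1;
        }
        if (ioctl(fd, VHOST_SET_OWNER) < 0)     /* bind this fd to the caller */
                perror("VHOST_SET_OWNER");
        if (ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid) < 0)
                perror("VHOST_VSOCK_SET_GUEST_CID");
        /* VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup elided */
        if (ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running) < 0)
                perror("VHOST_VSOCK_SET_RUNNING");
        close(fd);
        return 0;
}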

File tree

3 files changed: 729 additions & 0 deletions

MAINTAINERS

Lines changed: 2 additions & 0 deletions
@@ -12148,6 +12148,8 @@ F:	include/linux/virtio_vsock.h
 F:	include/uapi/linux/virtio_vsock.h
 F:	net/vmw_vsock/virtio_transport_common.c
 F:	net/vmw_vsock/virtio_transport.c
+F:	drivers/vhost/vsock.c
+F:	drivers/vhost/vsock.h
 
 VIRTUAL SERIO DEVICE DRIVER
 M:	Stephen Chandler Paul <thatslyude@gmail.com>

drivers/vhost/vsock.c

Lines changed: 722 additions & 0 deletions
@@ -0,0 +1,722 @@
+/*
+ * vhost transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ *         Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <net/sock.h>
+#include <linux/virtio_vsock.h>
+#include <linux/vhost.h>
+
+#include <net/af_vsock.h>
+#include "vhost.h"
+
+#define VHOST_VSOCK_DEFAULT_HOST_CID	2
+
+enum {
+	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+};
+
+/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_SPINLOCK(vhost_vsock_lock);
+static LIST_HEAD(vhost_vsock_list);
+
+struct vhost_vsock {
+	struct vhost_dev dev;
+	struct vhost_virtqueue vqs[2];
+
+	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+	struct list_head list;
+
+	struct vhost_work send_pkt_work;
+	spinlock_t send_pkt_list_lock;
+	struct list_head send_pkt_list;	/* host->guest pending packets */
+
+	atomic_t queued_replies;
+
+	u32 guest_cid;
+};
+
+static u32 vhost_transport_get_local_cid(void)
+{
+	return VHOST_VSOCK_DEFAULT_HOST_CID;
+}
+
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+	struct vhost_vsock *vsock;
+
+	spin_lock_bh(&vhost_vsock_lock);
+	list_for_each_entry(vsock, &vhost_vsock_list, list) {
+		u32 other_cid = vsock->guest_cid;
+
+		/* Skip instances that have no CID yet */
+		if (other_cid == 0)
+			continue;
+
+		if (other_cid == guest_cid) {
+			spin_unlock_bh(&vhost_vsock_lock);
+			return vsock;
+		}
+	}
+	spin_unlock_bh(&vhost_vsock_lock);
+
+	return NULL;
+}
+
+static void
+vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+			    struct vhost_virtqueue *vq)
+{
+	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+	bool added = false;
+	bool restart_tx = false;
+
+	mutex_lock(&vq->mutex);
+
+	if (!vq->private_data)
+		goto out;
+
+	/* Avoid further vmexits, we're already processing the virtqueue */
+	vhost_disable_notify(&vsock->dev, vq);
+
+	for (;;) {
+		struct virtio_vsock_pkt *pkt;
+		struct iov_iter iov_iter;
+		unsigned out, in;
+		size_t nbytes;
+		size_t len;
+		int head;
+
+		spin_lock_bh(&vsock->send_pkt_list_lock);
+		if (list_empty(&vsock->send_pkt_list)) {
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
+			vhost_enable_notify(&vsock->dev, vq);
+			break;
+		}
+
+		pkt = list_first_entry(&vsock->send_pkt_list,
+				       struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+		spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+					 &out, &in, NULL, NULL);
+		if (head < 0) {
+			spin_lock_bh(&vsock->send_pkt_list_lock);
+			list_add(&pkt->list, &vsock->send_pkt_list);
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
+			break;
+		}
+
+		if (head == vq->num) {
+			spin_lock_bh(&vsock->send_pkt_list_lock);
+			list_add(&pkt->list, &vsock->send_pkt_list);
+			spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+			/* We cannot finish yet if more buffers snuck in while
+			 * re-enabling notify.
+			 */
+			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+				vhost_disable_notify(&vsock->dev, vq);
+				continue;
+			}
+			break;
+		}
+
+		if (out) {
+			virtio_transport_free_pkt(pkt);
+			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+			break;
+		}
+
+		len = iov_length(&vq->iov[out], in);
+		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
+
+		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+		if (nbytes != sizeof(pkt->hdr)) {
+			virtio_transport_free_pkt(pkt);
+			vq_err(vq, "Faulted on copying pkt hdr\n");
+			break;
+		}
+
+		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
+		if (nbytes != pkt->len) {
+			virtio_transport_free_pkt(pkt);
+			vq_err(vq, "Faulted on copying pkt buf\n");
+			break;
+		}
+
+		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+		added = true;
+
+		if (pkt->reply) {
+			int val;
+
+			val = atomic_dec_return(&vsock->queued_replies);
+
+			/* Do we have resources to resume tx processing? */
+			if (val + 1 == tx_vq->num)
+				restart_tx = true;
+		}
+
+		virtio_transport_free_pkt(pkt);
+	}
+	if (added)
+		vhost_signal(&vsock->dev, vq);
+
+out:
+	mutex_unlock(&vq->mutex);
+
+	if (restart_tx)
+		vhost_poll_queue(&tx_vq->poll);
+}
+
+static void vhost_transport_send_pkt_work(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq;
+	struct vhost_vsock *vsock;
+
+	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
+	vq = &vsock->vqs[VSOCK_VQ_RX];
+
+	vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int
+vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+{
+	struct vhost_vsock *vsock;
+	struct vhost_virtqueue *vq;
+	int len = pkt->len;
+
+	/* Find the vhost_vsock according to guest context id */
+	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+	if (!vsock) {
+		virtio_transport_free_pkt(pkt);
+		return -ENODEV;
+	}
+
+	vq = &vsock->vqs[VSOCK_VQ_RX];
+
+	if (pkt->reply)
+		atomic_inc(&vsock->queued_replies);
+
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	list_add_tail(&pkt->list, &vsock->send_pkt_list);
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+	return len;
+}
+
+static struct virtio_vsock_pkt *
+vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+		      unsigned int out, unsigned int in)
+{
+	struct virtio_vsock_pkt *pkt;
+	struct iov_iter iov_iter;
+	size_t nbytes;
+	size_t len;
+
+	if (in != 0) {
+		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
+		return NULL;
+	}
+
+	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
+		return NULL;
+
+	len = iov_length(vq->iov, out);
+	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);
+
+	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+	if (nbytes != sizeof(pkt->hdr)) {
+		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+		       sizeof(pkt->hdr), nbytes);
+		kfree(pkt);
+		return NULL;
+	}
+
+	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
+		pkt->len = le32_to_cpu(pkt->hdr.len);
+
+	/* No payload */
+	if (!pkt->len)
+		return pkt;
+
+	/* The pkt is too big */
+	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+		kfree(pkt);
+		return NULL;
+	}
+
+	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return NULL;
+	}
+
+	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+	if (nbytes != pkt->len) {
+		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+		       pkt->len, nbytes);
+		virtio_transport_free_pkt(pkt);
+		return NULL;
+	}
+
+	return pkt;
+}
+
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+{
+	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+	int val;
+
+	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+	val = atomic_read(&vsock->queued_replies);
+
+	return val < vq->num;
+}
+
+static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+						 dev);
+	struct virtio_vsock_pkt *pkt;
+	int head;
+	unsigned int out, in;
+	bool added = false;
+
+	mutex_lock(&vq->mutex);
+
+	if (!vq->private_data)
+		goto out;
+
+	vhost_disable_notify(&vsock->dev, vq);
+	for (;;) {
+		if (!vhost_vsock_more_replies(vsock)) {
+			/* Stop tx until the device processes already
+			 * pending replies.  Leave tx virtqueue
+			 * callbacks disabled.
+			 */
+			goto no_more_replies;
+		}
+
+		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+					 &out, &in, NULL, NULL);
+		if (head < 0)
+			break;
+
+		if (head == vq->num) {
+			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+				vhost_disable_notify(&vsock->dev, vq);
+				continue;
+			}
+			break;
+		}
+
+		pkt = vhost_vsock_alloc_pkt(vq, out, in);
+		if (!pkt) {
+			vq_err(vq, "Faulted on pkt\n");
+			continue;
+		}
+
+		/* Only accept correctly addressed packets */
+		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+			virtio_transport_recv_pkt(pkt);
+		else
+			virtio_transport_free_pkt(pkt);
+
+		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+		added = true;
+	}
+
+no_more_replies:
+	if (added)
+		vhost_signal(&vsock->dev, vq);
+
+out:
+	mutex_unlock(&vq->mutex);
+}
+
+static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+						 dev);
+
+	vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+	size_t i;
+	int ret;
+
+	mutex_lock(&vsock->dev.mutex);
+
+	ret = vhost_dev_check_owner(&vsock->dev);
+	if (ret)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+		struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+		mutex_lock(&vq->mutex);
+
+		if (!vhost_vq_access_ok(vq)) {
+			ret = -EFAULT;
+			mutex_unlock(&vq->mutex);
+			goto err_vq;
+		}
+
+		if (!vq->private_data) {
+			vq->private_data = vsock;
+			vhost_vq_init_access(vq);
+		}
+
+		mutex_unlock(&vq->mutex);
+	}
+
+	mutex_unlock(&vsock->dev.mutex);
+	return 0;
+
+err_vq:
+	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+		struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+		mutex_lock(&vq->mutex);
+		vq->private_data = NULL;
+		mutex_unlock(&vq->mutex);
+	}
+err:
+	mutex_unlock(&vsock->dev.mutex);
+	return ret;
+}
+
+static int vhost_vsock_stop(struct vhost_vsock *vsock)
+{
+	size_t i;
+	int ret;
+
+	mutex_lock(&vsock->dev.mutex);
+
+	ret = vhost_dev_check_owner(&vsock->dev);
+	if (ret)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+		struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+		mutex_lock(&vq->mutex);
+		vq->private_data = NULL;
+		mutex_unlock(&vq->mutex);
+	}
+
+err:
+	mutex_unlock(&vsock->dev.mutex);
+	return ret;
+}
+
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+	if (is_vmalloc_addr(vsock))
+		vfree(vsock);
+	else
+		kfree(vsock);
+}
+
+static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+{
+	struct vhost_virtqueue **vqs;
+	struct vhost_vsock *vsock;
+	int ret;
+
+	/* This struct is large and allocation could fail, fall back to vmalloc
+	 * if there is no other way.
+	 */
+	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!vsock) {
+		vsock = vmalloc(sizeof(*vsock));
+		if (!vsock)
+			return -ENOMEM;
+	}
+
+	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	atomic_set(&vsock->queued_replies, 0);
+
+	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+
+	file->private_data = vsock;
+	spin_lock_init(&vsock->send_pkt_list_lock);
+	INIT_LIST_HEAD(&vsock->send_pkt_list);
+	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+
+	spin_lock_bh(&vhost_vsock_lock);
+	list_add_tail(&vsock->list, &vhost_vsock_list);
+	spin_unlock_bh(&vhost_vsock_lock);
+	return 0;
+
+out:
+	vhost_vsock_free(vsock);
+	return ret;
+}
+
+static void vhost_vsock_flush(struct vhost_vsock *vsock)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
+		if (vsock->vqs[i].handle_kick)
+			vhost_poll_flush(&vsock->vqs[i].poll);
+	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
+}
+
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+	struct vsock_sock *vsk = vsock_sk(sk);
+
+	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
+	 * under vsock_table_lock so the sock cannot disappear while we're
+	 * executing.
+	 */
+
+	if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
+		sock_set_flag(sk, SOCK_DONE);
+		vsk->peer_shutdown = SHUTDOWN_MASK;
+		sk->sk_state = SS_UNCONNECTED;
+		sk->sk_err = ECONNRESET;
+		sk->sk_error_report(sk);
+	}
+}
+
+static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+{
+	struct vhost_vsock *vsock = file->private_data;
+
+	spin_lock_bh(&vhost_vsock_lock);
+	list_del(&vsock->list);
+	spin_unlock_bh(&vhost_vsock_lock);
+
+	/* Iterating over all connections for all CIDs to find orphans is
+	 * inefficient.  Room for improvement here. */
+	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+
+	vhost_vsock_stop(vsock);
+	vhost_vsock_flush(vsock);
+	vhost_dev_stop(&vsock->dev);
+
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	while (!list_empty(&vsock->send_pkt_list)) {
+		struct virtio_vsock_pkt *pkt;
+
+		pkt = list_first_entry(&vsock->send_pkt_list,
+				       struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+	vhost_dev_cleanup(&vsock->dev, false);
+	kfree(vsock->dev.vqs);
+	vhost_vsock_free(vsock);
+	return 0;
+}
+
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+{
+	struct vhost_vsock *other;
+
+	/* Refuse reserved CIDs */
+	if (guest_cid <= VMADDR_CID_HOST ||
+	    guest_cid == U32_MAX)
+		return -EINVAL;
+
+	/* 64-bit CIDs are not yet supported */
+	if (guest_cid > U32_MAX)
+		return -EINVAL;
+
+	/* Refuse if CID is already in use */
+	other = vhost_vsock_get(guest_cid);
+	if (other && other != vsock)
+		return -EADDRINUSE;
+
+	spin_lock_bh(&vhost_vsock_lock);
+	vsock->guest_cid = guest_cid;
+	spin_unlock_bh(&vhost_vsock_lock);
+
+	return 0;
+}
+
+static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+{
+	struct vhost_virtqueue *vq;
+	int i;
+
+	if (features & ~VHOST_VSOCK_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vsock->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vsock->dev)) {
+		mutex_unlock(&vsock->dev.mutex);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+		vq = &vsock->vqs[i];
+		mutex_lock(&vq->mutex);
+		vq->acked_features = features;
+		mutex_unlock(&vq->mutex);
+	}
+	mutex_unlock(&vsock->dev.mutex);
+	return 0;
+}
+
+static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+				  unsigned long arg)
+{
+	struct vhost_vsock *vsock = f->private_data;
+	void __user *argp = (void __user *)arg;
+	u64 guest_cid;
+	u64 features;
+	int start;
+	int r;
+
+	switch (ioctl) {
+	case VHOST_VSOCK_SET_GUEST_CID:
+		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
+			return -EFAULT;
+		return vhost_vsock_set_cid(vsock, guest_cid);
+	case VHOST_VSOCK_SET_RUNNING:
+		if (copy_from_user(&start, argp, sizeof(start)))
+			return -EFAULT;
+		if (start)
+			return vhost_vsock_start(vsock);
+		else
+			return vhost_vsock_stop(vsock);
+	case VHOST_GET_FEATURES:
+		features = VHOST_VSOCK_FEATURES;
+		if (copy_to_user(argp, &features, sizeof(features)))
+			return -EFAULT;
+		return 0;
+	case VHOST_SET_FEATURES:
+		if (copy_from_user(&features, argp, sizeof(features)))
+			return -EFAULT;
+		return vhost_vsock_set_features(vsock, features);
+	default:
+		mutex_lock(&vsock->dev.mutex);
+		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
+		else
+			vhost_vsock_flush(vsock);
+		mutex_unlock(&vsock->dev.mutex);
+		return r;
+	}
+}
+
+static const struct file_operations vhost_vsock_fops = {
+	.owner          = THIS_MODULE,
+	.open           = vhost_vsock_dev_open,
+	.release        = vhost_vsock_dev_release,
+	.llseek		= noop_llseek,
+	.unlocked_ioctl = vhost_vsock_dev_ioctl,
+};
+
+static struct miscdevice vhost_vsock_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "vhost-vsock",
+	.fops = &vhost_vsock_fops,
+};
+
+static struct virtio_transport vhost_transport = {
+	.transport = {
+		.get_local_cid            = vhost_transport_get_local_cid,
+
+		.init                     = virtio_transport_do_socket_init,
+		.destruct                 = virtio_transport_destruct,
+		.release                  = virtio_transport_release,
+		.connect                  = virtio_transport_connect,
+		.shutdown                 = virtio_transport_shutdown,
+
+		.dgram_enqueue            = virtio_transport_dgram_enqueue,
+		.dgram_dequeue            = virtio_transport_dgram_dequeue,
+		.dgram_bind               = virtio_transport_dgram_bind,
+		.dgram_allow              = virtio_transport_dgram_allow,
+
+		.stream_enqueue           = virtio_transport_stream_enqueue,
+		.stream_dequeue           = virtio_transport_stream_dequeue,
+		.stream_has_data          = virtio_transport_stream_has_data,
+		.stream_has_space         = virtio_transport_stream_has_space,
+		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
+		.stream_is_active         = virtio_transport_stream_is_active,
+		.stream_allow             = virtio_transport_stream_allow,
+
+		.notify_poll_in           = virtio_transport_notify_poll_in,
+		.notify_poll_out          = virtio_transport_notify_poll_out,
+		.notify_recv_init         = virtio_transport_notify_recv_init,
+		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
+		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
+		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+		.notify_send_init         = virtio_transport_notify_send_init,
+		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
+		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
+		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+		.set_buffer_size          = virtio_transport_set_buffer_size,
+		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
+		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
+		.get_buffer_size          = virtio_transport_get_buffer_size,
+		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
+		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
+	},
+
+	.send_pkt = vhost_transport_send_pkt,
+};
+
+static int __init vhost_vsock_init(void)
+{
+	int ret;
+
+	ret = vsock_core_init(&vhost_transport.transport);
+	if (ret < 0)
+		return ret;
+	return misc_register(&vhost_vsock_misc);
+};
+
+static void __exit vhost_vsock_exit(void)
+{
+	misc_deregister(&vhost_vsock_misc);
+	vsock_core_exit();
+};
+
+module_init(vhost_vsock_init);
+module_exit(vhost_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("vhost transport for vsock ");
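
On the guest side, this transport is paired with the virtio-vsock driver and is reached through ordinary AF_VSOCK sockets. A hypothetical guest-side sketch (not part of this commit; the port number is an arbitrary example) connecting to the host, which this driver answers for as CID 2 (VHOST_VSOCK_DEFAULT_HOST_CID above):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>   /* struct sockaddr_vm */

#ifndef AF_VSOCK
#define AF_VSOCK 40             /* older libcs may not define it */
#endif

int main(void)
{
        struct sockaddr_vm addr;
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&addr, 0, sizeof(addr));
        addr.svm_family = AF_VSOCK;
        addr.svm_cid = 2;       /* the host: VHOST_VSOCK_DEFAULT_HOST_CID */
        addr.svm_port = 1234;   /* arbitrary example port */
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("connect");
        close(fd);
        return 0;
}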

include/uapi/linux/vhost.h

Lines changed: 5 additions & 0 deletions
@@ -175,4 +175,9 @@ struct vhost_scsi_target {
 #define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
 #define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
 
+/* VHOST_VSOCK specific defines */
+
+#define VHOST_VSOCK_SET_GUEST_CID	_IOW(VHOST_VIRTIO, 0x60, __u64)
+#define VHOST_VSOCK_SET_RUNNING	_IOW(VHOST_VIRTIO, 0x61, int)
+
 #endif
