/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition (XP) base.
 *
 *	XP provides a base from which its users can interact
 *	with XPC, yet not be dependent on XPC.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include "xp.h"

/* define the XP debug device structures to be used with dev_dbg() et al */

struct device_driver xp_dbg_name = {
	.name = "xp"
};

struct device xp_dbg_subname = {
	.init_name = "",	/* set to "" */
	.driver = &xp_dbg_name
};

struct device *xp = &xp_dbg_subname;
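/*
 * Illustrative sketch (not code from this file): with the debug device
 * defined above, XP and its users can emit messages through the standard
 * driver-model helpers, e.g.:
 *
 *	dev_dbg(xp, "registering channel %d\n", ch_number);
 *	dev_err(xp, "unable to initialize UV support\n");
 */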

/* max #of partitions possible */
short xp_max_npartitions;
EXPORT_SYMBOL_GPL(xp_max_npartitions);

short xp_partition_id;
EXPORT_SYMBOL_GPL(xp_partition_id);

u8 xp_region_size;
EXPORT_SYMBOL_GPL(xp_region_size);

unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);

unsigned long (*xp_socket_pa) (unsigned long gpa);
EXPORT_SYMBOL_GPL(xp_socket_pa);

enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
				    const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);

int (*xp_cpu_to_nasid) (int cpuid);
EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);

enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
					unsigned long size);
EXPORT_SYMBOL_GPL(xp_expand_memprotect);
enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
					  unsigned long size);
EXPORT_SYMBOL_GPL(xp_restrict_memprotect);

/*
 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
 * users of XPC.
 */
struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
EXPORT_SYMBOL_GPL(xpc_registrations);

/*
 * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
 */
struct xpc_interface xpc_interface = { };
EXPORT_SYMBOL_GPL(xpc_interface);

/*
 * XPC calls this when it (the XPC module) has been loaded.
 */
void
xpc_set_interface(void (*connect) (int),
		  void (*disconnect) (int),
		  enum xp_retval (*send) (short, int, u32, void *, u16),
		  enum xp_retval (*send_notify) (short, int, u32, void *, u16,
						 xpc_notify_func, void *),
		  void (*received) (short, int, void *),
		  enum xp_retval (*partid_to_nasids) (short, void *))
{
	xpc_interface.connect = connect;
	xpc_interface.disconnect = disconnect;
	xpc_interface.send = send;
	xpc_interface.send_notify = send_notify;
	xpc_interface.received = received;
	xpc_interface.partid_to_nasids = partid_to_nasids;
}
EXPORT_SYMBOL_GPL(xpc_set_interface);
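
/*
 * Sketch (an assumption about the companion XPC module, not code from this
 * file): when XPC loads, it is expected to publish its entry points through
 * xpc_set_interface(), roughly like:
 *
 *	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
 *			  xpc_initiate_send, xpc_initiate_send_notify,
 *			  xpc_initiate_received,
 *			  xpc_initiate_partid_to_nasids);
 *
 * Until that call is made, xpc_interface's members remain NULL and the
 * xpc_connect()/xpc_disconnect() wrappers below simply skip the callouts.
 */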

/*
 * XPC calls this when it (the XPC module) is being unloaded.
 */
void
xpc_clear_interface(void)
{
	memset(&xpc_interface, 0, sizeof(xpc_interface));
}
EXPORT_SYMBOL_GPL(xpc_clear_interface);

/*
 * Register for automatic establishment of a channel connection whenever
 * a partition comes up.
 *
 * Arguments:
 *
 *	ch_number - channel # to register for connection.
 *	func - function to call for asynchronous notification of channel
 *	       state changes (i.e., connection, disconnection, error) and
 *	       the arrival of incoming messages.
 *	key - pointer to optional user-defined value that gets passed back
 *	      to the user on any callouts made to func.
 *	payload_size - size in bytes of the XPC message's payload area which
 *		       contains a user-defined message. The user should make
 *		       this large enough to hold their largest message.
 *	nentries - max #of XPC message entries a message queue can contain.
 *		   The actual number, which is determined when a connection
 *		   is established and may be less than requested, will be
 *		   passed to the user via the xpConnected callout.
 *	assigned_limit - max number of kthreads allowed to be processing
 *			 messages (per connection) at any given instant.
 *	idle_limit - max number of kthreads allowed to be idle at any given
 *		     instant.
 */
enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
	    u16 nentries, u32 assigned_limit, u32 idle_limit)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
	DBUG_ON(payload_size == 0 || nentries == 0);
	DBUG_ON(func == NULL);
	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);

	if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
		return xpPayloadTooBig;

	registration = &xpc_registrations[ch_number];

	if (mutex_lock_interruptible(&registration->mutex) != 0)
		return xpInterrupted;

	/* if XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func != NULL) {
		mutex_unlock(&registration->mutex);
		return xpAlreadyRegistered;
	}

	/* register the channel for connection */
	registration->entry_size = XPC_MSG_SIZE(payload_size);
	registration->nentries = nentries;
	registration->assigned_limit = assigned_limit;
	registration->idle_limit = idle_limit;
	registration->key = key;
	registration->func = func;

	mutex_unlock(&registration->mutex);

	if (xpc_interface.connect)
		xpc_interface.connect(ch_number);

	return xpSuccess;
}
EXPORT_SYMBOL_GPL(xpc_connect);
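
/*
 * Illustrative sketch of a caller (hypothetical names; assumes the
 * xpc_channel_func signature declared in xp.h):
 *
 *	static void my_channel_func(enum xp_retval reason, short partid,
 *				    int ch_number, void *data, void *key)
 *	{
 *		... handle connection, disconnection, errors and the
 *		    arrival of incoming messages ...
 *	}
 *
 *	enum xp_retval ret;
 *
 *	ret = xpc_connect(MY_CH_NUMBER, my_channel_func, NULL, 128, 16, 4, 2);
 *	if (ret != xpSuccess)
 *		... handle failure (e.g., xpAlreadyRegistered) ...
 *
 * The registration stays in effect (and the channel is reconnected as
 * partitions come and go) until xpc_disconnect(MY_CH_NUMBER) is called.
 */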

/*
 * Remove the registration for automatic connection of the specified channel
 * when a partition comes up.
 *
 * Before returning, xpc_disconnect() will wait until all connections on the
 * specified channel have been closed/torn down. So the caller can be assured
 * that they will not be receiving any more callouts from XPC to their
 * function registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_disconnect(int ch_number)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	registration = &xpc_registrations[ch_number];

	/*
	 * We've decided not to make this a down_interruptible(), since we
	 * figured XPC's users will just turn around and call xpc_disconnect()
	 * again anyways, so we might as well wait, if need be.
	 */
	mutex_lock(&registration->mutex);

	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func == NULL) {
		mutex_unlock(&registration->mutex);
		return;
	}

	/* remove the connection registration for the specified channel */
	registration->func = NULL;
	registration->key = NULL;
	registration->nentries = 0;
	registration->entry_size = 0;
	registration->assigned_limit = 0;
	registration->idle_limit = 0;

	if (xpc_interface.disconnect)
		xpc_interface.disconnect(ch_number);

	mutex_unlock(&registration->mutex);

	return;
}
EXPORT_SYMBOL_GPL(xpc_disconnect);
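
/*
 * Sketch of a typical teardown path (hypothetical names): a user that
 * registered a channel in its module init path would unregister it on exit:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		xpc_disconnect(MY_CH_NUMBER);
 *	}
 *
 * Since xpc_disconnect() blocks until XPC has torn down any active
 * connections on the channel, no further callouts arrive after it returns.
 */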

static int __init
xp_init(void)
{
	enum xp_retval ret;
	int ch_number;

	/* initialize the connection registration mutex */
	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
		mutex_init(&xpc_registrations[ch_number].mutex);

	if (is_uv_system())
		ret = xp_init_uv();
	else
		ret = 0;

	if (ret != xpSuccess)
		return ret;

	return 0;
}

module_init(xp_init);

static void __exit
xp_exit(void)
{
	if (is_uv_system())
		xp_exit_uv();
}

module_exit(xp_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");