Cross-reference results for /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/misc/sgi-xp/

Lines Matching refs:mq

105 xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
107 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
110 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
112 if (mq->irq < 0) {
114 -mq->irq);
115 return mq->irq;
118 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
122 mq->irq = SGI_XPC_ACTIVATE;
124 mq->irq = SGI_XPC_NOTIFY;
128 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
129 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
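Lines 105-129 are the body of xpc_get_gru_mq_irq_uv(), which binds the message queue to an interrupt. On x86_64 the UV core allocates the vector via uv_setup_irq() and the driver caches the resulting MMR value; on IA64 SGI UV the driver picks a fixed SAL-defined vector by queue name and programs the watchlist MMR itself. A sketch of how the matched fragments fit together (a reconstruction of the 2.6.36 source, not a verbatim copy; the #ifdef arms and the strcmp() dispatch are inferred from the fragments):

    static int
    xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
    {
            int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

    #if defined CONFIG_X86_64
            /* let the UV core pick a vector and target it at 'cpu' */
            mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
                                   UV_AFFINITY_CPU);
            if (mq->irq < 0) {
                    dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
                            -mq->irq);
                    return mq->irq;
            }
            /* remember the MMR value uv_setup_irq() programmed */
            mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

    #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
            /* fixed SAL-defined vectors, selected by queue name */
            if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
                    mq->irq = SGI_XPC_ACTIVATE;
            else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
                    mq->irq = SGI_XPC_NOTIFY;
            else
                    return -EINVAL;

            /* watchlist MMR: target CPU in the high bits, vector in the low */
            mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
            uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
    #else
            #error not a supported configuration
    #endif

            return 0;
    }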
138 xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
141 uv_teardown_irq(mq->irq);
147 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
150 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
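The matching release path (lines 138-150) mirrors this: x86_64 hands the vector back with uv_teardown_irq(), while IA64 disarms the interrupt by rewriting the watchlist MMR. A sketch; the disarm value written to the MMR (mask bit only) is a reconstruction:

    static void
    xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
    {
    #if defined CONFIG_X86_64
            uv_teardown_irq(mq->irq);

    #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
            int mmr_pnode;
            unsigned long mmr_value;

            mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
            mmr_value = 1UL << 16;  /* mask bit set: no further interrupts */

            uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
    #else
            #error not a supported configuration
    #endif
    }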
157 xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
162 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
164 ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
165 mq->order, &mq->mmr_offset);
172 ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
173 mq->order, &mq->mmr_offset);
183 mq->watchlist_num = ret;
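Lines 157-183 are xpc_gru_mq_watchlist_alloc_uv(). A GRU watchlist entry is what makes the hardware raise the interrupt configured above whenever a message lands in the queue's pages; IA64 allocates it through SAL (sn_mq_watchlist_alloc()), x86_64 through a UV BIOS call. On success the return value doubles as the watchlist number. Sketch (error strings and the -EBUSY mapping on the x86_64 arm are reconstructions):

    static int
    xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
    {
            int ret;

    #if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
            int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

            ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
                                        mq->order, &mq->mmr_offset);
            if (ret < 0) {
                    dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, "
                            "ret=%d\n", ret);
                    return ret;
            }
    #elif defined CONFIG_X86_64
            ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
                                             mq->order, &mq->mmr_offset);
            if (ret < 0) {
                    dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
                            "ret=%d\n", ret);
                    return -EBUSY;
            }
    #else
            #error not a supported configuration
    #endif

            /* a non-negative return value is the watchlist number */
            mq->watchlist_num = ret;
            return 0;
    }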
188 xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
191 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
194 ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
197 ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
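Freeing the watchlist entry (lines 188-197) is symmetric, again split by architecture. In the original a failed free is treated as fatal; the exact success constants checked below (BIOS_STATUS_SUCCESS, SALRET_OK) are reconstructions:

    static void
    xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
    {
            int ret;
            int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

    #if defined CONFIG_X86_64
            ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
            BUG_ON(ret != BIOS_STATUS_SUCCESS);
    #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
            ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
            BUG_ON(ret != SALRET_OK);
    #else
            #error not a supported configuration
    #endif
    }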
214 struct xpc_gru_mq_uv *mq;
217 mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
218 if (mq == NULL) {
225 mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
227 if (mq->gru_mq_desc == NULL) {
235 mq->order = pg_order + PAGE_SHIFT;
236 mq_size = 1UL << mq->order;
238 mq->mmr_blade = uv_cpu_to_blade_id(cpu);
245 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
249 mq->address = page_address(page);
251 /* enable generation of irq when GRU mq operation occurs to this mq */
252 ret = xpc_gru_mq_watchlist_alloc_uv(mq);
256 ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
260 ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
263 mq->irq, -ret);
269 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
270 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
279 /* allow other partitions to access this GRU mq */
280 xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
286 return mq;
290 free_irq(mq->irq, NULL);
292 xpc_release_gru_mq_irq_uv(mq);
294 xpc_gru_mq_watchlist_free_uv(mq);
296 free_pages((unsigned long)mq->address, pg_order);
298 kfree(mq->gru_mq_desc);
300 kfree(mq);
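Lines 214-300 are xpc_create_gru_mq_uv() and its unwind path: allocate the bookkeeping struct and GRU descriptor, grab node-local pages for the queue, arm the watchlist, bind and register the IRQ, create the GRU message queue over the pages, then open the memory to other partitions. Each failure jumps to a label that releases only what was already acquired, in reverse order. A condensed sketch assembled around the matched fragments (the signature, goto label names, and the elided dev_err() calls are reconstructions):

    static struct xpc_gru_mq_uv *
    xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                         irq_handler_t irq_handler)
    {
            enum xp_retval xp_ret;
            int ret, nid, nasid, pg_order;
            struct page *page;
            struct xpc_gru_mq_uv *mq;
            struct uv_IO_APIC_route_entry *mmr_value;

            mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
            if (mq == NULL) {
                    ret = -ENOMEM;          /* dev_err() elided */
                    goto out_0;
            }

            mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
                                      GFP_KERNEL);
            if (mq->gru_mq_desc == NULL) {
                    ret = -ENOMEM;          /* dev_err() elided */
                    goto out_1;
            }

            /* round the queue up to whole pages and record its blade */
            pg_order = get_order(mq_size);
            mq->order = pg_order + PAGE_SHIFT;
            mq_size = 1UL << mq->order;

            mq->mmr_blade = uv_cpu_to_blade_id(cpu);

            nid = cpu_to_node(cpu);
            page = alloc_pages_exact_node(nid,
                                          GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                          pg_order);
            if (page == NULL) {
                    dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
                            "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
                    ret = -ENOMEM;
                    goto out_2;
            }
            mq->address = page_address(page);

            /* enable generation of irq when GRU mq operation occurs to this mq */
            ret = xpc_gru_mq_watchlist_alloc_uv(mq);
            if (ret != 0)
                    goto out_3;

            ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
            if (ret != 0)
                    goto out_4;

            ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
            if (ret != 0) {
                    dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
                            mq->irq, -ret);
                    goto out_5;
            }

            nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

            mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
            ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
                                           nasid, mmr_value->vector,
                                           mmr_value->dest);
            if (ret != 0) {
                    ret = -EINVAL;          /* dev_err() elided */
                    goto out_6;
            }

            /* allow other partitions to access this GRU mq */
            xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
            if (xp_ret != xpSuccess) {
                    ret = -EACCES;
                    goto out_6;
            }

            return mq;

            /* something went wrong: unwind in reverse order */
    out_6:
            free_irq(mq->irq, NULL);
    out_5:
            xpc_release_gru_mq_irq_uv(mq);
    out_4:
            xpc_gru_mq_watchlist_free_uv(mq);
    out_3:
            free_pages((unsigned long)mq->address, pg_order);
    out_2:
            kfree(mq->gru_mq_desc);
    out_1:
            kfree(mq);
    out_0:
            return ERR_PTR(ret);
    }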
306 xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
312 /* disallow other partitions to access GRU mq */
313 mq_size = 1UL << mq->order;
314 ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
317 /* unregister irq handler and release mq irq/vector mapping */
318 free_irq(mq->irq, NULL);
319 xpc_release_gru_mq_irq_uv(mq);
321 /* disable generation of irq when GRU mq op occurs to this mq */
322 xpc_gru_mq_watchlist_free_uv(mq);
324 pg_order = mq->order - PAGE_SHIFT;
325 free_pages((unsigned long)mq->address, pg_order);
327 kfree(mq);
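xpc_destroy_gru_mq_uv() (lines 306-327) undoes creation in reverse: revoke cross-partition access, unregister the handler and release the IRQ/vector mapping, drop the watchlist entry, then free the queue pages and the bookkeeping struct. Sketch (the xpSuccess BUG_ON is a reconstruction; note the matched lines show only kfree(mq) in this range):

    static void
    xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
    {
            unsigned int mq_size;
            int pg_order;
            int ret;

            /* disallow other partitions to access GRU mq */
            mq_size = 1UL << mq->order;
            ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
            BUG_ON(ret != xpSuccess);

            /* unregister irq handler and release mq irq/vector mapping */
            free_irq(mq->irq, NULL);
            xpc_release_gru_mq_irq_uv(mq);

            /* disable generation of irq when GRU mq op occurs to this mq */
            xpc_gru_mq_watchlist_free_uv(mq);

            pg_order = mq->order - PAGE_SHIFT;
            free_pages((unsigned long)mq->address, pg_order);

            kfree(mq);
    }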
664 gru_mq_desc->mq = NULL;
763 * !!! mq message our way by doing what the activate IRQ handler would
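The last two hits sit outside the setup/teardown code. Line 664 clears the cached mq pointer after a remote partition's queue descriptor has been copied into local memory (the mq it points at lives in the remote partition, so the local copy must not dereference it), and line 763 is the middle of a comment in the activate path explaining that the local partition fakes an incoming activate mq message by doing what the activate IRQ handler would have done had one really been sent. A sketch of the first, assuming it lives in a descriptor-caching helper of roughly this shape (the name xpc_cache_remote_gru_mq_desc_uv and the xp_remote_memcpy() call are reconstructions):

    static enum xp_retval
    xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
                                    unsigned long gru_mq_desc_gpa)
    {
            enum xp_retval ret;

            /* copy the remote partition's descriptor over the local one */
            ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
                                   sizeof(struct gru_message_queue_desc));
            if (ret == xpSuccess)
                    gru_mq_desc->mq = NULL; /* queue memory is remote, not mapped here */

            return ret;
    }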