/*
* (c) 1999--2000 Martin Mares <mj@suse.cz>
* (c) 2003 Eric Biederman <ebiederm@xmission.com>
* (c) 2003 Linux Networx
*/
/* lots of mods by ron minnich (rminnich@lanl.gov), with
* the final architecture guidance from Tom Merritt (tjm@codegen.com)
* In particular, we changed from the one-pass original version to
* Tom's recommended multiple-pass version. I wasn't sure about doing
* it with multiple passes, until I actually started doing it and saw
* the wisdom of Tom's recommendations ...
*
* Lots of cleanups by Eric Biederman to handle bridges, and to
* handle resource allocation for non-pci devices.
*/
#include <console/console.h>
#include <bitops.h>
#include <arch/io.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <stdlib.h>
#include <string.h>
/** Linked list of ALL devices */
struct device *all_devices = &dev_root;
/** Pointer to the last device */
static struct device **last_dev_p = &dev_root.next;
/** The upper limit of the devices' MEM resources.
* Reserve 20M for the system */
#define DEVICE_MEM_HIGH 0xFEC00000UL
/** The lower limit of the devices' IO resources.
* Reserve 4k for ISA/Legacy devices */
#define DEVICE_IO_START 0x1000
/**
* @brief Allocate a new device structure.
*
* Allocate a new device structure and attach it to the device tree as a
* child of the parent bus.
*
* @param parent parent bus the newly created device is attached to.
* @param path path to the device to be created.
*
* @return pointer to the newly created device structure.
*
* @see device_path
*/
device_t alloc_dev(struct bus *parent, struct device_path *path)
{
device_t dev, child;
int link;
/* Find the last child of our parent */
for(child = parent->children; child && child->sibling; ) {
child = child->sibling;
}
dev = malloc(sizeof(*dev));
if (dev == 0) {
die("DEV: out of memory.\n");
}
memset(dev, 0, sizeof(*dev));
memcpy(&dev->path, path, sizeof(*path));
/* Append a new device to the global device list.
* The list is used to find devices once everything is set up.
*/
*last_dev_p = dev;
last_dev_p = &dev->next;
/* Initialize the back pointers in the link fields */
for(link = 0; link < MAX_LINKS; link++) {
dev->link[link].dev = dev;
dev->link[link].link = link;
}
/* Add the new device to the list of children of the bus. */
dev->bus = parent;
if (child) {
child->sibling = dev;
} else {
parent->children = dev;
}
/* If we don't have any other information about a device enable it */
dev->enabled = 1;
return dev;
}
/**
* @brief round a number up to an alignment.
* @param val the starting value
* @param roundup Alignment as a power of two
* @returns rounded up number
*/
static unsigned long round(unsigned long val, unsigned long roundup)
{
/* ROUNDUP MUST BE A POWER OF TWO. */
unsigned long inverse;
inverse = ~(roundup - 1);
val += (roundup - 1);
val &= inverse;
return val;
}
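/**
* @brief round a number down to an alignment.
* @param val the starting value
* @param round_down alignment as a power of two
* @returns rounded down number
*/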
static unsigned long round_down(unsigned long val, unsigned long round_down)
{
/* ROUND_DOWN MUST BE A POWER OF TWO. */
unsigned long inverse;
inverse = ~(round_down - 1);
val &= inverse;
return val;
}
/** Read the resources on all devices of a given bus.
* @param bus bus to read the resources on.
*/
static void read_resources(struct bus *bus)
{
struct device *curdev;
/* Walk through all of the devices and find which resources they need. */
for(curdev = bus->children; curdev; curdev = curdev->sibling) {
unsigned links;
int i;
if (curdev->resources > 0) {
continue;
}
if (!curdev->ops || !curdev->ops->read_resources) {
printk_err("%s missing read_resources\n",
dev_path(curdev));
continue;
}
if (!curdev->enabled) {
continue;
}
curdev->ops->read_resources(curdev);
/* Read in subtractive resources behind the current device */
links = 0;
for(i = 0; i < curdev->resources; i++) {
struct resource *resource;
resource = &curdev->resource[i];
if ((resource->flags & IORESOURCE_SUBTRACTIVE) &&
(!(links & (1 << resource->index))))
{
links |= (1 << resource->index);
read_resources(&curdev->link[resource->index]);
}
}
}
}
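/**
* State for walking a bus and picking its next largest resource.
* last       - the resource returned by the previous pick, or NULL
* result_dev - the device owning the best candidate found so far
* result     - the best candidate resource found so far
* seen_last  - set once the walk has passed 'last' again
*/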
struct pick_largest_state {
struct resource *last;
struct device *result_dev;
struct resource *result;
int seen_last;
};
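/**
* Consider one resource as a candidate for the next allocation.
*
* Resources are ordered by alignment first and size second. Anything
* that sorts above the previously returned resource 'last' is skipped,
* and equal entries are skipped until 'last' itself has been passed in
* the walk, so each resource is returned exactly once. Of the remaining
* candidates the best one seen so far is kept in the state.
*/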
static void pick_largest_resource(struct pick_largest_state *state,
struct device *dev, struct resource *resource)
{
struct resource *last;
last = state->last;
/* Be certain to pick the successor to last */
if (resource == last) {
state->seen_last = 1;
return;
}
if (last && (
(last->align < resource->align) ||
((last->align == resource->align) &&
(last->size < resource->size)) ||
((last->align == resource->align) &&
(last->size == resource->size) &&
(!state->seen_last)))) {
return;
}
if (!state->result ||
(state->result->align < resource->align) ||
((state->result->align == resource->align) &&
(state->result->size < resource->size))) {
state->result_dev = dev;
state->result = resource;
}
}
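/**
* Walk all resources of the given type on a bus, recursing into buses
* behind subtractive resources, and record the largest unpicked one.
*/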
static void find_largest_resource(struct pick_largest_state *state,
struct bus *bus, unsigned long type_mask, unsigned long type)
{
struct device *curdev;
for(curdev = bus->children; curdev; curdev = curdev->sibling) {
int i;
for(i = 0; i < curdev->resources; i++) {
struct resource *resource = &curdev->resource[i];
/* If it isn't the right kind of resource ignore it */
if ((resource->flags & type_mask) != type) {
continue;
}
/* If it is a subtractive resource recurse */
if (resource->flags & IORESOURCE_SUBTRACTIVE) {
struct bus *subbus;
subbus = &curdev->link[resource->index];
find_largest_resource(state, subbus, type_mask, type);
continue;
}
/* See if this is the largest resource */
pick_largest_resource(state, curdev, resource);
}
}
}
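/**
* Find the next largest resource of the given type on a bus.
*
* @param bus        the bus to search
* @param result_res on entry the previously returned resource (or NULL),
*                   on exit the resource found
* @param type_mask  mask applied to each resource's flags when matching
* @param type       the resource type to look for
* @return the device owning the resource, or NULL when none is left
*/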
static struct device *largest_resource(struct bus *bus,
struct resource **result_res,
unsigned long type_mask,
unsigned long type)
{
struct pick_largest_state state;
state.last = *result_res;
state.result_dev = 0;
state.result = 0;
state.seen_last = 0;
find_largest_resource(&state, bus, type_mask, type);
*result_res = state.result;
return state.result_dev;
}
/* compute_allocate_resource() is the guts of the resource allocator.
*
* The problem.
* - Allocate resource locations for every device.
* - Don't overlap, and follow the rules of bridges.
* - Don't overlap with resources in fixed locations.
* - Be efficient so we don't have ugly strategies.
*
* The strategy.
* - Devices that have fixed addresses are the minority so don't
* worry about them too much. Instead only use part of the address
* space for devices with programmable addresses. This easily handles
* everything except bridges.
*
* - PCI devices are required to have their sizes and their alignments
* equal. In this case an optimal solution to the packing problem
* exists. Allocate all devices from highest alignment to least
* alignment or vice versa. Use this.
*
* - So that we can handle more than just PCI, run two allocation passes on
* bridges. The first to see how large the resources are behind
* the bridge, and what their alignment requirements are. The
* second to assign a safe address to the devices behind the
* bridge. This allows me to treat a bridge as just a device with
* a couple of resources, and not need to special case it in the
* allocator. Also this allows handling of other types of bridges.
*
*/
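/**
* @brief Compute and allocate the resources of one type behind a bridge.
*
* @param bus       the bus whose children are allocated from the bridge window
* @param bridge    the bridge resource that provides the address window
* @param type_mask mask applied to each resource's flags when matching
* @param type      the resource type (flag bits) to allocate
*/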
void compute_allocate_resource(
struct bus *bus,
struct resource *bridge,
unsigned long type_mask,
unsigned long type)
{
struct device *dev;
struct resource *resource;
unsigned long base;
unsigned long align, min_align;
min_align = 0;
base = bridge->base;
printk_spew("%s compute_allocate_%s: base: %08lx size: %08lx align: %d gran: %d\n",
dev_path(bus->dev),
(bridge->flags & IORESOURCE_IO)? "io":
(bridge->flags & IORESOURCE_PREFETCH)? "prefmem" : "mem",
base, bridge->size, bridge->align, bridge->gran);
/* We want different minimum alignments for different kinds of
* resources. These minimums are not device type specific
* but resource type specific.
*/
if (bridge->flags & IORESOURCE_IO) {
min_align = log2(DEVICE_IO_ALIGN);
}
if (bridge->flags & IORESOURCE_MEM) {
min_align = log2(DEVICE_MEM_ALIGN);
}
/* Make certain I have read in all of the resources */
read_resources(bus);
/* Remember I haven't found anything yet. */
resource = 0;
/* Walk through all the devices on the current bus and
* compute the addresses.
*/
while((dev = largest_resource(bus, &resource, type_mask, type))) {
unsigned long size;
/* Do NOT, I repeat do not, ignore resources which have zero size.
* If they need to be ignored dev->read_resources should not even
* return them. Some resources must be set even when they have
* no size. PCI bridge resources are a good example of this.
*/
/* Propagate the resource alignment to the bridge register */
if (resource->align > bridge->align) {
bridge->align = resource->align;
}
/* Make certain we are dealing with a good minimum size */
size = resource->size;
align = resource->align;
if (align < min_align) {
align = min_align;
}
if (resource->flags & IORESOURCE_FIXED) {
continue;
}
if (resource->flags & IORESOURCE_IO) {
/* Don't allow potential aliases over the
* legacy pci expansion card addresses.
* Legacy pci decodes only 10 bits and
* uses 100h - 3ffh. Therefore, only 0 - ff
* can be used out of each 400h block of io
* space.
*/
if ((base & 0x300) != 0) {
base = (base & ~0x3ff) + 0x400;
}
/* Don't allow allocations in the VGA IO range.
* PCI has special cases for that.
*/
else if ((base >= 0x3b0) && (base <= 0x3df)) {
base = 0x3e0;
}
}
if (((round(base, 1UL << align) + size) -1) <= resource->limit) {
/* base must be aligned to size */
base = round(base, 1UL << align);
resource->base = base;
resource->flags |= IORESOURCE_ASSIGNED;
resource->flags &= ~IORESOURCE_STORED;
base += size;
printk_spew(
"%s %02x * [0x%08lx - 0x%08lx] %s\n",
dev_path(dev),
resource->index,
resource->base, resource->base + resource->size - 1,
(resource->flags & IORESOURCE_IO)? "io":
(resource->flags & IORESOURCE_PREFETCH)? "prefmem": "mem");
}
}
/* A pci bridge resource does not need to be a power
* of two size, but it does have a minimum granularity.
* Round the size up to that minimum granularity so we
* know not to place something else at an address positively
* decoded by the bridge.
*/
bridge->size = round(base, 1UL << bridge->gran) - bridge->base;
printk_spew("%s compute_allocate_%s: base: %08lx size: %08lx align: %d gran: %d done\n",
dev_path(bus->dev),
(bridge->flags & IORESOURCE_IO)? "io":
(bridge->flags & IORESOURCE_PREFETCH)? "prefmem" : "mem",
base, bridge->size, bridge->align, bridge->gran);
}
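/**
* @brief Pick one VGA device and route legacy VGA cycles to it.
*
* Enable memory and I/O decoding on the first VGA-class PCI device
* found, disable them on every other VGA device, and set the VGA
* enable bit on each bridge between the chosen device and the root.
*/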
static void allocate_vga_resource(void)
{
#warning "FIXME modify allocate_vga_resource so it is less pci centric!"
#warning "This function knows to much about PCI stuff, it should be just a ietrator/visitor."
/* FIXME handle the VGA pallette snooping */
struct device *dev, *vga;
struct bus *bus;
bus = 0;
vga = 0;
for(dev = all_devices; dev; dev = dev->next) {
if (((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) &&
((dev->class >> 8) != PCI_CLASS_DISPLAY_OTHER)) {
if (!vga) {
printk_debug("Allocating VGA resource %s\n",
dev_path(dev));
vga = dev;
}
if (vga == dev) {
/* All legacy VGA cards have MEM & I/O space registers */
dev->command |= PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
} else {
/* It isn't safe to enable other VGA cards */
dev->command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
}
}
}
if (vga) {
bus = vga->bus;
}
/* Now walk up the bridges setting the VGA enable */
while(bus) {
bus->bridge_ctrl |= PCI_BRIDGE_CTL_VGA;
bus = (bus == bus->dev->bus)? 0 : bus->dev->bus;
}
}
/** Assign the computed resources to the bridges and devices on the bus.
* Recurse to any bridges found on this bus first. Then do the devices
* on this bus.
*
* @param bus Pointer to the structure for this bus
*/
void assign_resources(struct bus *bus)
{
struct device *curdev;
printk_debug("ASSIGN RESOURCES, bus %d\n", bus->secondary);
for (curdev = bus->children; curdev; curdev = curdev->sibling) {
if (!curdev->ops || !curdev->ops->set_resources) {
printk_err("%s missing set_resources\n",
dev_path(curdev));
continue;
}
if (!curdev->enabled) {
continue;
}
curdev->ops->set_resources(curdev);
}
printk_debug("ASSIGNED RESOURCES, bus %d\n", bus->secondary);
}
/**
* @brief Enable the resources for a specific device
*
* @param dev the device whose resources are to be enabled
*
* Enable resources of the device by calling the device specific
* enable_resources() method.
*
* The parent's resources should be enabled first to avoid ordering
* problems. This is done by calling the parent's enable_resources()
* method and letting that method call its children's enable_resources()
* via enable_childrens_resources().
*
* Indirect mutual recursion:
*   enable_resources() -> dev->ops->enable_resources()
*   dev->ops->enable_resources() -> enable_childrens_resources()
*   enable_childrens_resources() -> enable_resources()
*/
void enable_resources(struct device *dev)
{
if (!dev->ops || !dev->ops->enable_resources) {
printk_err("%s missing enable_resources\n", dev_path(dev));
return;
}
if (!dev->enabled) {
return;
}
dev->ops->enable_resources(dev);
}
/**
* @brief Determine the existence of dynamic devices and construct dynamic
* device tree.
*
* Start from the root device 'dev_root', scan the buses in the system
* recursively, build the dynamic device tree according to the result
* of the probe.
*
* This function has no idea how to scan and probe buses and devices at all.
* It depends on the bus/device specific scan_bus() method to do it. The
* scan_bus() function also has to create the device structure and attach
* it to the device tree.
*/
void dev_enumerate(void)
{
struct device *root;
unsigned subordinate;
printk_info("Enumerating buses...\n");
root = &dev_root;
if (!root->ops || !root->ops->scan_bus) {
printk_err("dev_root missing scan_bus operation");
return;
}
subordinate = root->ops->scan_bus(root, 0);
printk_info("done\n");
}
/**
* @brief Configure devices on the devices tree.
*
* Starting at the root of the dynamic device tree, travel recursively,
* compute resources needed by each device and allocate them.
*
* I/O resources start at DEVICE_IO_START and grow upward. MEM resources start
* at DEVICE_MEM_HIGH and grow downward.
*
* Since the assignment is hierarchical we set the values into the dev_root
* struct.
*/
void dev_configure(void)
{
struct device *root;
printk_info("Allocating resources...\n");
root = &dev_root;
if (!root->ops || !root->ops->read_resources) {
printk_err("dev_root missing read_resources\n");
return;
}
if (!root->ops || !root->ops->set_resources) {
printk_err("dev_root missing set_resources\n");
return;
}
root->ops->read_resources(root);
/* Make certain the io devices are allocated somewhere safe. */
root->resource[0].base = DEVICE_IO_START;
root->resource[0].flags |= IORESOURCE_ASSIGNED;
root->resource[0].flags &= ~IORESOURCE_STORED;
/* Now reallocate the pci resources memory with the
* highest addresses I can manage.
*/
root->resource[1].base =
round_down(DEVICE_MEM_HIGH - root->resource[1].size,
1UL << root->resource[1].align);
root->resource[1].flags |= IORESOURCE_ASSIGNED;
root->resource[1].flags &= ~IORESOURCE_STORED;
/* Allocate the VGA I/O resource.. */
allocate_vga_resource();
/* Store the computed resource allocations into device registers ... */
root->ops->set_resources(root);
printk_info("done.\n");
}
/**
* @brief Enable devices on the device tree.
*
* Starting at the root, walk the tree and enable all devices/bridges by
* calling the device's enable_resources() method.
*/
void dev_enable(void)
{
printk_info("Enabling resourcess...\n");
/* now enable everything. */
enable_resources(&dev_root);
printk_info("done.\n");
}
/**
* @brief Initialize all devices in the global device list.
*
* Starting at the first device on the global device link list,
* walk the list and call a driver to do device specific setup.
*/
void dev_initialize(void)
{
struct device *dev;
printk_info("Initializing devices...\n");
for (dev = all_devices; dev; dev = dev->next) {
if (dev->enabled && dev->ops && dev->ops->init) {
printk_debug("%s init\n", dev_path(dev));
dev->ops->init(dev);
}
}
printk_info("Devices initialized\n");
}