Featured post

Top 5 books to refer for a VHDL beginner

VHDL (VHSIC-HDL, Very High-Speed Integrated Circuit Hardware Description Language) is a hardware description language used in electronic des...

Monday, 21 January 2013

SystemVerilog Modports

Modports in SystemVerilog are used to restrict access to the signals declared within an interface. The keyword modport indicates that the directions are declared as if inside the module.

Modports can have

  • input : Ports that need to be input.
  • output : Ports that need to be output.
  • inout : Ports that need to be inout
  • ref : Ports that need to be ref.

As an example of modport usage, consider an interface file for a memory controller, where we can have

  • Modports for memory
  • Modports for system side
  • Modports for testbench

Adding modports to an interface does not require that any of the modports be used when the interface is used. If no modport is specified in the module header or in the port connection, then all the nets and variables in the interface are accessible with direction inout or ref. The example below shows this in greater detail.

 //+++++++++++++++++++++++++++++++++++++++++++++++++
// Define the interface
//+++++++++++++++++++++++++++++++++++++++++++++++++
interface mem_if (input wire clk);
logic reset;
logic we_sys;
logic cmd_valid_sys;
logic ready_sys;
logic [7:0] data_sys;
logic [7:0] addr_sys;
logic we_mem;
logic ce_mem;
logic [7:0] datao_mem;
logic [7:0] datai_mem;
logic [7:0] addr_mem;
//=================================================
//
Modport for System interface
//=================================================
modport system (input clk,reset,we_sys, cmd_valid_sys,
addr_sys, datao_mem,
output we_mem, ce_mem, addr_mem,
datai_mem, ready_sys, ref data_sys);
//=================================================
// Modport for memory interface
//=================================================
modport memory (input clk,reset,we_mem, ce_mem,
addr_mem, datai_mem, output datao_mem);
//=================================================
// Modport for testbench
//=================================================
modport tb (input clk, ready_sys,
output reset,we_sys, cmd_valid_sys, addr_sys,
ref data_sys);

endinterface

//+++++++++++++++++++++++++++++++++++++++++++++++++
// Memory Model
//
// 256 x 8 synchronous RAM driven through the
// 'memory' modport of mem_if.
//+++++++++++++++++++++++++++++++++++++++++++++++++
module memory_model (mem_if.memory mif);

  // Storage array: 256 bytes.
  logic [7:0] mem [0:255];

  //=================================================
  // Single clocked process: when chip-enable is
  // high, write on we_mem = 1, read on we_mem = 0.
  //=================================================
  always @ (posedge mif.clk) begin
    if (mif.ce_mem) begin
      if (mif.we_mem)
        mem[mif.addr_mem] <= mif.datai_mem;
      else
        mif.datao_mem <= mem[mif.addr_mem];
    end
  end

endmodule

//+++++++++++++++++++++++++++++++++++++++++++++++++
// Memory Controller
//
// FSM that translates system-side commands
// (cmd_valid_sys / we_sys) into memory-side strobes
// and signals completion back on ready_sys.
//+++++++++++++++++++++++++++++++++++++++++++++++++
module memory_ctrl (mem_if.system sif);

  // DONE is declared but never entered by this FSM.
  typedef enum {IDLE,WRITE,READ,DONE} fsm_t;

  fsm_t state;

  always @ (posedge sif.clk)
    if (sif.reset) begin
      // Synchronous reset: park the FSM and release all buses.
      state         <= IDLE;
      sif.ready_sys <= 0;
      sif.we_mem    <= 0;
      sif.ce_mem    <= 0;
      sif.addr_mem  <= 0;
      sif.datai_mem <= 0;
      sif.data_sys  <= 8'bz;   // tristate the shared data bus
    end else begin
      case(state)
        IDLE : begin
          sif.ready_sys <= 1'b0;
          // Latch the command and drive the memory-side strobes.
          if (sif.cmd_valid_sys && sif.we_sys) begin
            sif.addr_mem  <= sif.addr_sys;
            sif.datai_mem <= sif.data_sys;
            sif.we_mem    <= 1'b1;
            sif.ce_mem    <= 1'b1;
            state         <= WRITE;
          end
          if (sif.cmd_valid_sys && ~sif.we_sys) begin
            sif.addr_mem  <= sif.addr_sys;
            sif.datai_mem <= sif.data_sys;
            sif.we_mem    <= 1'b0;
            sif.ce_mem    <= 1'b1;
            state         <= READ;
          end
        end
        WRITE : begin
          // Hold ready until the requester drops cmd_valid_sys.
          sif.ready_sys <= 1'b1;
          if (~sif.cmd_valid_sys) begin
            sif.addr_mem  <= 8'b0;
            sif.datai_mem <= 8'b0;
            sif.we_mem    <= 1'b0;
            sif.ce_mem    <= 1'b0;
            state         <= IDLE;
          end
        end
        READ : begin
          sif.ready_sys <= 1'b1;
          sif.data_sys  <= sif.datao_mem;  // forward read data to the system side
          if (~sif.cmd_valid_sys) begin
            // Command complete: release the memory bus and tristate data.
            // (A redundant duplicate ready_sys assignment was removed here.)
            sif.addr_mem  <= 8'b0;
            sif.datai_mem <= 8'b0;
            sif.we_mem    <= 1'b0;
            sif.ce_mem    <= 1'b0;
            state         <= IDLE;
            sif.data_sys  <= 8'bz;
          end
        end
        default : state <= IDLE;  // recover from any illegal state (incl. DONE)
      endcase
    end

endmodule

//+++++++++++++++++++++++++++++++++++++++++++++++++
// Test program
//
// Stimulus through the 'tb' modport of mem_if:
// apply reset, issue 4 write commands with random
// data to addresses 0..3, then read the same
// addresses back and display each transaction.
//+++++++++++++++++++++++++++++++++++++++++++++++++
program test(mem_if.tb tif);

initial begin
// Assert reset and idle all command signals.
tif.reset <= 1;
tif.we_sys <= 0;
tif.cmd_valid_sys <= 0;
tif.addr_sys <= 0;
// data_sys is a shared (ref) bus: park it at high-Z.
tif.data_sys <= 8'bz;
#100 tif.reset <= 0;
// Write phase: 4 write commands to addresses 0..3.
for (int i = 0; i < 4; i ++) begin
@ (posedge tif.clk);
tif.addr_sys <= i;
tif.data_sys <= $random;
tif.cmd_valid_sys <= 1;
tif.we_sys <= 1;
// Wait for the controller's completion handshake.
@ (posedge tif.ready_sys);
$display("@%0dns Writing address %0d with data %0x",
$time, i,tif.data_sys);
@ (posedge tif.clk);
// Drop the command and release the data bus.
tif.addr_sys <= 0;
tif.data_sys <= 8'bz;
tif.cmd_valid_sys <= 0;
tif.we_sys <= 0;
end
// Idle gap between the write and read phases.
repeat (10) @ (posedge tif.clk);
// Read phase: read back addresses 0..3.
for (int i= 0; i < 4; i ++) begin
@ (posedge tif.clk);
tif.addr_sys <= i;
tif.cmd_valid_sys <= 1;
tif.we_sys <= 0;
@ (posedge tif.ready_sys);
@ (posedge tif.clk);
// data_sys now carries the value driven by the controller.
$display("@%0dns Reading address %0d, Got data %0x",
$time, i,tif.data_sys);
tif.addr_sys <= 0;
tif.cmd_valid_sys <= 0;
end
#10 $finish;
end

endprogram

//+++++++++++++++++++++++++++++++++++++++++++++++++
// Testbench top
//+++++++++++++++++++++++++++++++++++++++++++++++++
module interface_modports();

  // Free-running clock: 20ns period, starts low.
  logic clk = 0;
  always #10 clk = ~clk;

  //=================================================
  // Instantiate the interface, the DUTs and the
  // test program, all sharing the same bundle.
  //=================================================
  mem_if       miff    (clk);
  memory_ctrl  U_ctrl  (miff);
  memory_model U_model (miff);
  test         U_test  (miff);

endmodule

You can download the file interface_modports.sv here

 


Simulation Result : Modport

 @150ns Writing address 0 with data 24
@230ns Writing address 1 with data 81
@310ns Writing address 2 with data 9
@390ns Writing address 3 with data 63
@690ns Reading address 0, Got data 24
@770ns Reading address 1, Got data 81
@850ns Reading address 2, Got data 9
@930ns Reading address 3, Got data 63








Get free daily email updates!



Follow us!


Is ReRAM the end of NAND flash?

A primary storage technology: ReRAM.

NAND flash stores data in a little cloud of electrons in a quantum well. The presence or absence of charge - or the strength of the charge - tells us what bits are stored.

ReRAM stores data through changes in the resistance of a cell. There are a variety of ReRAM technologies in development, including phase-change memory (PCM) and HP's memristors, based on at least a half-dozen competing materials.

Expect healthy competition as the industry and buyers sort out the details.

Advantages

While different implementations have different specs, all ReRAM has key advantages over today's common NAND flash.

  • Speed. ReRAM can be written much faster - in nanoseconds rather than milliseconds - making it better for high-performance applications.
  • Endurance. MLC flash - the most common - can only handle about 10,000 writes. ReRAM can handle millions.
  • Power. Researchers have demonstrated micro-Amp write power and expect to get in the nano-Amp range soon, which makes ReRAM much more power efficient than NAND flash, which requires voltage pumps to achieve the 20 volts required for writes.

The Storage Bits take

NAND flash will retain advantages in cost and density for the foreseeable future, meaning that it will be here for decades to come. So where will ReRAM fit in the storage hierarchy?

  • Data integrity. Losing a snapshot is no big deal. Losing your checking account deposit is. Mission critical applications will prefer ReRAM devices - and can afford them.
  • Performance. Today's SSDs go through many contortions to give good performance - and don't succeed all that well. A fast medium removes complexity as well as increasing performance.
  • Mobility. Depending on how the never-ending tug-of-war between network bandwidth and memory capacity develops, consumers may come to prefer large capacity storage on their mobile devices. If so, ReRAM's power-sipping ways will be an asset on high-end products.

Toshiba is well-positioned to enter these high-end markets with SSDs analogous to today's 15k disks. It may not be a huge market, but the margins will make it worthwhile.

Other vendors, including Panasonic, Micron and Samsung, are also working on ReRAM products. Another interesting question: to what extent will fast ReRAM replace DRAM in systems?

Get free daily email updates!

Follow us!

Tuesday, 15 January 2013

SEMI Industry spending $32.4B this year on IC gear

Fab equipment spending saw a drastic dip in 2H12 and 1Q13 is expected to be even lower, says SEMI, which reckons that the projected number of facilities equipping will drop from 212 in 2012 to 182 in 2013.

Spending on fab equipment for System LSI is expected to drop in 2013. Spending for Flash declined rapidly in 2H12 (by over 40 %) but is expected to pick up by 2H13. The foundry sector is expected to increase spending in 2013, led by major player TSMC, as well as Samsung and Global foundries.

Fab construction:
While fab construction spending slowed in 2012, at -15%,  SEMI  projects an increase of 3.7 % in 2013 (from $5.6bn in 2012 to $5.8bn  in 2013).

The report tracks 34 fab construction projects for 2013 (down from 51 in 2012).  An additional 10 new construction projects with various probabilities may start in 2013. The largest increase for construction spending in 2013 is expected to be for dedicated foundries and Flash related facilities.

Many device manufacturers are hesitating to add capacity due to declining average selling prices and high inventories.

However, SEMI reckons flash capacity will grow nearly 6 % by mid-2013, adding over 70,000 wpm.

SEMI also foresees a rapid increase of installed capacity for new technology nodes, not only for 28nm but also from 24nm to 18nm and first ramps for 17nm to 13nm in 2013.

SEMI cautiously forecasts fab equipment spending in 2013 to range from minus 5 % to plus 3 %.

Get free daily email updates!

Follow us!

Sunday, 13 January 2013

Full Speed Ahead For FPGA

In the world of high-frequency trading, where speed matters most, technology that can gain a crucial split-second advantage over a rival is valued above all others.

And in what could be the next phase of HFT, firms are looking more and more to hardware solutions, such as field-programmable gate array (FPGA), as it can offer speed gains on the current software used by HFT firms.

FPGA technology, which allows for an integrated circuit to be designed or configured after manufacturing, has been around for decades but has only been on the radar of HFT firms for a couple of years. But new solutions are beginning to pop up that may eventually see FPGA become more viable and be the latest must-have tool in the high-speed arms race.

For instance, a risk calculation that can take 30 microseconds to perform by a software-based algorithm takes just three microseconds with FPGA.

Current HFT platforms are typically implemented using software on computers with high-performance network adapters. However, the downside of FPGA is that it is generally complicated and time consuming to set up, as well as to re-program, as the programmer has to translate an algorithm into the design of an electronic circuit and describe that design in specialized hardware description language.

The programming space on FPGA is also limited, so programs can’t be too big currently. Although, some tasks such as ‘circuit breakers’ are an ideal current use for FPGA technology.

It is the drawbacks, as well as the costs involved, that are, at present, holding back trading firms from taking up FPGA in greater numbers. However, because of the speed gains that it offers, many resources are being poured into FPGA in a bid to make the technology more accessible—and some technology firms are now beginning to claim significant speed savings with their products.

Cheetah Solutions, a provider of hardware solutions for financial trading, is one firm that says it can now offer reconfigurable FPGA systems to trading firms. It says its Cheetah Framework provides building blocks which can be configured in real time by a host server and an algorithm can execute entirely in an FPGA-enabled network card with the server software taking only a supervisory role by monitoring the algo’s performance and adapting the hardware algo on the go.

“True low latency will only be achieved through total hardware solutions which guarantee deterministic low latency,” said Peter Fall, chief executive of Cheetah Solutions. “But if the market moves, you want to be able to tweak an algorithm or change it completely to take advantage of current conditions. Traditional FPGA programming may take weeks to make even a simple change whereas Cheetah Framework provides on-the-fly reconfigurability.”

Another technology firm to claim that it can make automated trading strategies even faster and more efficient is U.K.-based Celoxica, which recently debuted its new futures trading platform, based on FPGA technology, which involves a circuit on one small chip that can be programmed by the customer.

Celoxica says the platform is designed to accelerate the flow of market data into trading algorithms to make trading faster. It covers multiple trading strategies and asset classes including fixed income, commodities and foreign exchange.

“For futures trading, processing speed, determinism and throughput continue to play a crucial role in the success of principle trading firms and hedge funds trading on the global futures markets,” said Jean Marc Bouleier, chairman and chief executive at Celoxica. “Our clients and partners can increase focus on their trading strategies for CME, ICE, CFE, Liffe US, Liffe and Eurex.”

While last August, Fixnetix, a U.K. trading technology firm, said that it had signed up a handful of top-tier brokers to use its FPGA hardware chip, which executes deals, compliance and risk checks, suggesting that this niche technology is picking up speed rapidly.

Tuesday, 8 January 2013

Metric driven Verification methodology

As the design complexity increases, the use of traditional verification methodology becomes minimal for verifying hardware designs. Directed Tests were used quite long back. Later, Coverage Driven Verification methodology (CDV) came up. In directed tests approach, verification engineer is going to state exactly what stimulus should be applied to the Design Under Test (DUT). This can be applied only for small designs which has very limited features.

As the design became more complex, verification engineers started looking for the possibility of checking the effectiveness of the verification, or in other words the features covered during verification. This is the whole idea behind CDV, which is done by setting up cover-groups for the features to be verified and also for coverage closure. The stimulus generation is random (by using Constrained Random Generation method) for CDV, so this approach is much more effective than directed tests. CDV improves productivity and also quality, but you will find difficulties in planning and estimating the verification completion. For complex designs there will be thousands of cover-groups and it is difficult to map with the specification.

MDV_glimpse1

Metric Driven Verification (MDV) is a proven methodology for verifying hardware designs which has been introduced by Cadence. This is based on CDV approach, but overcomes pitfalls in CDV approach. In MDV flow, features are stated in an executable verification plan. This is the first phase for the verification and later this will be correlated with the actual cover-groups. This uses constrained random for stimulus generation which helps to have better coverage than traditional simulation method.

Different stages in Metric Driven Verification Flow:
The different stages in MDV flow are plan, construct, execute, measure and analyze. The coverage information from “measure” stage will be mapped to verification plan and do the analysis to see which features are already verified with existing tests and the given seeds. Having this information upfront helps to improve the verification environment and hence there will not be any chance of missing out the planned features.

The verification plan is a living document to achieve the goal of verifying the functionality of the design completely. This needs to correlate functional specification, designers’ intent and implementation of test-bench. The plan can be an XML file, a spreadsheet, a document or a text file and defines exactly what needs to be verified. Different sections can be made in verification plan like interested features, co-features, interface features etc. A good and meaningful verification plan always helps the verification engineers to achieve his final goal by correlating different coverage results to each feature. It also helps to measure the progress of verification at different stages and can re-evaluate estimated effort if required.

Without a plan it is always difficult to differentiate high priority and low priority features and all coverage information will appear flat. The verification engineers will not have a clear picture on the progress or verification closure.

MDV_Flow2

The next step is to construct a verification environment. The verification engineers start constructing an environment by reusing existing verification IPs, reusing available UVM/OVM libraries and/or developing from scratch some part of the environment. This depends on what you decide in the planning stage. The test-bench and some of the test cases will be ready by this time.

Once the verification environment is ready, test cases can be executed and results checked. The tool vManager from Cadence can fire the regression and can easily capture the result and correlate with verification plan, if you specify the v-plan feature information while defining the coverage in your code. Incisive Metric Centre is now the default way of viewing coverage as a unified coverage browser, which clearly shows up what part of the design has been exercised.

Once the coverage information is available, this should be analyzed with the v-plan. Cadence INCISIV tool package helps to get a clear picture on v-plan to feature mapping against the coverage result. It also shows coverage based ranking to see which test is most effective and which tests are redundant. The tests with ranking id of -1 is redundant and can be filtered out while ranking id of 0 would be the most effective test. We can find out the ranking of other tests as well and the effective improvement in the coverage by executing those tests.

By having better verification planning and management and correlating with coverage, MDV flow significantly improves the productivity of your verification.

Get free daily email updates!

Follow us!

Creating .lib file from Verilog netlist

Creating a dummy .lib file is something every physical design engineer has done now and then. If you have a verilog model of the block available, your task gets easier. The following script automates .lib generation from your verilog netlist. Use this as a dummy .lib to get your design flow going or use it as a template for your analog blocks for modifying the values.

Customize, edit and use. The script creates a simple Data Structure. So any modification can be done pretty easily. The script parses the verilog for the module name specified, and collects the ports & directions. In the .lib file written out, a default capacitance and transition value is specified. This is a good starting point for your blocks.

Usage: create_lib <verilog_netlist> <module_name> [transition_value] [capacitance_value]

#!/usr/bin/perl
#
# create_lib : generate a dummy Liberty (.lib) timing model from a Verilog
# netlist.  Parses the port list of <module_name> in <verilog_netlist> and
# writes <module_name>.lib using default transition/capacitance values.
#
# usage: create_lib <verilog_netlist> <module_name> [transition] [capacitance] [signal_level]

use strict;
use warnings;

# Require at least the netlist and the module name.
# (Fixed: usage text now also lists the optional arguments the script accepts.)
if ($#ARGV < 1 ) {
    print "usage: create_lib <verilog_netlist> <module_name> [transition] [capacitance] [signal_level]\n";
    exit;
}

my $netlist = $ARGV[0];
my $module  = $ARGV[1];
my $tran    = 2.5;       # default max_transition for input pins/buses
my $cap     = 0.001;     # default pin capacitance
my $signal_level = "VDD";

# Optional command-line overrides.
$tran         = $ARGV[2] if defined $ARGV[2];
$cap          = $ARGV[3] if defined $ARGV[3];
$signal_level = $ARGV[4] if defined $ARGV[4];

# File handles are file-scoped lexicals so the subs below can use them.
# Three-argument open avoids mode/filename parsing surprises.
my $FF;
my $FO;
open $FF, '<', $netlist      or die "Can't open $netlist : $!";
open $FO, '>', "$module.lib" or die "Can't open $module.lib for write : $!";

my $db = createTopLevelDB();
createDotLib($db, $FO);

close $FF;
close $FO;

#=====================================================================
# createDotLib($topLevelDBRef, $FO)
#
# Write a Liberty (.lib) description of the parsed module to the
# already-opened handle $FO: library header and units, one 'type'
# group per distinct bus width, then a 'cell' group containing all
# scalar pins and buses.  Uses the file-scoped defaults $tran
# (max_transition on inputs) and $cap (pin capacitance).
#=====================================================================
sub createDotLib
{
    my $topLevelDBRef = shift;
    my $FO            = shift;

    ### Library header and unit attributes
    print $FO "library\($topLevelDBRef->{'design'}->{'cell'}\) {\n";
    print $FO "\n /* unit attributes */\n";
    print $FO "  time_unit : \"1ns\"\;\n";
    print $FO "  voltage_unit : \"1V\"\;\n";
    print $FO "  current_unit : \"1uA\"\;\n";
    print $FO "  pulling_resistance_unit : \"1kohm\"\;\n";
    print $FO "  leakage_power_unit : \"1nW\"\;\n";
    print $FO "  capacitive_load_unit\(1,pf\)\;\n\n";

    ### One Liberty 'type' group per distinct bus width ("bus_<hi>_<lo>").
    ### %type_done guards against emitting a duplicate group when the same
    ### width occurs in more than one direction (bug fix).
    my %type_done;
    foreach my $direction (keys(%{$topLevelDBRef->{'bus'}})) {
        foreach my $bus_type (keys %{$topLevelDBRef->{'bus'}->{$direction}}) {
            next if $type_done{$bus_type}++;
            my (undef, $bus_hi, $bus_lo) = split(/_/, $bus_type);
            my $bus_bits = $bus_hi + 1 - $bus_lo;
            # Bug fix: data_type / bit_width / bit_from statements were
            # previously emitted without their terminating ';', which is
            # invalid Liberty syntax.
            print $FO " type \($bus_type\) { \n";
            print $FO "   base_type : array ; \n";
            print $FO "   data_type : bit ; \n";
            print $FO "   bit_width : $bus_bits ; \n";
            print $FO "   bit_from : $bus_hi ; \n";
            print $FO "   bit_to : $bus_lo ; \n";
            print $FO "   downto : true ; \n";
            print $FO " } \n";
        }
    }

    ### Cell group: scalar pins first, then buses.
    print $FO "\n  cell\($topLevelDBRef->{'design'}->{'cell'}\) {\n";
    foreach my $direction (keys(%{$topLevelDBRef->{'pins'}})) {
        foreach my $pin_name (@{$topLevelDBRef->{'pins'}->{$direction}}) {
            print $FO ("    pin\($pin_name\) { \n");
            print $FO ("\tdirection : $direction ;\n");
            # Only input pins carry a max_transition constraint.
            if ($direction eq "input") {
                print $FO ("\tmax_transition : $tran;\n");
            }
            print $FO ("\tcapacitance : $cap; \n");
            print $FO ("    } \n");
        }
    }
    foreach my $direction (keys(%{$topLevelDBRef->{'bus'}})) {
        foreach my $bus_type (keys %{$topLevelDBRef->{'bus'}->{$direction}}) {
            my (undef, $bus_hi, $bus_lo) = split(/_/, $bus_type);
            foreach my $bus_name (@{$topLevelDBRef->{'bus'}->{$direction}{$bus_type}}) {
                chomp($bus_name);
                # Progress log to stdout (not part of the .lib output).
                print "BUS $bus_name : $bus_type : $direction \n";
                print $FO ("    bus\($bus_name\) { \n");
                print $FO ("\tbus_type : $bus_type ;\n");
                print $FO ("\tdirection : $direction ;\n");
                if ($direction eq "input") {
                    print $FO ("\tmax_transition : $tran;\n");
                }
                # Per-bit pin entries with the default capacitance.
                for (my $i = $bus_lo; $i <= $bus_hi; $i++) {
                    print $FO ("\tpin\($bus_name\[$i\]\) { \n");
                    print $FO ("\t\tcapacitance : $cap; \n");
                    print $FO ("\t} \n");
                }
                print $FO ("    } \n");
            }
        }
    }
    print $FO ("  } \n");   # end cell
    print $FO ("} \n");     # end library
}

#=====================================================================
# createTopLevelDB()
#
# Scan the netlist (file-scoped handle $FF) for "module $module" and
# collect its port declarations.  Returns a hash ref:
#   {design} - cell name plus the tran/cap/signal_level defaults
#   {pins}   - 'input'/'output'/'inout' => array refs of scalar ports
#   {bus}    - 'input'/'output'/'inout' => { "bus_<hi>_<lo>" => [names] }
#=====================================================================
sub createTopLevelDB
{
    my $find_top_module = 0;
    my %topLevelDB = () ;
    my %pins = () ;
    my %bus = () ;
    my @input_pins ;
    my @output_pins ;
    my @inout_pins ;
    my @bus_types ;
    my %input_bus = () ;
    my %output_bus = () ;
    my %inout_bus = () ;
    my %design = ();
    # Record the cell name and defaults consumed later by createDotLib.
    $design{'cell'} = $module;
    $design{'tran'} = $tran;
    $design{'cap'} = $cap;
    $design{'signal_level'} = $signal_level;
    # Outer loop: skip lines until the wanted module header is found.
    while(my $line = <$FF>) {
        last if($find_top_module == 1);
        if($line=~/module\s+$module/) {
            $find_top_module = 1 ;
            # Inner loop: consume the module body up to 'endmodule'.
            while(my $line = <$FF>) {
                # NOTE(review): "\s*//" is a string used as a pattern, so any
                # line containing "//" anywhere is skipped entirely; a port
                # declaration with a trailing comment would be lost - confirm.
                next if($line =~ "\s*//" );
                chomp($line);
                # ---- input ports ----
                if ($line =~/input\s+/ ) {
                    $line=~s/\s*input\s+//;
                    $line=~s/;//;
                    # Bussed declaration, e.g. "input [7:0] a, b;".
                    # NOTE(review): only literal numeric ranges match; a
                    # parameterized width like [WIDTH-1:0] is not handled.
                    if($line =~/\[(\d+):(\d+)\]/) {
                        my $bus_type = "bus_$1_$2";
                        $line=~s/\[(\d+):(\d+)\]//;
                        my @line =  split(/,/, $line);
                        # Remember each distinct width once for 'type' groups.
                        unless(grep {$_ eq $bus_type} @bus_types) { 
                            push(@bus_types,$bus_type);
                        }
                        foreach my $pin (@line) {
                            $pin=~s/\s+//;
                            push(@{$input_bus{$bus_type}}, $pin );
                        }
                    }
                    else {
                        # Scalar declaration: split a possible comma list.
                        my @line =  split(/,/, $line);
                        foreach my $pin (@line) {
                            $pin=~s/\s+//;
                            push(@input_pins, $pin);
                        }
                    }
                }
                # ---- output ports (same handling as inputs) ----
                if ($line =~/output\s+/ ) {
                    $line=~s/\s*output\s+//;
                    $line=~s/;//;
                    if($line =~/\[(\d+):(\d+)\]/) {
                        my $bus_type = "bus_$1_$2";
                        $line=~s/\[(\d+):(\d+)\]//;
                        my @line =  split(/,/, $line);
                        unless(grep {$_ eq $bus_type} @bus_types) { 
                            push(@bus_types,$bus_type);
                        }
                        foreach my $pin (@line) {
                            $pin=~s/\s+//;
                            push(@{$output_bus{$bus_type}}, $pin );
                        }
                    }
                    else {
                        my @line =  split(/,/, $line);
                        foreach my $pin (@line) {
                            $pin=~s/\s+//;
                            push(@output_pins, $pin);
                        }
                    }

                }
                # ---- inout ports (same handling as inputs) ----
                if ($line =~/inout\s+/ ) {
                    $line=~s/\s*inout\s+//;
                    $line=~s/;//;
                    if($line =~/\[(\d+):(\d+)\]/) {
                        my $bus_type = "bus_$1_$2";
                        $line=~s/\[(\d+):(\d+)\]//;
                        my @line =  split(/,/, $line);
                        unless(grep {$_ eq $bus_type} @bus_types) { 
                            push(@bus_types,$bus_type);
                        }
                        foreach my $pin (@line) {
                            $pin=~s/\s+//;
                            push(@{$inout_bus{$bus_type}}, $pin );
                        }
                    }
                    else {
                        my @line =  split(/,/, $line);
                        foreach my $pin (@line) {
                            $pin=~s/\s+//;
                            push(@inout_pins, $pin);
                        }
                    }

                }

                # Stop at the end of the module body.
                last if($line=~/endmodule/);
            }

        }
    }
    # Package everything into the top-level database and return it.
    $pins{'input'} = \@input_pins;
    $pins{'output'} = \@output_pins;
    $pins{'inout'} = \@inout_pins;
    $bus{'input'} = \%input_bus;
    $bus{'output'} = \%output_bus;
    $bus{'inout'} = \%inout_bus;
    $topLevelDB{'pins'} = \%pins;
    $topLevelDB{'bus'} = \%bus;
    $topLevelDB{'design'} = \%design;
    return \%topLevelDB;
}

Monday, 7 January 2013

What to expect at the VLSI Conf Pune 2013

The 26th International Conference on VLSI Design is currently going on at the Hyatt, in Pune (Jan 7th to 9th, 2013), and with a large number of keynotes, research paper presentations, panel discussions, workshops and other things being presented in 5 different tracks to 800+ delegates, it is full of activity for anyone interested in the field of VLSI design/EDA/Embedded systems.

Here are the details of the technical program:

  • New Research Ideas
    • Often 5-10 years from practical applications
    • 3 parallel tracks from 7th to 9th Jan
  • User/Designer Track
    • Novel ideas to and from practitioners (not researchers)
    • Can be used today to enhance your next design
    • One track on 7th and 8th Jan
  • Student-Oriented Talks/Workshops
  • Design Contest
  • Industry Forum
  • 9 keynotes: by people from LSI, IBM, Marvell, APM, Howard Hughes Medical Institute, Intel, Solarsis, Xilinx
  • Panels and Embedded Tutorials for people new to the field
  • 66 accepted talks, out of 310 submissions, in all areas of design/EDA/embedded systems

There are 23 companies who have set up exhibit stalls, and includes Intel, QLogic, Xilinx, TI, ARM, Agilent and a bunch of other companies, and includes many companies who have significant Pune presence.

 VLSI AND EMBEDDED SYSTEMS CONFERENCE PROGRAM

Day 1 7th January 2013 Inauguration, Technical Sessions, Exhibits, Student Conference
Day 2 8th January 2013 Valedictory/Award, Technical Sessions, Exhibits, Student Conference
Day 3 9th January 2013 Technical Sessions, Exhibits, RASDAT 2013
Day 4 10th January 2013 RASDAT 2013
Day T1 5th January 2013 Tutorials
Day T2 6th January 2013 Tutorials

VLSI AND EMBEDDED SYSTEMS CONFERENCE IMPORTANT DATES

Regular Paper Submissions

24th July, 2012 (Closed)

Tutorial Submissions

17th August, 2012 (Closed)

User/Designer Submissions

24th August, 2012 (Closed)

Call for embedded tutorials,

24th August, 2012 (Closed)

special sessions, and panels

Design Contest Submission

30th November, 2012 (Final deadline)

User Track Acceptance Notification

19th October, 2012 (Closed)

Camera ready paper due

15th October, 2012 (Closed)

Get free daily email updates!

Follow us!